gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa (PEP8 asserts)
import mock
import urlparse
from framework.celery_tasks import handlers
from website.files.models.osfstorage import OsfStorageFile
from website.preprints.tasks import format_preprint
from website.util import permissions
from framework.auth import Auth
from framework.exceptions import PermissionsError
from website import settings
from osf.models import NodeLog, Subject
from tests.base import OsfTestCase
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
PreprintFactory,
PreprintProviderFactory,
SubjectFactory
)
from tests.utils import assert_logs
from api_tests import utils as api_test_utils
from website.project.views.contributor import find_preprint_provider
class TestPreprintFactory(OsfTestCase):
    """Sanity checks on the PreprintFactory fixture itself."""

    def setUp(self):
        super(TestPreprintFactory, self).setUp()
        self.user = AuthUserFactory()
        self.auth = Auth(user=self.user)
        self.preprint = PreprintFactory(creator=self.user)
        self.preprint.save()

    def test_is_preprint(self):
        # The factory should leave the backing node flagged as a preprint.
        assert_true(self.preprint.node.is_preprint)

    def test_preprint_is_public(self):
        # Factory-created preprints are public by default.
        assert_true(self.preprint.node.is_public)
class TestSetPreprintFile(OsfTestCase):
    """Covers attaching a primary file to a preprint and publishing it."""

    def setUp(self):
        super(TestSetPreprintFile, self).setUp()
        # Admin/creator plus a write-only contributor for permission checks.
        self.user = AuthUserFactory()
        self.auth = Auth(user=self.user)
        self.read_write_user = AuthUserFactory()
        self.read_write_user_auth = Auth(user=self.read_write_user)
        self.project = ProjectFactory(creator=self.user)
        # Two stored files so tests can swap the primary file.
        self.file = OsfStorageFile.create(
            node=self.project,
            path='/panda.txt',
            name='panda.txt',
            materialized_path='/panda.txt')
        self.file.save()
        self.file_two = OsfStorageFile.create(
            node=self.project,
            path='/pandapanda.txt',
            name='pandapanda.txt',
            materialized_path='/pandapanda.txt')
        self.file_two.save()
        self.project.add_contributor(self.read_write_user, permissions=[permissions.WRITE])
        self.project.save()
        # finish=False leaves the preprint without a file and unpublished.
        self.preprint = PreprintFactory(project=self.project, finish=False)

    @assert_logs(NodeLog.MADE_PUBLIC, 'project')
    @assert_logs(NodeLog.PREPRINT_INITIATED, 'project', -2)
    def test_is_preprint_property_new_file_to_published(self):
        # The node only becomes a preprint once the preprint is published.
        assert_false(self.project.is_preprint)
        self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
        self.project.reload()
        assert_false(self.project.is_preprint)
        # Publishing fails until provider and subjects are set.
        with assert_raises(ValueError):
            self.preprint.set_published(True, auth=self.auth, save=True)
        self.preprint.provider = PreprintProviderFactory()
        self.preprint.set_subjects([[SubjectFactory()._id]], auth=self.auth, save=True)
        self.project.reload()
        assert_false(self.project.is_preprint)
        self.preprint.set_published(True, auth=self.auth, save=True)
        self.project.reload()
        assert_true(self.project.is_preprint)

    def test_project_made_public(self):
        # Publishing the preprint is what flips the node to public.
        assert_false(self.project.is_public)
        self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
        assert_false(self.project.is_public)
        with assert_raises(ValueError):
            self.preprint.set_published(True, auth=self.auth, save=True)
        self.preprint.provider = PreprintProviderFactory()
        self.preprint.set_subjects([[SubjectFactory()._id]], auth=self.auth, save=True)
        self.project.reload()
        assert_false(self.project.is_public)
        self.preprint.set_published(True, auth=self.auth, save=True)
        self.project.reload()
        assert_true(self.project.is_public)

    def test_add_primary_file(self):
        self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
        assert_equal(self.project.preprint_file, self.file)
        assert_equal(type(self.project.preprint_file), type(self.file))

    @assert_logs(NodeLog.PREPRINT_FILE_UPDATED, 'project')
    def test_change_primary_file(self):
        self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
        assert_equal(self.project.preprint_file, self.file)
        self.preprint.set_primary_file(self.file_two, auth=self.auth, save=True)
        assert_equal(self.project.preprint_file._id, self.file_two._id)

    def test_add_invalid_file(self):
        # A plain string is not a file object.
        with assert_raises(AttributeError):
            self.preprint.set_primary_file('inatlanta', auth=self.auth, save=True)

    def test_preprint_created_date(self):
        self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
        assert_equal(self.project.preprint_file._id, self.file._id)
        # The preprint keeps its own creation date, distinct from the node's.
        assert(self.preprint.date_created)
        assert_not_equal(self.project.date_created, self.preprint.date_created)

    def test_non_admin_update_file(self):
        self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
        assert_equal(self.project.preprint_file._id, self.file._id)
        # Write-level contributors may not change the primary file.
        with assert_raises(PermissionsError):
            self.preprint.set_primary_file(self.file_two, auth=self.read_write_user_auth, save=True)
        assert_equal(self.project.preprint_file._id, self.file._id)
class TestPreprintServicePermissions(OsfTestCase):
    """Only admins may set subjects, change the primary file, or publish."""

    def setUp(self):
        super(TestPreprintServicePermissions, self).setUp()
        self.user = AuthUserFactory()
        self.write_contrib = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(self.write_contrib, permissions=[permissions.WRITE])
        self.preprint = PreprintFactory(project=self.project, is_published=False)

    def test_nonadmin_cannot_set_subjects(self):
        initial_subjects = list(self.preprint.subjects.all())
        with assert_raises(PermissionsError):
            self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.write_contrib), save=True)
        self.preprint.reload()
        # Subjects unchanged after the rejected attempt.
        assert_equal(initial_subjects, list(self.preprint.subjects.all()))

    def test_nonadmin_cannot_set_file(self):
        initial_file = self.preprint.primary_file
        file = OsfStorageFile.create(
            node=self.project,
            path='/panda.txt',
            name='panda.txt',
            materialized_path='/panda.txt')
        file.save()
        with assert_raises(PermissionsError):
            self.preprint.set_primary_file(file, auth=Auth(self.write_contrib), save=True)
        self.preprint.reload()
        self.preprint.node.reload()
        assert_equal(initial_file._id, self.preprint.primary_file._id)

    def test_nonadmin_cannot_publish(self):
        assert_false(self.preprint.is_published)
        with assert_raises(PermissionsError):
            self.preprint.set_published(True, auth=Auth(self.write_contrib), save=True)
        assert_false(self.preprint.is_published)

    def test_admin_can_set_subjects(self):
        initial_subjects = list(self.preprint.subjects.all())
        self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.user), save=True)
        self.preprint.reload()
        assert_not_equal(initial_subjects, list(self.preprint.subjects.all()))

    def test_admin_can_set_file(self):
        initial_file = self.preprint.primary_file
        file = OsfStorageFile.create(
            node=self.project,
            path='/panda.txt',
            name='panda.txt',
            materialized_path='/panda.txt')
        file.save()
        self.preprint.set_primary_file(file, auth=Auth(self.user), save=True)
        self.preprint.reload()
        self.preprint.node.reload()
        assert_not_equal(initial_file._id, self.preprint.primary_file._id)
        assert_equal(file._id, self.preprint.primary_file._id)

    def test_admin_can_publish(self):
        assert_false(self.preprint.is_published)
        self.preprint.set_published(True, auth=Auth(self.user), save=True)
        assert_true(self.preprint.is_published)

    def test_admin_cannot_unpublish(self):
        assert_false(self.preprint.is_published)
        self.preprint.set_published(True, auth=Auth(self.user), save=True)
        assert_true(self.preprint.is_published)
        with assert_raises(ValueError) as e:
            self.preprint.set_published(False, auth=Auth(self.user), save=True)
        # str(e.exception) is portable; the old ``e.exception.message``
        # attribute is Python-2-only and deprecated since 2.6.
        assert_in('Cannot unpublish', str(e.exception))
class TestPreprintProvider(OsfTestCase):
    """Provider assignment/removal and subject-hierarchy queries."""

    def setUp(self):
        super(TestPreprintProvider, self).setUp()
        self.preprint = PreprintFactory(provider=None, is_published=False)
        self.provider = PreprintProviderFactory(name='WWEArxiv')

    def _build_subject_tree(self):
        """Create two subject trees for self.provider plus a decoy on another provider."""
        a = SubjectFactory(provider=self.provider, text='A')
        b = SubjectFactory(provider=self.provider, text='B')
        aa = SubjectFactory(provider=self.provider, text='AA', parent=a)
        ab = SubjectFactory(provider=self.provider, text='AB', parent=a)
        ba = SubjectFactory(provider=self.provider, text='BA', parent=b)
        bb = SubjectFactory(provider=self.provider, text='BB', parent=b)
        aaa = SubjectFactory(provider=self.provider, text='AAA', parent=aa)
        # A subject owned by an unrelated provider must never leak into results.
        SubjectFactory(provider=PreprintProviderFactory(name='asdfArxiv'))
        return a, b, aa, ab, ba, bb, aaa

    def test_add_provider(self):
        assert_not_equal(self.preprint.provider, self.provider)
        self.preprint.provider = self.provider
        self.preprint.save()
        self.preprint.reload()
        assert_equal(self.preprint.provider, self.provider)

    def test_remove_provider(self):
        self.preprint.provider = None
        self.preprint.save()
        self.preprint.reload()
        assert_equal(self.preprint.provider, None)

    def test_find_provider(self):
        self.preprint.provider = self.provider
        self.preprint.save()
        self.preprint.reload()
        assert ('branded', 'WWEArxiv') == find_preprint_provider(self.preprint.node)

    def test_top_level_subjects(self):
        a, b, _, _, _, _, _ = self._build_subject_tree()
        assert set(self.provider.top_level_subjects) == {a, b}

    def test_all_subjects(self):
        a, b, aa, ab, ba, bb, aaa = self._build_subject_tree()
        assert set(self.provider.all_subjects) == {a, b, aa, ab, ba, bb, aaa}
class TestOnPreprintUpdatedTask(OsfTestCase):
    """Tests for format_preprint, which builds the SHARE graph for a preprint."""

    def setUp(self):
        super(TestOnPreprintUpdatedTask, self).setUp()
        self.user = AuthUserFactory()
        if len(self.user.fullname.split(' ')) > 2:
            # Prevent unexpected keys ('suffix', 'additional_name')
            self.user.fullname = 'David Davidson'
            self.user.middle_names = ''
            self.user.suffix = ''
            self.user.save()
        self.auth = Auth(user=self.user)
        self.preprint = PreprintFactory()
        self.preprint.node.add_tag('preprint', self.auth, save=False)
        self.preprint.node.add_tag('spoderman', self.auth, save=False)
        self.preprint.node.add_unregistered_contributor('BoJack Horseman', 'horse@man.org', Auth(self.preprint.node.creator))
        # Invisible contributor: appears as 'contributor', not 'creator', in SHARE.
        self.preprint.node.add_contributor(self.user, visible=False)
        self.preprint.node.save()
        # NOTE(review): 'ZZYZ' presumably forces the creator to sort last when
        # people are ordered by given_name in the tests below — confirm.
        self.preprint.node.creator.given_name = u'ZZYZ'
        if len(self.preprint.node.creator.fullname.split(' ')) > 2:
            # Prevent unexpected keys ('suffix', 'additional_name')
            self.preprint.node.creator.fullname = 'David Davidson'
            self.preprint.node.creator.middle_names = ''
            self.preprint.node.creator.suffix = ''
            self.preprint.node.creator.save()
        self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.preprint.node.creator), save=False)

    def tearDown(self):
        # Flush celery task state queued during the test before normal teardown.
        handlers.celery_before_request()
        super(TestOnPreprintUpdatedTask, self).tearDown()
    def test_format_preprint(self):
        """The SHARE graph for a published preprint contains exactly the expected nodes."""
        res = format_preprint(self.preprint)
        assert set(gn['@type'] for gn in res) == {'creator', 'contributor', 'throughsubjects', 'subject', 'throughtags', 'tag', 'workidentifier', 'agentidentifier', 'person', 'preprint', 'workrelation', 'creativework'}
        # Each assertion below pops the nodes it covers; the final
        # ``nodes == {}`` proves nothing unexpected was emitted.
        # (Python 2: dict.items() returns a list, so popping while iterating is safe.)
        nodes = dict(enumerate(res))
        preprint = nodes.pop(next(k for k, v in nodes.items() if v['@type'] == 'preprint'))
        assert preprint['title'] == self.preprint.node.title
        assert preprint['description'] == self.preprint.node.description
        assert preprint['is_deleted'] == (not self.preprint.is_published or not self.preprint.node.is_public or self.preprint.node.is_preprint_orphan)
        assert preprint['date_updated'] == self.preprint.date_modified.isoformat()
        assert preprint['date_published'] == self.preprint.date_published.isoformat()
        # Tags and their through-records must pair up one-to-one.
        tags = [nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'tag']
        through_tags = [nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'throughtags']
        assert sorted(tag['@id'] for tag in tags) == sorted(tt['tag']['@id'] for tt in through_tags)
        assert sorted(tag['name'] for tag in tags) == ['preprint', 'spoderman']
        # Same pairing for subjects.
        subjects = [nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'subject']
        through_subjects = [nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'throughsubjects']
        assert sorted(subject['@id'] for subject in subjects) == sorted(tt['subject']['@id'] for tt in through_subjects)
        assert sorted(subject['name'] for subject in subjects) == [s.bepress_text for h in self.preprint.subject_hierarchy for s in h]
        people = sorted([nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'person'], key=lambda x: x['given_name'])
        expected_people = sorted([{
            '@type': 'person',
            'given_name': u'BoJack',
            'family_name': u'Horseman',
        }, {
            '@type': 'person',
            'given_name': self.user.given_name,
            'family_name': self.user.family_name,
        }, {
            '@type': 'person',
            'given_name': self.preprint.node.creator.given_name,
            'family_name': self.preprint.node.creator.family_name,
        }], key=lambda x: x['given_name'])
        # @ids are generated, so copy them across before comparing whole dicts.
        for i, p in enumerate(expected_people):
            expected_people[i]['@id'] = people[i]['@id']
        assert people == expected_people
        # Visible contributors become 'creator' nodes, ordered by citation order.
        creators = sorted([nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'creator'], key=lambda x: x['order_cited'])
        assert creators == [{
            '@id': creators[0]['@id'],
            '@type': 'creator',
            'order_cited': 0,
            'cited_as': u'{}'.format(self.preprint.node.creator.fullname),
            'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.preprint.node.creator.given_name][0], '@type': 'person'},
            'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
        }, {
            '@id': creators[1]['@id'],
            '@type': 'creator',
            'order_cited': 1,
            'cited_as': u'BoJack Horseman',
            'agent': {'@id': [p['@id'] for p in people if p['given_name'] == u'BoJack'][0], '@type': 'person'},
            'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
        }]
        # The invisible contributor shows up as a plain 'contributor'.
        contributors = [nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'contributor']
        assert contributors == [{
            '@id': contributors[0]['@id'],
            '@type': 'contributor',
            'cited_as': u'{}'.format(self.user.fullname),
            'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.user.given_name][0], '@type': 'person'},
            'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
        }]
        # Agent identifiers: mailto + profile image for both users, plus
        # profile URLs for registered contributors.
        agentidentifiers = {nodes.pop(k)['uri'] for k, v in nodes.items() if v['@type'] == 'agentidentifier'}
        assert agentidentifiers == set([
            'mailto:' + self.user.username,
            'mailto:' + self.preprint.node.creator.username,
            self.user.profile_image_url(),
            self.preprint.node.creator.profile_image_url(),
        ]) | set(urlparse.urljoin(settings.DOMAIN, user.profile_url) for user in self.preprint.node.contributors if user.is_registered)
        related_work = next(nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'creativework')
        assert set(related_work.keys()) == {'@id', '@type'}  # Empty except @id and @type
        doi = next(nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'workidentifier' and 'doi' in v['uri'])
        assert doi['creative_work'] == related_work
        workidentifiers = [nodes.pop(k)['uri'] for k, v in nodes.items() if v['@type'] == 'workidentifier']
        assert workidentifiers == [urlparse.urljoin(settings.DOMAIN, self.preprint._id + '/')]
        # Exactly one node remains: the work relation (Python 2: keys() is a list).
        relation = nodes.pop(nodes.keys()[0])
        assert relation == {'@id': relation['@id'], '@type': 'workrelation', 'related': {'@id': related_work['@id'], '@type': related_work['@type']}, 'subject': {'@id': preprint['@id'], '@type': preprint['@type']}}
        assert nodes == {}
    def test_format_preprint_nones(self):
        """With no tags, no publish date, no DOI, and no subjects, the graph
        shrinks to people + identifiers + the preprint itself."""
        self.preprint.node.tags = []
        self.preprint.date_published = None
        self.preprint.node.preprint_article_doi = None
        self.preprint.set_subjects([], auth=Auth(self.preprint.node.creator), save=False)
        res = format_preprint(self.preprint)
        assert self.preprint.provider != 'osf'
        assert set(gn['@type'] for gn in res) == {'creator', 'contributor', 'workidentifier', 'agentidentifier', 'person', 'preprint'}
        # Pop-as-you-assert pattern; final ``nodes == {}`` catches extras.
        # (Python 2: dict.items() returns a list, so popping while iterating is safe.)
        nodes = dict(enumerate(res))
        preprint = nodes.pop(next(k for k, v in nodes.items() if v['@type'] == 'preprint'))
        assert preprint['title'] == self.preprint.node.title
        assert preprint['description'] == self.preprint.node.description
        assert preprint['is_deleted'] == (not self.preprint.is_published or not self.preprint.node.is_public or self.preprint.node.is_preprint_orphan)
        assert preprint['date_updated'] == self.preprint.date_modified.isoformat()
        assert preprint.get('date_published') is None
        people = sorted([nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'person'], key=lambda x: x['given_name'])
        expected_people = sorted([{
            '@type': 'person',
            'given_name': u'BoJack',
            'family_name': u'Horseman',
        }, {
            '@type': 'person',
            'given_name': self.user.given_name,
            'family_name': self.user.family_name,
        }, {
            '@type': 'person',
            'given_name': self.preprint.node.creator.given_name,
            'family_name': self.preprint.node.creator.family_name,
        }], key=lambda x: x['given_name'])
        # @ids are generated, so copy them across before comparing whole dicts.
        for i, p in enumerate(expected_people):
            expected_people[i]['@id'] = people[i]['@id']
        assert people == expected_people
        creators = sorted([nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'creator'], key=lambda x: x['order_cited'])
        assert creators == [{
            '@id': creators[0]['@id'],
            '@type': 'creator',
            'order_cited': 0,
            'cited_as': self.preprint.node.creator.fullname,
            'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.preprint.node.creator.given_name][0], '@type': 'person'},
            'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
        }, {
            '@id': creators[1]['@id'],
            '@type': 'creator',
            'order_cited': 1,
            'cited_as': u'BoJack Horseman',
            'agent': {'@id': [p['@id'] for p in people if p['given_name'] == u'BoJack'][0], '@type': 'person'},
            'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
        }]
        contributors = [nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'contributor']
        assert contributors == [{
            '@id': contributors[0]['@id'],
            '@type': 'contributor',
            'cited_as': self.user.fullname,
            'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.user.given_name][0], '@type': 'person'},
            'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
        }]
        agentidentifiers = {nodes.pop(k)['uri'] for k, v in nodes.items() if v['@type'] == 'agentidentifier'}
        assert agentidentifiers == set([
            'mailto:' + self.user.username,
            'mailto:' + self.preprint.node.creator.username,
            self.user.profile_image_url(),
            self.preprint.node.creator.profile_image_url(),
        ]) | set(urlparse.urljoin(settings.DOMAIN, user.profile_url) for user in self.preprint.node.contributors if user.is_registered)
        workidentifiers = {nodes.pop(k)['uri'] for k, v in nodes.items() if v['@type'] == 'workidentifier'}
        # URLs should *always* be osf.io/guid/
        assert workidentifiers == set([urlparse.urljoin(settings.DOMAIN, self.preprint._id) + '/'])
        assert nodes == {}
def test_format_preprint_is_deleted(self):
CASES = {
'is_published': (True, False),
'is_published': (False, True),
'node.is_public': (True, False),
'node.is_public': (False, True),
'node._is_preprint_orphan': (True, True),
'node._is_preprint_orphan': (False, False),
'node.is_deleted': (True, True),
'node.is_deleted': (False, False),
}
for key, (value, is_deleted) in CASES.items():
target = self.preprint
for k in key.split('.')[:-1]:
if k:
target = getattr(target, k)
orig_val = getattr(target, key.split('.')[-1])
setattr(target, key.split('.')[-1], value)
res = format_preprint(self.preprint)
preprint = next(v for v in res if v['@type'] == 'preprint')
assert preprint['is_deleted'] is is_deleted
setattr(target, key.split('.')[-1], orig_val)
    def test_format_preprint_is_deleted_true_if_qatest_tag_is_added(self):
        # Tagging the node 'qatest' flips is_deleted in the SHARE output.
        res = format_preprint(self.preprint)
        preprint = next(v for v in res if v['@type'] == 'preprint')
        assert preprint['is_deleted'] is False
        self.preprint.node.add_tag('qatest', auth=self.auth, save=True)
        res = format_preprint(self.preprint)
        preprint = next(v for v in res if v['@type'] == 'preprint')
        assert preprint['is_deleted'] is True
class TestPreprintSaveShareHook(OsfTestCase):
    """Verify which preprint saves enqueue the ``on_preprint_updated`` SHARE task.

    NOTE: the original class defined ``test_save_published_called`` twice with
    identical bodies; the second definition shadowed the first, so only one
    ever ran. The duplicate has been removed.
    """

    def setUp(self):
        super(TestPreprintSaveShareHook, self).setUp()
        self.admin = AuthUserFactory()
        self.auth = Auth(user=self.admin)
        self.provider = PreprintProviderFactory(name='Lars Larson Snowmobiling Experience')
        self.project = ProjectFactory(creator=self.admin, is_public=True)
        self.subject = SubjectFactory()
        self.subject_two = SubjectFactory()
        self.file = api_test_utils.create_test_file(self.project, self.admin, 'second_place.pdf')
        self.preprint = PreprintFactory(creator=self.admin, filename='second_place.pdf', provider=self.provider, subjects=[[self.subject._id]], project=self.project, is_published=False)

    @mock.patch('website.preprints.tasks.on_preprint_updated.s')
    def test_save_unpublished_not_called(self, mock_on_preprint_updated):
        # Saving an unpublished preprint must not notify SHARE.
        self.preprint.save()
        assert not mock_on_preprint_updated.called

    @mock.patch('website.preprints.tasks.on_preprint_updated.s')
    def test_save_published_called(self, mock_on_preprint_updated):
        self.preprint.set_published(True, auth=self.auth, save=True)
        assert mock_on_preprint_updated.called

    # This covers an edge case where a preprint is forced back to unpublished
    # that it sends the information back to share
    @mock.patch('website.preprints.tasks.on_preprint_updated.s')
    def test_save_unpublished_called_forced(self, mock_on_preprint_updated):
        self.preprint.set_published(True, auth=self.auth, save=True)
        self.preprint.published = False
        self.preprint.save(**{'force_update': True})
        assert_equal(mock_on_preprint_updated.call_count, 2)

    @mock.patch('website.preprints.tasks.on_preprint_updated.s')
    def test_save_published_subject_change_called(self, mock_on_preprint_updated):
        # Subject changes on a published preprint re-notify SHARE.
        self.preprint.is_published = True
        self.preprint.set_subjects([[self.subject_two._id]], auth=self.auth, save=True)
        assert mock_on_preprint_updated.called

    @mock.patch('website.preprints.tasks.on_preprint_updated.s')
    def test_save_unpublished_subject_change_not_called(self, mock_on_preprint_updated):
        self.preprint.set_subjects([[self.subject_two._id]], auth=self.auth, save=True)
        assert not mock_on_preprint_updated.called
|
|
import datetime
import uuid
from lxml import etree
import webtest
from keystone import auth
from keystone.common import serializer
from keystone.common.sql import util as sql_util
from keystone import config
from keystone.openstack.common import timeutils
from keystone.policy.backends import rules
from keystone import test
import test_content_types
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
class RestfulTestCase(test_content_types.RestfulTestCase):
    def setUp(self, load_sample_data=True):
        """Setup for v3 Restful Test Cases.

        If a child class wants to create their own sample data
        and provide their own auth data to obtain tokens, then
        load_sample_data should be set to false.
        """
        # Layer test override configs on top of the sample config.
        self.config([
            test.etcdir('keystone.conf.sample'),
            test.testsdir('test_overrides.conf'),
            test.testsdir('backend_sql.conf'),
            test.testsdir('backend_sql_disk.conf')])
        sql_util.setup_test_database()
        self.load_backends()
        # Separate WSGI apps for the public and admin pipelines.
        self.public_app = webtest.TestApp(
            self.loadapp('keystone', name='main'))
        self.admin_app = webtest.TestApp(
            self.loadapp('keystone', name='admin'))
        if load_sample_data:
            # A fresh domain with one project and one user...
            self.domain_id = uuid.uuid4().hex
            self.domain = self.new_domain_ref()
            self.domain['id'] = self.domain_id
            self.identity_api.create_domain(self.domain_id, self.domain)
            self.project_id = uuid.uuid4().hex
            self.project = self.new_project_ref(
                domain_id=self.domain_id)
            self.project['id'] = self.project_id
            self.identity_api.create_project(self.project_id, self.project)
            self.user_id = uuid.uuid4().hex
            self.user = self.new_user_ref(
                domain_id=self.domain_id,
                project_id=self.project_id)
            self.user['id'] = self.user_id
            self.identity_api.create_user(self.user_id, self.user)
            # ...plus a project and user in the default domain.
            self.default_domain_project_id = uuid.uuid4().hex
            self.default_domain_project = self.new_project_ref(
                domain_id=DEFAULT_DOMAIN_ID)
            self.default_domain_project['id'] = self.default_domain_project_id
            self.identity_api.create_project(self.default_domain_project_id,
                                             self.default_domain_project)
            self.default_domain_user_id = uuid.uuid4().hex
            self.default_domain_user = self.new_user_ref(
                domain_id=DEFAULT_DOMAIN_ID,
                project_id=self.default_domain_project_id)
            self.default_domain_user['id'] = self.default_domain_user_id
            self.identity_api.create_user(self.default_domain_user_id,
                                          self.default_domain_user)
            # create & grant policy.json's default role for admin_required
            self.role_id = uuid.uuid4().hex
            self.role = self.new_role_ref()
            self.role['id'] = self.role_id
            self.role['name'] = 'admin'
            self.identity_api.create_role(self.role_id, self.role)
            self.identity_api.add_role_to_user_and_project(
                self.user_id, self.project_id, self.role_id)
            self.identity_api.add_role_to_user_and_project(
                self.default_domain_user_id, self.default_domain_project_id,
                self.role_id)
            self.identity_api.add_role_to_user_and_project(
                self.default_domain_user_id, self.project_id,
                self.role_id)
        # Live servers, killed again in tearDown.
        self.public_server = self.serveapp('keystone', name='main')
        self.admin_server = self.serveapp('keystone', name='admin')
    def tearDown(self):
        # Stop live servers first, then tear down the database.
        self.public_server.kill()
        self.admin_server.kill()
        self.public_server = None
        self.admin_server = None
        sql_util.teardown_test_database()
        # need to reset the plug-ins
        auth.controllers.AUTH_METHODS = {}
        # drop the policy rules
        CONF.reset()
        rules.reset()
def new_ref(self):
"""Populates a ref with attributes common to all API entities."""
return {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
def new_service_ref(self):
ref = self.new_ref()
ref['type'] = uuid.uuid4().hex
return ref
def new_endpoint_ref(self, service_id):
ref = self.new_ref()
ref['interface'] = uuid.uuid4().hex[:8]
ref['service_id'] = service_id
ref['url'] = uuid.uuid4().hex
ref['region'] = uuid.uuid4().hex
return ref
def new_domain_ref(self):
ref = self.new_ref()
return ref
def new_project_ref(self, domain_id):
ref = self.new_ref()
ref['domain_id'] = domain_id
return ref
def new_user_ref(self, domain_id, project_id=None):
ref = self.new_ref()
ref['domain_id'] = domain_id
ref['email'] = uuid.uuid4().hex
ref['password'] = uuid.uuid4().hex
if project_id:
ref['project_id'] = project_id
return ref
def new_group_ref(self, domain_id):
ref = self.new_ref()
ref['domain_id'] = domain_id
return ref
def new_credential_ref(self, user_id, project_id=None):
ref = self.new_ref()
ref['user_id'] = user_id
ref['blob'] = uuid.uuid4().hex
ref['type'] = uuid.uuid4().hex
if project_id:
ref['project_id'] = project_id
return ref
def new_role_ref(self):
ref = self.new_ref()
return ref
def new_policy_ref(self):
ref = self.new_ref()
ref['blob'] = uuid.uuid4().hex
ref['type'] = uuid.uuid4().hex
return ref
def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None):
ref = self.new_ref()
ref['trustor_user_id'] = trustor_user_id
ref['trustee_user_id'] = trustee_user_id
ref['impersonation'] = impersonation or False
ref['project_id'] = project_id
if isinstance(expires, basestring):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = timeutils.strtime(
timeutils.utcnow() + datetime.timedelta(**expires),
fmt=TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
return ref
def admin_request(self, *args, **kwargs):
"""Translates XML responses to dicts.
This implies that we only have to write assertions for JSON.
"""
r = super(RestfulTestCase, self).admin_request(*args, **kwargs)
if r.headers.get('Content-Type') == 'application/xml':
r.result = serializer.from_xml(etree.tostring(r.result))
return r
def get_scoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
},
'scope': {
'project': {
'id': self.project['id'],
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_requested_token(self, auth):
"""Request the specific token we want."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body=auth)
return r.headers.get('X-Subject-Token')
def v3_request(self, path, **kwargs):
# Check if the caller has passed in auth details for
# use in requesting the token
auth = kwargs.pop('auth', None)
if auth:
token = self.get_requested_token(auth)
else:
token = kwargs.pop('token', None)
if not token:
token = self.get_scoped_token()
path = '/v3' + path
return self.admin_request(path=path, token=token, **kwargs)
def get(self, path, **kwargs):
r = self.v3_request(method='GET', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 200)
return r
def head(self, path, **kwargs):
r = self.v3_request(method='HEAD', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def post(self, path, **kwargs):
r = self.v3_request(method='POST', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 201)
return r
def put(self, path, **kwargs):
r = self.v3_request(method='PUT', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def patch(self, path, **kwargs):
r = self.v3_request(method='PATCH', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 200)
return r
def delete(self, path, **kwargs):
r = self.v3_request(method='DELETE', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def assertValidErrorResponse(self, r):
if r.headers.get('Content-Type') == 'application/xml':
resp = serializer.from_xml(etree.tostring(r.result))
else:
resp = r.result
self.assertIsNotNone(resp.get('error'))
self.assertIsNotNone(resp['error'].get('code'))
self.assertIsNotNone(resp['error'].get('title'))
self.assertIsNotNone(resp['error'].get('message'))
self.assertEqual(int(resp['error']['code']), r.status_code)
def assertValidListLinks(self, links):
self.assertIsNotNone(links)
self.assertIsNotNone(links.get('self'))
self.assertIn(CONF.public_endpoint % CONF, links['self'])
self.assertIn('next', links)
if links['next'] is not None:
self.assertIn(
CONF.public_endpoint % CONF,
links['next'])
self.assertIn('previous', links)
if links['previous'] is not None:
self.assertIn(
CONF.public_endpoint % CONF,
links['previous'])
def assertValidListResponse(self, resp, key, entity_validator, ref=None,
expected_length=None):
"""Make assertions common to all API list responses.
If a reference is provided, it's ID will be searched for in the
response, and asserted to be equal.
"""
entities = resp.result.get(key)
self.assertIsNotNone(entities)
if expected_length is not None:
self.assertEqual(len(entities), expected_length)
elif ref is not None:
# we're at least expecting the ref
self.assertNotEmpty(entities)
# collections should have relational links
self.assertValidListLinks(resp.result.get('links'))
for entity in entities:
self.assertIsNotNone(entity)
self.assertValidEntity(entity)
entity_validator(entity)
if ref:
entity = [x for x in entities if x['id'] == ref['id']][0]
self.assertValidEntity(entity, ref)
entity_validator(entity, ref)
return entities
def assertValidResponse(self, resp, key, entity_validator, *args,
**kwargs):
"""Make assertions common to all API responses."""
entity = resp.result.get(key)
self.assertIsNotNone(entity)
self.assertValidEntity(entity, *args, **kwargs)
entity_validator(entity, *args, **kwargs)
return entity
def assertValidEntity(self, entity, ref=None):
"""Make assertions common to all API entities.
If a reference is provided, the entity will also be compared against
the reference.
"""
keys = ['name', 'description', 'enabled']
for k in ['id'] + keys:
msg = '%s unexpectedly None in %s' % (k, entity)
self.assertIsNotNone(entity.get(k), msg)
self.assertIsNotNone(entity.get('links'))
self.assertIsNotNone(entity['links'].get('self'))
self.assertIn(CONF.public_endpoint % CONF, entity['links']['self'])
self.assertIn(entity['id'], entity['links']['self'])
if ref:
for k in keys:
msg = '%s not equal: %s != %s' % (k, ref[k], entity[k])
self.assertEquals(ref[k], entity[k])
return entity
# auth validation
def assertValidISO8601ExtendedFormatDatetime(self, dt):
try:
return timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
except Exception:
msg = '%s is not a valid ISO 8601 extended format date time.' % dt
raise AssertionError(msg)
self.assertTrue(isinstance(dt, datetime.datetime))
def assertValidTokenResponse(self, r, user=None):
self.assertTrue(r.headers.get('X-Subject-Token'))
token = r.result['token']
self.assertIsNotNone(token.get('expires_at'))
expires_at = self.assertValidISO8601ExtendedFormatDatetime(
token['expires_at'])
self.assertIsNotNone(token.get('issued_at'))
issued_at = self.assertValidISO8601ExtendedFormatDatetime(
token['issued_at'])
self.assertTrue(issued_at < expires_at)
self.assertIn('user', token)
self.assertIn('id', token['user'])
self.assertIn('name', token['user'])
self.assertIn('domain', token['user'])
self.assertIn('id', token['user']['domain'])
if user is not None:
self.assertEqual(user['id'], token['user']['id'])
self.assertEqual(user['name'], token['user']['name'])
self.assertEqual(user['domain_id'], token['user']['domain']['id'])
return token
def assertValidUnscopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidTokenResponse(r, *args, **kwargs)
self.assertNotIn('roles', token)
self.assertNotIn('catalog', token)
self.assertNotIn('project', token)
self.assertNotIn('domain', token)
return token
def assertValidScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidTokenResponse(r, *args, **kwargs)
self.assertIn('catalog', token)
self.assertIn('roles', token)
self.assertTrue(token['roles'])
for role in token['roles']:
self.assertIn('id', role)
self.assertIn('name', role)
return token
def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('project', token)
self.assertIn('id', token['project'])
self.assertIn('name', token['project'])
self.assertIn('domain', token['project'])
self.assertIn('id', token['project']['domain'])
self.assertIn('name', token['project']['domain'])
self.assertEqual(self.role_id, token['roles'][0]['id'])
return token
def assertValidProjectTrustScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidProjectScopedTokenResponse(r, *args, **kwargs)
trust = token.get('OS-TRUST:trust')
self.assertIsNotNone(trust)
self.assertIsNotNone(trust.get('id'))
self.assertTrue(isinstance(trust.get('impersonation'), bool))
self.assertIsNotNone(trust.get('trustor_user'))
self.assertIsNotNone(trust.get('trustee_user'))
self.assertIsNotNone(trust['trustor_user'].get('id'))
self.assertIsNotNone(trust['trustee_user'].get('id'))
def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('domain', token)
self.assertIn('id', token['domain'])
self.assertIn('name', token['domain'])
return token
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
del token['token']['expires_at']
del token['token']['issued_at']
return token
a_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['expires_at'])
b_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['expires_at'])
self.assertCloseEnoughForGovernmentWork(a_expires_at, b_expires_at)
a_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['issued_at'])
b_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['issued_at'])
self.assertCloseEnoughForGovernmentWork(a_issued_at, b_issued_at)
return self.assertDictEqual(normalize(a), normalize(b))
# service validation
def assertValidServiceListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'services',
self.assertValidService,
*args,
**kwargs)
def assertValidServiceResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'service',
self.assertValidService,
*args,
**kwargs)
def assertValidService(self, entity, ref=None):
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['type'], entity['type'])
return entity
# endpoint validation
def assertValidEndpointListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'endpoints',
self.assertValidEndpoint,
*args,
**kwargs)
def assertValidEndpointResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'endpoint',
self.assertValidEndpoint,
*args,
**kwargs)
def assertValidEndpoint(self, entity, ref=None):
self.assertIsNotNone(entity.get('interface'))
self.assertIsNotNone(entity.get('service_id'))
# this is intended to be an unexposed implementation detail
self.assertNotIn('legacy_endpoint_id', entity)
if ref:
self.assertEqual(ref['interface'], entity['interface'])
self.assertEqual(ref['service_id'], entity['service_id'])
return entity
# domain validation
def assertValidDomainListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'domains',
self.assertValidDomain,
*args,
**kwargs)
def assertValidDomainResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'domain',
self.assertValidDomain,
*args,
**kwargs)
def assertValidDomain(self, entity, ref=None):
if ref:
pass
return entity
# project validation
def assertValidProjectListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'projects',
self.assertValidProject,
*args,
**kwargs)
def assertValidProjectResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'project',
self.assertValidProject,
*args,
**kwargs)
def assertValidProject(self, entity, ref=None):
self.assertIsNotNone(entity.get('domain_id'))
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
return entity
# user validation
def assertValidUserListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'users',
self.assertValidUser,
*args,
**kwargs)
def assertValidUserResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'user',
self.assertValidUser,
*args,
**kwargs)
def assertValidUser(self, entity, ref=None):
self.assertIsNotNone(entity.get('domain_id'))
self.assertIsNotNone(entity.get('email'))
self.assertIsNone(entity.get('password'))
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
self.assertEqual(ref['email'], entity['email'])
return entity
# group validation
def assertValidGroupListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'groups',
self.assertValidGroup,
*args,
**kwargs)
def assertValidGroupResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'group',
self.assertValidGroup,
*args,
**kwargs)
def assertValidGroup(self, entity, ref=None):
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
return entity
# credential validation
def assertValidCredentialListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'credentials',
self.assertValidCredential,
*args,
**kwargs)
def assertValidCredentialResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'credential',
self.assertValidCredential,
*args,
**kwargs)
def assertValidCredential(self, entity, ref=None):
self.assertIsNotNone(entity.get('user_id'))
self.assertIsNotNone(entity.get('blob'))
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['user_id'], entity['user_id'])
self.assertEqual(ref['blob'], entity['blob'])
self.assertEqual(ref['type'], entity['type'])
self.assertEqual(ref.get('project_id'), entity.get('project_id'))
return entity
# role validation
def assertValidRoleListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'roles',
self.assertValidRole,
*args,
**kwargs)
def assertValidRoleResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'role',
self.assertValidRole,
*args,
**kwargs)
def assertValidRole(self, entity, ref=None):
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
return entity
# policy validation
def assertValidPolicyListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'policies',
self.assertValidPolicy,
*args,
**kwargs)
def assertValidPolicyResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'policy',
self.assertValidPolicy,
*args,
**kwargs)
def assertValidPolicy(self, entity, ref=None):
self.assertIsNotNone(entity.get('blob'))
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['blob'], entity['blob'])
self.assertEqual(ref['type'], entity['type'])
return entity
# trust validation
def assertValidTrustListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'trusts',
self.assertValidTrust,
*args,
**kwargs)
def assertValidTrustResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'trust',
self.assertValidTrust,
*args,
**kwargs)
    def assertValidTrust(self, entity, ref=None):
        """Validate a trust entity and, optionally, compare it to *ref*."""
        self.assertIsNotNone(entity.get('trustor_user_id'))
        self.assertIsNotNone(entity.get('trustee_user_id'))
        self.assertIn('expires_at', entity)
        if entity['expires_at'] is not None:
            self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at'])
        # roles and project_id must appear together or not at all; the XOR
        # below fails when exactly one of the two is present.
        # (The previous comment said "project xor project_id", which did not
        # match the code.)
        has_roles = bool(entity.get('roles'))
        has_project = bool(entity.get('project_id'))
        self.assertFalse(has_roles ^ has_project)
        for role in entity['roles']:
            self.assertIsNotNone(role)
            self.assertValidEntity(role)
            self.assertValidRole(role)
        self.assertValidListLinks(entity.get('roles_links'))
        # these were used during dev and shouldn't land in final impl
        self.assertNotIn('role_ids', entity)
        self.assertNotIn('role_names', entity)
        if ref:
            self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id'])
            self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id'])
            self.assertEqual(ref['project_id'], entity['project_id'])
            if entity.get('expires_at') or ref.get('expires_at'):
                # Timestamps may differ by sub-second amounts; compare the
                # parsed datetimes approximately.
                entity_exp = self.assertValidISO8601ExtendedFormatDatetime(
                    entity['expires_at'])
                ref_exp = self.assertValidISO8601ExtendedFormatDatetime(
                    ref['expires_at'])
                self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp)
            else:
                self.assertEqual(ref.get('expires_at'),
                                 entity.get('expires_at'))
        return entity
def build_auth_scope(self, project_id=None, project_name=None,
project_domain_id=None, project_domain_name=None,
domain_id=None, domain_name=None, trust_id=None):
scope_data = {}
if project_id or project_name:
scope_data['project'] = {}
if project_id:
scope_data['project']['id'] = project_id
else:
scope_data['project']['name'] = project_name
if project_domain_id or project_domain_name:
project_domain_json = {}
if project_domain_id:
project_domain_json['id'] = project_domain_id
else:
project_domain_json['name'] = project_domain_name
scope_data['project']['domain'] = project_domain_json
if domain_id or domain_name:
scope_data['domain'] = {}
if domain_id:
scope_data['domain']['id'] = domain_id
else:
scope_data['domain']['name'] = domain_name
if trust_id:
scope_data['OS-TRUST:trust'] = {}
scope_data['OS-TRUST:trust']['id'] = trust_id
return scope_data
def build_password_auth(self, user_id=None, username=None,
user_domain_id=None, user_domain_name=None,
password=None):
password_data = {'user': {}}
if user_id:
password_data['user']['id'] = user_id
else:
password_data['user']['name'] = username
if user_domain_id or user_domain_name:
password_data['user']['domain'] = {}
if user_domain_id:
password_data['user']['domain']['id'] = user_domain_id
else:
password_data['user']['domain']['name'] = user_domain_name
password_data['user']['password'] = password
return password_data
def build_token_auth(self, token):
return {'id': token}
def build_authentication_request(self, token=None, user_id=None,
username=None, user_domain_id=None,
user_domain_name=None, password=None,
**kwargs):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_data = {}
auth_data['identity'] = {'methods': []}
if token:
auth_data['identity']['methods'].append('token')
auth_data['identity']['token'] = self.build_token_auth(token)
if user_id or username:
auth_data['identity']['methods'].append('password')
auth_data['identity']['password'] = self.build_password_auth(
user_id, username, user_domain_id, user_domain_name, password)
if kwargs:
auth_data['scope'] = self.build_auth_scope(**kwargs)
return {'auth': auth_data}
class VersionTestCase(RestfulTestCase):
    """Coverage for the version discovery API (currently a stub)."""

    def test_get_version(self):
        # TODO: implement assertions against the version endpoint; the stub
        # keeps the test collected so the gap stays visible.
        pass
# --- end of Keystone REST test helpers; Z-Wave tests follow ---
"""Tests for the Z-Wave init."""
import asyncio
from collections import OrderedDict
from datetime import datetime
import unittest
from unittest.mock import MagicMock, patch
import pytest
from pytz import utc
import voluptuous as vol
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import zwave
from homeassistant.components.zwave import (
CONF_DEVICE_CONFIG_GLOB,
CONFIG_SCHEMA,
DATA_NETWORK,
const,
)
from homeassistant.components.zwave.binary_sensor import get_device
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.setup import setup_component
from tests.common import (
async_fire_time_changed,
get_test_home_assistant,
mock_coro,
mock_registry,
)
from tests.mock.zwave import MockEntityValues, MockNetwork, MockNode, MockValue
async def test_valid_device_config(hass, mock_openzwave):
    """Test valid device config."""
    conf = {"zwave": {"device_config": {"light.kitchen": {"ignored": "true"}}}}
    result = await async_setup_component(hass, "zwave", conf)
    await hass.async_block_till_done()
    assert result
async def test_invalid_device_config(hass, mock_openzwave):
    """Test invalid device config."""
    # "some_ignored" is not a recognized device-config option.
    conf = {"zwave": {"device_config": {"light.kitchen": {"some_ignored": "true"}}}}
    result = await async_setup_component(hass, "zwave", conf)
    await hass.async_block_till_done()
    assert not result
def test_config_access_error():
    """Test threading error accessing config values."""
    node = MagicMock()
    # Assigning the exception class makes every call raise RuntimeError.
    node.values.values.side_effect = RuntimeError
    assert zwave.get_config_value(node, 1) is None
async def test_network_options(hass, mock_openzwave):
    """Test network options."""
    config = {"zwave": {"usb_path": "mock_usb_path", "config_path": "mock_config_path"}}
    result = await async_setup_component(hass, "zwave", config)
    await hass.async_block_till_done()
    assert result
    # The configured paths must be forwarded to the openzwave options.
    network = hass.data[zwave.DATA_NETWORK]
    assert network.options.device == "mock_usb_path"
    assert network.options.config_path == "mock_config_path"
async def test_network_key_validation(hass, mock_openzwave):
    """Test network key validation."""
    spaced = (
        "0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
        "0x0C, 0x0D, 0x0E, 0x0F, 0x10"
    )
    compact = (
        "0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,"
        "0x0E,0x0F,0x10"
    )
    # Both spaced and compact key formats must validate unchanged.
    for key in (spaced, compact):
        result = zwave.CONFIG_SCHEMA({"zwave": {"network_key": key}})
        assert result["zwave"]["network_key"] == key
async def test_erronous_network_key_fails_validation(hass, mock_openzwave):
    """Test failing erroneous network key validation."""
    bad_keys = [
        (
            "0x 01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
            "0x0C, 0x0D, 0x0E, 0x0F, 0x10"
        ),
        (
            "0X01,0X02,0X03,0X04,0X05,0X06,0X07,0X08,0X09,0X0A,0X0B,0X0C,0X0D,"
            "0X0E,0X0F,0X10"
        ),
        "invalid",
        "1234567",
        1234567,
    ]
    # Stray spaces, uppercase 0X prefixes, and non-key values must all fail.
    for key in bad_keys:
        with pytest.raises(vol.Invalid):
            zwave.CONFIG_SCHEMA({"zwave": {"network_key": key}})
async def test_auto_heal_midnight(hass, mock_openzwave):
    """Test network auto-heal at midnight."""
    await async_setup_component(hass, "zwave", {"zwave": {"autoheal": True}})
    await hass.async_block_till_done()
    network = hass.data[zwave.DATA_NETWORK]
    assert not network.heal.called
    # Advancing the clock to midnight should trigger exactly one heal.
    midnight = utc.localize(datetime(2017, 5, 6, 0, 0, 0))
    async_fire_time_changed(hass, midnight)
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    assert network.heal.called
    assert len(network.heal.mock_calls) == 1
async def test_auto_heal_disabled(hass, mock_openzwave):
    """Test network auto-heal disabled."""
    await async_setup_component(hass, "zwave", {"zwave": {"autoheal": False}})
    await hass.async_block_till_done()
    network = hass.data[zwave.DATA_NETWORK]
    assert not network.heal.called
    # With autoheal off, midnight must not trigger a heal.
    midnight = utc.localize(datetime(2017, 5, 6, 0, 0, 0))
    async_fire_time_changed(hass, midnight)
    await hass.async_block_till_done()
    assert not network.heal.called
async def test_setup_platform(hass, mock_openzwave):
    """Test the zwave platform setup entry point."""
    mock_device = MagicMock()
    hass.data[DATA_NETWORK] = MagicMock()
    hass.data[zwave.DATA_DEVICES] = {456: mock_device}
    async_add_entities = MagicMock()
    # No discovery info: setup must be a no-op.
    result = await zwave.async_setup_platform(hass, None, async_add_entities, None)
    assert not result
    assert not async_add_entities.called
    # Unknown device id: nothing should be added.
    result = await zwave.async_setup_platform(
        hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 123}
    )
    assert not result
    assert not async_add_entities.called
    # Known device id: exactly that one device must be added, once.
    result = await zwave.async_setup_platform(
        hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 456}
    )
    assert result
    assert async_add_entities.called
    assert len(async_add_entities.mock_calls) == 1
    assert async_add_entities.mock_calls[0][1][0] == [mock_device]
async def test_zwave_ready_wait(hass, mock_openzwave):
    """Test that zwave continues after waiting for network ready."""
    # Initialize zwave
    await async_setup_component(hass, "zwave", {"zwave": {}})
    await hass.async_block_till_done()
    sleeps = []
    def utcnow():
        # Fake clock: each recorded sleep advances time by one second.
        return datetime.fromtimestamp(len(sleeps))
    asyncio_sleep = asyncio.sleep
    async def sleep(duration, loop=None):
        # Record non-zero sleeps but yield control without real delay.
        if duration > 0:
            sleeps.append(duration)
        await asyncio_sleep(0)
    with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow):
        with patch("asyncio.sleep", new=sleep):
            with patch.object(zwave, "_LOGGER") as mock_logger:
                # Network never reaches ready; startup should give up after
                # NETWORK_READY_WAIT_SECS and log a single warning.
                hass.data[DATA_NETWORK].state = MockNetwork.STATE_STARTED
                hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
                await hass.async_block_till_done()
                assert len(sleeps) == const.NETWORK_READY_WAIT_SECS
                assert mock_logger.warning.called
                assert len(mock_logger.warning.mock_calls) == 1
                assert (
                    mock_logger.warning.mock_calls[0][1][1]
                    == const.NETWORK_READY_WAIT_SECS
                )
async def test_device_entity(hass, mock_openzwave):
    """Test device entity base class."""
    node = MockNode(node_id="10", name="Mock Node")
    value = MockValue(
        data=False,
        node=node,
        instance=2,
        object_id="11",
        label="Sensor",
        command_class=const.COMMAND_CLASS_SENSOR_BINARY,
    )
    power_value = MockValue(
        data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
    )
    values = MockEntityValues(primary=value, power=power_value)
    device = zwave.ZWaveDeviceEntity(values, "zwave")
    device.hass = hass
    device.value_added()
    device.update_properties()
    await hass.async_block_till_done()
    assert not device.should_poll
    # unique_id is "<node_id>-<value object_id>".
    assert device.unique_id == "10-11"
    assert device.name == "Mock Node Sensor"
    # Power is rounded to the value's precision (3 digits here).
    assert device.device_state_attributes[zwave.ATTR_POWER] == 50.123
async def test_node_removed(hass, mock_openzwave):
    """Test node removed in base class."""
    # Create a mock node & node entity
    node = MockNode(node_id="10", name="Mock Node")
    value = MockValue(
        data=False,
        node=node,
        instance=2,
        object_id="11",
        label="Sensor",
        command_class=const.COMMAND_CLASS_SENSOR_BINARY,
    )
    power_value = MockValue(
        data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
    )
    values = MockEntityValues(primary=value, power=power_value)
    device = zwave.ZWaveDeviceEntity(values, "zwave")
    device.hass = hass
    device.entity_id = "zwave.mock_node"
    device.value_added()
    device.update_properties()
    await hass.async_block_till_done()
    # Save it to the entity registry
    registry = mock_registry(hass)
    registry.async_get_or_create("zwave", "zwave", device.unique_id)
    device.entity_id = registry.async_get_entity_id("zwave", "zwave", device.unique_id)
    # Create dummy entity registry entries for other integrations
    hue_entity = registry.async_get_or_create("light", "hue", 1234)
    zha_entity = registry.async_get_or_create("sensor", "zha", 5678)
    # Verify our Z-Wave entity is registered
    assert registry.async_is_registered(device.entity_id)
    # Remove it
    entity_id = device.entity_id
    await device.node_removed()
    # Verify registry entry for our Z-Wave node is gone
    assert not registry.async_is_registered(entity_id)
    # Verify registry entries for our other entities remain
    # (node removal must not touch unrelated integrations).
    assert registry.async_is_registered(hue_entity.entity_id)
    assert registry.async_is_registered(zha_entity.entity_id)
async def test_node_discovery(hass, mock_openzwave):
    """Test discovery of a node."""
    mock_receivers = []

    def mock_connect(receiver, signal, *args, **kwargs):
        # Capture only the node-added callback registered during setup.
        if signal == MockNetwork.SIGNAL_NODE_ADDED:
            mock_receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    assert len(mock_receivers) == 1
    # Fire the captured callback with a fresh node.
    hass.async_add_job(mock_receivers[0], MockNode(node_id=14))
    await hass.async_block_till_done()
    assert hass.states.get("zwave.mock_node").state == "unknown"
async def test_unparsed_node_discovery(hass, mock_openzwave):
    """Test discovery of a node that never becomes ready."""
    mock_receivers = []
    def mock_connect(receiver, signal, *args, **kwargs):
        # Capture the node-added callback registered during setup.
        if signal == MockNetwork.SIGNAL_NODE_ADDED:
            mock_receivers.append(receiver)
    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    assert len(mock_receivers) == 1
    node = MockNode(node_id=14, manufacturer_name=None, name=None, is_ready=False)
    sleeps = []
    def utcnow():
        # Fake clock: each recorded sleep advances time by one second.
        return datetime.fromtimestamp(len(sleeps))
    asyncio_sleep = asyncio.sleep
    async def sleep(duration, loop=None):
        # Record non-zero sleeps but yield control without real delay.
        if duration > 0:
            sleeps.append(duration)
        await asyncio_sleep(0)
    with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow):
        with patch("asyncio.sleep", new=sleep):
            with patch.object(zwave, "_LOGGER") as mock_logger:
                # Node never becomes ready; discovery should give up after
                # NODE_READY_WAIT_SECS and warn once with the node id.
                hass.async_add_job(mock_receivers[0], node)
                await hass.async_block_till_done()
                assert len(sleeps) == const.NODE_READY_WAIT_SECS
                assert mock_logger.warning.called
                assert len(mock_logger.warning.mock_calls) == 1
                assert mock_logger.warning.mock_calls[0][1][1:] == (
                    14,
                    const.NODE_READY_WAIT_SECS,
                )
    # An unparsed node falls back to a generic "unknown_node_<id>" entity.
    assert hass.states.get("zwave.unknown_node_14").state == "unknown"
async def test_node_ignored(hass, mock_openzwave):
    """Test discovery of a node."""
    mock_receivers = []

    def mock_connect(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_NODE_ADDED:
            mock_receivers.append(receiver)

    config = {"zwave": {"device_config": {"zwave.mock_node": {"ignored": True}}}}
    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", config)
        await hass.async_block_till_done()
    assert len(mock_receivers) == 1
    hass.async_add_job(mock_receivers[0], MockNode(node_id=14))
    await hass.async_block_till_done()
    # An ignored node must not create an entity.
    assert hass.states.get("zwave.mock_node") is None
async def test_value_discovery(hass, mock_openzwave):
    """Test discovery of a node."""
    mock_receivers = []

    def mock_connect(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_VALUE_ADDED:
            mock_receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    assert len(mock_receivers) == 1
    node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
    value = MockValue(
        data=False, node=node, index=12, instance=13,
        command_class=const.COMMAND_CLASS_SENSOR_BINARY,
        type=const.TYPE_BOOL, genre=const.GENRE_USER,
    )
    hass.async_add_job(mock_receivers[0], node, value)
    await hass.async_block_till_done()
    # A boolean sensor value maps to a binary_sensor entity.
    assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
async def test_value_entities(hass, mock_openzwave):
    """Test entity/device registry wiring for discovered values."""
    mock_receivers = {}
    def mock_connect(receiver, signal, *args, **kwargs):
        # Capture every dispatcher callback keyed by its signal.
        mock_receivers[signal] = receiver
    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    zwave_network = hass.data[DATA_NETWORK]
    zwave_network.state = MockNetwork.STATE_READY
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    assert mock_receivers
    hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_ALL_NODES_QUERIED])
    node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
    zwave_network.nodes = {node.node_id: node}
    value = MockValue(
        data=False,
        node=node,
        index=12,
        instance=1,
        command_class=const.COMMAND_CLASS_SENSOR_BINARY,
        type=const.TYPE_BOOL,
        genre=const.GENRE_USER,
    )
    node.values = {"primary": value, value.value_id: value}
    # Second value on instance 2: should land on a separate device entry.
    value2 = MockValue(
        data=False,
        node=node,
        index=12,
        instance=2,
        label="Mock Value B",
        command_class=const.COMMAND_CLASS_SENSOR_BINARY,
        type=const.TYPE_BOOL,
        genre=const.GENRE_USER,
    )
    node.values[value2.value_id] = value2
    hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_NODE_ADDED], node)
    hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value)
    hass.async_add_job(mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value2)
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
    assert hass.states.get("binary_sensor.mock_node_mock_value_b").state == "off"
    ent_reg = await async_get_registry(hass)
    dev_reg = await get_dev_reg(hass)
    entry = ent_reg.async_get("zwave.mock_node")
    assert entry is not None
    assert entry.unique_id == f"node-{node.node_id}"
    node_dev_id = entry.device_id
    entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
    assert entry is not None
    assert entry.unique_id == f"{node.node_id}-{value.object_id}"
    assert entry.name is None
    assert entry.device_id == node_dev_id
    entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
    assert entry is not None
    assert entry.unique_id == f"{node.node_id}-{value2.object_id}"
    assert entry.name is None
    # instance 2 gets its own device, distinct from the node's device.
    assert entry.device_id != node_dev_id
    device_id_b = entry.device_id
    device = dev_reg.async_get(node_dev_id)
    assert device is not None
    assert device.name == node.name
    old_device = device
    device = dev_reg.async_get(device_id_b)
    assert device is not None
    assert device.name == f"{node.name} ({value2.instance})"
    # test renaming without updating
    await hass.services.async_call(
        "zwave",
        "rename_node",
        {const.ATTR_NODE_ID: node.node_id, const.ATTR_NAME: "Demo Node"},
    )
    await hass.async_block_till_done()
    assert node.name == "Demo Node"
    # Entity ids must be unchanged when update_ids is not requested.
    entry = ent_reg.async_get("zwave.mock_node")
    assert entry is not None
    entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
    assert entry is not None
    entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
    assert entry is not None
    device = dev_reg.async_get(node_dev_id)
    assert device is not None
    assert device.id == old_device.id
    assert device.name == node.name
    device = dev_reg.async_get(device_id_b)
    assert device is not None
    assert device.name == f"{node.name} ({value2.instance})"
    # test renaming
    await hass.services.async_call(
        "zwave",
        "rename_node",
        {
            const.ATTR_NODE_ID: node.node_id,
            const.ATTR_UPDATE_IDS: True,
            const.ATTR_NAME: "New Node",
        },
    )
    await hass.async_block_till_done()
    assert node.name == "New Node"
    # With update_ids the entity ids follow the new name; unique_ids stay.
    entry = ent_reg.async_get("zwave.new_node")
    assert entry is not None
    assert entry.unique_id == f"node-{node.node_id}"
    entry = ent_reg.async_get("binary_sensor.new_node_mock_value")
    assert entry is not None
    assert entry.unique_id == f"{node.node_id}-{value.object_id}"
    device = dev_reg.async_get(node_dev_id)
    assert device is not None
    assert device.id == old_device.id
    assert device.name == node.name
    device = dev_reg.async_get(device_id_b)
    assert device is not None
    assert device.name == f"{node.name} ({value2.instance})"
    await hass.services.async_call(
        "zwave",
        "rename_value",
        {
            const.ATTR_NODE_ID: node.node_id,
            const.ATTR_VALUE_ID: value.object_id,
            const.ATTR_UPDATE_IDS: True,
            const.ATTR_NAME: "New Label",
        },
    )
    await hass.async_block_till_done()
    entry = ent_reg.async_get("binary_sensor.new_node_new_label")
    assert entry is not None
    assert entry.unique_id == f"{node.node_id}-{value.object_id}"
async def test_value_discovery_existing_entity(hass, mock_openzwave):
    """Test that new values are folded into an existing climate entity."""
    mock_receivers = []
    def mock_connect(receiver, signal, *args, **kwargs):
        # Capture the value-added callback registered during setup.
        if signal == MockNetwork.SIGNAL_VALUE_ADDED:
            mock_receivers.append(receiver)
    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    assert len(mock_receivers) == 1
    node = MockNode(
        node_id=11,
        generic=const.GENERIC_TYPE_THERMOSTAT,
        specific=const.SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2,
    )
    thermostat_mode = MockValue(
        data="Heat",
        data_items=["Off", "Heat"],
        node=node,
        command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
        genre=const.GENRE_USER,
    )
    setpoint_heating = MockValue(
        data=22.0,
        node=node,
        command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
        index=1,
        genre=const.GENRE_USER,
    )
    # The mode value creates the climate entity.
    hass.async_add_job(mock_receivers[0], node, thermostat_mode)
    await hass.async_block_till_done()
    def mock_update(self):
        self.hass.add_job(self.async_update_ha_state)
    with patch.object(
        zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
    ):
        # The setpoint should attach to the existing entity as temperature.
        hass.async_add_job(mock_receivers[0], node, setpoint_heating)
        await hass.async_block_till_done()
    assert (
        hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
        == 22.0
    )
    assert (
        hass.states.get("climate.mock_node_mock_value").attributes[
            "current_temperature"
        ]
        is None
    )
    with patch.object(
        zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
    ):
        # A multilevel sensor value supplies the current temperature.
        temperature = MockValue(
            data=23.5,
            node=node,
            index=1,
            command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
            genre=const.GENRE_USER,
            units="C",
        )
        hass.async_add_job(mock_receivers[0], node, temperature)
        await hass.async_block_till_done()
    assert (
        hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
        == 22.0
    )
    assert (
        hass.states.get("climate.mock_node_mock_value").attributes[
            "current_temperature"
        ]
        == 23.5
    )
async def test_value_discovery_legacy_thermostat(hass, mock_openzwave):
    """Test discovery of a node. Special case for legacy thermostats."""
    mock_receivers = []

    def mock_connect(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_VALUE_ADDED:
            mock_receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=mock_connect):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()
    assert len(mock_receivers) == 1
    # Legacy thermostats expose only a setpoint, no mode command class.
    node = MockNode(
        node_id=11,
        generic=const.GENERIC_TYPE_THERMOSTAT,
        specific=const.SPECIFIC_TYPE_SETPOINT_THERMOSTAT,
    )
    setpoint_heating = MockValue(
        data=22.0, node=node, index=1,
        command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
        genre=const.GENRE_USER,
    )
    hass.async_add_job(mock_receivers[0], node, setpoint_heating)
    await hass.async_block_till_done()
    assert (
        hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
        == 22.0
    )
async def test_power_schemes(hass, mock_openzwave):
    """Test power attribute."""
    receivers = []

    def capture_receiver(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_VALUE_ADDED:
            receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=capture_receiver):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()

    assert len(receivers) == 1

    switch_node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SWITCH_BINARY)
    switch_value = MockValue(
        data=True,
        node=switch_node,
        index=12,
        instance=13,
        command_class=const.COMMAND_CLASS_SWITCH_BINARY,
        genre=const.GENRE_USER,
        type=const.TYPE_BOOL,
    )
    hass.async_add_job(receivers[0], switch_node, switch_value)
    await hass.async_block_till_done()

    switch_state = hass.states.get("switch.mock_node_mock_value")
    assert switch_state.state == "on"
    # No power sensor value discovered yet, so the attribute is absent.
    assert "power_consumption" not in switch_state.attributes

    def schedule_update(self):
        # Force an immediate HA state refresh instead of the debounced one.
        self.hass.add_job(self.async_update_ha_state)

    with patch.object(
        zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=schedule_update
    ):
        power_value = MockValue(
            data=23.5,
            node=switch_node,
            index=const.INDEX_SENSOR_MULTILEVEL_POWER,
            instance=13,
            command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
        )
        hass.async_add_job(receivers[0], switch_node, power_value)
        await hass.async_block_till_done()

    updated_attrs = hass.states.get("switch.mock_node_mock_value").attributes
    assert updated_attrs["power_consumption"] == 23.5
async def test_network_ready(hass, mock_openzwave):
    """Test Node network ready event."""
    receivers = []

    def capture_receiver(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED:
            receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=capture_receiver):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()

    assert len(receivers) == 1

    fired = []
    # The ALL_NODES_QUERIED callback is expected to fire EVENT_NETWORK_COMPLETE.
    hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE, fired.append)

    hass.async_add_job(receivers[0])
    await hass.async_block_till_done()

    assert len(fired) == 1
async def test_network_complete(hass, mock_openzwave):
    """Test Node network complete event."""
    receivers = []

    def capture_receiver(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_AWAKE_NODES_QUERIED:
            receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=capture_receiver):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()

    assert len(receivers) == 1

    fired = []
    # AWAKE_NODES_QUERIED is expected to fire EVENT_NETWORK_READY.
    hass.bus.async_listen(const.EVENT_NETWORK_READY, fired.append)

    hass.async_add_job(receivers[0])
    await hass.async_block_till_done()

    assert len(fired) == 1
async def test_network_complete_some_dead(hass, mock_openzwave):
    """Test Node network complete some dead event."""
    receivers = []

    def capture_receiver(receiver, signal, *args, **kwargs):
        if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD:
            receivers.append(receiver)

    with patch("pydispatch.dispatcher.connect", new=capture_receiver):
        await async_setup_component(hass, "zwave", {"zwave": {}})
        await hass.async_block_till_done()

    assert len(receivers) == 1

    fired = []
    hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE_SOME_DEAD, fired.append)

    hass.async_add_job(receivers[0])
    await hass.async_block_till_done()

    assert len(fired) == 1
class TestZWaveDeviceEntityValues(unittest.TestCase):
    """Tests for the ZWaveDeviceEntityValues helper.

    Each test constructs a ZWaveDeviceEntityValues from a discovery schema
    and checks whether/when the platform discovery hook is invoked.
    """
    @pytest.fixture(autouse=True)
    def set_mock_openzwave(self, mock_openzwave):
        """Use the mock_openzwave fixture for this class."""
        self.mock_openzwave = mock_openzwave
    def setUp(self):
        """Initialize values for this testcase class."""
        self.hass = get_test_home_assistant()
        self.hass.start()
        self.registry = mock_registry(self.hass)
        setup_component(self.hass, "zwave", {"zwave": {}})
        self.hass.block_till_done()
        self.node = MockNode()
        # Discovery schema shared by most tests: a required primary value,
        # a required "secondary" value and an optional value class.
        self.mock_schema = {
            const.DISC_COMPONENT: "mock_component",
            const.DISC_VALUES: {
                const.DISC_PRIMARY: {const.DISC_COMMAND_CLASS: ["mock_primary_class"]},
                "secondary": {const.DISC_COMMAND_CLASS: ["mock_secondary_class"]},
                "optional": {
                    const.DISC_COMMAND_CLASS: ["mock_optional_class"],
                    const.DISC_OPTIONAL: True,
                },
            },
        }
        self.primary = MockValue(
            command_class="mock_primary_class", node=self.node, value_id=1000
        )
        self.secondary = MockValue(command_class="mock_secondary_class", node=self.node)
        # Same command class as self.secondary; used to verify a duplicate
        # does not overwrite an already matched slot.
        self.duplicate_secondary = MockValue(
            command_class="mock_secondary_class", node=self.node
        )
        self.optional = MockValue(command_class="mock_optional_class", node=self.node)
        self.no_match_value = MockValue(command_class="mock_bad_class", node=self.node)
        self.entity_id = "mock_component.mock_node_mock_value"
        self.zwave_config = {"zwave": {}}
        self.device_config = {self.entity_id: {}}
    def tearDown(self): # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_discovery(self, discovery, import_module):
        """Test the creation of a new entity."""
        discovery.async_load_platform.return_value = mock_coro()
        mock_platform = MagicMock()
        import_module.return_value = mock_platform
        mock_device = MagicMock()
        mock_device.name = "test_device"
        mock_platform.get_device.return_value = mock_device
        values = zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        # Only the primary is known; the required secondary slot is still
        # empty, so the platform must not be loaded yet.
        assert values.primary is self.primary
        assert len(list(values)) == 3
        assert sorted(list(values), key=lambda a: id(a)) == sorted(
            [self.primary, None, None], key=lambda a: id(a)
        )
        assert not discovery.async_load_platform.called
        # Adding the required secondary completes the schema and triggers
        # exactly one platform load.
        values.check_value(self.secondary)
        self.hass.block_till_done()
        assert values.secondary is self.secondary
        assert len(list(values)) == 3
        assert sorted(list(values), key=lambda a: id(a)) == sorted(
            [self.primary, self.secondary, None], key=lambda a: id(a)
        )
        assert discovery.async_load_platform.called
        assert len(discovery.async_load_platform.mock_calls) == 1
        args = discovery.async_load_platform.mock_calls[0][1]
        assert args[0] == self.hass
        assert args[1] == "mock_component"
        assert args[2] == "zwave"
        assert args[3] == {const.DISCOVERY_DEVICE: mock_device.unique_id}
        assert args[4] == self.zwave_config
        # Late values (optional, duplicate, non-matching) must fill or be
        # ignored without re-triggering discovery.
        discovery.async_load_platform.reset_mock()
        values.check_value(self.optional)
        values.check_value(self.duplicate_secondary)
        values.check_value(self.no_match_value)
        self.hass.block_till_done()
        assert values.optional is self.optional
        assert len(list(values)) == 3
        assert sorted(list(values), key=lambda a: id(a)) == sorted(
            [self.primary, self.secondary, self.optional], key=lambda a: id(a)
        )
        assert not discovery.async_load_platform.called
        # The entity is notified once for the added value and once for the
        # changed value.
        assert values._entity.value_added.called
        assert len(values._entity.value_added.mock_calls) == 1
        assert values._entity.value_changed.called
        assert len(values._entity.value_changed.mock_calls) == 1
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_existing_values(self, discovery, import_module):
        """Test the loading of already discovered values."""
        discovery.async_load_platform.return_value = mock_coro()
        mock_platform = MagicMock()
        import_module.return_value = mock_platform
        mock_device = MagicMock()
        mock_device.name = "test_device"
        mock_platform.get_device.return_value = mock_device
        # Values already present on the node are matched at construction
        # time, so discovery fires without any check_value calls.
        self.node.values = {
            self.primary.value_id: self.primary,
            self.secondary.value_id: self.secondary,
            self.optional.value_id: self.optional,
            self.no_match_value.value_id: self.no_match_value,
        }
        values = zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        self.hass.block_till_done()
        assert values.primary is self.primary
        assert values.secondary is self.secondary
        assert values.optional is self.optional
        assert len(list(values)) == 3
        assert sorted(list(values), key=lambda a: id(a)) == sorted(
            [self.primary, self.secondary, self.optional], key=lambda a: id(a)
        )
        assert discovery.async_load_platform.called
        assert len(discovery.async_load_platform.mock_calls) == 1
        args = discovery.async_load_platform.mock_calls[0][1]
        assert args[0] == self.hass
        assert args[1] == "mock_component"
        assert args[2] == "zwave"
        assert args[3] == {const.DISCOVERY_DEVICE: mock_device.unique_id}
        assert args[4] == self.zwave_config
        # No polling intensity configured, so polling stays disabled.
        assert not self.primary.enable_poll.called
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_node_schema_mismatch(self, discovery, import_module):
        """Test node schema mismatch."""
        # Schema requires generic device class "generic_match"; the node
        # reports "no_match", so no entity may be discovered.
        self.node.generic = "no_match"
        self.node.values = {
            self.primary.value_id: self.primary,
            self.secondary.value_id: self.secondary,
        }
        self.mock_schema[const.DISC_GENERIC_DEVICE_CLASS] = ["generic_match"]
        values = zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        values._check_entity_ready()
        self.hass.block_till_done()
        assert not discovery.async_load_platform.called
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_workaround_component(self, discovery, import_module):
        """Test component workaround."""
        discovery.async_load_platform.return_value = mock_coro()
        mock_platform = MagicMock()
        import_module.return_value = mock_platform
        mock_device = MagicMock()
        mock_device.name = "test_device"
        mock_platform.get_device.return_value = mock_device
        # Manufacturer/product ids that trigger a component remap workaround
        # (device should be dispatched as a binary_sensor instead).
        self.node.manufacturer_id = "010f"
        self.node.product_type = "0b00"
        self.primary.command_class = const.COMMAND_CLASS_SENSOR_ALARM
        self.entity_id = "binary_sensor.mock_node_mock_value"
        self.device_config = {self.entity_id: {}}
        self.mock_schema = {
            const.DISC_COMPONENT: "mock_component",
            const.DISC_VALUES: {
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
                }
            },
        }
        with patch.object(zwave, "async_dispatcher_send") as mock_dispatch_send:
            values = zwave.ZWaveDeviceEntityValues(
                hass=self.hass,
                schema=self.mock_schema,
                primary_value=self.primary,
                zwave_config=self.zwave_config,
                device_config=self.device_config,
                registry=self.registry,
            )
            values._check_entity_ready()
            self.hass.block_till_done()
        assert mock_dispatch_send.called
        assert len(mock_dispatch_send.mock_calls) == 1
        args = mock_dispatch_send.mock_calls[0][1]
        assert args[1] == "zwave_new_binary_sensor"
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_workaround_ignore(self, discovery, import_module):
        """Test ignore workaround."""
        # Manufacturer/product ids matching an "ignore" workaround entry:
        # the device must never be discovered.
        self.node.manufacturer_id = "010f"
        self.node.product_type = "0301"
        self.primary.command_class = const.COMMAND_CLASS_SWITCH_BINARY
        self.mock_schema = {
            const.DISC_COMPONENT: "mock_component",
            const.DISC_VALUES: {
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
                }
            },
        }
        values = zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        values._check_entity_ready()
        self.hass.block_till_done()
        assert not discovery.async_load_platform.called
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_config_ignore(self, discovery, import_module):
        """Test ignore config."""
        self.node.values = {
            self.primary.value_id: self.primary,
            self.secondary.value_id: self.secondary,
        }
        # CONF_IGNORED in the per-entity device config suppresses discovery.
        self.device_config = {self.entity_id: {zwave.CONF_IGNORED: True}}
        values = zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        values._check_entity_ready()
        self.hass.block_till_done()
        assert not discovery.async_load_platform.called
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_config_ignore_with_registry(self, discovery, import_module):
        """Test ignore config.
        The case when the device is in entity registry.
        """
        self.node.values = {
            self.primary.value_id: self.primary,
            self.secondary.value_id: self.secondary,
        }
        # The ignore entry is keyed by the registry entity id, not the
        # generated one, so it must be resolved through the registry.
        self.device_config = {"mock_component.registry_id": {zwave.CONF_IGNORED: True}}
        with patch.object(self.registry, "async_schedule_save"):
            self.registry.async_get_or_create(
                "mock_component",
                zwave.DOMAIN,
                "567-1000",
                suggested_object_id="registry_id",
            )
        zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        self.hass.block_till_done()
        assert not discovery.async_load_platform.called
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_entity_platform_ignore(self, discovery, import_module):
        """Test platform ignore device."""
        self.node.values = {
            self.primary.value_id: self.primary,
            self.secondary.value_id: self.secondary,
        }
        platform = MagicMock()
        import_module.return_value = platform
        # The platform declining to build a device (returning None) must
        # abort discovery.
        platform.get_device.return_value = None
        zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        self.hass.block_till_done()
        assert not discovery.async_load_platform.called
    @patch.object(zwave, "import_module")
    @patch.object(zwave, "discovery")
    def test_config_polling_intensity(self, discovery, import_module):
        """Test polling intensity."""
        mock_platform = MagicMock()
        import_module.return_value = mock_platform
        mock_device = MagicMock()
        mock_device.name = "test_device"
        mock_platform.get_device.return_value = mock_device
        self.node.values = {
            self.primary.value_id: self.primary,
            self.secondary.value_id: self.secondary,
        }
        # Configured polling intensity must be forwarded to the primary
        # value exactly once.
        self.device_config = {self.entity_id: {zwave.CONF_POLLING_INTENSITY: 123}}
        values = zwave.ZWaveDeviceEntityValues(
            hass=self.hass,
            schema=self.mock_schema,
            primary_value=self.primary,
            zwave_config=self.zwave_config,
            device_config=self.device_config,
            registry=self.registry,
        )
        values._check_entity_ready()
        self.hass.block_till_done()
        assert discovery.async_load_platform.called
        assert self.primary.enable_poll.called
        assert len(self.primary.enable_poll.mock_calls) == 1
        assert self.primary.enable_poll.mock_calls[0][1][0] == 123
class TestZwave(unittest.TestCase):
    """Test zwave init."""

    def test_device_config_glob_is_ordered(self):
        """Test that device_config_glob preserves order."""
        config = CONFIG_SCHEMA({"zwave": {CONF_DEVICE_CONFIG_GLOB: OrderedDict()}})
        glob_conf = config["zwave"][CONF_DEVICE_CONFIG_GLOB]
        # Glob matching is first-match-wins, so ordering must survive
        # schema validation.
        assert isinstance(glob_conf, OrderedDict)
class TestZWaveServices(unittest.TestCase):
    """Tests for zwave services.

    Each test calls a ``zwave.*`` service against a mocked network and
    asserts the expected controller/node/value mock interaction.
    """
    @pytest.fixture(autouse=True)
    def set_mock_openzwave(self, mock_openzwave):
        """Use the mock_openzwave fixture for this class."""
        self.mock_openzwave = mock_openzwave
    def setUp(self):
        """Initialize values for this testcase class."""
        self.hass = get_test_home_assistant()
        self.hass.start()
        # Initialize zwave
        setup_component(self.hass, "zwave", {"zwave": {}})
        self.hass.block_till_done()
        self.zwave_network = self.hass.data[DATA_NETWORK]
        # Mark the mocked network ready so services are registered on start.
        self.zwave_network.state = MockNetwork.STATE_READY
        self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
        self.hass.block_till_done()
    def tearDown(self): # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.services.call("zwave", "stop_network", {})
        self.hass.block_till_done()
        self.hass.stop()
    def test_add_node(self):
        """Test zwave add_node service."""
        self.hass.services.call("zwave", "add_node", {})
        self.hass.block_till_done()
        assert self.zwave_network.controller.add_node.called
        assert len(self.zwave_network.controller.add_node.mock_calls) == 1
        # Non-secure inclusion: add_node is called without arguments.
        assert len(self.zwave_network.controller.add_node.mock_calls[0][1]) == 0
    def test_add_node_secure(self):
        """Test zwave add_node_secure service."""
        self.hass.services.call("zwave", "add_node_secure", {})
        self.hass.block_till_done()
        assert self.zwave_network.controller.add_node.called
        assert len(self.zwave_network.controller.add_node.mock_calls) == 1
        # Secure inclusion: add_node is called with the doSecurity flag True.
        assert self.zwave_network.controller.add_node.mock_calls[0][1][0] is True
    def test_remove_node(self):
        """Test zwave remove_node service."""
        self.hass.services.call("zwave", "remove_node", {})
        self.hass.block_till_done()
        assert self.zwave_network.controller.remove_node.called
        assert len(self.zwave_network.controller.remove_node.mock_calls) == 1
    def test_cancel_command(self):
        """Test zwave cancel_command service."""
        self.hass.services.call("zwave", "cancel_command", {})
        self.hass.block_till_done()
        assert self.zwave_network.controller.cancel_command.called
        assert len(self.zwave_network.controller.cancel_command.mock_calls) == 1
    def test_heal_network(self):
        """Test zwave heal_network service."""
        self.hass.services.call("zwave", "heal_network", {})
        self.hass.block_till_done()
        assert self.zwave_network.heal.called
        assert len(self.zwave_network.heal.mock_calls) == 1
    def test_soft_reset(self):
        """Test zwave soft_reset service."""
        self.hass.services.call("zwave", "soft_reset", {})
        self.hass.block_till_done()
        assert self.zwave_network.controller.soft_reset.called
        assert len(self.zwave_network.controller.soft_reset.mock_calls) == 1
    def test_test_network(self):
        """Test zwave test_network service."""
        self.hass.services.call("zwave", "test_network", {})
        self.hass.block_till_done()
        assert self.zwave_network.test.called
        assert len(self.zwave_network.test.mock_calls) == 1
    def test_stop_network(self):
        """Test zwave stop_network service."""
        with patch.object(self.hass.bus, "fire") as mock_fire:
            self.hass.services.call("zwave", "stop_network", {})
            self.hass.block_till_done()
            assert self.zwave_network.stop.called
            assert len(self.zwave_network.stop.mock_calls) == 1
            assert mock_fire.called
            assert len(mock_fire.mock_calls) == 1
            # Stopping the network must announce itself on the event bus.
            assert mock_fire.mock_calls[0][1][0] == const.EVENT_NETWORK_STOP
    def test_rename_node(self):
        """Test zwave rename_node service."""
        self.zwave_network.nodes = {11: MagicMock()}
        self.hass.services.call(
            "zwave",
            "rename_node",
            {const.ATTR_NODE_ID: 11, const.ATTR_NAME: "test_name"},
        )
        self.hass.block_till_done()
        assert self.zwave_network.nodes[11].name == "test_name"
    def test_rename_value(self):
        """Test zwave rename_value service."""
        node = MockNode(node_id=14)
        value = MockValue(index=12, value_id=123456, label="Old Label")
        node.values = {123456: value}
        self.zwave_network.nodes = {11: node}
        assert value.label == "Old Label"
        self.hass.services.call(
            "zwave",
            "rename_value",
            {
                const.ATTR_NODE_ID: 11,
                const.ATTR_VALUE_ID: 123456,
                const.ATTR_NAME: "New Label",
            },
        )
        self.hass.block_till_done()
        assert value.label == "New Label"
    def test_set_poll_intensity_enable(self):
        """Test zwave set_poll_intensity service, successful set."""
        node = MockNode(node_id=14)
        value = MockValue(index=12, value_id=123456, poll_intensity=0)
        node.values = {123456: value}
        self.zwave_network.nodes = {11: node}
        assert value.poll_intensity == 0
        self.hass.services.call(
            "zwave",
            "set_poll_intensity",
            {
                const.ATTR_NODE_ID: 11,
                const.ATTR_VALUE_ID: 123456,
                const.ATTR_POLL_INTENSITY: 4,
            },
        )
        self.hass.block_till_done()
        enable_poll = value.enable_poll
        assert value.enable_poll.called
        # Two calls recorded: the enable itself plus the truthiness check of
        # its return value on the MagicMock.
        assert len(enable_poll.mock_calls) == 2
        assert enable_poll.mock_calls[0][1][0] == 4
    def test_set_poll_intensity_enable_failed(self):
        """Test zwave set_poll_intensity service, failed set."""
        node = MockNode(node_id=14)
        value = MockValue(index=12, value_id=123456, poll_intensity=0)
        # Simulate the library rejecting the poll change.
        value.enable_poll.return_value = False
        node.values = {123456: value}
        self.zwave_network.nodes = {11: node}
        assert value.poll_intensity == 0
        self.hass.services.call(
            "zwave",
            "set_poll_intensity",
            {
                const.ATTR_NODE_ID: 11,
                const.ATTR_VALUE_ID: 123456,
                const.ATTR_POLL_INTENSITY: 4,
            },
        )
        self.hass.block_till_done()
        enable_poll = value.enable_poll
        assert value.enable_poll.called
        assert len(enable_poll.mock_calls) == 1
    def test_set_poll_intensity_disable(self):
        """Test zwave set_poll_intensity service, successful disable."""
        node = MockNode(node_id=14)
        value = MockValue(index=12, value_id=123456, poll_intensity=4)
        node.values = {123456: value}
        self.zwave_network.nodes = {11: node}
        assert value.poll_intensity == 4
        # Intensity 0 means "disable polling".
        self.hass.services.call(
            "zwave",
            "set_poll_intensity",
            {
                const.ATTR_NODE_ID: 11,
                const.ATTR_VALUE_ID: 123456,
                const.ATTR_POLL_INTENSITY: 0,
            },
        )
        self.hass.block_till_done()
        disable_poll = value.disable_poll
        assert value.disable_poll.called
        assert len(disable_poll.mock_calls) == 2
    def test_set_poll_intensity_disable_failed(self):
        """Test zwave set_poll_intensity service, failed disable."""
        node = MockNode(node_id=14)
        value = MockValue(index=12, value_id=123456, poll_intensity=4)
        value.disable_poll.return_value = False
        node.values = {123456: value}
        self.zwave_network.nodes = {11: node}
        assert value.poll_intensity == 4
        self.hass.services.call(
            "zwave",
            "set_poll_intensity",
            {
                const.ATTR_NODE_ID: 11,
                const.ATTR_VALUE_ID: 123456,
                const.ATTR_POLL_INTENSITY: 0,
            },
        )
        self.hass.block_till_done()
        disable_poll = value.disable_poll
        assert value.disable_poll.called
        assert len(disable_poll.mock_calls) == 1
    def test_remove_failed_node(self):
        """Test zwave remove_failed_node service."""
        self.hass.services.call("zwave", "remove_failed_node", {const.ATTR_NODE_ID: 12})
        self.hass.block_till_done()
        remove_failed_node = self.zwave_network.controller.remove_failed_node
        assert remove_failed_node.called
        assert len(remove_failed_node.mock_calls) == 1
        assert remove_failed_node.mock_calls[0][1][0] == 12
    def test_replace_failed_node(self):
        """Test zwave replace_failed_node service."""
        self.hass.services.call(
            "zwave", "replace_failed_node", {const.ATTR_NODE_ID: 13}
        )
        self.hass.block_till_done()
        replace_failed_node = self.zwave_network.controller.replace_failed_node
        assert replace_failed_node.called
        assert len(replace_failed_node.mock_calls) == 1
        assert replace_failed_node.mock_calls[0][1][0] == 13
    def test_set_config_parameter(self):
        """Test zwave set_config_parameter service.

        Covers each supported value type (byte, list, button, int-list,
        bool) plus setting an unknown parameter with an explicit size.
        """
        value_byte = MockValue(
            index=12,
            command_class=const.COMMAND_CLASS_CONFIGURATION,
            type=const.TYPE_BYTE,
        )
        value_list = MockValue(
            index=13,
            command_class=const.COMMAND_CLASS_CONFIGURATION,
            type=const.TYPE_LIST,
            data_items=["item1", "item2", "item3"],
        )
        value_button = MockValue(
            index=14,
            command_class=const.COMMAND_CLASS_CONFIGURATION,
            type=const.TYPE_BUTTON,
        )
        value_list_int = MockValue(
            index=15,
            command_class=const.COMMAND_CLASS_CONFIGURATION,
            type=const.TYPE_LIST,
            data_items=["1", "2", "3"],
        )
        value_bool = MockValue(
            index=16,
            command_class=const.COMMAND_CLASS_CONFIGURATION,
            type=const.TYPE_BOOL,
        )
        node = MockNode(node_id=14)
        node.get_values.return_value = {
            12: value_byte,
            13: value_list,
            14: value_button,
            15: value_list_int,
            16: value_bool,
        }
        self.zwave_network.nodes = {14: node}
        # Byte
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 12,
                const.ATTR_CONFIG_VALUE: 7,
            },
        )
        self.hass.block_till_done()
        assert value_byte.data == 7
        # List
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 13,
                const.ATTR_CONFIG_VALUE: "item3",
            },
        )
        self.hass.block_till_done()
        assert value_list.data == "item3"
        # Button
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 14,
                const.ATTR_CONFIG_VALUE: True,
            },
        )
        self.hass.block_till_done()
        # Button-type parameters are actuated via press/release, not data.
        assert self.zwave_network.manager.pressButton.called
        assert self.zwave_network.manager.releaseButton.called
        # List of Ints
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 15,
                const.ATTR_CONFIG_VALUE: 3,
            },
        )
        self.hass.block_till_done()
        # Int input is matched against the string items of the list.
        assert value_list_int.data == "3"
        # Boolean Truthy
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 16,
                const.ATTR_CONFIG_VALUE: "True",
            },
        )
        self.hass.block_till_done()
        assert value_bool.data == 1
        # Boolean Falsy
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 16,
                const.ATTR_CONFIG_VALUE: "False",
            },
        )
        self.hass.block_till_done()
        assert value_bool.data == 0
        # Different Parameter Size
        # Parameter 19 is not among the node's known values, so the service
        # falls back to node.set_config_param with the explicit size.
        self.hass.services.call(
            "zwave",
            "set_config_parameter",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_CONFIG_PARAMETER: 19,
                const.ATTR_CONFIG_VALUE: 0x01020304,
                const.ATTR_CONFIG_SIZE: 4,
            },
        )
        self.hass.block_till_done()
        assert node.set_config_param.called
        assert len(node.set_config_param.mock_calls) == 1
        assert node.set_config_param.mock_calls[0][1][0] == 19
        assert node.set_config_param.mock_calls[0][1][1] == 0x01020304
        assert node.set_config_param.mock_calls[0][1][2] == 4
        node.set_config_param.reset_mock()
    def test_print_config_parameter(self):
        """Test zwave print_config_parameter service."""
        value1 = MockValue(
            index=12, command_class=const.COMMAND_CLASS_CONFIGURATION, data=1234
        )
        value2 = MockValue(
            index=13, command_class=const.COMMAND_CLASS_CONFIGURATION, data=2345
        )
        node = MockNode(node_id=14)
        node.values = {12: value1, 13: value2}
        self.zwave_network.nodes = {14: node}
        with patch.object(zwave, "_LOGGER") as mock_logger:
            self.hass.services.call(
                "zwave",
                "print_config_parameter",
                {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_PARAMETER: 13},
            )
            self.hass.block_till_done()
            # The log line carries (parameter, node_id, data) as lazy args.
            assert mock_logger.info.called
            assert len(mock_logger.info.mock_calls) == 1
            assert mock_logger.info.mock_calls[0][1][1] == 13
            assert mock_logger.info.mock_calls[0][1][2] == 14
            assert mock_logger.info.mock_calls[0][1][3] == 2345
    def test_print_node(self):
        """Test zwave print_node_parameter service."""
        node = MockNode(node_id=14)
        self.zwave_network.nodes = {14: node}
        with self.assertLogs(level="DEBUG") as mock_logger:
            self.hass.services.call("zwave", "print_node", {const.ATTR_NODE_ID: 14})
            self.hass.block_till_done()
            assert "FOUND NODE " in mock_logger.output[1]
    def test_set_wakeup(self):
        """Test zwave set_wakeup service."""
        value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
        node = MockNode(node_id=14)
        node.values = {12: value}
        node.get_values.return_value = node.values
        self.zwave_network.nodes = {14: node}
        self.hass.services.call(
            "zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 15}
        )
        self.hass.block_till_done()
        assert value.data == 15
        # When the node cannot wake up, the service must leave the value
        # untouched.
        node.can_wake_up_value = False
        self.hass.services.call(
            "zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 20}
        )
        self.hass.block_till_done()
        assert value.data == 15
    def test_reset_node_meters(self):
        """Test zwave reset_node_meters service."""
        value = MockValue(
            instance=1, index=8, data=99.5, command_class=const.COMMAND_CLASS_METER
        )
        reset_value = MockValue(
            instance=1, index=33, command_class=const.COMMAND_CLASS_METER
        )
        node = MockNode(node_id=14)
        node.values = {8: value, 33: reset_value}
        node.get_values.return_value = node.values
        self.zwave_network.nodes = {14: node}
        # Wrong instance: nothing should be reset.
        self.hass.services.call(
            "zwave",
            "reset_node_meters",
            {const.ATTR_NODE_ID: 14, const.ATTR_INSTANCE: 2},
        )
        self.hass.block_till_done()
        assert not self.zwave_network.manager.pressButton.called
        assert not self.zwave_network.manager.releaseButton.called
        # Default instance: the reset button value is pressed and released.
        self.hass.services.call("zwave", "reset_node_meters", {const.ATTR_NODE_ID: 14})
        self.hass.block_till_done()
        assert self.zwave_network.manager.pressButton.called
        (value_id,) = self.zwave_network.manager.pressButton.mock_calls.pop(0)[1]
        assert value_id == reset_value.value_id
        assert self.zwave_network.manager.releaseButton.called
        (value_id,) = self.zwave_network.manager.releaseButton.mock_calls.pop(0)[1]
        assert value_id == reset_value.value_id
    def test_add_association(self):
        """Test zwave change_association service."""
        ZWaveGroup = self.mock_openzwave.group.ZWaveGroup
        group = MagicMock()
        ZWaveGroup.return_value = group
        value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
        node = MockNode(node_id=14)
        node.values = {12: value}
        node.get_values.return_value = node.values
        self.zwave_network.nodes = {14: node}
        self.hass.services.call(
            "zwave",
            "change_association",
            {
                const.ATTR_ASSOCIATION: "add",
                const.ATTR_NODE_ID: 14,
                const.ATTR_TARGET_NODE_ID: 24,
                const.ATTR_GROUP: 3,
                const.ATTR_INSTANCE: 5,
            },
        )
        self.hass.block_till_done()
        assert ZWaveGroup.called
        assert len(ZWaveGroup.mock_calls) == 2
        # Group constructed for group index 3 on node 14.
        assert ZWaveGroup.mock_calls[0][1][0] == 3
        assert ZWaveGroup.mock_calls[0][1][2] == 14
        assert group.add_association.called
        assert len(group.add_association.mock_calls) == 1
        assert group.add_association.mock_calls[0][1][0] == 24
        assert group.add_association.mock_calls[0][1][1] == 5
    def test_remove_association(self):
        """Test zwave change_association service."""
        ZWaveGroup = self.mock_openzwave.group.ZWaveGroup
        group = MagicMock()
        ZWaveGroup.return_value = group
        value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
        node = MockNode(node_id=14)
        node.values = {12: value}
        node.get_values.return_value = node.values
        self.zwave_network.nodes = {14: node}
        self.hass.services.call(
            "zwave",
            "change_association",
            {
                const.ATTR_ASSOCIATION: "remove",
                const.ATTR_NODE_ID: 14,
                const.ATTR_TARGET_NODE_ID: 24,
                const.ATTR_GROUP: 3,
                const.ATTR_INSTANCE: 5,
            },
        )
        self.hass.block_till_done()
        assert ZWaveGroup.called
        assert len(ZWaveGroup.mock_calls) == 2
        assert ZWaveGroup.mock_calls[0][1][0] == 3
        assert ZWaveGroup.mock_calls[0][1][2] == 14
        assert group.remove_association.called
        assert len(group.remove_association.mock_calls) == 1
        assert group.remove_association.mock_calls[0][1][0] == 24
        assert group.remove_association.mock_calls[0][1][1] == 5
    def test_refresh_entity(self):
        """Test zwave refresh_entity service."""
        node = MockNode()
        value = MockValue(
            data=False, node=node, command_class=const.COMMAND_CLASS_SENSOR_BINARY
        )
        power_value = MockValue(
            data=50, node=node, command_class=const.COMMAND_CLASS_METER
        )
        values = MockEntityValues(primary=value, power=power_value)
        device = get_device(node=node, values=values, node_config={})
        device.hass = self.hass
        device.entity_id = "binary_sensor.mock_entity_id"
        self.hass.add_job(device.async_added_to_hass())
        self.hass.block_till_done()
        self.hass.services.call(
            "zwave", "refresh_entity", {ATTR_ENTITY_ID: "binary_sensor.mock_entity_id"}
        )
        self.hass.block_till_done()
        # Both the primary and the power value must be refreshed (order is
        # not guaranteed, hence the sorted comparison).
        assert node.refresh_value.called
        assert len(node.refresh_value.mock_calls) == 2
        assert sorted(
            [
                node.refresh_value.mock_calls[0][1][0],
                node.refresh_value.mock_calls[1][1][0],
            ]
        ) == sorted([value.value_id, power_value.value_id])
    def test_refresh_node(self):
        """Test zwave refresh_node service."""
        node = MockNode(node_id=14)
        self.zwave_network.nodes = {14: node}
        self.hass.services.call("zwave", "refresh_node", {const.ATTR_NODE_ID: 14})
        self.hass.block_till_done()
        assert node.refresh_info.called
        assert len(node.refresh_info.mock_calls) == 1
    def test_set_node_value(self):
        """Test zwave set_node_value service."""
        value = MockValue(index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=4)
        node = MockNode(node_id=14, command_classes=[const.COMMAND_CLASS_INDICATOR])
        node.values = {12: value}
        node.get_values.return_value = node.values
        self.zwave_network.nodes = {14: node}
        self.hass.services.call(
            "zwave",
            "set_node_value",
            {
                const.ATTR_NODE_ID: 14,
                const.ATTR_VALUE_ID: 12,
                const.ATTR_CONFIG_VALUE: 2,
            },
        )
        self.hass.block_till_done()
        assert self.zwave_network.nodes[14].values[12].data == 2
    def test_refresh_node_value(self):
        """Test zwave refresh_node_value service."""
        node = MockNode(
            node_id=14,
            command_classes=[const.COMMAND_CLASS_INDICATOR],
            network=self.zwave_network,
        )
        value = MockValue(
            node=node, index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=2
        )
        value.refresh = MagicMock()
        node.values = {12: value}
        node.get_values.return_value = node.values
        self.zwave_network.nodes = {14: node}
        self.hass.services.call(
            "zwave",
            "refresh_node_value",
            {const.ATTR_NODE_ID: 14, const.ATTR_VALUE_ID: 12},
        )
        self.hass.block_till_done()
        assert value.refresh.called
    def test_heal_node(self):
        """Test zwave heal_node service."""
        node = MockNode(node_id=19)
        self.zwave_network.nodes = {19: node}
        self.hass.services.call("zwave", "heal_node", {const.ATTR_NODE_ID: 19})
        self.hass.block_till_done()
        assert node.heal.called
        assert len(node.heal.mock_calls) == 1
    def test_test_node(self):
        """Test the zwave test_node service."""
        node = MockNode(node_id=19)
        self.zwave_network.nodes = {19: node}
        self.hass.services.call("zwave", "test_node", {const.ATTR_NODE_ID: 19})
        self.hass.block_till_done()
        assert node.test.called
        assert len(node.test.mock_calls) == 1
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SpecialistPoolServiceGrpcTransport
from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
class SpecialistPoolServiceClientMeta(type):
    """Metaclass for the SpecialistPoolService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of supported transports keyed by their public label.
    # Insertion order matters: the first entry is the default transport.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[SpecialistPoolServiceTransport]]
    _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport

    def get_transport_class(
        cls, label: Optional[str] = None,
    ) -> Type[SpecialistPoolServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.

        Raises:
            KeyError: If ``label`` is given but does not name a registered
                transport.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta):
"""A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> SpecialistPoolServiceTransport:
"""Returns the transport used by the client instance.
Returns:
SpecialistPoolServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def specialist_pool_path(project: str, location: str, specialist_pool: str,) -> str:
"""Returns a fully-qualified specialist_pool string."""
return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(
project=project, location=location, specialist_pool=specialist_pool,
)
@staticmethod
def parse_specialist_pool_path(path: str) -> Dict[str, str]:
"""Parses a specialist_pool path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/specialistPools/(?P<specialist_pool>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, SpecialistPoolServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the specialist pool service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, SpecialistPoolServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict, default to an empty
        # options object when nothing is given.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Resolve the endpoint and mTLS client-certificate source from the
        # options and the GOOGLE_API_USE_* environment variables.
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )

        # An API key and explicit credentials are mutually exclusive.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, SpecialistPoolServiceTransport):
            # transport is a SpecialistPoolServiceTransport instance.
            # A pre-built transport already carries its own credentials and
            # scopes, so conflicting options are rejected here.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Deferred import: get_api_key_credentials is only present in
            # sufficiently recent versions of google-auth.
            import google.auth._default  # type: ignore

            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )

            # Look up the transport class by label (or use the default) and
            # construct it with the resolved endpoint/credentials/mTLS config.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def create_specialist_pool(
self,
request: Union[
specialist_pool_service.CreateSpecialistPoolRequest, dict
] = None,
*,
parent: str = None,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Creates a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
# Make the request
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
parent (str):
Required. The parent Project name for the new
SpecialistPool. The form is
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
Required. The SpecialistPool to
create.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for
managing the workers in this pool as well as
customers' data labeling jobs associated with this
pool. Customers create specialist pool as well as
start data labeling jobs on Cloud, managers and
workers handle the jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, specialist_pool])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.CreateSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest):
request = specialist_pool_service.CreateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if specialist_pool is not None:
request.specialist_pool = specialist_pool
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
def get_specialist_pool(
self,
request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetSpecialistPoolRequest(
name="name_value",
)
# Make the request
response = client.get_specialist_pool(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].
name (str):
Required. The name of the SpecialistPool resource. The
form is
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.SpecialistPool:
SpecialistPool represents customers'
own workforce to work on their data
labeling jobs. It includes a group of
specialist managers and workers.
Managers are responsible for managing
the workers in this pool as well as
customers' data labeling jobs associated
with this pool. Customers create
specialist pool as well as start data
labeling jobs on Cloud, managers and
workers handle the jobs using
CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.GetSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest):
request = specialist_pool_service.GetSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_specialist_pools(
self,
request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSpecialistPoolsPager:
r"""Lists SpecialistPools in a Location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_specialist_pools():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListSpecialistPoolsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
parent (str):
Required. The name of the SpecialistPool's parent
resource. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager:
Response message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.ListSpecialistPoolsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest):
request = specialist_pool_service.ListSpecialistPoolsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSpecialistPoolsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_specialist_pool(
self,
request: Union[
specialist_pool_service.DeleteSpecialistPoolRequest, dict
] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteSpecialistPoolRequest(
name="name_value",
)
# Make the request
operation = client.delete_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].
name (str):
Required. The resource name of the SpecialistPool to
delete. Format:
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.DeleteSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest):
request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def update_specialist_pool(
self,
request: Union[
specialist_pool_service.UpdateSpecialistPoolRequest, dict
] = None,
*,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Updates a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.UpdateSpecialistPoolRequest(
specialist_pool=specialist_pool,
)
# Make the request
operation = client.update_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].
specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
Required. The SpecialistPool which
replaces the resource on the server.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to
the resource.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for
managing the workers in this pool as well as
customers' data labeling jobs associated with this
pool. Customers create specialist pool as well as
start data labeling jobs on Cloud, managers and
workers handle the jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([specialist_pool, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.UpdateSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest):
request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if specialist_pool is not None:
request.specialist_pool = specialist_pool
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("specialist_pool.name", request.specialist_pool.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
    def __enter__(self):
        # Context-manager entry: the client itself is the managed value,
        # enabling `with SpecialistPoolServiceClient(...) as client:` usage.
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Exceptions are not suppressed: this method returns None (falsy),
        # so any error raised inside the `with` block propagates.
        self.transport.close()
# Resolve the installed google-cloud-aiplatform version so it can be
# reported in the x-goog-api-client metadata header; fall back to a
# blank ClientInfo when the distribution is not installed (e.g. when
# running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API of this module.
__all__ = ("SpecialistPoolServiceClient",)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Reference:
# WpcapSrc_4_1_3
#
# TODO:
# Remote capture
# AirPcap
#
from ctypes import *
from ctypes.util import find_library
import sys
# Platform detection: on Windows we bind against WinPcap/Npcap
# ("wpcap"), elsewhere against libpcap ("pcap").
WIN32 = sys.platform.startswith('win')


class PcapException(Exception):
    """Raised when the pcap shared library cannot be located."""
    # Fix: this exception was raised below but never defined anywhere,
    # so a missing library produced a confusing NameError instead.


if WIN32:
    MSDOS, WPCAP, HAVE_REMOTE = True, True, True
    _libpath = find_library('wpcap')
else:
    MSDOS, WPCAP, HAVE_REMOTE = False, False, False
    _libpath = find_library('pcap')
if not _libpath:
    raise PcapException("Can't find pcap library")
_pcap = CDLL(_libpath)

PCAP_VERSION_MAJOR = 2
PCAP_VERSION_MINOR = 4
PCAP_ERRBUF_SIZE = 256

# BPF integer typedefs differ between the Win32 and UN*X headers.
if WIN32:
    bpf_int32 = c_long
    bpf_u_int32 = c_ulong
else:
    bpf_int32 = c_int
    bpf_u_int32 = c_uint
# ctypes mirrors of the core BPF/pcap structures. Field names, types and
# order must match the C headers exactly (they define the ABI layout).
class bpf_insn(Structure):
    # struct bpf_insn: one BPF filter instruction.
    _fields_ = [("code", c_ushort),
                ("jt", c_ubyte),
                ("jf", c_ubyte),
                ("k", bpf_u_int32),
                ]
class bpf_program(Structure):
    pass
# Fields assigned after the class so bpf_insn is already defined.
bpf_program._fields_ = [('bf_len', c_uint),
                        ('bf_insns', POINTER(bpf_insn)),
                        ]
class timeval(Structure):
    # struct timeval as used in pcap packet headers.
    _fields_ = [('tv_sec', c_long),
                ('tv_usec', c_long),
                ]
class pcap_file_header(Structure):
    # On-disk savefile (pcap capture file) header.
    _fields_ = [('magic', bpf_u_int32),
                ('version_major', c_ushort),
                ('version_minor', c_ushort),
                ('thiszone', bpf_int32),
                ('sigfigs', bpf_u_int32),
                ('snaplen', bpf_u_int32),
                ('linktype', bpf_u_int32),
                ]
class pcap_pkthdr(Structure):
    # Per-packet header passed to capture callbacks.
    _fields_ = [('ts', timeval),
                ('caplen', bpf_u_int32),
                ('len', bpf_u_int32),
                ]
class pcap_stat(Structure):
    # Capture statistics returned by pcap_stats().
    _fields_ = [("ps_recv", c_uint),
                ("ps_drop", c_uint),
                ("ps_ifdrop", c_uint),
                ]
# The WinPcap remote-capture build appends extra counters to pcap_stat.
if HAVE_REMOTE:
    pcap_stat._fields_.extend([
        ("ps_capt", c_uint),
        ("ps_sent", c_uint),
        ("ps_netdrop", c_uint),
        ])
class pcap_stat_ex(Structure):
    # Extended per-interface statistics (pcap_stats_ex), mirroring the
    # Linux net_device_stats layout.
    _fields_ = [("rx_packets", c_ulong),        # total packets received
                ("tx_packets", c_ulong),        # total packets transmitted
                ("rx_bytes", c_ulong),          # total bytes received
                ("tx_bytes", c_ulong),          # total bytes transmitted
                ("rx_errors", c_ulong),         # bad packets received
                ("tx_errors", c_ulong),         # packet transmit problems
                ("rx_dropped", c_ulong),        # no space in Rx buffers
                ("tx_dropped", c_ulong),        # no space available for Tx
                ("multicast", c_ulong),         # multicast packets received
                ("collisions", c_ulong),
                ## detailed rx_errors
                ("rx_length_errors", c_ulong),
                ("rx_over_errors", c_ulong),    # receiver ring buff overflow
                ("rx_crc_errors", c_ulong),     # recv'd pkt with crc error
                ("rx_frame_errors", c_ulong),   # recv'd frame alignment error
                ("rx_fifo_errors", c_ulong),    # recv'r fifo overrun
                ("rx_missed_errors", c_ulong),  # recv'r missed packet
                ## detailed tx_errors
                ("tx_aborted_errors", c_ulong),
                ("tx_carrier_errors", c_ulong),
                ("tx_fifo_errors", c_ulong),
                ("tx_heartbeat_errors", c_ulong),
                ("tx_window_errors", c_ulong),
                ]
# Opaque handle types: libpcap never exposes their layout, so they are
# declared empty and always used behind POINTER().
class pcap(Structure):
    pass
class pcap_dumper(Structure):
    pass
class sockaddr(Structure):
    # Generic struct sockaddr for interface address lists.
    _fields_ = [("sa_family", c_ushort),
                ("sa_data", c_char*14),
                ]
class pcap_addr(Structure):
    pass
# Self-referential 'next' pointer requires post-class field assignment.
pcap_addr._fields_ = [('next', POINTER(pcap_addr)),
                      ('addr', POINTER(sockaddr)),
                      ('netmask', POINTER(sockaddr)),
                      ('broadaddr', POINTER(sockaddr)),
                      ('dstaddr', POINTER(sockaddr)),
                      ]
class pcap_if(Structure):
    pass
# Linked list of capture devices as returned by pcap_findalldevs().
pcap_if._fields_ = [('next', POINTER(pcap_if)),
                    ('name', c_char_p),
                    ('description', c_char_p),
                    ('addresses', POINTER(pcap_addr)),
                    ('flags', bpf_u_int32),
                    ]
# Callback signature for pcap_loop/pcap_dispatch:
# void handler(u_char *user, const struct pcap_pkthdr *h, const u_char *bytes)
pcap_handler = CFUNCTYPE(None, POINTER(c_ubyte), POINTER(pcap_pkthdr), POINTER(c_ubyte))
# C-style typedef aliases used throughout the prototypes below.
pcap_t = pcap
pcap_dumper_t = pcap_dumper
pcap_if_t = pcap_if
pcap_addr_t = pcap_addr
u_char = c_ubyte
FILE = c_void_p
## Error codes for the pcap API (negative values returned by pcap_* calls).
#generic error code
PCAP_ERROR = -1
#loop terminated by pcap_breakloop
PCAP_ERROR_BREAK = -2
#the capture needs to be activated
PCAP_ERROR_NOT_ACTIVATED = -3
#the operation can't be performed on already activated captures
PCAP_ERROR_ACTIVATED = -4
#no such device exists
PCAP_ERROR_NO_SUCH_DEVICE = -5
#this device doesn't support rfmon (monitor) mode
PCAP_ERROR_RFMON_NOTSUP = -6
#operation supported only in monitor mode
PCAP_ERROR_NOT_RFMON = -7
#no permission to open the device
PCAP_ERROR_PERM_DENIED = -8
#interface isn't up
PCAP_ERROR_IFACE_NOT_UP = -9
## Warning codes for the pcap API (positive, non-fatal).
#generic warning code
PCAP_WARNING = 1
#this device doesn't support promiscuous mode
PCAP_WARNING_PROMISC_NOTSUP = 2
# char *pcap_lookupdev(char *);
pcap_lookupdev = _pcap.pcap_lookupdev
pcap_lookupdev.restype = c_char_p
pcap_lookupdev.argtypes = [c_char_p]
# int pcap_lookupnet(const char *, bpf_u_int32 *, bpf_u_int32 *, char *);
pcap_lookupnet = _pcap.pcap_lookupnet
pcap_lookupnet.restype = c_int
pcap_lookupnet.argtypes = [c_char_p, POINTER(bpf_u_int32), POINTER(bpf_u_int32), c_char_p]
# pcap_t *pcap_create(const char *, char *);
pcap_create = _pcap.pcap_create
pcap_create.restype = POINTER(pcap_t)
pcap_create.argtypes = [c_char_p, c_char_p]
# int pcap_set_snaplen(pcap_t *, int);
pcap_set_snaplen = _pcap.pcap_set_snaplen
pcap_set_snaplen.restype = c_int
pcap_set_snaplen.argtypes = [POINTER(pcap_t), c_int]
# int pcap_set_promisc(pcap_t *, int);
pcap_set_promisc = _pcap.pcap_set_promisc
pcap_set_promisc.restype = c_int
pcap_set_promisc.argtypes = [POINTER(pcap_t), c_int]
# int pcap_can_set_rfmon(pcap_t *);
#pcap_can_set_rfmon = _pcap.pcap_can_set_rfmon
#pcap_can_set_rfmon.restype = c_int
#pcap_can_set_rfmon.argtypes = [POINTER(pcap_t)]
# int pcap_set_rfmon(pcap_t *, int);
#pcap_set_rfmon = _pcap.pcap_set_rfmon
#pcap_set_rfmon.restype = c_int
#pcap_set_rfmon.argtypes = [POINTER(pcap_t), c_int]
# int pcap_set_timeout(pcap_t *, int);
pcap_set_timeout = _pcap.pcap_set_timeout
pcap_set_timeout.restype = c_int
pcap_set_timeout.argtypes = [POINTER(pcap_t), c_int]
# int pcap_set_buffer_size(pcap_t *, int);
pcap_set_buffer_size = _pcap.pcap_set_buffer_size
pcap_set_buffer_size.restype = c_int
pcap_set_buffer_size.argtypes = [POINTER(pcap_t), c_int]
# int pcap_activate(pcap_t *);
pcap_activate = _pcap.pcap_activate
pcap_activate.restype = c_int
pcap_activate.argtypes = [POINTER(pcap_t)]
# pcap_t *pcap_open_live(const char *, int, int, int, char *);
pcap_open_live = _pcap.pcap_open_live
pcap_open_live.restype = POINTER(pcap_t)
pcap_open_live.argtypes = [c_char_p, c_int, c_int, c_int, c_char_p]
# pcap_t *pcap_open_dead(int, int);
pcap_open_dead = _pcap.pcap_open_dead
pcap_open_dead.restype = POINTER(pcap_t)
pcap_open_dead.argtypes = [c_int, c_int]
# pcap_t *pcap_open_offline(const char *, char *);
pcap_open_offline = _pcap.pcap_open_offline
pcap_open_offline.restype = POINTER(pcap_t)
pcap_open_offline.argtypes = [c_char_p, c_char_p]
"""
if WIN32:
# pcap_t *pcap_hopen_offline(intptr_t, char *);
if not LIBPCAP_EXPORTS:
#define pcap_fopen_offline(f,b) pcap_hopen_offline(_get_osfhandle(_fileno(f)), b)
pass
else:
#static pcap_t *pcap_fopen_offline(FILE *, char *);
pass
else:
# pcap_t *pcap_fopen_offline(FILE *, char *);
pass
"""
# void pcap_close(pcap_t *);
pcap_close = _pcap.pcap_close
pcap_close.restype = None
pcap_close.argtypes = [POINTER(pcap_t)]
# int pcap_loop(pcap_t *, int, pcap_handler, u_char *);
pcap_loop = _pcap.pcap_loop
pcap_loop.restype = c_int
pcap_loop.argtypes = [POINTER(pcap_t), c_int, pcap_handler, POINTER(u_char)]
# int pcap_dispatch(pcap_t *, int, pcap_handler, u_char *);
pcap_dispatch = _pcap.pcap_dispatch
pcap_dispatch.restype = c_int
pcap_dispatch.argtypes = [POINTER(pcap_t), c_int, pcap_handler, POINTER(u_char)]
# const u_char* pcap_next(pcap_t *, struct pcap_pkthdr *);
pcap_next = _pcap.pcap_next
pcap_next.restype = POINTER(u_char)
pcap_next.argtypes = [POINTER(pcap_t), POINTER(pcap_pkthdr)]
# int pcap_next_ex(pcap_t *, struct pcap_pkthdr **, const u_char **);
pcap_next_ex = _pcap.pcap_next_ex
pcap_next_ex.restype = c_int
pcap_next_ex.argtypes = [POINTER(pcap_t), POINTER(POINTER(pcap_pkthdr)), POINTER(POINTER(u_char))]
# void pcap_breakloop(pcap_t *);
# Fix: this was mistakenly bound to _pcap.pcap_next, so calling
# pcap_breakloop() actually invoked pcap_next() with a bogus signature
# instead of interrupting the capture loop.
pcap_breakloop = _pcap.pcap_breakloop
pcap_breakloop.restype = None
pcap_breakloop.argtypes = [POINTER(pcap_t)]
# ---- Statistics, filtering and blocking-mode prototypes.
# int pcap_stats(pcap_t *, struct pcap_stat *);
pcap_stats = _pcap.pcap_stats
pcap_stats.restype = c_int
pcap_stats.argtypes = [POINTER(pcap_t), POINTER(pcap_stat)]
# int pcap_setfilter(pcap_t *, struct bpf_program *);
pcap_setfilter = _pcap.pcap_setfilter
pcap_setfilter.restype = c_int
pcap_setfilter.argtypes = [POINTER(pcap_t), POINTER(bpf_program)]
# Disabled: pcap_direction_t is not declared in this module.
# int pcap_setdirection(pcap_t *, pcap_direction_t);
#pcap_setdirection = _pcap.pcap_setdirection
#pcap_setdirection.restype = c_int
#pcap_setdirection.argtypes = [POINTER(pcap_t), pcap_direction_t]
# int pcap_getnonblock(pcap_t *, char *);
pcap_getnonblock = _pcap.pcap_getnonblock
pcap_getnonblock.restype = c_int
pcap_getnonblock.argtypes = [POINTER(pcap_t), c_char_p]
# int pcap_setnonblock(pcap_t *, int, char *);
pcap_setnonblock = _pcap.pcap_setnonblock
pcap_setnonblock.restype = c_int
pcap_setnonblock.argtypes = [POINTER(pcap_t), c_int, c_char_p]
# int pcap_inject(pcap_t *, const void *, size_t);
# Fix: this first binding was labeled pcap_inject but assigned to the
# name pcap_sendpacket (immediately overwritten below), so pcap_inject
# was never exported. Guarded because some older WinPcap builds do not
# export pcap_inject at all.
try:
    pcap_inject = _pcap.pcap_inject
    pcap_inject.restype = c_int
    pcap_inject.argtypes = [POINTER(pcap_t), c_void_p, c_uint]
except AttributeError:
    pcap_inject = None  # not available in this pcap build
# int pcap_sendpacket(pcap_t *, const u_char *, int);
pcap_sendpacket = _pcap.pcap_sendpacket
pcap_sendpacket.restype = c_int
pcap_sendpacket.argtypes = [POINTER(pcap_t), POINTER(u_char), c_int]
# ---- Error-reporting and compilation prototypes.
# Disabled: pcap_statustostr is missing from older libpcap releases.
# const char *pcap_statustostr(int);
#pcap_statustostr = _pcap.pcap_statustostr
#pcap_statustostr.restype = c_char_p
#pcap_statustostr.argtypes = [c_int]
# const char *pcap_strerror(int);
pcap_strerror = _pcap.pcap_strerror
pcap_strerror.restype = c_char_p
pcap_strerror.argtypes = [c_int]
# char *pcap_geterr(pcap_t *);
pcap_geterr = _pcap.pcap_geterr
pcap_geterr.restype = c_char_p
pcap_geterr.argtypes = [POINTER(pcap_t)]
# void pcap_perror(pcap_t *, char *);
pcap_perror = _pcap.pcap_perror
pcap_perror.restype = None
pcap_perror.argtypes = [POINTER(pcap_t), c_char_p]
# int pcap_compile(pcap_t *, struct bpf_program *, const char *, int, bpf_u_int32);
pcap_compile = _pcap.pcap_compile
pcap_compile.restype = c_int
pcap_compile.argtypes = [POINTER(pcap_t), POINTER(bpf_program), c_char_p, c_int, bpf_u_int32]
# int pcap_compile_nopcap(int, int, struct bpf_program *, const char *, int, bpf_u_int32);
pcap_compile_nopcap = _pcap.pcap_compile_nopcap
pcap_compile_nopcap.restype = c_int
# Fix: the C function takes 6 arguments; the `int optimize` flag
# (5th parameter) was missing from argtypes.
pcap_compile_nopcap.argtypes = [c_int, c_int, POINTER(bpf_program), c_char_p, c_int, bpf_u_int32]
# void pcap_freecode(struct bpf_program *);
# Frees the instructions allocated by pcap_compile().
pcap_freecode = _pcap.pcap_freecode
pcap_freecode.restype = None
pcap_freecode.argtypes = [POINTER(bpf_program)]
# int pcap_offline_filter(struct bpf_program *, const struct pcap_pkthdr *, const u_char *);
pcap_offline_filter = _pcap.pcap_offline_filter
pcap_offline_filter.restype = c_int
# Fix: the packet-data parameter is `const u_char *`; it was declared as
# a bare u_char (single byte). The Win32 redefinition later in this file
# already used the correct POINTER(u_char) form.
pcap_offline_filter.argtypes = [POINTER(bpf_program), POINTER(pcap_pkthdr), POINTER(u_char)]
# ---- Data-link, savefile and device-enumeration prototypes.
# int pcap_datalink(pcap_t *);
pcap_datalink = _pcap.pcap_datalink
pcap_datalink.restype = c_int
pcap_datalink.argtypes = [POINTER(pcap_t)]
# Disabled: pcap_datalink_ext is not exported by all builds.
# int pcap_datalink_ext(pcap_t *);
#pcap_datalink_ext = _pcap.pcap_datalink_ext
#pcap_datalink_ext.restype = c_int
#pcap_datalink_ext.argtypes = [POINTER(pcap_t)]
# int pcap_list_datalinks(pcap_t *, int **);
pcap_list_datalinks = _pcap.pcap_list_datalinks
pcap_list_datalinks.restype = c_int
pcap_list_datalinks.argtypes = [POINTER(pcap_t), POINTER(POINTER(c_int))]
# int pcap_set_datalink(pcap_t *, int);
pcap_set_datalink = _pcap.pcap_set_datalink
pcap_set_datalink.restype = c_int
pcap_set_datalink.argtypes = [POINTER(pcap_t), c_int]
# void pcap_free_datalinks(int *);
pcap_free_datalinks = _pcap.pcap_free_datalinks
pcap_free_datalinks.restype = None
pcap_free_datalinks.argtypes = [POINTER(c_int)]
# int pcap_datalink_name_to_val(const char *);
pcap_datalink_name_to_val = _pcap.pcap_datalink_name_to_val
pcap_datalink_name_to_val.restype = c_int
pcap_datalink_name_to_val.argtypes = [c_char_p]
# const char *pcap_datalink_val_to_name(int);
pcap_datalink_val_to_name = _pcap.pcap_datalink_val_to_name
pcap_datalink_val_to_name.restype = c_char_p
pcap_datalink_val_to_name.argtypes = [c_int]
# const char *pcap_datalink_val_to_description(int);
pcap_datalink_val_to_description = _pcap.pcap_datalink_val_to_description
pcap_datalink_val_to_description.restype = c_char_p
pcap_datalink_val_to_description.argtypes = [c_int]
# int pcap_snapshot(pcap_t *);
pcap_snapshot = _pcap.pcap_snapshot
pcap_snapshot.restype = c_int
pcap_snapshot.argtypes = [POINTER(pcap_t)]
# int pcap_is_swapped(pcap_t *);
pcap_is_swapped = _pcap.pcap_is_swapped
pcap_is_swapped.restype = c_int
pcap_is_swapped.argtypes = [POINTER(pcap_t)]
# int pcap_major_version(pcap_t *);
pcap_major_version = _pcap.pcap_major_version
pcap_major_version.restype = c_int
pcap_major_version.argtypes = [POINTER(pcap_t)]
# int pcap_minor_version(pcap_t *);
pcap_minor_version = _pcap.pcap_minor_version
pcap_minor_version.restype = c_int
pcap_minor_version.argtypes = [POINTER(pcap_t)]
# /* XXX */
# FILE *pcap_file(pcap_t *);
pcap_file = _pcap.pcap_file
pcap_file.restype = FILE
pcap_file.argtypes = [POINTER(pcap_t)]
# int pcap_fileno(pcap_t *);
pcap_fileno = _pcap.pcap_fileno
pcap_fileno.restype = c_int
pcap_fileno.argtypes = [POINTER(pcap_t)]
# pcap_dumper_t *pcap_dump_open(pcap_t *, const char *);
pcap_dump_open = _pcap.pcap_dump_open
pcap_dump_open.restype = POINTER(pcap_dumper_t)
pcap_dump_open.argtypes = [POINTER(pcap_t), c_char_p]
# Disabled: pcap_dump_fopen is not exported on all platforms.
# pcap_dumper_t *pcap_dump_fopen(pcap_t *, FILE *fp);
#pcap_dump_fopen = _pcap.pcap_dump_fopen
#pcap_dump_fopen.restype = POINTER(pcap_dumper_t)
#pcap_dump_fopen.argtypes= [POINTER(pcap_t), POINTER(FILE)]
# FILE *pcap_dump_file(pcap_dumper_t *);
pcap_dump_file = _pcap.pcap_dump_file
pcap_dump_file.restype = FILE
pcap_dump_file.argtypes= [POINTER(pcap_dumper_t)]
# long pcap_dump_ftell(pcap_dumper_t *);
pcap_dump_ftell = _pcap.pcap_dump_ftell
pcap_dump_ftell.restype = c_long
pcap_dump_ftell.argtypes = [POINTER(pcap_dumper_t)]
# int pcap_dump_flush(pcap_dumper_t *);
pcap_dump_flush = _pcap.pcap_dump_flush
pcap_dump_flush.restype = c_int
pcap_dump_flush.argtypes = [POINTER(pcap_dumper_t)]
# void pcap_dump_close(pcap_dumper_t *);
pcap_dump_close = _pcap.pcap_dump_close
pcap_dump_close.restype = None
pcap_dump_close.argtypes = [POINTER(pcap_dumper_t)]
# void pcap_dump(u_char *, const struct pcap_pkthdr *, const u_char *);
# NOTE: the first C parameter is really the dumper handle cast to
# u_char*, hence POINTER(pcap_dumper_t) here.
pcap_dump = _pcap.pcap_dump
pcap_dump.restype = None
pcap_dump.argtypes = [POINTER(pcap_dumper_t), POINTER(pcap_pkthdr), POINTER(u_char)]
# int pcap_findalldevs(pcap_if_t **, char *);
pcap_findalldevs = _pcap.pcap_findalldevs
pcap_findalldevs.restype = c_int
pcap_findalldevs.argtypes = [POINTER(POINTER(pcap_if_t)), c_char_p]
# void pcap_freealldevs(pcap_if_t *);
pcap_freealldevs = _pcap.pcap_freealldevs
pcap_freealldevs.restype = None
pcap_freealldevs.argtypes = [POINTER(pcap_if_t)]
# const char *pcap_lib_version(void);
pcap_lib_version = _pcap.pcap_lib_version
pcap_lib_version.restype = c_char_p
pcap_lib_version.argtypes = []
# /* XXX this guy lives in the bpf tree */
# u_int bpf_filter(const struct bpf_insn *, const u_char *, u_int, u_int);
bpf_filter = _pcap.bpf_filter
bpf_filter.restype = c_uint
# Fix: the packet buffer is `const u_char *`; it was declared as a bare
# u_char (single byte), which would truncate the pointer argument.
bpf_filter.argtypes = [POINTER(bpf_insn), POINTER(u_char), c_uint, c_uint]
# ---- BPF helper prototypes (validation and disassembly).
# int bpf_validate(const struct bpf_insn *f, int len);
bpf_validate = _pcap.bpf_validate
bpf_validate.restype = c_int
bpf_validate.argtypes = [POINTER(bpf_insn), c_int]
# char *bpf_image(const struct bpf_insn *, int);
bpf_image = _pcap.bpf_image
bpf_image.restype = c_char_p
bpf_image.argtypes = [POINTER(bpf_insn), c_int]
# void bpf_dump(const struct bpf_program *, int);
bpf_dump = _pcap.bpf_dump
bpf_dump.restype = None
bpf_dump.argtypes = [POINTER(bpf_program), c_int]
if WIN32:
    """
    Win32 definitions
    """
    # WinPcap-only knobs for the kernel capture buffer and capture mode.
    # int pcap_setbuff(pcap_t *p, int dim);
    pcap_setbuff = _pcap.pcap_setbuff
    pcap_setbuff.restype = c_int
    pcap_setbuff.argtypes = [POINTER(pcap_t), c_int]
    # int pcap_setmode(pcap_t *p, int mode);
    pcap_setmode = _pcap.pcap_setmode
    pcap_setmode.restype = c_int
    pcap_setmode.argtypes = [POINTER(pcap_t), c_int]
# int pcap_setmintocopy(pcap_t *p, int size);
pcap_setmintocopy = _pcap.pcap_setmintocopy
pcap_setmintocopy.restype = c_int
pcap_setmintocopy.argtype = [POINTER(pcap_t), c_int]
    if WPCAP:
        # Include file with the wpcap-specific extensions
        #include <Win32-Extensions.h>
        class pcap_send_queue(Structure):
            # Packet queue for batched transmission (Win32-Extensions.h).
            _fields_ = [('maxlen', c_uint),
                        ('len', c_uint),
                        ('buffer', c_char_p),
                        ]
        # pcap_send_queue* pcap_sendqueue_alloc(u_int memsize);
        pcap_sendqueue_alloc = _pcap.pcap_sendqueue_alloc
        pcap_sendqueue_alloc.restype = POINTER(pcap_send_queue)
        pcap_sendqueue_alloc.argtypes = [c_uint]
        # void pcap_sendqueue_destroy(pcap_send_queue* queue);
        pcap_sendqueue_destroy = _pcap.pcap_sendqueue_destroy
        pcap_sendqueue_destroy.restype = None
        pcap_sendqueue_destroy.argtypes = [POINTER(pcap_send_queue)]
        # int pcap_sendqueue_queue(pcap_send_queue* queue, const struct pcap_pkthdr *pkt_header, const u_char *pkt_data);
        pcap_sendqueue_queue = _pcap.pcap_sendqueue_queue
        pcap_sendqueue_queue.restype = c_int
        pcap_sendqueue_queue.argtypes = [POINTER(pcap_send_queue), POINTER(pcap_pkthdr), POINTER(u_char)]
# u_int pcap_sendqueue_transmit(pcap_t *p, pcap_send_queue* queue, int sync);
pcap_sendqueue_transmit = _pcap.pcap_sendqueue_transmit
pcap_sendqueue_transmit.retype = c_uint
pcap_sendqueue_transmit.argtypes = [POINTER(pcap_t), POINTER(pcap_send_queue), c_int]
# HANDLE pcap_getevent(pcap_t *p);
HANDLE = c_void_p
pcap_getevent = _pcap.pcap_getevent
pcap_getevent.restype = HANDLE
pcap_getevent.argtypes = [POINTER(pcap_t)]
# struct pcap_stat *pcap_stats_ex(pcap_t *p, int *pcap_stat_size);
pcap_stats_ex = _pcap.pcap_stats_ex
pcap_stats_ex.restype = POINTER(pcap_stat)
pcap_stats_ex.argtypes = [POINTER(pcap_t), POINTER(c_int)]
# int pcap_setuserbuffer(pcap_t *p, int size);
pcap_setuserbuffer = _pcap.pcap_setuserbuffer
pcap_setuserbuffer.restype = c_int
pcap_setuserbuffer.argtypes = [POINTER(pcap_t), c_int]
# int pcap_live_dump(pcap_t *p, char *filename, int maxsize, int maxpacks);
pcap_live_dump = _pcap.pcap_live_dump
pcap_live_dump.restype = c_int
pcap_live_dump.argtypes = [POINTER(pcap_t), c_char_p, c_int, c_int]
# int pcap_live_dump_ended(pcap_t *p, int sync);
pcap_live_dump_ended = _pcap.pcap_live_dump_ended
pcap_live_dump_ended.restype = c_int
pcap_live_dump_ended.argtypes = [POINTER(pcap_t), c_int]
# int pcap_offline_filter(struct bpf_program *prog, const struct pcap_pkthdr *header, const u_char *pkt_data);
pcap_offline_filter = _pcap.pcap_offline_filter
pcap_offline_filter.restype = c_int
pcap_offline_filter.argtypes = [POINTER(bpf_program), POINTER(pcap_pkthdr), POINTER(u_char)]
# int pcap_start_oem(char* err_str, int flags);
#pcap_start_oem = _pcap.pcap_start_oem
#pcap_start_oem.restype = c_int
#pcap_start_oem.argtypes = [c_char_p, c_int]
# PAirpcapHandle pcap_get_airpcap_handle(pcap_t *p);
# TODO
MODE_CAPT = 0
MODE_STAT = 1
MODE_MON = 2
elif MSDOS:
"""
MSDOS definitions
"""
# Now, if WIN32 is True, always MSDOS is True.
pass
"""
# int pcap_stats_ex (pcap_t *, struct pcap_stat_ex *);
pcap_stats_ex = _pcap.pcap_stats_ex
pcap_stats_ex.restype = c_int
pcap_stats_ex.argtypes = [POINTER(POINTER(pcap_t)), POINTER(pcap_stat_ex)]
# void pcap_set_wait (pcap_t *p, void (*yield)(void), int wait);
pcap_set_wait = _pcap.pcap_set_wait
pcap_set_wait.restype = None
pcap_set_wait.argtypes = [POINTER(pcap_t), c_void_p, c_int]
# u_long pcap_mac_packets (void);
pcap_mac_packets = _pcap.pcap_mac_packets
pcap_mac_packets.restype = c_long
pcap_mac_packets.argtypes = []
"""
else:
"""
UN*X definitions
"""
# int pcap_get_selectable_fd(pcap_t *);
pcap_get_selectable_fd = _pcap.pcap_get_selectable_fd
pcap_get_selectable_fd.restype = c_int
pcap_get_selectable_fd.argtype = [POINTER(pcap_t)]
    #ifdef HAVE_REMOTE
    # /* Includes most of the public stuff that is needed for the remote capture */
    #include <remote-ext.h>
    # Constants from remote-ext.h: source-string handling and open flags.
    PCAP_BUF_SIZE = 1024
    PCAP_SRC_FILE = 2
    PCAP_SRC_IFLOCAL = 3
    PCAP_SRC_IFREMOTE = 4
    PCAP_SRC_FILE_STRING = "file://"
    PCAP_SRC_IF_STRING = "rpcap://"
    PCAP_OPENFLAG_PROMISCUOUS = 1
    PCAP_OPENFLAG_DATATX_UDP = 2
    PCAP_OPENFLAG_NOCAPTURE_RPCAP = 4
    PCAP_OPENFLAG_NOCAPTURE_LOCAL = 8
    PCAP_OPENFLAG_MAX_RESPONSIVENESS = 16
    PCAP_SAMP_NOSAMP = 0
    PCAP_SAMP_1_EVERY_N = 1
    PCAP_SAMP_FIRST_AFTER_N_MS = 2
    RPCAP_RMTAUTH_NULL = 0
    RPCAP_RMTAUTH_PWD = 1
    if HAVE_REMOTE:
        class pcap_rmtauth(Structure):
            # Remote-capture authentication (type + credentials).
            _fields_=[("type", c_int),
                      ("username", c_char_p),
                      ("password", c_char_p),
                      ]
        class pcap_samp(Structure):
            # Sampling policy returned by pcap_setsampling().
            _fields_=[("method", c_int),
                      ("value", c_char_p),
                      ]
        RPCAP_HOSTLIST_SIZE = 1024
    # NOTE(review): the bindings below reference pcap_rmtauth/pcap_samp,
    # which are only defined when HAVE_REMOTE is true, yet they sit
    # outside the `if HAVE_REMOTE:` guard — on a plain libpcap build this
    # section would fail at import. Verify the intended guard/indentation
    # against the upstream winpcapy source.
    """
    \name New WinPcap functions
    This section lists the new functions that are able to help considerably in writing
    WinPcap programs because of their easiness of use.
    """
    # pcap_t *pcap_open(const char *source, int snaplen, int flags, int read_timeout, struct pcap_rmtauth *auth, char *errbuf);
    pcap_open = _pcap.pcap_open
    pcap_open.restype = POINTER(pcap_t)
    pcap_open.argtypes = [c_char_p, c_int, c_int, c_int, POINTER(pcap_rmtauth), c_char_p]
    # int pcap_createsrcstr(char *source, int type, const char *host, const char *port, const char *name, char *errbuf);
    pcap_createsrcstr = _pcap.pcap_createsrcstr
    pcap_createsrcstr.restype = c_int
    pcap_createsrcstr.argtypes = [c_char_p, c_int, c_char_p, c_char_p, c_char_p, c_char_p]
    # int pcap_parsesrcstr(const char *source, int *type, char *host, char *port, char *name, char *errbuf);
    pcap_parsesrcstr = _pcap.pcap_parsesrcstr
    pcap_parsesrcstr.restype = c_int
    pcap_parsesrcstr.argtypes = [c_char_p, POINTER(c_int), c_char_p, c_char_p, c_char_p, c_char_p]
    # int pcap_findalldevs_ex(char *source, struct pcap_rmtauth *auth, pcap_if_t **alldevs, char *errbuf);
    pcap_findalldevs_ex = _pcap.pcap_findalldevs_ex
    pcap_findalldevs_ex.restype = c_int
    pcap_findalldevs_ex.argtypes = [c_char_p, POINTER(pcap_rmtauth), POINTER(POINTER(pcap_if_t)), c_char_p]
# struct pcap_samp *pcap_setsampling(pcap_t *p);
pcap_setsampling = _pcap.pcap_setsampling
pcap_setsampling.restype = pcap_samp
pcap_setsampling.argtypes = [POINTER(pcap_t)]
"""
\name Remote Capture functions
"""
SOCKET = c_int
# SOCKET pcap_remoteact_accept(const char *address, const char *port, const char *hostlist, char *connectinghost, struct pcap_rmtauth *auth, char *errbuf);
pcap_remoteact_accept = _pcap.pcap_remoteact_accept
pcap_remoteact_accept.restype = SOCKET
pcap_remoteact_accept.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, POINTER(pcap_rmtauth), c_char_p]
# int pcap_remoteact_list(char *hostlist, char sep, int size, char *errbuf);
pcap_remoteact_list = _pcap.pcap_remoteact_list
pcap_remoteact_list.restype = c_int
pcap_remoteact_list.argtypes = [c_char_p, c_char, c_int, c_char_p]
# int pcap_remoteact_close(const char *host, char *errbuf);
pcap_remoteact_close = _pcap.pcap_remoteact_close
pcap_remoteact_close.restype = c_int
pcap_remoteact_close.argtypes = [c_char_p, c_char_p]
# void pcap_remoteact_cleanup();
pcap_remoteact_cleanup = _pcap.pcap_remoteact_cleanup
pcap_remoteact_cleanup.restype = None
pcap_remoteact_cleanup.argtypes = None
|
|
# This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the mamager's header file,
# using the "scantools" package (customized for this particular manager).
#
# XXXX TO DO:
# - Implement correct missing FSSpec handling for Alias methods
# - Implement FInfo
import string
# Declarations that change for each manager
#MACHEADERFILE = 'Files.h'       # The Apple header file
MODNAME = '_File'                # The name of the module
LONGMODNAME = 'Carbon.File'      # The "normal" external name of the module
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'File'               # The prefix for module-wide routines
# Use the str method instead of string.lower(), which only exists on
# Python 2; the method form behaves identically on both 2 and 3.
INPUTFILE = MODPREFIX.lower() + 'gen.py'   # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c"          # The file generated by this program
from macsupport import *
# Various integers: bgen Type objects mapping C types to PyArg_Parse /
# Py_BuildValue format codes.
SInt64 = Type("SInt64", "L")
UInt64 = Type("UInt64", "L")
FNMessage = Type("FNMessage", "l")
FSAllocationFlags = Type("FSAllocationFlags", "H")
FSCatalogInfoBitmap = Type("FSCatalogInfoBitmap", "l")
FSIteratorFlags = Type("FSIteratorFlags", "l")
FSVolumeRefNum = Type("FSVolumeRefNum", "h")
AliasInfoType = Type("AliasInfoType", "h")
# Various types of strings:
#class UniCharCountBuffer(InputOnlyType):
#    pass
class VarReverseInputBufferType(ReverseInputBufferMixin, VarInputBufferType):
    # Variable-size input buffer whose (ptr, len) pair is passed in
    # reverse order; behavior comes entirely from the mixins.
    pass
FullPathName = VarReverseInputBufferType()
# Pascal-string parameters: all marshalled via the Str255 helpers.
ConstStr31Param = OpaqueArrayType("Str31", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStr32Param = OpaqueArrayType("Str32", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStr63Param = OpaqueArrayType("Str63", "PyMac_BuildStr255", "PyMac_GetStr255")
Str63 = OpaqueArrayType("Str63", "PyMac_BuildStr255", "PyMac_GetStr255")
HFSUniStr255 = OpaqueType("HFSUniStr255", "PyMac_BuildHFSUniStr255", "PyMac_GetHFSUniStr255")
UInt8_ptr = InputOnlyType("UInt8 *", "s")
# Other types:
class OptionalFSxxxType(OpaqueByValueType):
    """Opaque by-value type whose C argument may also be NULL (None)."""
    def declare(self, name):
        # Emit a stack buffer plus a pointer to it; the Get routine can
        # reset the pointer to NULL when Python passes None.
        Output("%s %s__buf__;", self.typeName, name)
        Output("%s *%s = &%s__buf__;", self.typeName, name, name)
class FSCatalogInfoAndBitmapType(InputOnlyType):
    """Bgen type for an (FSCatalogInfoBitmap, FSCatalogInfo) pair.

    Generates C code that parses a (bitmap, object) pair from Python,
    converts it into an FSCatalogInfo struct, and passes both to the
    Carbon call as "bitmap, &info".
    """
    def __init__(self):
        # Format strings are never used directly; marked "BUG" so any
        # accidental use is obvious in the generated C.
        InputOnlyType.__init__(self, "BUG", "BUG")
    def declare(self, name):
        Output("PyObject *%s__object = NULL;", name)
        Output("FSCatalogInfoBitmap %s__bitmap = 0;", name)
        Output("FSCatalogInfo %s;", name)
    def getargsFormat(self):
        return "lO"
    def getargsArgs(self, name):
        return "%s__bitmap, %s__object"%(name, name)
    def getargsCheck(self, name):
        Output("if (!convert_FSCatalogInfo(%s__object, %s__bitmap, &%s)) return NULL;", name, name, name)
    def passInput(self, name):
        return "%s__bitmap, &%s"% (name, name)
    def passOutput(self, name):
        return "%s__bitmap, &%s"% (name, name)
    def mkvalueFormat(self):
        return "O"
    def mkvalueArgs(self, name):
        return "%s__object" % (name)
    def xxxxmkvalueCheck(self, name):
        # Fix: the format string contains three %s placeholders, so
        # `name` must be supplied three times (it was passed only twice,
        # which would raise "not enough arguments" at generation time).
        Output("if ((%s__object = new_FSCatalogInfo(%s__bitmap, &%s)) == NULL) return NULL;", name, name, name)
class FSCatalogInfoAndBitmap_inType(FSCatalogInfoAndBitmapType, InputOnlyMixIn):
    # Input-only variant; the mkvalue conversion is intentionally a no-op.
    def xxxxmkvalueCheck(self, name):
        pass
class FSCatalogInfoAndBitmap_outType(FSCatalogInfoAndBitmapType):
    # Output variant: only the bitmap is parsed from Python ("l"); the
    # FSCatalogInfo struct itself is filled in by the Carbon call.
    def getargsFormat(self):
        return "l"
    def getargsArgs(self, name):
        return "%s__bitmap" % name
    def getargsCheck(self, name):
        pass
FInfo = OpaqueType("FInfo", "FInfo")
FInfo_ptr = OpaqueType("FInfo", "FInfo")
AliasHandle = OpaqueByValueType("AliasHandle", "Alias")
FSSpec = OpaqueType("FSSpec", "FSSpec")
FSSpec_ptr = OpaqueType("FSSpec", "FSSpec")
OptFSSpecPtr = OptionalFSxxxType("FSSpec", "BUG", "myPyMac_GetOptFSSpecPtr")
FSRef = OpaqueType("FSRef", "FSRef")
FSRef_ptr = OpaqueType("FSRef", "FSRef")
OptFSRefPtr = OptionalFSxxxType("FSRef", "BUG", "myPyMac_GetOptFSRefPtr")
FSCatalogInfo = OpaqueType("FSCatalogInfo", "FSCatalogInfo")
FSCatalogInfo_ptr = OpaqueType("FSCatalogInfo", "FSCatalogInfo")
# To be done:
#CatPositionRec
#FSCatalogInfo
#FSForkInfo
#FSIterator
#FSVolumeInfo
#FSSpecArrayPtr
# C prologue appended to the generated module: forward declarations, the
# UTCDateTime converters, optional-pointer helpers, and the FSSpec-to-path
# helper.  NOTE(review): "objsect" in the C comment below is a typo for
# "objects"; it lives inside the emitted C text so it is left untouched here.
includestuff = includestuff + """
#include <Carbon/Carbon.h>
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern int _PyMac_GetFSSpec(PyObject *v, FSSpec *spec);
extern int _PyMac_GetFSRef(PyObject *v, FSRef *fsr);
extern PyObject *_PyMac_BuildFSSpec(FSSpec *spec);
extern PyObject *_PyMac_BuildFSRef(FSRef *spec);
#define PyMac_GetFSSpec _PyMac_GetFSSpec
#define PyMac_GetFSRef _PyMac_GetFSRef
#define PyMac_BuildFSSpec _PyMac_BuildFSSpec
#define PyMac_BuildFSRef _PyMac_BuildFSRef
#else
extern int PyMac_GetFSSpec(PyObject *v, FSSpec *spec);
extern int PyMac_GetFSRef(PyObject *v, FSRef *fsr);
extern PyObject *PyMac_BuildFSSpec(FSSpec *spec);
extern PyObject *PyMac_BuildFSRef(FSRef *spec);
#endif
/* Forward declarations */
static PyObject *FInfo_New(FInfo *itself);
static PyObject *FSRef_New(FSRef *itself);
static PyObject *FSSpec_New(FSSpec *itself);
static PyObject *Alias_New(AliasHandle itself);
static int FInfo_Convert(PyObject *v, FInfo *p_itself);
#define FSRef_Convert PyMac_GetFSRef
#define FSSpec_Convert PyMac_GetFSSpec
static int Alias_Convert(PyObject *v, AliasHandle *p_itself);
/*
** UTCDateTime records
*/
static int
UTCDateTime_Convert(PyObject *v, UTCDateTime *ptr)
{
return PyArg_Parse(v, "(HlH)", &ptr->highSeconds, &ptr->lowSeconds, &ptr->fraction);
}
static PyObject *
UTCDateTime_New(UTCDateTime *ptr)
{
return Py_BuildValue("(HlH)", ptr->highSeconds, ptr->lowSeconds, ptr->fraction);
}
/*
** Optional fsspec and fsref pointers. None will pass NULL
*/
static int
myPyMac_GetOptFSSpecPtr(PyObject *v, FSSpec **spec)
{
if (v == Py_None) {
*spec = NULL;
return 1;
}
return PyMac_GetFSSpec(v, *spec);
}
static int
myPyMac_GetOptFSRefPtr(PyObject *v, FSRef **ref)
{
if (v == Py_None) {
*ref = NULL;
return 1;
}
return PyMac_GetFSRef(v, *ref);
}
/*
** Parse/generate objsect
*/
static PyObject *
PyMac_BuildHFSUniStr255(HFSUniStr255 *itself)
{
return Py_BuildValue("u#", itself->unicode, itself->length);
}
/*
** Get pathname for a given FSSpec
*/
static OSErr
_PyMac_GetFullPathname(FSSpec *fss, char *path, int len)
{
FSRef fsr;
OSErr err;
*path = '\0';
err = FSpMakeFSRef(fss, &fsr);
if (err == fnfErr) {
/* FSSpecs can point to non-existing files, fsrefs can't. */
FSSpec fss2;
int tocopy;
err = FSMakeFSSpec(fss->vRefNum, fss->parID, "", &fss2);
if (err)
return err;
err = FSpMakeFSRef(&fss2, &fsr);
if (err)
return err;
err = (OSErr)FSRefMakePath(&fsr, path, len-1);
if (err)
return err;
/* This part is not 100% safe: we append the filename part, but
** I'm not sure that we don't run afoul of the various 8bit
** encodings here. Will have to look this up at some point...
*/
strcat(path, "/");
tocopy = fss->name[0];
if ((strlen(path) + tocopy) >= len)
tocopy = len - strlen(path) - 1;
if (tocopy > 0)
strncat(path, fss->name+1, tocopy);
}
else {
if (err)
return err;
err = (OSErr)FSRefMakePath(&fsr, path, len);
if (err)
return err;
}
return 0;
}
"""
# C epilogue appended to the generated module: the public converters that
# accept FSSpec/FSRef objects, (vRefNum, parID, name) tuples, or pathnames,
# plus the Build* constructors exported through the toolbox-glue table.
finalstuff = finalstuff + """
int
PyMac_GetFSSpec(PyObject *v, FSSpec *spec)
{
Str255 path;
short refnum;
long parid;
OSErr err;
FSRef fsr;
if (FSSpec_Check(v)) {
*spec = ((FSSpecObject *)v)->ob_itself;
return 1;
}
if (PyArg_Parse(v, "(hlO&)",
&refnum, &parid, PyMac_GetStr255, &path)) {
err = FSMakeFSSpec(refnum, parid, path, spec);
if ( err && err != fnfErr ) {
PyMac_Error(err);
return 0;
}
return 1;
}
PyErr_Clear();
/* Otherwise we try to go via an FSRef. On OSX we go all the way,
** on OS9 we accept only a real FSRef object
*/
if ( PyMac_GetFSRef(v, &fsr) ) {
err = FSGetCatalogInfo(&fsr, kFSCatInfoNone, NULL, NULL, spec, NULL);
if (err != noErr) {
PyMac_Error(err);
return 0;
}
return 1;
}
return 0;
}
int
PyMac_GetFSRef(PyObject *v, FSRef *fsr)
{
OSStatus err;
FSSpec fss;
if (FSRef_Check(v)) {
*fsr = ((FSRefObject *)v)->ob_itself;
return 1;
}
/* On OSX we now try a pathname */
if ( PyString_Check(v) || PyUnicode_Check(v)) {
char *path = NULL;
if (!PyArg_Parse(v, "et", Py_FileSystemDefaultEncoding, &path))
return 0;
if ( (err=FSPathMakeRef(path, fsr, NULL)) )
PyMac_Error(err);
PyMem_Free(path);
return !err;
}
/* XXXX Should try unicode here too */
/* Otherwise we try to go via an FSSpec */
if (FSSpec_Check(v)) {
fss = ((FSSpecObject *)v)->ob_itself;
if ((err=FSpMakeFSRef(&fss, fsr)) == 0)
return 1;
PyMac_Error(err);
return 0;
}
PyErr_SetString(PyExc_TypeError, "FSRef, FSSpec or pathname required");
return 0;
}
extern PyObject *
PyMac_BuildFSSpec(FSSpec *spec)
{
return FSSpec_New(spec);
}
extern PyObject *
PyMac_BuildFSRef(FSRef *spec)
{
return FSRef_New(spec);
}
"""
# Module-init C code: register the new/convert functions in the
# toolbox-object glue tables so other extension modules can use them.
initstuff = initstuff + """
PyMac_INIT_TOOLBOX_OBJECT_NEW(FSSpec *, PyMac_BuildFSSpec);
PyMac_INIT_TOOLBOX_OBJECT_NEW(FSRef *, PyMac_BuildFSRef);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(FSSpec, PyMac_GetFSSpec);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(FSRef, PyMac_GetFSRef);
"""
# Pull in the per-module type-test definitions (e.g. "filetypetest.py").
execfile(string.lower(MODPREFIX) + 'typetest.py')
# Our object types:
class FSCatalogInfoDefinition(PEP253Mixin, ObjectDefinition):
    """Object definition for the FSCatalogInfo struct-wrapper type.

    Each getsetlist entry is (name, C getter body, C setter body, docstring);
    the ``PyArg_Parse(...)-1`` idiom maps Python's 1/0 success flag onto the
    0/-1 result code expected of C setter functions.
    """
    getsetlist = [
        ("nodeFlags",
        "return Py_BuildValue(\"H\", self->ob_itself.nodeFlags);",
        "return PyArg_Parse(v, \"H\", &self->ob_itself.nodeFlags)-1;",
        None
        ),
        ("volume",
        "return Py_BuildValue(\"h\", self->ob_itself.volume);",
        "return PyArg_Parse(v, \"h\", &self->ob_itself.volume)-1;",
        None
        ),
        ("parentDirID",
        "return Py_BuildValue(\"l\", self->ob_itself.parentDirID);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.parentDirID)-1;",
        None
        ),
        ("nodeID",
        "return Py_BuildValue(\"l\", self->ob_itself.nodeID);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.nodeID)-1;",
        None
        ),
        ("createDate",
        "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.createDate);",
        "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.createDate)-1;",
        None
        ),
        ("contentModDate",
        "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.contentModDate);",
        "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.contentModDate)-1;",
        None
        ),
        ("attributeModDate",
        "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.attributeModDate);",
        "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.attributeModDate)-1;",
        None
        ),
        ("accessDate",
        "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.accessDate);",
        "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.accessDate)-1;",
        None
        ),
        ("backupDate",
        "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.backupDate);",
        "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.backupDate)-1;",
        None
        ),
        ("permissions",
        "return Py_BuildValue(\"(llll)\", self->ob_itself.permissions[0], self->ob_itself.permissions[1], self->ob_itself.permissions[2], self->ob_itself.permissions[3]);",
        "return PyArg_Parse(v, \"(llll)\", &self->ob_itself.permissions[0], &self->ob_itself.permissions[1], &self->ob_itself.permissions[2], &self->ob_itself.permissions[3])-1;",
        None
        ),
        # XXXX FinderInfo TBD
        # XXXX FinderXInfo TBD
        ("valence",
        "return Py_BuildValue(\"l\", self->ob_itself.valence);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.valence)-1;",
        None
        ),
        ("dataLogicalSize",
        "return Py_BuildValue(\"l\", self->ob_itself.dataLogicalSize);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.dataLogicalSize)-1;",
        None
        ),
        ("dataPhysicalSize",
        "return Py_BuildValue(\"l\", self->ob_itself.dataPhysicalSize);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.dataPhysicalSize)-1;",
        None
        ),
        ("rsrcLogicalSize",
        "return Py_BuildValue(\"l\", self->ob_itself.rsrcLogicalSize);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.rsrcLogicalSize)-1;",
        None
        ),
        ("rsrcPhysicalSize",
        "return Py_BuildValue(\"l\", self->ob_itself.rsrcPhysicalSize);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.rsrcPhysicalSize)-1;",
        None
        ),
        ("sharingFlags",
        "return Py_BuildValue(\"l\", self->ob_itself.sharingFlags);",
        "return PyArg_Parse(v, \"l\", &self->ob_itself.sharingFlags)-1;",
        None
        ),
        ("userPrivileges",
        "return Py_BuildValue(\"b\", self->ob_itself.userPrivileges);",
        "return PyArg_Parse(v, \"b\", &self->ob_itself.userPrivileges)-1;",
        None
        ),
    ]
    # The same info, but in a different form: the format / argument / keyword
    # triple handed to PyArg_ParseTupleAndKeywords in the generated tp_init.
    INITFORMAT = "HhllO&O&O&O&O&llllllb"
    INITARGS = """&((FSCatalogInfoObject *)_self)->ob_itself.nodeFlags,
&((FSCatalogInfoObject *)_self)->ob_itself.volume,
&((FSCatalogInfoObject *)_self)->ob_itself.parentDirID,
&((FSCatalogInfoObject *)_self)->ob_itself.nodeID,
UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.createDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.contentModDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.attributeModDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.accessDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.backupDate,
&((FSCatalogInfoObject *)_self)->ob_itself.valence,
&((FSCatalogInfoObject *)_self)->ob_itself.dataLogicalSize,
&((FSCatalogInfoObject *)_self)->ob_itself.dataPhysicalSize,
&((FSCatalogInfoObject *)_self)->ob_itself.rsrcLogicalSize,
&((FSCatalogInfoObject *)_self)->ob_itself.rsrcPhysicalSize,
&((FSCatalogInfoObject *)_self)->ob_itself.sharingFlags,
&((FSCatalogInfoObject *)_self)->ob_itself.userPrivileges"""
    # Bug fix: "atributeModDate" was misspelled here, so the tp_init keyword
    # name did not match the "attributeModDate" attribute exposed above.
    INITNAMES = """
"nodeFlags",
"volume",
"parentDirID",
"nodeID",
"createDate",
"contentModDate",
"attributeModDate",
"accessDate",
"backupDate",
"valence",
"dataLogicalSize",
"dataPhysicalSize",
"rsrcLogicalSize",
"rsrcPhysicalSize",
"sharingFlags",
"userPrivileges"
"""

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        # Store FSCatalogInfo structs by value, but pass them by address.
        self.argref = "*"

    def outputCheckNewArg(self):
        # A NULL FSCatalogInfo* becomes None rather than an error.
        Output("if (itself == NULL) { Py_INCREF(Py_None); return Py_None; }")

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded struct.
        Output("PyObject *self;")
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: every field is an optional keyword argument.
        Output("static char *kw[] = {%s, 0};", self.INITNAMES)
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|%s\", kw, %s))",
               self.INITFORMAT, self.INITARGS)
        OutLbrace()
        Output("return -1;")
        OutRbrace()
        Output("return 0;")
class FInfoDefinition(PEP253Mixin, ObjectDefinition):
    """Object definition for the classic Finder-info (FInfo) wrapper type.

    getsetlist entries are (name, C getter body, C setter body, docstring);
    the "PyArg_Parse(...)-1" idiom maps Python's 1/0 success flag onto the
    0/-1 result code expected of C setters.
    """
    getsetlist = [
        ("Type",
        "return Py_BuildValue(\"O&\", PyMac_BuildOSType, self->ob_itself.fdType);",
        "return PyArg_Parse(v, \"O&\", PyMac_GetOSType, &self->ob_itself.fdType)-1;",
        "4-char file type"
        ),
        ("Creator",
        "return Py_BuildValue(\"O&\", PyMac_BuildOSType, self->ob_itself.fdCreator);",
        "return PyArg_Parse(v, \"O&\", PyMac_GetOSType, &self->ob_itself.fdCreator)-1;",
        "4-char file creator"
        ),
        ("Flags",
        "return Py_BuildValue(\"H\", self->ob_itself.fdFlags);",
        "return PyArg_Parse(v, \"H\", &self->ob_itself.fdFlags)-1;",
        "Finder flag bits"
        ),
        ("Location",
        "return Py_BuildValue(\"O&\", PyMac_BuildPoint, self->ob_itself.fdLocation);",
        "return PyArg_Parse(v, \"O&\", PyMac_GetPoint, &self->ob_itself.fdLocation)-1;",
        "(x, y) location of the file's icon in its parent finder window"
        ),
        ("Fldr",
        "return Py_BuildValue(\"h\", self->ob_itself.fdFldr);",
        "return PyArg_Parse(v, \"h\", &self->ob_itself.fdFldr)-1;",
        "Original folder, for 'put away'"
        ),
    ]

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        # Store FInfo structs by value, but pass them by address.
        self.argref = "*"

    def outputCheckNewArg(self):
        # A NULL FInfo* is treated as an error in the generated _New().
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: optionally copy an existing FInfo passed as "itself".
        Output("%s *itself = NULL;", self.itselftype)
        Output("static char *kw[] = {\"itself\", 0};")
        Output()
        Output("if (PyArg_ParseTupleAndKeywords(_args, _kwds, \"|O&\", kw, FInfo_Convert, &itself))")
        OutLbrace()
        Output("if (itself) memcpy(&((%s *)_self)->ob_itself, itself, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return 0;")
        OutRbrace()
        Output("return -1;")
class FSSpecDefinition(PEP253Mixin, ObjectDefinition):
    """Object definition for the FSSpec wrapper type.

    Instances can be constructed either from anything PyMac_GetFSSpec
    accepts ("itself") or from the raw struct bytes ("rawdata"), but not
    from both at once.
    """
    getsetlist = [
        ("data",
        "return PyString_FromStringAndSize((char *)&self->ob_itself, sizeof(self->ob_itself));",
        None,
        "Raw data of the FSSpec object"
        )
    ]

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        # Store FSSpecs by value, but pass them by address.
        self.argref = "*"

    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    # We do Convert ourselves (with PyMac_GetFSxxx)
    def outputConvert(self):
        pass

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: exactly one of "itself" or "rawdata" must be given.
        Output("PyObject *v = NULL;")
        Output("char *rawdata = NULL;")
        Output("int rawdatalen = 0;")
        Output("static char *kw[] = {\"itself\", \"rawdata\", 0};")
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|Os#\", kw, &v, &rawdata, &rawdatalen))")
        Output("return -1;")
        Output("if (v && rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"Only one of itself or rawdata may be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (!v && !rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"One of itself or rawdata must be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (rawdata)")
        OutLbrace()
        Output("if (rawdatalen != sizeof(%s))", self.itselftype)
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"%s rawdata incorrect size\");",
               self.itselftype)
        Output("return -1;")
        OutRbrace()
        Output("memcpy(&((%s *)_self)->ob_itself, rawdata, rawdatalen);", self.objecttype)
        Output("return 0;")
        OutRbrace()
        Output("if (PyMac_GetFSSpec(v, &((%s *)_self)->ob_itself)) return 0;", self.objecttype)
        Output("return -1;")

    def outputRepr(self):
        # repr shows the (vRefNum, parID, name) triple.
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[512];")
        Output("""PyOS_snprintf(buf, sizeof(buf), \"%%s((%%d, %%ld, '%%.*s'))\",
self->ob_type->tp_name,
self->ob_itself.vRefNum,
self->ob_itself.parID,
self->ob_itself.name[0], self->ob_itself.name+1);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class FSRefDefinition(PEP253Mixin, ObjectDefinition):
    """Object definition for the FSRef wrapper type.

    Mirrors FSSpecDefinition: construct from anything PyMac_GetFSRef
    accepts ("itself") or from the raw struct bytes ("rawdata").
    """
    getsetlist = [
        ("data",
        "return PyString_FromStringAndSize((char *)&self->ob_itself, sizeof(self->ob_itself));",
        None,
        "Raw data of the FSRef object"
        )
    ]

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        # Store FSRefs by value, but pass them by address.
        self.argref = "*"

    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    # We do Convert ourselves (with PyMac_GetFSxxx)
    def outputConvert(self):
        pass

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: exactly one of "itself" or "rawdata" must be given.
        Output("PyObject *v = NULL;")
        Output("char *rawdata = NULL;")
        Output("int rawdatalen = 0;")
        Output("static char *kw[] = {\"itself\", \"rawdata\", 0};")
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|Os#\", kw, &v, &rawdata, &rawdatalen))")
        Output("return -1;")
        Output("if (v && rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"Only one of itself or rawdata may be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (!v && !rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"One of itself or rawdata must be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (rawdata)")
        OutLbrace()
        Output("if (rawdatalen != sizeof(%s))", self.itselftype)
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"%s rawdata incorrect size\");",
               self.itselftype)
        Output("return -1;")
        OutRbrace()
        Output("memcpy(&((%s *)_self)->ob_itself, rawdata, rawdatalen);", self.objecttype)
        Output("return 0;")
        OutRbrace()
        Output("if (PyMac_GetFSRef(v, &((%s *)_self)->ob_itself)) return 0;", self.objecttype)
        Output("return -1;")
class AliasDefinition(PEP253Mixin, ObjectDefinition):
    """Object definition for the AliasHandle wrapper type.

    Unlike the struct types above, an Alias is a Mac Handle, so the object
    carries an optional ob_freeit destructor that is run on cleanup.
    """
    # XXXX Should inherit from resource?
    getsetlist = [
        ("data",
        """int size;
PyObject *rv;
size = GetHandleSize((Handle)self->ob_itself);
HLock((Handle)self->ob_itself);
rv = PyString_FromStringAndSize(*(Handle)self->ob_itself, size);
HUnlock((Handle)self->ob_itself);
return rv;
""",
        None,
        "Raw data of the alias object"
        )
    ]

    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    def outputStructMembers(self):
        # Extra member: per-instance destructor for the handle.
        ObjectDefinition.outputStructMembers(self)
        Output("void (*ob_freeit)(%s ptr);", self.itselftype)

    def outputInitStructMembers(self):
        ObjectDefinition.outputInitStructMembers(self)
        Output("it->ob_freeit = NULL;")

    def outputCleanupStructMembers(self):
        # Run the registered destructor (if any) and drop the handle.
        Output("if (self->ob_freeit && self->ob_itself)")
        OutLbrace()
        Output("self->ob_freeit(self->ob_itself);")
        OutRbrace()
        Output("self->ob_itself = NULL;")

    def output_tp_newBody(self):
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("((%s *)self)->ob_itself = NULL;", self.objecttype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: take an existing AliasHandle ("itself") or raw bytes
        # ("rawdata", copied into a fresh Handle) -- exactly one of the two.
        Output("%s itself = NULL;", self.itselftype)
        Output("char *rawdata = NULL;")
        Output("int rawdatalen = 0;")
        Output("Handle h;")
        Output("static char *kw[] = {\"itself\", \"rawdata\", 0};")
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|O&s#\", kw, %s_Convert, &itself, &rawdata, &rawdatalen))",
               self.prefix)
        Output("return -1;")
        Output("if (itself && rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"Only one of itself or rawdata may be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (!itself && !rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"One of itself or rawdata must be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (rawdata)")
        OutLbrace()
        Output("if ((h = NewHandle(rawdatalen)) == NULL)")
        OutLbrace()
        Output("PyErr_NoMemory();")
        Output("return -1;")
        OutRbrace()
        Output("HLock(h);")
        Output("memcpy((char *)*h, rawdata, rawdatalen);")
        Output("HUnlock(h);")
        Output("((%s *)_self)->ob_itself = (%s)h;", self.objecttype, self.itselftype)
        Output("return 0;")
        OutRbrace()
        Output("((%s *)_self)->ob_itself = itself;", self.objecttype)
        Output("return 0;")
# Alias methods come in two flavors: those with the alias as arg1 and
# those with the alias as arg 2.
class Arg2MethodGenerator(OSErrMethodGenerator):
    """Similar to MethodGenerator, but has self as second argument"""

    def parseArgumentList(self, args):
        # Pull the second argument out of the list: it becomes "self"
        # (_self->ob_itself), while the remaining args are parsed normally.
        args0, arg1, argsrest = args[:1], args[1], args[2:]
        t0, n0, m0 = arg1
        args = args0 + argsrest
        if m0 != InMode:
            raise ValueError, "method's 'self' must be 'InMode'"
        self.itself = Variable(t0, "_self->ob_itself", SelfMode)
        FunctionGenerator.parseArgumentList(self, args)
        # NOTE(review): index 2 presumably places ob_itself as the second
        # real argument after the return value in bgen's argumentList --
        # confirm against bgen's FunctionGenerator layout.
        self.argumentList.insert(2, self.itself)
# From here on it's basically all boiler plate...

# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff,
                   longname=LONGMODNAME)
fscataloginfoobject = FSCatalogInfoDefinition('FSCatalogInfo', 'FSCatalogInfo', 'FSCatalogInfo')
finfoobject = FInfoDefinition('FInfo', 'FInfo', 'FInfo')
aliasobject = AliasDefinition('Alias', 'Alias', 'AliasHandle')
fsspecobject = FSSpecDefinition('FSSpec', 'FSSpec', 'FSSpec')
fsrefobject = FSRefDefinition('FSRef', 'FSRef', 'FSRef')
module.addobject(fscataloginfoobject)
module.addobject(finfoobject)
module.addobject(aliasobject)
module.addobject(fsspecobject)
module.addobject(fsrefobject)

# Create the generator classes used to populate the lists
Function = OSErrFunctionGenerator
Method = OSErrMethodGenerator

# Create and populate the lists (INPUTFILE is the scanner's output)
functions = []
alias_methods = []
fsref_methods = []
fsspec_methods = []
execfile(INPUTFILE)

# Manual generators: hand-written C bodies for methods the scanner
# cannot produce automatically.
FSRefMakePath_body = """
OSStatus _err;
#define MAXPATHNAME 1024
UInt8 path[MAXPATHNAME];
UInt32 maxPathSize = MAXPATHNAME;
if (!PyArg_ParseTuple(_args, ""))
return NULL;
_err = FSRefMakePath(&_self->ob_itself,
path,
maxPathSize);
if (_err != noErr) return PyMac_Error(_err);
_res = Py_BuildValue("s", path);
return _res;
"""
f = ManualGenerator("FSRefMakePath", FSRefMakePath_body)
f.docstring = lambda: "() -> string"
fsref_methods.append(f)

FSRef_as_pathname_body = """
if (!PyArg_ParseTuple(_args, ""))
return NULL;
_res = FSRef_FSRefMakePath(_self, _args);
return _res;
"""
f = ManualGenerator("as_pathname", FSRef_as_pathname_body)
f.docstring = lambda: "() -> string"
fsref_methods.append(f)

FSSpec_as_pathname_body = """
char strbuf[1024];
OSErr err;
if (!PyArg_ParseTuple(_args, ""))
return NULL;
err = _PyMac_GetFullPathname(&_self->ob_itself, strbuf, sizeof(strbuf));
if ( err ) {
PyMac_Error(err);
return NULL;
}
_res = PyString_FromString(strbuf);
return _res;
"""
f = ManualGenerator("as_pathname", FSSpec_as_pathname_body)
f.docstring = lambda: "() -> string"
fsspec_methods.append(f)

FSSpec_as_tuple_body = """
if (!PyArg_ParseTuple(_args, ""))
return NULL;
_res = Py_BuildValue("(iis#)", _self->ob_itself.vRefNum, _self->ob_itself.parID,
&_self->ob_itself.name[1], _self->ob_itself.name[0]);
return _res;
"""
f = ManualGenerator("as_tuple", FSSpec_as_tuple_body)
f.docstring = lambda: "() -> (vRefNum, dirID, name)"
fsspec_methods.append(f)

pathname_body = """
PyObject *obj;
if (!PyArg_ParseTuple(_args, "O", &obj))
return NULL;
if (PyString_Check(obj)) {
Py_INCREF(obj);
return obj;
}
if (PyUnicode_Check(obj))
return PyUnicode_AsEncodedString(obj, "utf8", "strict");
_res = PyObject_CallMethod(obj, "as_pathname", NULL);
return _res;
"""
f = ManualGenerator("pathname", pathname_body)
f.docstring = lambda: "(str|unicode|FSSpec|FSref) -> pathname"
functions.append(f)

# add the populated lists to the generator groups
# (in a different world the scan program would generate this)
for f in functions: module.add(f)
for f in alias_methods: aliasobject.add(f)
for f in fsspec_methods: fsspecobject.add(f)
for f in fsref_methods: fsrefobject.add(f)

# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint."""
import random
import re
import StringIO
import boto
import boto.connection
from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
if hasattr(boto.connection, 'HTTPResponse'):
httplib = boto.connection
else:
import httplib
import fixtures
import webob
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import ec2utils
from nova import block_device
from nova import context
from nova import exception
from nova.openstack.common import versionutils
from nova import test
from nova.tests.unit import matchers
class FakeHttplibSocket(object):
    """A trivial fake socket for httplib.HTTPResponse to read from."""

    def __init__(self, response_string):
        # Keep the raw text around (getresponsebody() reads it) and wrap
        # it in a file-like buffer for the HTTPResponse machinery.
        self.response_string = response_string
        self._buffer = StringIO.StringIO(response_string)

    def makefile(self, _mode, _other):
        """Return the canned response as the socket's file object."""
        return self._buffer
class FakeHttplibConnection(object):
    """A fake httplib.HTTPConnection for boto to use

    requests made via this connection actually get translated and routed into
    our WSGI app, we then wait for the response and turn it back into
    the HTTPResponse that boto expects.
    """

    def __init__(self, app, host, is_secure=False):
        self.app = app
        self.host = host

    def request(self, method, path, data, headers):
        """Route one HTTP request through the in-process WSGI app."""
        fake_req = webob.Request.blank(path)
        fake_req.method = method
        fake_req.body = data
        fake_req.headers = headers
        fake_req.headers['Accept'] = 'text/html'
        fake_req.host = self.host
        # Call the WSGI app, get the HTTP response.
        # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
        # guess that's a function the web server usually provides.
        raw_resp = "HTTP/1.0 %s" % str(fake_req.get_response(self.app))
        self.sock = FakeHttplibSocket(raw_resp)
        self.http_response = httplib.HTTPResponse(self.sock)
        # NOTE(vish): boto is accessing private variables for some reason
        self._HTTPConnection__response = self.http_response
        self.http_response.begin()

    def getresponse(self):
        return self.http_response

    def getresponsebody(self):
        return self.sock.response_string

    def close(self):
        """Required for compatibility with boto/tornado."""
        pass
class XmlConversionTestCase(test.NoDBTestCase):
    """Unit test api xml conversion."""

    def test_number_conversion(self):
        conv = ec2utils._try_convert
        self.assertIsNone(conv('None'))
        # (input literal, expected converted value) pairs, checked in the
        # same order as before.
        cases = [
            ('True', True),
            ('TRUE', True),
            ('true', True),
            ('False', False),
            ('FALSE', False),
            ('false', False),
            ('0', 0),
            ('42', 42),
            ('3.14', 3.14),
            ('-57.12', -57.12),
            ('0x57', 0x57),
            ('-0x57', -0x57),
            ('-', '-'),
            ('-0', 0),
            ('0.0', 0.0),
            ('1e-8', 0.0),
            ('-1e-8', 0.0),
            ('0xDD8G', '0xDD8G'),
            ('0XDD8G', '0XDD8G'),
            ('-stringy', '-stringy'),
            ('stringy', 'stringy'),
            ('add', 'add'),
            ('remove', 'remove'),
            ('', ''),
        ]
        for text, expected in cases:
            self.assertEqual(conv(text), expected)
class Ec2utilsTestCase(test.NoDBTestCase):
    """Unit tests for the ec2utils / block_device helper functions."""

    def test_ec2_id_to_id(self):
        self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
        self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
        self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
        self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)

    def test_bad_ec2_id(self):
        self.assertRaises(exception.InvalidEc2Id,
                          ec2utils.ec2_id_to_id,
                          'badone')

    def test_id_to_ec2_id(self):
        self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
        self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
        self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
        self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')

    def test_dict_from_dotted_str(self):
        in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
                  ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
                  ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
                  ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
                  ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
                  ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
        expected_dict = {
            'block_device_mapping': {
                '1': {'device_name': '/dev/sda1',
                      'ebs': {'snapshot_id': 'snap-0000001c',
                              'volume_size': 80,
                              'delete_on_termination': False}},
                '2': {'device_name': '/dev/sdc',
                      'virtual_name': 'ephemeral0'}}}
        out_dict = ec2utils.dict_from_dotted_str(in_str)
        self.assertThat(out_dict, matchers.DictMatches(expected_dict))

    # NOTE(review): "defice" is a typo for "device"; the method name is kept
    # unchanged so test-selection by name keeps working.
    def test_properties_root_defice_name(self):
        mappings = [{"device": "/dev/sda1", "virtual": "root"}]
        properties0 = {'mappings': mappings}
        properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
        # Without an explicit root_device_name, the "root" mapping wins.
        root_device_name = block_device.properties_root_device_name(
            properties0)
        self.assertEqual(root_device_name, '/dev/sda1')
        # An explicit root_device_name overrides the mappings.
        root_device_name = block_device.properties_root_device_name(
            properties1)
        self.assertEqual(root_device_name, '/dev/sdb')

    def test_regex_from_ec2_regex(self):
        def _test_re(ec2_regex, expected, literal, match=True):
            regex = ec2utils.regex_from_ec2_regex(ec2_regex)
            self.assertEqual(regex, expected)
            if match:
                self.assertIsNotNone(re.match(regex, literal))
            else:
                self.assertIsNone(re.match(regex, literal))
        # wildcards
        _test_re('foo', '\Afoo\Z(?s)', 'foo')
        _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
        # Bug fix: the negative case used the same 'foo bar' literal as the
        # positive case above, so it asserted both match and no-match on one
        # input.  EC2 '?' maps to '.' (exactly one char), so two characters
        # between foo and bar must NOT match.
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo  bar', match=False)
        _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
        _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
        # backslashes and escaped wildcards
        _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
        _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
        _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
        _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
        _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
        # analog to the example given in the EC2 API docs
        ec2_regex = '\*nova\?\\end'
        expected = r'\A[*]nova[?]\\end\Z(?s)'
        literal = r'*nova?\end'
        _test_re(ec2_regex, expected, literal)

    def test_mapping_prepend_dev(self):
        mappings = [
            {'virtual': 'ami',
             'device': 'sda1'},
            {'virtual': 'root',
             'device': '/dev/sda1'},
            {'virtual': 'swap',
             'device': 'sdb1'},
            {'virtual': 'swap',
             'device': '/dev/sdb2'},
            {'virtual': 'ephemeral0',
             'device': 'sdc1'},
            {'virtual': 'ephemeral1',
             'device': '/dev/sdc1'}]
        expected_result = [
            {'virtual': 'ami',
             'device': 'sda1'},
            {'virtual': 'root',
             'device': '/dev/sda1'},
            {'virtual': 'swap',
             'device': '/dev/sdb1'},
            {'virtual': 'swap',
             'device': '/dev/sdb2'},
            {'virtual': 'ephemeral0',
             'device': '/dev/sdc1'},
            {'virtual': 'ephemeral1',
             'device': '/dev/sdc1'}]
        self.assertThat(block_device.mappings_prepend_dev(mappings),
                        matchers.DictListMatches(expected_result))
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API."""
def setUp(self):
    """Build the EC2 WSGI pipeline around a fake request context."""
    super(ApiEc2TestCase, self).setUp()
    self.host = '127.0.0.1'
    # NOTE(vish): skipping the Authorizer
    roles = ['sysadmin', 'netadmin']
    ctxt = context.RequestContext('fake', 'fake', roles=roles)
    self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
        ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
        ), 'nova.api.ec2.cloud.CloudController'))))
    # Silence boto's logger for the duration of each test.
    self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
    """Returns a new EC2 connection.

    Stubs out boto's new_http_connection (via mox) so the request is routed
    into the in-process WSGI app instead of the network; the stub's expected
    call signature depends on the installed boto version.
    """
    self.ec2 = boto.connect_ec2(
        aws_access_key_id='fake',
        aws_secret_access_key='fake',
        is_secure=False,
        region=regioninfo.RegionInfo(None, 'test', self.host),
        port=8773,
        path='/services/Cloud')
    if api_version:
        self.ec2.APIVersion = api_version
    self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
    self.http = FakeHttplibConnection(
        self.app, '%s:8773' % (self.host), False)
    # boto >= 2.14 passes (host, port, is_secure); boto 2.x passes
    # ("host:port", is_secure); older boto passes (host, is_secure).
    if versionutils.is_compatible('2.14', boto.Version, same_major=False):
        self.ec2.new_http_connection(host or self.host, 8773,
            is_secure).AndReturn(self.http)
    elif versionutils.is_compatible('2', boto.Version, same_major=False):
        self.ec2.new_http_connection(host or '%s:8773' % (self.host),
            is_secure).AndReturn(self.http)
    else:
        self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
    return self.http
def test_xmlns_version_matches_request_version(self):
    """The response xmlns must echo the API version of the request."""
    self.expect_http(api_version='2010-10-30')
    self.mox.ReplayAll()
    # Any request should be fine
    self.ec2.get_all_instances()
    self.assertIn(self.ec2.APIVersion, self.http.getresponsebody(),
                  'The version in the xmlns of the response does '
                  'not match the API version given in the request.')
def test_describe_instances(self):
    """Test that, after creating a user and a project, the describe
    instances call to the API works properly.
    """
    self.expect_http()
    self.mox.ReplayAll()
    # No instances exist yet, so the listing is empty.
    self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
    # Attempt to terminate an invalid instance.
    self.expect_http()
    self.mox.ReplayAll()
    # Terminating an unknown instance id must surface an EC2 error.
    self.assertRaises(boto_exc.EC2ResponseError,
                      self.ec2.terminate_instances, "i-00000005")
def test_get_all_key_pairs(self):
    """Test that, after creating a user and project and generating
    a key pair, that the API call to list key pairs works properly.
    """
    # Random name to avoid collisions across test runs.
    keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                      for x in range(random.randint(4, 8)))
    self.expect_http()
    self.mox.ReplayAll()
    self.ec2.create_key_pair(keyname)
    rv = self.ec2.get_all_key_pairs()
    results = [k for k in rv if k.name == keyname]
    self.assertEqual(len(results), 1)
def test_create_duplicate_key_pair(self):
    """Test that, after successfully generating a keypair,
    requesting a second keypair with the same name fails sanely.
    """
    self.expect_http()
    self.mox.ReplayAll()
    self.ec2.create_key_pair('test')
    try:
        self.ec2.create_key_pair('test')
    except boto_exc.EC2ResponseError as e:
        # The duplicate must fail with exactly InvalidKeyPair.Duplicate;
        # the assertEqual below reports any other error code.
        if e.code == 'InvalidKeyPair.Duplicate':
            pass
        else:
            self.assertEqual('InvalidKeyPair.Duplicate', e.code)
    else:
        self.fail('Exception not raised.')
def test_get_all_security_groups(self):
# Test that we can retrieve security groups.
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
    def test_create_delete_security_group(self):
        # Test that we can create a security group.
        self.expect_http()
        self.mox.ReplayAll()
        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))
        self.ec2.create_security_group(security_group_name, 'test group')
        # Every API round trip needs a fresh HTTP expectation + replay.
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        # The new group plus the project's implicit 'default' group.
        self.assertEqual(len(rv), 2)
        self.assertIn(security_group_name, [group.name for group in rv])
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
    def test_group_name_valid_chars_security_group(self):
        """Test that we sanely handle invalid security group names.
        EC2 API Spec states we should only accept alphanumeric characters,
        spaces, dashes, and underscores. Amazon implementation
        accepts more characters - so, [:print:] is ok.
        """
        bad_strict_ec2 = "aa \t\x01\x02\x7f"
        bad_amazon_ec2 = "aa #^% -=99"
        # (strict_flag, name, description) triples that must be rejected.
        test_raise = [
            (True, bad_amazon_ec2, "test desc"),
            (True, "test name", bad_amazon_ec2),
            (False, bad_strict_ec2, "test desc"),
        ]
        for t in test_raise:
            self.expect_http()
            self.mox.ReplayAll()
            # Toggle strict validation per case before issuing the request.
            self.flags(ec2_strict_validation=t[0])
            self.assertRaises(boto_exc.EC2ResponseError,
                              self.ec2.create_security_group,
                              t[1],
                              t[2])
        # With strict validation off, the Amazon-style strings are accepted.
        test_accept = [
            (False, bad_amazon_ec2, "test desc"),
            (False, "test name", bad_amazon_ec2),
        ]
        for t in test_accept:
            self.expect_http()
            self.mox.ReplayAll()
            self.flags(ec2_strict_validation=t[0])
            self.ec2.create_security_group(t[1], t[2])
            # Clean up so later cases see a pristine group list.
            self.expect_http()
            self.mox.ReplayAll()
            self.ec2.delete_security_group(t[1])
def test_group_name_valid_length_security_group(self):
"""Test that we sanely handle invalid security group names.
API Spec states that the length should not exceed 255 char.
"""
self.expect_http()
self.mox.ReplayAll()
# Test block group_name > 255 chars
security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
for x in range(random.randint(256, 266)))
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
    def test_authorize_revoke_security_group_cidr(self):
        """Test that we can add and remove CIDR based rules
        to a security group
        """
        self.expect_http()
        self.mox.ReplayAll()
        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))
        group = self.ec2.create_security_group(security_group_name,
                                               'test group')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        # Eight valid rules; the icmp (type, code) pairs exercise the
        # boundary values accepted by the API.
        group.authorize('tcp', 80, 81, '0.0.0.0/0')
        group.authorize('icmp', -1, -1, '0.0.0.0/0')
        group.authorize('udp', 80, 81, '0.0.0.0/0')
        group.authorize('tcp', 1, 65535, '0.0.0.0/0')
        group.authorize('udp', 1, 65535, '0.0.0.0/0')
        group.authorize('icmp', 1, 0, '0.0.0.0/0')
        group.authorize('icmp', 0, 1, '0.0.0.0/0')
        group.authorize('icmp', 0, 0, '0.0.0.0/0')

        def _assert(message, *args):
            # Helper: authorizing with `args` must fail with HTTP 400 and
            # an error message containing `message`.
            try:
                group.authorize(*args)
            except boto_exc.EC2ResponseError as e:
                self.assertEqual(e.status, 400, 'Expected status to be 400')
                self.assertIn(message, e.error_message)
            else:
                raise self.failureException('EC2ResponseError not raised')
        # Invalid CIDR address
        _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
        # Missing ports
        _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
        # from port cannot be greater than to port
        _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
        # For tcp, negative values are not allowed
        _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
        # For tcp, valid port range 1-65535
        _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
        # Invalid Cidr for ICMP type
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
        # Invalid protocol
        _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
        # Invalid port
        _assert('Invalid input received: To and From ports must be integers',
                'tcp', " ", "81", '0.0.0.0/0')
        # Invalid icmp port
        _assert('Invalid input received: '
                'Type and Code must be integers for ICMP protocol type',
                'icmp', " ", "81", '0.0.0.0/0')
        # Invalid CIDR Address
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
        # Invalid CIDR Address
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
        # Invalid Cidr ports
        _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        # Only the eight valid authorizations above should have stuck.
        group = [grp for grp in rv if grp.name == security_group_name][0]
        self.assertEqual(len(group.rules), 8)
        self.assertEqual(int(group.rules[0].from_port), 80)
        self.assertEqual(int(group.rules[0].to_port), 81)
        self.assertEqual(len(group.rules[0].grants), 1)
        self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        # Revoke every rule added above, then delete the group.
        group.revoke('tcp', 80, 81, '0.0.0.0/0')
        group.revoke('icmp', -1, -1, '0.0.0.0/0')
        group.revoke('udp', 80, 81, '0.0.0.0/0')
        group.revoke('tcp', 1, 65535, '0.0.0.0/0')
        group.revoke('udp', 1, 65535, '0.0.0.0/0')
        group.revoke('icmp', 1, 0, '0.0.0.0/0')
        group.revoke('icmp', 0, 1, '0.0.0.0/0')
        group.revoke('icmp', 0, 0, '0.0.0.0/0')
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        rv = self.ec2.get_all_security_groups()
        # Only the implicit 'default' group remains after deletion.
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')
    def test_authorize_revoke_security_group_cidr_v6(self):
        """Test that we can add and remove CIDR based rules
        to a security group for IPv6
        """
        self.expect_http()
        self.mox.ReplayAll()
        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))
        group = self.ec2.create_security_group(security_group_name,
                                               'test group')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        # '::/0' is the IPv6 analogue of 0.0.0.0/0 (all addresses).
        group.authorize('tcp', 80, 81, '::/0')
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        group = [grp for grp in rv if grp.name == security_group_name][0]
        self.assertEqual(len(group.rules), 1)
        self.assertEqual(int(group.rules[0].from_port), 80)
        self.assertEqual(int(group.rules[0].to_port), 81)
        self.assertEqual(len(group.rules[0].grants), 1)
        self.assertEqual(str(group.rules[0].grants[0]), '::/0')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        group.revoke('tcp', 80, 81, '::/0')
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        rv = self.ec2.get_all_security_groups()
        # Only the implicit 'default' group remains after deletion.
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')
    def test_authorize_revoke_security_group_foreign_group(self):
        """Test that we can grant and revoke another security group access
        to a security group
        """
        self.expect_http()
        self.mox.ReplayAll()
        rand_string = 'sdiuisudfsdcnpaqwertasd'
        security_group_name = "".join(random.choice(rand_string)
                                      for x in range(random.randint(4, 8)))
        other_security_group_name = "".join(random.choice(rand_string)
                                            for x in range(random.randint(4, 8)))
        group = self.ec2.create_security_group(security_group_name,
                                               'test group')
        self.expect_http()
        self.mox.ReplayAll()
        other_group = self.ec2.create_security_group(other_security_group_name,
                                                     'some other group')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        # Grant the other group (rather than a CIDR range) access.
        group.authorize(src_group=other_group)
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        # I don't bother checking that we actually find it here,
        # because the create/delete unit test further up should
        # be good enough for that.
        # NOTE(review): this loop rebinds `group`, shadowing the group
        # created above.
        for group in rv:
            if group.name == security_group_name:
                # A group grant expands to three rules here.
                self.assertEqual(len(group.rules), 3)
                self.assertEqual(len(group.rules[0].grants), 1)
                self.assertEqual(str(group.rules[0].grants[0]),
                                 '%s-%s' % (other_security_group_name, 'fake'))
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        for group in rv:
            if group.name == security_group_name:
                self.expect_http()
                self.mox.ReplayAll()
                group.connection = self.ec2
                group.revoke(src_group=other_group)
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
        self.ec2.delete_security_group(other_security_group_name)
|
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities used by SPIRAL agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import sonnet as snt
import tensorflow as tf
import tensorflow_hub as hub
# Short alias for TF's nested-structure utilities (map/flatten helpers).
nest = tf.contrib.framework.nest

# Result of one agent step: the chosen action, the policy logits it came
# from, and the baseline output.
AgentOutput = collections.namedtuple(
    "AgentOutput", ["action", "policy_logits", "baseline"])

# Recurrent state threaded between agent steps: the LSTM state plus the
# action taken at the previous step.
AgentState = collections.namedtuple(
    "AgentState", ["lstm_state", "prev_action"])
class ResidualStack(snt.AbstractModule):
  """A stack of ResNet V2 (pre-activation) blocks."""

  def __init__(self,
               num_hiddens,
               num_residual_layers,
               num_residual_hiddens,
               filter_size=3,
               initializers=None,
               data_format="NHWC",
               activation=tf.nn.relu,
               name="residual_stack"):
    """Instantiate a ResidualStack.

    Args:
      num_hiddens: Output channels of each block.
      num_residual_layers: How many residual blocks to stack.
      num_residual_hiddens: Channels of the inner NxN convolution.
      filter_size: Spatial size of the inner convolution kernel.
      initializers: Optional initializers passed to the convolutions.
      data_format: Either "NHWC" or "NCHW".
      activation: Nonlinearity applied before each convolution.
      name: Sonnet module name.
    """
    super(ResidualStack, self).__init__(name=name)
    self._num_hiddens = num_hiddens
    self._num_residual_layers = num_residual_layers
    self._num_residual_hiddens = num_residual_hiddens
    self._filter_size = filter_size
    self._initializers = initializers
    self._data_format = data_format
    self._activation = activation

  def _build(self, h):
    """Apply the residual blocks to `h` and return the activated result."""
    act = self._activation
    for layer_idx in range(self._num_residual_layers):
      # Pre-activation branch: act -> NxN conv -> act -> 1x1 conv.
      branch = act(h)
      branch = snt.Conv2D(
          output_channels=self._num_residual_hiddens,
          kernel_shape=(self._filter_size, self._filter_size),
          stride=(1, 1),
          initializers=self._initializers,
          data_format=self._data_format,
          name="res_nxn_%d" % layer_idx)(branch)
      branch = act(branch)
      branch = snt.Conv2D(
          output_channels=self._num_hiddens,
          kernel_shape=(1, 1),
          stride=(1, 1),
          initializers=self._initializers,
          data_format=self._data_format,
          name="res_1x1_%d" % layer_idx)(branch)
      h += branch
    return act(h)
class ConvEncoder(snt.AbstractModule):
  """Convolutional encoder.

  Downsamples its input by `factor_h` x `factor_w` using stride-2
  convolutions, then applies a residual stack.
  """

  def __init__(self,
               factor_h,
               factor_w,
               num_hiddens,
               num_residual_layers,
               num_residual_hiddens,
               initializers=None,
               data_format="NHWC",
               name="conv_encoder"):
    """Instantiate a ConvEncoder.

    Args:
      factor_h: Total height downsampling factor; must be a power of 2.
      factor_w: Total width downsampling factor; must be a power of 2.
      num_hiddens: Output channels of every convolution in the trunk.
      num_residual_layers: Number of residual blocks after the strided stack.
      num_residual_hiddens: Inner channels of each residual block.
      initializers: Optional initializers passed to the convolutions.
      data_format: Either "NHWC" or "NCHW".
      name: Sonnet module name.

    Raises:
      ValueError: If `factor_h` or `factor_w` is not a power of 2.
    """
    super(ConvEncoder, self).__init__(name=name)
    self._num_hiddens = num_hiddens
    self._num_residual_layers = num_residual_layers
    self._num_residual_hiddens = num_residual_hiddens
    self._initializers = initializers
    self._data_format = data_format
    # Note that implicitly the network uses conv strides of 2, so
    # log2(factor) strided steps are needed per dimension.
    self._num_steps_h = factor_h.bit_length() - 1
    self._num_steps_w = factor_w.bit_length() - 1
    num_steps = max(self._num_steps_h, self._num_steps_w)
    for label, factor in (("factor_h", factor_h), ("factor_w", factor_w)):
      if factor & (factor - 1) != 0:
        raise ValueError(
            "`%s` must be a power of 2. It is %d" % (label, factor))
    self._num_steps = num_steps

  def _build(self, x):
    """Encode `x`; height shrinks by factor_h, width by factor_w."""
    h = x
    for step in range(self._num_steps):
      # Stride 2 until the corresponding dimension reaches its factor.
      stride = (2 if step < self._num_steps_h else 1,
                2 if step < self._num_steps_w else 1)
      h = snt.Conv2D(
          output_channels=self._num_hiddens,
          kernel_shape=(4, 4),
          stride=stride,
          initializers=self._initializers,
          data_format=self._data_format,
          name="strided_{}".format(step))(h)
      h = tf.nn.relu(h)
    h = snt.Conv2D(
        output_channels=self._num_hiddens,
        kernel_shape=(3, 3),
        stride=(1, 1),
        initializers=self._initializers,
        data_format=self._data_format,
        name="pre_stack")(h)
    return ResidualStack(  # pylint: disable=not-callable
        self._num_hiddens,
        self._num_residual_layers,
        self._num_residual_hiddens,
        initializers=self._initializers,
        data_format=self._data_format,
        name="residual_stack")(h)
class ConvDecoder(snt.AbstractModule):
  """Convolutional decoder.

  Mirror image of `ConvEncoder`: a residual stack followed by transposed
  convolutions that upsample by `factor_h` x `factor_w`.
  """

  def __init__(self,
               factor_h,
               factor_w,
               num_hiddens,
               num_residual_layers,
               num_residual_hiddens,
               num_output_channels=3,
               initializers=None,
               data_format="NHWC",
               name="conv_decoder"):
    """Instantiate a ConvDecoder.

    Args:
      factor_h: Total height upsampling factor; must be a power of 2.
      factor_w: Total width upsampling factor; must be a power of 2.
      num_hiddens: Channels of every convolution in the trunk.
      num_residual_layers: Number of residual blocks before upsampling.
      num_residual_hiddens: Inner channels of each residual block.
      num_output_channels: Channels of the reconstructed output.
      initializers: Optional initializers passed to the convolutions.
      data_format: Either "NHWC" or "NCHW".
      name: Sonnet module name.

    Raises:
      ValueError: If `factor_h` or `factor_w` is not a power of 2.
    """
    super(ConvDecoder, self).__init__(name=name)
    self._num_hiddens = num_hiddens
    self._num_residual_layers = num_residual_layers
    self._num_residual_hiddens = num_residual_hiddens
    self._num_output_channels = num_output_channels
    self._initializers = initializers
    self._data_format = data_format
    # log2(factor) transposed convs are needed per dimension.
    self._num_steps_h = factor_h.bit_length() - 1
    self._num_steps_w = factor_w.bit_length() - 1
    num_steps = max(self._num_steps_h, self._num_steps_w)
    for label, factor in (("factor_h", factor_h), ("factor_w", factor_w)):
      if factor & (factor - 1) != 0:
        raise ValueError(
            "`%s` must be a power of 2. It is %d" % (label, factor))
    self._num_steps = num_steps

  def _build(self, x):
    """Decode `x` into a tensor with `num_output_channels` channels."""
    h = snt.Conv2D(
        output_channels=self._num_hiddens,
        kernel_shape=(3, 3),
        stride=(1, 1),
        initializers=self._initializers,
        data_format=self._data_format,
        name="pre_stack")(x)
    h = ResidualStack(  # pylint: disable=not-callable
        self._num_hiddens,
        self._num_residual_layers,
        self._num_residual_hiddens,
        initializers=self._initializers,
        data_format=self._data_format,
        name="residual_stack")(h)
    for i in range(self._num_steps):
      # Reverse striding relative to the encoder -- the stride-1 steps
      # come first so the stride-2 steps are applied last.
      remaining = self._num_steps - 1 - i
      stride = (2 if remaining < self._num_steps_h else 1,
                2 if remaining < self._num_steps_w else 1)
      h = snt.Conv2DTranspose(
          output_channels=self._num_hiddens,
          output_shape=None,
          kernel_shape=(4, 4),
          stride=stride,
          initializers=self._initializers,
          data_format=self._data_format,
          name="strided_transpose_{}".format(i))(h)
      h = tf.nn.relu(h)
    return snt.Conv2D(
        output_channels=self._num_output_channels,
        kernel_shape=(3, 3),
        stride=(1, 1),
        initializers=self._initializers,
        data_format=self._data_format,
        name="final")(h)
def export_hub_module(agent_ctor,
                      observation_spec,
                      noise_dim,
                      module_path,
                      checkpoint_path,
                      name_transform_fn=None):
  """Exports the agent as a TF-Hub module.

  Args:
    agent_ctor: A function returning a Sonnet module for the agent.
    observation_spec: A nested dict of `Array` specs describing an observation
      coming from the environment.
    noise_dim: The dimensionality of the noise vector used by the agent.
    module_path: A path where to export the module to.
    checkpoint_path: A path where to load the weights for the module.
    name_transform_fn: An optional function to provide mapping between
      variable name in the module and the variable name in the checkpoint.
  """

  def module_fn():
    """Builds a graph for the TF-Hub module."""
    agent = agent_ctor()
    # Get the initial agent state tensor.
    initial_agent_state = agent.initial_state(1)
    # Create a bunch of placeholders for the step function inputs.
    # All placeholders carry a leading batch dimension of 1.
    step_type_ph = tf.placeholder(dtype=tf.int32, shape=(1,))
    observation_ph = nest.map_structure(
        lambda s: tf.placeholder(dtype=tf.dtypes.as_dtype(s.dtype),  # pylint: disable=g-long-lambda
                                 shape=(1,) + s.shape),
        observation_spec)
    # The noise sample is an extra input not present in observation_spec.
    observation_ph["noise_sample"] = tf.placeholder(
        dtype=tf.float32, shape=(1, noise_dim))
    agent_state_ph = nest.map_structure(
        lambda t: tf.placeholder(dtype=t.dtype, shape=t.shape),
        initial_agent_state)
    # Get the step function outputs.
    agent_output, agent_state = agent.step(
        step_type_ph, observation_ph, agent_state_ph)
    # Now we need to add the module signatures. TF Hub modules require inputs
    # to be flat dictionaries. Since the agent's methods accept multiple
    # argument some of which being nested dictionaries we gotta work
    # some magic in order to flatten the structure of the placeholders.
    initial_state_output_dict = dict(
        state=initial_agent_state)
    initial_state_output_dict = dict(
        nest.flatten_with_joined_string_paths(initial_state_output_dict))
    step_inputs_dict = dict(
        step_type=step_type_ph,
        observation=observation_ph,
        state=agent_state_ph)
    step_inputs_dict = dict(
        nest.flatten_with_joined_string_paths(step_inputs_dict))
    step_outputs_dict = dict(
        action=agent_output.action,
        state=agent_state)
    step_outputs_dict = dict(
        nest.flatten_with_joined_string_paths(step_outputs_dict))
    hub.add_signature(
        "initial_state", outputs=initial_state_output_dict)
    hub.add_signature(
        "step", inputs=step_inputs_dict, outputs=step_outputs_dict)

  # NOTE(review): the "sonnet" collections are dropped from the exported
  # spec -- presumably not needed at serving time; confirm before changing.
  spec = hub.create_module_spec(module_fn, drop_collections=["sonnet"])
  spec.export(module_path,
              checkpoint_path=checkpoint_path,
              name_transform_fn=name_transform_fn)
def get_module_wrappers(module_path):
  """Returns python functions implementing the agent.

  Args:
    module_path: A path which should be used to load the agent from.

  Returns:
    A tuple of two functions:
      * A function that returns the initial state of the agent.
      * A function that performs a step.
  """
  # The module gets its own graph + session so it does not interfere
  # with any default graph the caller may have.
  g = tf.Graph()
  session = tf.Session(graph=g)
  with g.as_default():
    agent = hub.Module(module_path)

    def to_python_fn(session, signature):
      """Converts a symbolic function into a plain python functions."""
      inputs_ph = {
          k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)
          for k, v in six.iteritems(agent.get_input_info_dict(signature))}
      outputs = agent(inputs=inputs_ph, signature=signature, as_dict=True)

      def fn(**kwargs):
        # Feed each keyword argument into its matching placeholder.
        feed_dict = {inputs_ph[k]: kwargs[k] for k in six.iterkeys(inputs_ph)}
        return session.run(outputs, feed_dict=feed_dict)
      return fn

    raw_initial_state_fn = to_python_fn(session, "initial_state")
    raw_step_fn = to_python_fn(session, "step")
    init_op = tf.global_variables_initializer()
  # Finalizing guards against accidental graph mutation later; running
  # already-constructed ops on a finalized graph is still allowed.
  g.finalize()
  session.run(init_op)

  def wrapped_step_fn(step_type, observation, prev_state):
    """A convenience wrapper for a raw step function."""
    # Add the leading batch dimension of 1 expected by the module.
    step_type, observation = nest.map_structure(
        lambda t: np.expand_dims(t, 0),
        (step_type, observation))
    step_inputs_dict = dict(
        step_type=step_type,
        observation=observation)
    step_inputs_dict = dict(
        nest.flatten_with_joined_string_paths(step_inputs_dict))
    # prev_state is already flat ("state/..." keys) from a prior call.
    step_inputs_dict.update(prev_state)
    output = raw_step_fn(**step_inputs_dict)
    # Split the flat output back into action and state namespaces.
    action = {k.replace("action/", ""): v
              for k, v in six.iteritems(output)
              if k.startswith("action/")}
    state = {k: v for k, v in six.iteritems(output) if k.startswith("state/")}
    action = nest.map_structure(lambda t: np.squeeze(t, 0), action)
    return action, state

  return raw_initial_state_fn, wrapped_step_fn
|
|
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for solve.py."""
import functools
import jax
import jax.numpy as jnp
from jax import lax
import numpy as np
import pytest
import scipy as sp
from distla_core.linalg.solve import solve
from distla_core.linalg.utils import testutils
from distla_core.utils import pops
from distla_core.utils import vops
# Shorthand for the processor-grid constants exported by pops.
AXIS_NAME = pops.AXIS_NAME
NCOL = pops.NCOLS
NROW = pops.NROWS
NPROC = pops.NPROCS

# Parameter grids shared by the parametrized tests below.
dims = [4, 16]
seeds = [1, 2]
precisions = [lax.Precision.HIGHEST, ]
dtypes = [np.float32, ]
##############################################################################
# HELPERS
##############################################################################
def _dephase_r(R):
""" Maps the R factor from an arbitrary QR decomposition to the unique one
with non-negative diagonal entries.
"""
phases_data = np.sign(np.diagonal(R))
phases = np.ones((max(R.shape)))
phases[:phases_data.size] = phases_data
R = phases.conj()[:, None] * R
return R
def _arnoldi_assert_one(A, V, H, j, tol):
  """Check the Arnoldi relation A @ V[:, :j] == V[:, :j+1] @ H[:j+1, :j]."""
  left = A.dot(V[:, :j])
  right = V[:, :j + 1].dot(H[:j + 1, :j])
  testutils.assert_allclose(left, right, atol=tol)
def _arnoldi_assert_two(A, V, H, j, tol):
  """Check the projection H[:j, :j] == V[:, :j]^H @ A @ V[:, :j]."""
  basis = V[:, :j]
  projected = basis.conj().T.dot(A).dot(basis)
  testutils.assert_allclose(H[:j, :j], projected, atol=tol)
def _gmres_update_np(X, V, R, beta):
Y = sp.linalg.solve_triangular(R[:-1, :], beta[:-1])
dX = np.dot(V[:, :-1], Y).reshape(X.shape)
return X + dX
##############################################################################
# GIVENS QR
##############################################################################
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("dtype", dtypes)
def test_givens(seed, dtype):
  """Checks that applying the Givens factors computed by
  `solve._compute_givens_rotation` to a two-vector `v = (a, b)` via
  `solve._apply_ith_rotation` yields `(||v||, 0)` up to a sign.
  """
  np.random.seed(seed)
  v = jnp.array(np.random.randn(2).astype(dtype))
  cs, sn = solve._compute_givens_rotation(v[0], v[1])
  cs = jnp.full(1, cs)
  sn = jnp.full(1, sn)
  vec_norm = np.sqrt(v[0] ** 2 + v[1] ** 2)
  rotated, _, _ = solve._apply_ith_rotation(0, (v, cs, sn))
  eps = jnp.finfo(dtype).eps * vec_norm
  testutils.assert_allclose(
      np.abs(rotated), np.array([vec_norm, 0.]), atol=eps)
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("dtype", dtypes)
def test_hessenberg_qr(seed, dim, dtype):
  """ Tests that solve._update_hessenberg_qr correctly performs the
  QR factorization of an upper Hessenberg matrix.
  """
  np.random.seed(seed)
  # Random upper Hessenberg matrix: triu(k=-1) keeps one sub-diagonal.
  H = np.random.randn(dim + 1, dim).astype(dtype)
  H = jnp.triu(H, k=-1)
  H_r = jnp.zeros_like(H)
  cs = jnp.zeros(dim, dtype=dtype)
  sn = jnp.zeros(dim, dtype=dtype)
  # Error tolerance scales with the conditioning of H.
  tol = jnp.finfo(dtype).eps * np.linalg.cond(H)
  for j in range(dim):
    # Incrementally rotate column j into upper-triangular form.
    H_r, cs, sn = solve._update_hessenberg_qr(H_r, H, cs, sn, j)
    this_H_r = np.array(H_r)[:j+2, :j+1]
    this_H = np.array(H)[:j+2, :j+1]
    # H_r is upper triangular.
    testutils.assert_allclose(this_H_r, np.triu(this_H_r), atol=tol)
    # Agreement with NumPy up to a phase.
    _, expected_R = np.linalg.qr(this_H)
    expected_R = _dephase_r(expected_R)
    result_R = _dephase_r(this_H_r[:-1, :])
    testutils.assert_allclose(expected_R, result_R, tol)
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("dtype", dtypes)
def test_cgs(dim, seed, dtype):
  """Tests that solve.cgs orthogonalizes a new vector against the first
  j columns of a distributed basis.
  """
  j = dim // 2
  np.random.seed(seed)
  # Orthonormal basis whose columns j: are zeroed (unused slots).
  Vs = np.random.randn(dim, dim).astype(dtype)
  Vs, _ = np.linalg.qr(Vs)
  Vs[:, j:] = 0.
  V_v = vops.distribute(Vs)
  new_v = np.random.randn(dim, 1).astype(dtype)
  new_v = vops.distribute(new_v)

  @functools.partial(pops.pmap, out_axes=(0, None, None))
  def cgs_f(new_v, V_v):
    return solve.cgs(new_v, V_v)

  orth_v, _, _ = cgs_f(new_v, V_v)
  orth_v = vops.undistribute(orth_v)
  # Inserting the orthogonalized vector must keep the basis unitary.
  Vs[:, j] = orth_v.ravel()
  testutils.test_unitarity(
      Vs[:, :j + 1], eps_coef=np.linalg.cond(Vs[:, :j + 1]) * 10)
# Arnoldi iteration
def test_arnoldi_cond():
  """Checks the Arnoldi loop condition is `err > tol and j < maxiter`."""
  maxiter = 2
  tol = 1.0
  # Error values above / below / exactly at the tolerance.
  errs = [tol * 2, tol // 2, tol]
  err_gt_tol = [True, False, False]
  # Iteration counts above / below / exactly at maxiter.
  js = [maxiter * 2, maxiter // 2, maxiter]
  j_lt_maxiter = [False, True, False]
  for err_val, err_flag in zip(errs, err_gt_tol):
    for j_val, j_flag in zip(js, j_lt_maxiter):
      err = jnp.full(1, err_val)
      j = jnp.full(1, j_val)
      # Only the err (index 6) and j (index 7) slots of args are used;
      # the rest are filled with zeros.
      args = (0, 0, 0, 0, 0, 0, err, j)
      result = solve._arnoldi_cond(maxiter, tol, args)
      assert jnp.logical_and(err_flag, j_flag) == result
@pytest.mark.parametrize("seed", [1, 2])
@pytest.mark.parametrize("dim", [4, ])
@pytest.mark.parametrize("N_k", [3, ])
@pytest.mark.parametrize("j", [1, ])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("precision", precisions)
def test_update_arnoldi(dim, N_k, seed, j, dtype, precision):
  """ Tests that solve._update_arnoldi puts vectors in the correct places.
  """
  np.random.seed(seed)
  V = np.random.randn(dim, N_k).astype(dtype)
  V[:, j + 1:] = 0.
  # NOTE(review): this assignment is dead -- H_s is rebuilt below.
  H_s = np.random.randn(N_k + 1, N_k).astype(dtype)
  # Upper Hessenberg data occupying only the leading (j+1) x j block.
  H_s_data = np.random.randn(j + 1, j).astype(dtype)
  H_s_data = np.triu(H_s_data, k=-1)
  H_s = np.zeros((N_k + 1, N_k), dtype=dtype)
  H_s[:j + 1, :j] = H_s_data
  orth = np.random.randn(dim, 1).astype(dtype)
  overlaps = np.random.randn(N_k + 1, 1).astype(dtype)
  overlaps[j + 1:, :] = 0.
  norm_v = np.abs(np.random.randn(1, 1).astype(dtype))
  # Expected: orth becomes column j+1 of V; overlaps fill column j of H,
  # with the subdiagonal entry set to the new vector's norm.
  V_expected = np.copy(V)
  V_expected[:, j + 1] = orth[:, 0]
  H_expected = np.copy(H_s)
  H_expected[:, j] = overlaps[:, 0]
  H_expected[j + 1, j] = norm_v[0, 0]
  V_v = vops.distribute(V)
  orth_v = vops.distribute(orth)

  @functools.partial(
      pops.pmap, in_axes=(0, None, None, 0, None, None), out_axes=(0, None))
  def test_f(V_v, H_s, j, orth_v, overlaps, norm_v):
    return solve._update_arnoldi(V_v, H_s, j, orth_v, overlaps, norm_v)

  V_result, H_result = test_f(V_v, H_s, j, orth_v, overlaps, norm_v)
  V_result = vops.undistribute(V_result)
  tol = testutils.eps(precision, dtype=dtype)
  testutils.assert_allclose(V_expected, V_result, atol=tol * np.linalg.norm(V))
  testutils.assert_allclose(
      H_expected, H_result, atol=tol * np.linalg.norm(H_expected))
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("seed", [0, 1])
@pytest.mark.parametrize("check_residual", [True, ])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("precision", precisions)
def test_arnoldi_qr(dim, seed, check_residual, dtype, precision):
  """ Initializes GMRES and runs the iterated Arnoldi QR to
  completion. At each iteration, tests that:
    1. The Arnoldi matrices satisfy the relations given in
       test_arnoldi_step.
    2. The computed R factor is indeed that of the Arnoldi
       upper Hessenberg matrix.
    3. The error vector beta has been appropriately rotated.
  If check_residual is True, also performs the GMRES linear solve,
  and makes sure the updated error agrees with the actual residual norm.
  """
  # NOTE(review): no-op assignment; kept as-is.
  dtype = dtype
  np.random.seed(seed)
  # Build a system A @ expected = B with a known solution.
  A = np.random.randn(dim, dim).astype(dtype)
  cond = np.linalg.cond(A)
  expected = np.random.randn(dim, 1).astype(dtype)
  B = np.dot(A, expected)
  b_norm = np.linalg.norm(B)
  tol = testutils.eps(precision, dtype=dtype)
  tol *= cond * np.linalg.norm(B)
  A_d = pops.distribute(A)
  B_d = vops.distribute(B)

  @functools.partial(
      pops.pmap, out_axes=(0, 0, None, None, None, None, None, None, None))
  def init_f(A, B):
    # Unpack the GMRES initial state so the loop below can thread it.
    X_v, args, _, _, _ = solve._gmres_init(A, B, dim, None, precision, None)
    V_v, H_s, R_s, beta_s, cos_s, sin_s, err, j = args
    return X_v, V_v, H_s, R_s, beta_s, cos_s, sin_s, err, j

  X_v, V_v, H_s, R_s, beta_s, cos_s, sin_s, err, j = init_f(A_d, B_d)
  beta_0 = jnp.array(beta_s)
  itertol = testutils.eps(precision, A.dtype) * b_norm

  @functools.partial(
      pops.pmap,
      in_axes=(0, 0, None, None, None, None, None, None, None),
      out_axes=(0, None, None, None, None, None, None, None))
  def test_f(A, V, H, R, beta, cos, sin, err, j):
    args = (V, H, R, beta, cos, sin, err, jnp.full(1, j, dtype=jnp.int32))
    V, H, R, beta, cos, sin, err, j = solve._arnoldi_qr(
        A, args, precision=precision)
    return V, H, R, beta, cos, sin, err, j

  @functools.partial(
      pops.pmap,
      in_axes=(0, None, None, None, None, None, None, None),
      out_axes=None)
  def test_cond_f(V, H, R, beta, cos, sin, err, j):
    args = (V, H, R, beta, cos, sin, err, j)
    return solve._arnoldi_cond(dim, itertol, args)

  if check_residual:
    @functools.partial(
        pops.pmap,
        in_axes=(0, 0, None, None, None),
        static_broadcasted_argnums=(4,))
    def _update_f(X, V, R, beta, arnoldi_maxiter):
      return solve._gmres_update_solution(X, V, R, beta, None, arnoldi_maxiter)

  while test_cond_f(V_v, H_s, R_s, beta_s, cos_s, sin_s, err, j):
    out = test_f(A_d, V_v, H_s, R_s, beta_s, cos_s, sin_s, err, j)
    V_v, H_s, R_s, beta_s, cos_s, sin_s, err, j = out
    V_test = vops.undistribute(V_v)
    # 1. The Arnoldi relations hold after each iteration.
    _arnoldi_assert_one(A, V_test, H_s, j[0], tol)
    _arnoldi_assert_two(A, V_test, H_s, j[0], tol)
    j = int(j)
    # 2. R matches NumPy's QR of the Hessenberg matrix, up to phases.
    _, R_expected = np.linalg.qr(H_s[:j + 1, :j], mode="complete")
    R_expected = _dephase_r(R_expected)
    R_result = _dephase_r(R_s)
    testutils.assert_allclose(R_expected, R_result[:j + 1, :j], atol=tol)
    # 3. beta equals the initial beta passed through all j rotations.
    beta_expected = jnp.array(beta_0)
    for i in range(j):
      beta_expected, _, _ = solve._apply_ith_rotation(
          i, (beta_expected, cos_s, sin_s))
    testutils.assert_allclose(beta_expected, beta_s, atol=tol)
    if check_residual:
      # The reported err must agree with the true residual norm.
      this_X_v = _update_f(X_v, V_v, R_s, beta_s, j)
      this_X = vops.undistribute(this_X_v)
      residual = np.linalg.norm(B - np.dot(A, this_X))
      testutils.assert_allclose(np.array([residual]), err, atol=tol)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("seed", [0, 1])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("precision", precisions)
def test_gmres_update_solution(dim, seed, dtype, precision):
  """Tests solve._gmres_update_solution against the NumPy reference
  implementation _gmres_update_np.
  """
  np.random.seed(seed)
  X = np.zeros((dim, 1), dtype=dtype)
  X_v = vops.distribute(X)
  # Basis whose first dim columns are orthonormalized; the final column
  # is left as random data (it is excluded by the update).
  V = np.random.rand(dim, dim + 1).astype(dtype)
  V_o, _ = np.linalg.qr(V[:, :-1])
  V[:, :-1] = V_o
  V_v = vops.distribute(V)
  # Construct beta = R @ Y so the triangular solve recovers Y exactly.
  Y = np.random.randn(dim,).astype(dtype)
  R = np.triu(np.random.randn(dim + 1, dim).astype(dtype))
  beta = np.zeros(dim + 1, dtype=dtype)
  beta[:-1] = np.dot(R[:-1, :], Y)
  R_s = jnp.array(R)
  beta_s = jnp.array(beta)

  @functools.partial(
      pops.pmap,
      in_axes=(0, 0, None, None),
  )
  def update_f(X, V, R, beta):
    return solve._gmres_update_solution(X, V, R, beta, None, dim)

  expected = _gmres_update_np(X, V, R, beta)
  result = update_f(X_v, V_v, R_s, beta_s)
  result = vops.undistribute(result)
  # Tolerance scales with the conditioning of the triangular factor.
  cond = np.linalg.cond(R[:-1, :])
  tol = testutils.eps(precision, dtype=dtype) * np.linalg.norm(expected) * cond
  testutils.assert_allclose(expected, result, atol=tol)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("precision", precisions)
def test_arnoldi_step(dim, dtype, precision):
  """ Iterates through solve._arnoldi_step and ensures the relations
    `A @ V_v[:, :j] = V_v[:, :j+1] @ H_s[:j+1, :j]`
  along with
    `H_s[:j+1, :j] = V_v[:, :j]^H @ A @ V_v[:, :j]`
  are satisfied.
  """
  np.random.seed(1)
  A = np.random.randn(dim, dim).astype(dtype)
  cond = np.linalg.cond(A)
  A_d = pops.distribute(A)
  # Normalized random starting vector in the first Krylov basis column.
  x0 = np.random.randn(dim).astype(dtype)
  x0_norm = np.linalg.norm(x0)
  x0 /= x0_norm
  V_v = np.zeros((dim, dim + 1), dtype=A.dtype)
  V_v[:, 0] = x0
  V_v = vops.distribute(V_v)
  H_s = jnp.eye(dim + 1, dim, dtype=A.dtype)
  tol = testutils.eps(precision, dtype=dtype) * x0_norm * cond

  @functools.partial(
      pops.pmap, in_axes=(None, 0, None, 0, None), out_axes=(0, None))
  def _arnoldi_f(j, V_v, H_s, A_d, A_inv):
    return solve._arnoldi_step(j, V_v, H_s, A_d, A_inv, precision)

  for j in range(dim):
    V_v, H_s = _arnoldi_f(j, V_v, H_s, A_d, None)
    V = vops.undistribute(V_v)
    # The Arnoldi relations must hold after extending the basis by one.
    _arnoldi_assert_one(A, V, H_s, j + 1, tol)
    _arnoldi_assert_two(A, V, H_s, j + 1, tol)
# Linear solve
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("dtype", dtypes)
def test_gmres_bare(dim, dtype):
  """ Tests that solve.gmres produces a correct solution when run with
  arnoldi_maxiter = dim.
  """
  np.random.seed(1)
  # Build a system A @ expected = B with a known solution.
  A = np.random.randn(dim, dim).astype(dtype)
  cond = np.linalg.cond(A)
  expected = np.random.randn(dim, 1).astype(dtype)
  B = np.dot(A, expected)
  # Tolerance covers conditioning and roundoff at HIGHEST precision.
  tol = testutils.eps(lax.Precision.HIGHEST, dtype=dtype)
  tol *= cond * np.linalg.norm(B)
  A_d = pops.distribute(A)
  B_d = vops.distribute(B)

  @functools.partial(pops.pmap, out_axes=(0, None, None))
  def solve_f(A, B):
    return solve.gmres(A, B, arnoldi_maxiter=dim)

  result, _, _ = solve_f(A_d, B_d)
  result = vops.undistribute(result)
  testutils.assert_allclose(expected, result, atol=tol)
@pytest.mark.parametrize("dim", dims)
@pytest.mark.parametrize("dtype", dtypes)
def test_gmres_solve(dim, dtype):
  """ Tests that solve.solve produces a correct solution.
  """
  # Seed the RNG for determinism (mirrors test_gmres_bare). Previously
  # this test drew an unseeded A and could flake on an ill-conditioned
  # sample.
  np.random.seed(1)
  A = np.random.randn(dim, dim).astype(dtype)
  cond = np.linalg.cond(A)
  expected = np.random.randn(dim, 1).astype(dtype)
  B = np.dot(A, expected)
  tol = testutils.eps(lax.Precision.HIGHEST, dtype=dtype)
  tol *= cond * np.linalg.norm(B)
  A_d = pops.distribute(A)
  B_d = vops.distribute(B)

  @functools.partial(pops.pmap, out_axes=(0, None, None, 0))
  def solve_f(A, B):
    return solve.solve(A, B)

  result, _, _, _ = solve_f(A_d, B_d)
  result = vops.undistribute(result)
  testutils.assert_allclose(expected, result, atol=tol)
|
|
'''
SequenceBase.py
Copyright (c) 2003 - 2008 James Urquhart(j_urquhart@btinternet.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import bpy
import Common_Gui
import DtsGlobals
# ***************************************************************************************************
## @brief Base Class For sequence control sub-panel classes.
#
# This class implements functionality that is common to all sequence sub panels.
#
class SeqControlsClassBase:
    """Base class implementing functionality common to all sequence
    control sub-panels (sequence list, selection handling, per-item
    export/cyclic/DSQ toggle buttons)."""

    ## @brief Initialize the controls and values that are common to all sequence control panels.
    #  @note Child classes should call this method explicitly at the beginning of their own __init__ methods.
    #  @param tabContainer The GUI tab container control into which everything should be placed.
    def __init__(self, tabContainer):
        # First event ID handed out by getNextEvent().
        self.startEvent = 5

        # initialize GUI controls
        self.guiSeqList = Common_Gui.ListContainer("guiSeqList", "sequence.list", self.handleListEvent,
                                                   self.guiSeqListResize)
        self.guiSeqListTitle = Common_Gui.SimpleText("guiSeqListTitle", "All Sequences:", None,
                                                     self.guiSeqListTitleResize)
        self.guiSeqOptsContainer = Common_Gui.BasicContainer("guiSeqOptsContainer", "guiSeqOptsContainer", None,
                                                             self.guiSeqOptsContainerResize)
        self.guiSeqSelectedBoxLabel = Common_Gui.BoxSelectionLabel("guiSeqSelectedBoxLabel",
                                                                   "Selected Sequence:\n None Selected", None,
                                                                   self.guiSeqSelectedBoxLabelResize)

        # set initial states
        self.guiSeqOptsContainer.enabled = False
        self.guiSeqOptsContainer.fade_mode = 5
        self.guiSeqOptsContainer.borderColor = None
        self.guiSeqList.fade_mode = 0

        # add controls to containers
        tabContainer.addControl(self.guiSeqList)
        tabContainer.addControl(self.guiSeqListTitle)
        tabContainer.addControl(self.guiSeqOptsContainer)
        self.guiSeqOptsContainer.addControl(self.guiSeqSelectedBoxLabel)

        ## Need to set this explicitly in child classes
        #  @note valid values are: "All", "Action", "IFL", "Vis" and eventually "TexUV" and "Morph"
        self.seqFilter = "All"

    ## @brief Gets an event ID # for native Blender controls that need one.  We don't actually
    #  use these, but most native controls must have one.
    #  @note Most child classes should be able to inherit this method and use it as-is
    def getNextEvent(self):
        retVal = self.startEvent
        self.startEvent += 1
        return retVal

    ## @brief Gets the name and preference entry of the sequence currently selected
    #  in the sequence list.  Returns (None, None) when nothing valid is selected.
    #  @note Most child classes should be able to inherit this method and use it as-is
    def getSelectedSeqNameAndPrefs(self):
        Prefs = DtsGlobals.Prefs
        if self.guiSeqList.itemIndex == -1:
            return None, None
        try:
            seqName = self.guiSeqList.controls[self.guiSeqList.itemIndex].controls[0].label
            seqPrefs = Prefs['Sequences'][seqName]
        except Exception:
            # stale selection or missing prefs entry; report "no selection"
            # instead of crashing the UI.  (Was a bare "except:".)
            return None, None
        return seqName, seqPrefs

    ## @brief Selects the desired sequence in the list
    #  @note If the sequence is not found, nothing happens.
    #  @note Most child classes should be able to inherit this method and use it as-is
    #  @param selectThis string name of sequence to select.
    def selectSequence(self, selectThis):
        for i in range(len(self.guiSeqList.controls)):
            seqName = self.guiSeqList.controls[i].controls[0].label
            if seqName == selectThis:
                self.guiSeqList.selectItem(i)
                self.guiSeqList.scrollToSelectedItem()
                if self.guiSeqList.callback: self.guiSeqList.callback(self.guiSeqList)
                return

    ## @brief Cleans up Blender GUI objects before the interpreter exits;
    #  we must destroy any GUI objects that are referenced in a non-global scope
    #  explicitly before interpreter shutdown to avoid the dreaded
    #  "error totblock" message when exiting Blender.
    #  @note The builtin __del__ method is not guaranteed to be called for objects
    #  that still exist when the interpreter exits.
    #  @note Child classes should explicitly call this method at the end of their own cleanup method.
    def cleanup(self):
        del self.guiSeqList
        del self.guiSeqListTitle
        del self.guiSeqOptsContainer
        del self.guiSeqSelectedBoxLabel

    ## @brief Refreshes all controls on the panel w/ fresh data from blender and the prefs.
    #  @note Most child classes should be able to inherit this method and use it as-is
    def refreshAll(self):
        # refresh action data and repopulate the sequence list
        self.refreshSequenceList()

    ## @brief Refreshes the items in the sequence list, preserving list selection if possible.
    #  @note Most child classes should be able to inherit this method and use it as-is
    def refreshSequenceList(self):
        Prefs = DtsGlobals.Prefs
        Prefs.refreshSequencePrefs()
        # store last sequence selection
        seqName = None
        seqPrefs = None
        if self.guiSeqList.itemIndex != -1:
            seqName, seqPrefs = self.getSelectedSeqNameAndPrefs()
        else:
            # no valid selection, so select the first item in the list
            self.guiSeqList.selectItem(0)
            self.guiSeqList.scrollToSelectedItem()
            if self.guiSeqList.callback: self.guiSeqList.callback(self.guiSeqList)
            seqName, seqPrefs = self.getSelectedSeqNameAndPrefs()

        # populateSequenceList automatically clears the sequence list first.
        self.populateSequenceList()

        # restore last sequence selection
        for itemIndex in range(len(self.guiSeqList.controls)):
            if self.guiSeqList.controls[itemIndex].controls[0].label == seqName:
                self.guiSeqList.selectItem(itemIndex)
                self.guiSeqList.scrollToSelectedItem()
                self.refreshSequenceOptions(seqName, seqPrefs)
                if self.guiSeqList.callback: self.guiSeqList.callback(self.guiSeqList)
                return
        # previous selection no longer exists; fall back to the first item
        self.guiSeqList.selectItem(0)
        self.guiSeqList.scrollToSelectedItem()
        if self.guiSeqList.callback: self.guiSeqList.callback(self.guiSeqList)

    ## @brief Refreshes sequence specific option controls on the right side of the sequences panel.
    #  @note This method should be called whenever the sequence list is refreshed, or when sequence
    #  list selection changes.
    #  @note Must be overridden by child classes.
    #  @param seqName The name of the currently selected sequence.
    #  @param seqPrefs The preferences key of the currently selected sequence.
    def refreshSequenceOptions(self, seqName, seqPrefs):
        print("Parent refreshSequenceOptions called. You probably forgot to implement it in your new child class :-)")

    ## @brief Clears sequence specific option controls on the right side of the sequences panel.
    #  @note This method should be called when no sequence list item is currently selected.
    #  @note Must be overridden by child classes.
    def clearSequenceOptions(self):
        print("Parent clearSequenceOptions called. You probably forgot to implement it in your new child class :-)")

    ## @brief Updates GUI states when the sequence list item selection is changed.
    #  @note This method should only be called by the sequence list GUI control
    #  event handler callback mechanism.
    #  @note Most child classes should be able to inherit this method and use it as-is
    #  @param control The invoking GUI Control object (should be the sequence list control)
    def handleListEvent(self, control):
        if control.itemIndex != -1:
            seqName, seqPrefs = self.getSelectedSeqNameAndPrefs()
            self.refreshSequenceOptions(seqName, seqPrefs)
            self.guiSeqSelectedBoxLabel.text = "Selected Sequence:\n '%s'" % seqName
            self.guiSeqOptsContainer.enabled = True
        else:
            self.clearSequenceOptions()
            self.guiSeqOptsContainer.enabled = False

    ## @brief Updates relevant preferences when a sequence list item button state is changed.
    #  @note This method should only be called by the list item container's event handing mechanism
    #  @note Most child classes should be able to inherit this method and use it as-is
    #  @param control The invoking GUI Control object (should be a sequence list item container control)
    def handleListItemEvent(self, control):
        Prefs = DtsGlobals.Prefs
        # list items carry 4 controls (name label + 3 buttons) when the DSQ
        # button is shown, otherwise 3 (name label + 2 buttons).
        ShowDSQButton = len(self.guiSeqList.controls[0].controls) == 4
        if ShowDSQButton:
            evtOffset = 3
        else:
            evtOffset = 2
        # Determine sequence name.  Event IDs are handed out sequentially
        # starting at 40, evtOffset per list item, so recover the item index
        # with floor division.  (Was "/", which produces a float index under
        # Python 3 and breaks list indexing.)
        if control.evt == 40:
            calcIdx = 0
        else:
            calcIdx = (control.evt - 40) // evtOffset
        # Must use calcIdx here instead of self.getSelectedSeqNameAndPrefs()
        # because the user can click on a list button even when the list item
        # isn't selected.
        seqName = self.guiSeqList.controls[calcIdx].controls[0].label
        seqPrefs = Prefs['Sequences'][seqName]
        realItem = control.evt - 40 - (calcIdx * evtOffset)
        # no validation needed on these, so it's OK to set the prefs directly.
        if ShowDSQButton:
            if realItem == 0:
                seqPrefs['NoExport'] = not control.state
            elif realItem == 1:
                seqPrefs['Cyclic'] = control.state
            elif realItem == 2:
                seqPrefs['Dsq'] = control.state
        else:
            if realItem == 0:
                seqPrefs['NoExport'] = not control.state
            elif realItem == 1:
                seqPrefs['Cyclic'] = control.state

    ## @brief Positions a list item toggle button inside its list row.
    #  @param control The invoking GUI control object
    #  @param newwidth The new width of the GUI control in pixels.
    #  @param newheight The new height of the GUI control in pixels.
    def guiSeqButtonItemResize(self, control, newwidth, newheight):
        listWidth = self.guiSeqList.width - self.guiSeqList.barWidth
        buttonWidth = 50
        numButtons = len(self.guiSeqList.controls[0].controls) - 1
        buttonPos = [(listWidth - 5) - (buttonWidth * i + 1)
                     for i in range(1, numButtons + 1)]
        # slot in buttonPos for each known button name, right to left
        slots = {"guiExport": 2, "guiCyclic": 1, "guiDSQ": 0}
        try:
            pos = buttonPos[slots[control.name]]
        except (KeyError, IndexError):
            # unknown button name, or fewer button slots than expected;
            # previously this raised UnboundLocalError/IndexError.
            return
        control.x, control.y = pos, 5
        control.width, control.height = 50, 15

    ## @brief Place holder resize callback
    #  @note Child classes should override this method explicitly
    #  @param control The invoking GUI control object
    #  @param newwidth The new width of the GUI control in pixels.
    #  @param newheight The new height of the GUI control in pixels.
    def guiSeqListResize(self, control, newwidth, newheight):
        print("Parent guiSeqListResize called. You probably forgot to implement it in your new child class :-)")

    ## @brief Resize callback for the sequence list title text.
    #  @note Child classes should override this method explicitly if needed
    #  @param control The invoking GUI control object
    #  @param newwidth The new width of the GUI control in pixels.
    #  @param newheight The new height of the GUI control in pixels.
    def guiSeqListTitleResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 10, 310, 20, 82

    ## @brief Resize callback for the sequence options container.
    #  @note Child classes should override this method explicitly if needed
    #  @param control The invoking GUI control object
    #  @param newwidth The new width of the GUI control in pixels.
    #  @param newheight The new height of the GUI control in pixels.
    def guiSeqOptsContainerResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 241, 0, 334, 249

    ## @brief Resize callback for the "Selected Sequence" label.
    def guiSeqSelectedBoxLabelResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 5, newheight - 35, 33, 117

    ## @brief Creates a sequence list item and its associated GUI controls.
    #  @note If a child class needs to display a "DSQ" button, it should call
    #  the parent version explicitly with the second parameter set to True from
    #  its own createSequenceListItem method.
    #  @note Called by populateSequenceList, and methods of derived classes, as needed.
    #  @note Most child classes can inherit this method and just use it as-is.
    #  @param seqName The name of the sequence for which we're creating the list item.
    #  @param ShowButtons If true, Export/Cyclic/DSQ toggle buttons are displayed
    #  in the list item; if false, only the name label is shown.
    def createSequenceListItem(self, seqName, ShowButtons=False):
        Prefs = DtsGlobals.Prefs
        startEvent = self.curSeqListEvent

        # Note on positions:
        # It is quicker to assign these here, as there is no realistic chance
        # of scaling being required.
        guiContainer = Common_Gui.BasicContainer("", None, None)
        guiName = Common_Gui.SimpleText("", seqName, None, None)
        guiContainer.fade_mode = 0  # flat color
        guiContainer.addControl(guiName)
        guiName.x, guiName.y = 5, 5
        if ShowButtons:
            guiExport = Common_Gui.ToggleButton("guiExport", "Export", "Export Sequence", startEvent,
                                                self.handleListItemEvent, self.guiSeqButtonItemResize)
            guiCyclic = Common_Gui.ToggleButton("guiCyclic", "Cyclic", "Export Sequence as Cyclic", startEvent + 1,
                                                self.handleListItemEvent, self.guiSeqButtonItemResize)
            guiDSQ = Common_Gui.ToggleButton("guiDSQ", "Dsq", "Export Sequence as DSQ", startEvent + 2,
                                             self.handleListItemEvent, self.guiSeqButtonItemResize)
            guiContainer.addControl(guiExport)
            guiContainer.addControl(guiCyclic)
            guiContainer.addControl(guiDSQ)
            # initialize button states from the sequence prefs
            guiExport.state = not Prefs['Sequences'][seqName]['NoExport']
            guiCyclic.state = Prefs['Sequences'][seqName]['Cyclic']
            guiDSQ.state = Prefs['Sequences'][seqName]['Dsq']
            # a list item with buttons consumes three event IDs
            self.curSeqListEvent += 3
        else:
            self.curSeqListEvent += 1
        return guiContainer

    ## @brief Populates the sequence list using current pref values.
    def populateSequenceList(self):
        self.clearSequenceList()
        Prefs = DtsGlobals.Prefs
        # loop through all sequences in the preferences, case-insensitively
        # sorted.  (Python 3: list.sort no longer accepts a cmp function,
        # so the old cmp(x.lower(), y.lower()) comparator is expressed as a
        # key function with identical ordering.)
        keys = sorted(Prefs['Sequences'].keys(), key=lambda name: name.lower())
        for seqName in keys:
            seqPrefs = Prefs['Sequences'][seqName]
            if self.seqFilter == "All":
                self.guiSeqList.addControl(self.createSequenceListItem(seqName))
            elif seqPrefs[self.seqFilter]['Enabled']:
                self.guiSeqList.addControl(self.createSequenceListItem(seqName))

    ## @brief Clears the sequence list.
    def clearSequenceList(self):
        # explicitly empty each item's child control list before dropping
        # the items themselves (see cleanup() re: Blender GUI objects).
        for item in self.guiSeqList.controls:
            del item.controls[:]
        del self.guiSeqList.controls[:]

        # reset event allocation and selection state
        self.curSeqListEvent = 40
        self.guiSeqList.itemIndex = -1
        self.guiSeqList.scrollPosition = 0
        if self.guiSeqList.callback: self.guiSeqList.callback(self.guiSeqList)  # Bit of a hack, but works
|
|
# orm/strategies.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from .. import exc as sa_exc, inspect
from .. import util, log, event
from ..sql import util as sql_util, visitors
from . import (
attributes, interfaces, exc as orm_exc, loading,
unitofwork, util as orm_util
)
from .state import InstanceState
from .util import _none_set
from . import properties
from .interfaces import (
LoaderStrategy, StrategizedProperty
)
from .session import _state_session
import itertools
def _register_attribute(strategy, mapper, useobject,
                        compare_function=None,
                        typecallable=None,
                        uselist=False,
                        callable_=None,
                        proxy_property=None,
                        active_history=False,
                        impl_class=None,
                        **kw
                        ):
    """Install the instrumented attribute for *strategy*'s mapped property.

    The attribute implementation is registered on the mapped class of
    *mapper* and of every descendant mapper that still maps the same
    property, so subclasses share the configuration.  Event hooks
    (single-parent validation, @validates functions, cascade tracking,
    backref listeners) are collected first and applied to each registered
    descriptor afterwards, in that order.

    :param strategy: LoaderStrategy whose ``parent_property`` is instrumented.
    :param useobject: True for relationship (object) attributes, False for
        scalar column attributes.
    :param callable_: loader callable invoked on first unloaded access.
    :param \**kw: passed through to ``register_attribute_impl`` (``backref``
        is popped out and handled via a listener hook).

    .. note:: (review) ``proxy_property`` appears unused in this body --
       presumably retained for signature compatibility; confirm.
    """

    prop = strategy.parent_property

    attribute_ext = list(util.to_list(prop.extension, default=[]))

    listen_hooks = []

    if useobject and prop.single_parent:
        listen_hooks.append(single_parent_validator)

    if prop.key in prop.parent.validators:
        fn, opts = prop.parent.validators[prop.key]
        listen_hooks.append(
            lambda desc, prop: orm_util._validator_events(desc,
                                                          prop.key, fn, **opts)
        )

    if useobject:
        listen_hooks.append(unitofwork.track_cascade_events)

    # need to assemble backref listeners
    # after the singleparentvalidator, mapper validator
    backref = kw.pop('backref', None)
    if backref:
        listen_hooks.append(
            lambda desc, prop: attributes.backref_listeners(desc,
                                                            backref,
                                                            uselist)
        )

    for m in mapper.self_and_descendants:
        # only register on mappers that actually own this property
        # (a subclass may override it with its own)
        if prop is m._props.get(prop.key):

            desc = attributes.register_attribute_impl(
                m.class_,
                prop.key,
                parent_token=prop,
                uselist=uselist,
                compare_function=compare_function,
                useobject=useobject,
                extension=attribute_ext,
                trackparent=useobject and (prop.single_parent
                                           or prop.direction is interfaces.ONETOMANY),
                typecallable=typecallable,
                callable_=callable_,
                active_history=active_history,
                impl_class=impl_class,
                send_modified_events=not useobject or not prop.viewonly,
                doc=prop.doc,
                **kw
            )

            # apply the collected hooks to the newly created descriptor
            for hook in listen_hooks:
                hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
    """Loader strategy for a non-instrumented MapperProperty.

    The polymorphic_on argument of mapper() often results in this,
    if the argument is against the with_polymorphic selectable.

    """

    def __init__(self, parent):
        super(UninstrumentedColumnLoader, self).__init__(parent)
        self.columns = self.parent_property.columns

    def setup_query(self, context, entity, path, loadopt, adapter,
                    column_collection=None, **kwargs):
        # contribute each (possibly adapted) column to the SELECT list
        for column in self.columns:
            target = adapter.columns[column] if adapter else column
            column_collection.append(target)

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        # no instance-level population occurs for an uninstrumented property
        return None, None, None
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
    """Provide loading behavior for a :class:`.ColumnProperty`."""

    def __init__(self, parent):
        super(ColumnLoader, self).__init__(parent)
        self.columns = self.parent_property.columns
        # True when the property carries a composite_class attribute,
        # i.e. participates in a composite
        self.is_composite = hasattr(self.parent_property, 'composite_class')

    def setup_query(self, context, entity, path, loadopt,
                    adapter, column_collection, **kwargs):
        # contribute each (possibly adapted) column to the SELECT list
        for c in self.columns:
            if adapter:
                c = adapter.columns[c]
            column_collection.append(c)

    def init_class_attribute(self, mapper):
        """Install the class-level instrumented attribute for this column."""
        self.is_class_level = True
        coltype = self.columns[0].type
        # TODO: check all columns ?  check for foreign key as well?
        # "old" value tracking is needed for primary key and
        # version-id columns
        active_history = self.parent_property.active_history or \
            self.columns[0].primary_key or \
            mapper.version_id_col in set(self.columns)

        _register_attribute(self, mapper, useobject=False,
                            compare_function=coltype.compare_values,
                            active_history=active_history
                            )

    def create_row_processor(self, context, path,
                             loadopt, mapper, row, adapter):
        key = self.key
        # look through list of columns represented here
        # to see which, if any, is present in the row.
        for col in self.columns:
            if adapter:
                col = adapter.columns[col]
            if col is not None and col in row:
                def fetch_col(state, dict_, row):
                    dict_[key] = row[col]
                return fetch_col, None, None
        else:
            # NOTE: this is a for/else -- reached only when no column of
            # this property was present in the row; expire the attribute
            # so it is loaded on next access.
            def expire_for_non_present_col(state, dict_, row):
                state._expire_attribute_pre_commit(dict_, key)
            return expire_for_non_present_col, None, None
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
class DeferredColumnLoader(LoaderStrategy):
    """Provide loading behavior for a deferred :class:`.ColumnProperty`."""

    def __init__(self, parent):
        super(DeferredColumnLoader, self).__init__(parent)
        if hasattr(self.parent_property, 'composite_class'):
            raise NotImplementedError("Deferred loading for composite "
                                      "types not implemented yet")
        self.columns = self.parent_property.columns
        # deferral group name, or None when the column defers individually
        self.group = self.parent_property.group

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        """Return row-population callables for this deferred column.

        When the column is actually present in the row (e.g. it was
        undeferred for this query) delegate to the plain ColumnLoader;
        otherwise install a per-instance deferred callable, or reset the
        attribute so the class-level deferred loader fires on next access.
        """
        col = self.columns[0]
        if adapter:
            col = adapter.columns[col]

        key = self.key
        if col in row:
            # column made it into the statement after all; load it eagerly
            return self.parent_property._get_strategy_by_cls(ColumnLoader).\
                create_row_processor(
                    context, path, loadopt, mapper, row, adapter)

        elif not self.is_class_level:
            # we are not the class-level manager: install a per-instance
            # deferred loader overriding the class-level behavior
            set_deferred_for_local_state = InstanceState._row_processor(
                mapper.class_manager,
                LoadDeferredColumns(key), key)
            return set_deferred_for_local_state, None, None
        else:
            def reset_col_for_deferred(state, dict_, row):
                # reset state on the key so that deferred callables
                # fire off on next access.
                state._reset(dict_, key)
            return reset_col_for_deferred, None, None

    def init_class_attribute(self, mapper):
        """Install the class-level deferred-loading attribute."""
        self.is_class_level = True

        _register_attribute(self, mapper, useobject=False,
                            compare_function=self.columns[0].type.compare_values,
                            callable_=self._load_for_state,
                            expire_missing=False
                            )

    def setup_query(self, context, entity, path, loadopt, adapter,
                    only_load_props=None, **kwargs):
        """Contribute the column to the SELECT only when explicitly
        undeferred (an undefer_group option matching our group) or when
        it appears in the query's only_load_props; otherwise add nothing."""
        if (
            loadopt and self.group and
            loadopt.local_opts.get('undefer_group', False) == self.group
        ) or (only_load_props and self.key in only_load_props):
            self.parent_property._get_strategy_by_cls(ColumnLoader).\
                setup_query(context, entity,
                            path, loadopt, adapter, **kwargs)

    def _load_for_state(self, state, passive):
        """Emit the deferred load for *state*, loading the whole deferral
        group at once when one is configured.  Returns ATTR_WAS_SET on
        success; raises when the object is detached or was deleted."""
        if not state.key:
            return attributes.ATTR_EMPTY

        if not passive & attributes.SQL_OK:
            return attributes.PASSIVE_NO_RESULT

        localparent = state.manager.mapper

        if self.group:
            # load all deferred columns that share this group in one query
            toload = [
                p.key for p in
                localparent.iterate_properties
                if isinstance(p, StrategizedProperty) and
                isinstance(p.strategy, DeferredColumnLoader) and
                p.group == self.group
            ]
        else:
            toload = [self.key]

        # narrow the keys down to just those which have no history
        group = [k for k in toload if k in state.unmodified]

        session = _state_session(state)
        if session is None:
            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session; "
                "deferred load operation of attribute '%s' cannot proceed" %
                (orm_util.state_str(state), self.key)
            )

        query = session.query(localparent)
        if loading.load_on_ident(query, state.key,
                                 only_load_props=group, refresh_state=state) is None:
            raise orm_exc.ObjectDeletedError(state)

        return attributes.ATTR_WAS_SET
class LoadDeferredColumns(object):
    """Serializable callable that triggers a deferred column load; used by
    DeferredColumnLoader as the per-instance loader callable."""

    def __init__(self, key):
        self.key = key

    def __call__(self, state, passive=attributes.PASSIVE_OFF):
        # resolve the property's DeferredColumnLoader strategy at call
        # time and delegate the actual load to it
        prop = state.manager.mapper._props[self.key]
        loader = prop._strategies[DeferredColumnLoader]
        return loader._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
    """Base class for LoaderStrategies which deal with related objects."""

    def __init__(self, parent):
        super(AbstractRelationshipLoader, self).__init__(parent)
        prop = self.parent_property
        # cache frequently-consulted relationship characteristics
        self.mapper = prop.mapper
        self.target = prop.target
        self.uselist = prop.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    with "lazy=None".

    """

    def init_class_attribute(self, mapper):
        self.is_class_level = True
        _register_attribute(
            self, mapper,
            useobject=True,
            uselist=self.parent_property.uselist,
            typecallable=self.parent_property.collection_class,
        )

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        def invoke_no_load(state, dict_, row):
            # never load; just initialize the attribute to its empty state
            state._initialize(self.key)
        return invoke_no_load, None, None
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
class LazyLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    with "lazy=True", that is loads when first accessed.

    """

    def __init__(self, parent):
        super(LazyLoader, self).__init__(parent)
        join_condition = self.parent_property._join_condition
        # forward and reverse lazy-load criteria, plus maps of bind
        # params to the columns whose values populate them
        self._lazywhere, \
            self._bind_to_col, \
            self._equated_columns = join_condition.create_lazy_clause()

        self._rev_lazywhere, \
            self._rev_bind_to_col, \
            self._rev_equated_columns = join_condition.create_lazy_clause(
                reverse_direction=True)

        self.logger.info("%s lazy loading clause %s", self, self._lazywhere)

        # determine if our "lazywhere" clause is the same as the mapper's
        # get() clause.  then we can just use mapper.get()
        self.use_get = not self.uselist and \
            self.mapper._get_clause[0].compare(
                self._lazywhere,
                use_proxies=True,
                equivalents=self.mapper._equivalent_columns
            )

        if self.use_get:
            # expand _equated_columns so equivalent (e.g. inherited)
            # columns map to the same bound column
            for col in list(self._equated_columns):
                if col in self.mapper._equivalent_columns:
                    for c in self.mapper._equivalent_columns[col]:
                        self._equated_columns[c] = self._equated_columns[col]

            self.logger.info("%s will use query.get() to "
                             "optimize instance loads" % self)

    def init_class_attribute(self, mapper):
        """Install the class-level lazy-loading attribute on the class."""
        self.is_class_level = True

        active_history = (
            self.parent_property.active_history or
            self.parent_property.direction is not interfaces.MANYTOONE or
            not self.use_get
        )

        # MANYTOONE currently only needs the
        # "old" value for delete-orphan
        # cascades.  the required _SingleParentValidator
        # will enable active_history
        # in that case.  otherwise we don't need the
        # "old" value during backref operations.
        _register_attribute(self,
                            mapper,
                            useobject=True,
                            callable_=self._load_for_state,
                            uselist=self.parent_property.uselist,
                            backref=self.parent_property.back_populates,
                            typecallable=self.parent_property.collection_class,
                            active_history=active_history
                            )

    def lazy_clause(self, state, reverse_direction=False,
                    alias_secondary=False,
                    adapt_source=None,
                    passive=None):
        """Build the WHERE criterion used to load the related rows for
        *state*, wiring each bind parameter to pull its value from the
        instance's attributes at statement execution time."""
        if state is None:
            return self._lazy_none_clause(
                reverse_direction,
                adapt_source=adapt_source)

        if not reverse_direction:
            criterion, bind_to_col, rev = \
                self._lazywhere, \
                self._bind_to_col, \
                self._equated_columns
        else:
            criterion, bind_to_col, rev = \
                self._rev_lazywhere, \
                self._rev_bind_to_col, \
                self._rev_equated_columns

        if reverse_direction:
            mapper = self.parent_property.mapper
        else:
            mapper = self.parent_property.parent

        o = state.obj()  # strong ref
        dict_ = attributes.instance_dict(o)

        # use the "committed state" only if we're in a flush
        # for this state.

        if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
            def visit_bindparam(bindparam):
                if bindparam._identifying_key in bind_to_col:
                    bindparam.callable = \
                        lambda: mapper._get_committed_state_attr_by_column(
                            state, dict_,
                            bind_to_col[bindparam._identifying_key])
        else:
            def visit_bindparam(bindparam):
                if bindparam._identifying_key in bind_to_col:
                    bindparam.callable = \
                        lambda: mapper._get_state_attr_by_column(
                            state, dict_,
                            bind_to_col[bindparam._identifying_key])

        if self.parent_property.secondary is not None and alias_secondary:
            criterion = sql_util.ClauseAdapter(
                self.parent_property.secondary.alias()).\
                traverse(criterion)

        criterion = visitors.cloned_traverse(
            criterion, {}, {'bindparam': visit_bindparam})

        if adapt_source:
            criterion = adapt_source(criterion)
        return criterion

    def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
        """Variant of lazy_clause() for a None state: bound columns are
        converted to IS NULL comparisons."""
        if not reverse_direction:
            criterion, bind_to_col, rev = \
                self._lazywhere, \
                self._bind_to_col,\
                self._equated_columns
        else:
            criterion, bind_to_col, rev = \
                self._rev_lazywhere, \
                self._rev_bind_to_col, \
                self._rev_equated_columns

        criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col)

        if adapt_source:
            criterion = adapt_source(criterion)
        return criterion

    def _load_for_state(self, state, passive):
        """Load the related object(s) for *state*; invoked on first access
        of an unloaded attribute.  Honors the *passive* flags, consults
        the identity map directly for simple primary-key ("use_get")
        loads, and otherwise emits a SELECT via _emit_lazyload()."""
        if not state.key and \
            (
                (
                    not self.parent_property.load_on_pending
                    and not state._load_pending
                )
                or not state.session_id
        ):
            return attributes.ATTR_EMPTY

        pending = not state.key
        ident_key = None

        if (
            (not passive & attributes.SQL_OK and not self.use_get)
            or
            (not passive & attributes.NON_PERSISTENT_OK and pending)
        ):
            return attributes.PASSIVE_NO_RESULT

        session = _state_session(state)
        if not session:
            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session; "
                "lazy load operation of attribute '%s' cannot proceed" %
                (orm_util.state_str(state), self.key)
            )

        # if we have a simple primary key load, check the
        # identity map without generating a Query at all
        if self.use_get:
            ident = self._get_ident_for_use_get(
                session,
                state,
                passive
            )
            if attributes.PASSIVE_NO_RESULT in ident:
                return attributes.PASSIVE_NO_RESULT
            elif attributes.NEVER_SET in ident:
                return attributes.NEVER_SET

            # all-None identity means the foreign key is null: no related row
            if _none_set.issuperset(ident):
                return None

            ident_key = self.mapper.identity_key_from_primary_key(ident)
            instance = loading.get_from_identity(session, ident_key, passive)
            if instance is not None:
                return instance
            elif not passive & attributes.SQL_OK or \
                    not passive & attributes.RELATED_OBJECT_OK:
                return attributes.PASSIVE_NO_RESULT

        return self._emit_lazyload(session, state, ident_key, passive)

    def _get_ident_for_use_get(self, session, state, passive):
        """Assemble the related object's primary-key identity from this
        instance's attribute values for the equated columns."""
        instance_mapper = state.manager.mapper

        if passive & attributes.LOAD_AGAINST_COMMITTED:
            get_attr = instance_mapper._get_committed_state_attr_by_column
        else:
            get_attr = instance_mapper._get_state_attr_by_column

        dict_ = state.dict

        return [
            get_attr(
                state,
                dict_,
                self._equated_columns[pk],
                passive=passive)
            for pk in self.mapper.primary_key
        ]

    @util.dependencies("sqlalchemy.orm.strategy_options")
    def _emit_lazyload(self, strategy_options, session, state, ident_key, passive):
        """Emit the actual load: an identity-map oriented get() when
        use_get applies, otherwise a filtered SELECT built from
        lazy_clause()."""
        q = session.query(self.mapper)._adapt_all_clauses()

        if self.parent_property.secondary is not None:
            q = q.select_from(self.mapper, self.parent_property.secondary)

        q = q._with_invoke_all_eagers(False)

        pending = not state.key

        # don't autoflush on pending
        if pending or passive & attributes.NO_AUTOFLUSH:
            q = q.autoflush(False)

        if state.load_path:
            q = q._with_current_path(state.load_path[self.parent_property])

        if state.load_options:
            q = q._conditional_options(*state.load_options)

        if self.use_get:
            return loading.load_on_ident(q, ident_key)

        if self.parent_property.order_by:
            q = q.order_by(*util.to_list(self.parent_property.order_by))

        for rev in self.parent_property._reverse_property:
            # reverse props that are MANYTOONE are loading *this*
            # object from get(), so don't need to eager out to those.
            if rev.direction is interfaces.MANYTOONE and \
                rev._use_get and \
                    not isinstance(rev.strategy, LazyLoader):
                q = q.options(strategy_options.Load(rev.parent).lazyload(rev.key))

        lazy_clause = self.lazy_clause(state, passive=passive)

        if pending:
            # a None bind value on a pending object means the FK attribute
            # is unset; nothing can be loaded
            bind_values = sql_util.bind_values(lazy_clause)
            if None in bind_values:
                return None

        q = q.filter(lazy_clause)

        result = q.all()
        if self.uselist:
            return result
        else:
            l = len(result)
            if l:
                if l > 1:
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for lazily-loaded attribute '%s' "
                        % self.parent_property)

                return result[0]
            else:
                return None

    def create_row_processor(self, context, path, loadopt,
                             mapper, row, adapter):
        key = self.key
        if not self.is_class_level:
            # we are not the primary manager for this attribute
            # on this class - set up a
            # per-instance lazyloader, which will override the
            # class-level behavior.
            # this currently only happens when using a
            # "lazyload" option on a "no load"
            # attribute - "eager" attributes always have a
            # class-level lazyloader installed.
            set_lazy_callable = InstanceState._row_processor(
                mapper.class_manager,
                LoadLazyAttribute(key), key)

            return set_lazy_callable, None, None
        else:
            def reset_for_lazy_callable(state, dict_, row):
                # we are the primary manager for this attribute on
                # this class - reset its
                # per-instance attribute state, so that the class-level
                # lazy loader is
                # executed when next referenced on this instance.
                # this is needed in
                # populate_existing() types of scenarios to reset
                # any existing state.
                state._reset(dict_, key)

            return reset_for_lazy_callable, None, None
class LoadLazyAttribute(object):
    """Serializable callable that triggers a lazy relationship load; used
    by LazyLoader as the per-instance loader callable."""

    def __init__(self, key):
        self.key = key

    def __call__(self, state, passive=attributes.PASSIVE_OFF):
        # resolve the property's LazyLoader strategy at call time and
        # delegate the actual load to it
        prop = state.manager.mapper._props[self.key]
        loader = prop._strategies[LazyLoader]
        return loader._load_for_state(state, passive)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
    """Relationship strategy for lazy="immediate": the attribute is loaded
    via the lazy-load machinery immediately after each row is processed."""

    def init_class_attribute(self, mapper):
        # class-level instrumentation is delegated to the lazy strategy
        lazy_strategy = self.parent_property._get_strategy_by_cls(LazyLoader)
        lazy_strategy.init_class_attribute(mapper)

    def setup_query(self, context, entity,
                    path, loadopt, adapter, column_collection=None,
                    parentmapper=None, **kwargs):
        # nothing is added to the SELECT; the load happens post-fetch
        pass

    def create_row_processor(self, context, path, loadopt,
                             mapper, row, adapter):
        def load_immediate(state, dict_, row):
            state.get_impl(self.key).get(state, dict_)

        # returned in the third position: invoked after the row is populated
        return None, None, load_immediate
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    using a second SELECT statement ("subquery" eager loading), built
    from the original query and executed when first needed.
    """

    def __init__(self, parent):
        super(SubqueryLoader, self).__init__(parent)
        # depth limit for self-referential eager chains
        self.join_depth = self.parent_property.join_depth

    def init_class_attribute(self, mapper):
        # class-level instrumentation is the plain lazy loader; the
        # subquery machinery only participates per-query.
        self.parent_property.\
            _get_strategy_by_cls(LazyLoader).\
            init_class_attribute(mapper)

    def setup_query(self, context, entity,
                    path, loadopt, adapter,
                    column_collection=None,
                    parentmapper=None, **kwargs):
        """Build the subquery-load Query for this path and stash it in
        the context attributes for create_row_processor() to pick up."""

        if not context.query._enable_eagerloads:
            return

        path = path[self.parent_property]

        # build up a path indicating the path from the leftmost
        # entity to the thing we're subquery loading.
        with_poly_info = path.get(context.attributes,
                                  "path_with_polymorphic", None)
        if with_poly_info is not None:
            effective_entity = with_poly_info.entity
        else:
            effective_entity = self.mapper

        subq_path = context.attributes.get(('subquery_path', None),
                                           orm_util.PathRegistry.root)

        subq_path = subq_path + path

        # if not via query option, check for
        # a cycle
        if not path.contains(context.attributes, "loader"):
            if self.join_depth:
                if path.length / 2 > self.join_depth:
                    return
            elif subq_path.contains_mapper(self.mapper):
                return

        subq_mapper, leftmost_mapper, leftmost_attr, leftmost_relationship = \
            self._get_leftmost(subq_path)

        orig_query = context.attributes.get(
            ("orig_query", SubqueryLoader),
            context.query)

        # generate a new Query from the original, then
        # produce a subquery from it.
        left_alias = self._generate_from_original_query(
            orig_query, leftmost_mapper,
            leftmost_attr, leftmost_relationship,
            entity.mapper
        )

        # generate another Query that will join the
        # left alias to the target relationships.
        # basically doing a longhand
        # "from_self()". (from_self() itself not quite industrial
        # strength enough for all contingencies...but very close)
        q = orig_query.session.query(effective_entity)
        q._attributes = {
            ("orig_query", SubqueryLoader): orig_query,
            ('subquery_path', None): subq_path
        }

        q = q._enable_single_crit(False)
        to_join, local_attr, parent_alias = \
            self._prep_for_joins(left_alias, subq_path)
        q = q.order_by(*local_attr)
        q = q.add_columns(*local_attr)

        q = self._apply_joins(q, to_join, left_alias,
                              parent_alias, effective_entity)

        q = self._setup_options(q, subq_path, orig_query, effective_entity)
        q = self._setup_outermost_orderby(q)

        # add new query to attributes to be picked up
        # by create_row_processor
        path.set(context.attributes, "subquery", q)

    def _get_leftmost(self, subq_path):
        """Return mapper/attribute info for the leftmost entity of the
        subquery path."""
        subq_path = subq_path.path
        subq_mapper = orm_util._class_to_mapper(subq_path[0])

        # determine attributes of the leftmost mapper
        if self.parent.isa(subq_mapper) and self.parent_property is subq_path[1]:
            leftmost_mapper, leftmost_prop = \
                self.parent, self.parent_property
        else:
            leftmost_mapper, leftmost_prop = \
                subq_mapper, \
                subq_path[1]

        leftmost_cols = leftmost_prop.local_columns

        leftmost_attr = [
            leftmost_mapper._columntoproperty[c].class_attribute
            for c in leftmost_cols
        ]

        return subq_mapper, leftmost_mapper, leftmost_attr, leftmost_prop

    def _generate_from_original_query(self,
                                      orig_query, leftmost_mapper,
                                      leftmost_attr, leftmost_relationship,
                                      entity_mapper
                                      ):
        """Clone the user's Query down to the leftmost identity columns
        and wrap it as the aliased "left" side to join against."""
        # reformat the original query
        # to look only for significant columns
        q = orig_query._clone().correlate(None)

        # set a real "from" if not present, as this is more
        # accurate than just going off of the column expression
        if not q._from_obj and entity_mapper.isa(leftmost_mapper):
            q._set_select_from([entity_mapper], False)

        target_cols = q._adapt_col_list(leftmost_attr)

        # select from the identity columns of the outer
        q._set_entities(target_cols)

        distinct_target_key = leftmost_relationship.distinct_target_key

        if distinct_target_key is True:
            q._distinct = True
        elif distinct_target_key is None:
            # if target_cols refer to a non-primary key or only
            # part of a composite primary key, set the q as distinct
            for t in set(c.table for c in target_cols):
                if not set(target_cols).issuperset(t.primary_key):
                    q._distinct = True
                    break

        if q._order_by is False:
            q._order_by = leftmost_mapper.order_by

        # don't need ORDER BY if no limit/offset
        if q._limit is None and q._offset is None:
            q._order_by = None

        # the original query now becomes a subquery
        # which we'll join onto.
        embed_q = q.with_labels().subquery()
        left_alias = orm_util.AliasedClass(leftmost_mapper, embed_q,
                                           use_mapper_path=True)
        return left_alias

    def _prep_for_joins(self, left_alias, subq_path):
        """Compute the (entity, attribute-key) chain to join along, the
        aliased immediate parent, and its local column attributes."""
        # figure out what's being joined. a.k.a. the fun part
        to_join = []
        pairs = list(subq_path.pairs())

        for i, (mapper, prop) in enumerate(pairs):
            if i > 0:
                # look at the previous mapper in the chain -
                # if it is as or more specific than this prop's
                # mapper, use that instead.
                # note we have an assumption here that
                # the non-first element is always going to be a mapper,
                # not an AliasedClass
                prev_mapper = pairs[i - 1][1].mapper
                to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
            else:
                to_append = mapper

            to_join.append((to_append, prop.key))

        # determine the immediate parent class we are joining from,
        # which needs to be aliased.
        if len(to_join) > 1:
            info = inspect(to_join[-1][0])

        if len(to_join) < 2:
            # in the case of a one level eager load, this is the
            # leftmost "left_alias".
            parent_alias = left_alias
        elif info.mapper.isa(self.parent):
            # In the case of multiple levels, retrieve
            # it from subq_path[-2]. This is the same as self.parent
            # in the vast majority of cases, and [ticket:2014]
            # illustrates a case where sub_path[-2] is a subclass
            # of self.parent
            parent_alias = orm_util.AliasedClass(to_join[-1][0],
                                                 use_mapper_path=True)
        else:
            # if of_type() were used leading to this relationship,
            # self.parent is more specific than subq_path[-2]
            parent_alias = orm_util.AliasedClass(self.parent,
                                                 use_mapper_path=True)

        local_cols = self.parent_property.local_columns

        local_attr = [
            getattr(parent_alias, self.parent._columntoproperty[c].key)
            for c in local_cols
        ]
        return to_join, local_attr, parent_alias

    def _apply_joins(self, q, to_join, left_alias, parent_alias,
                     effective_entity):
        """Chain query.join() calls along ``to_join`` onto ``q``."""
        for i, (mapper, key) in enumerate(to_join):

            # we need to use query.join() as opposed to
            # orm.join() here because of the
            # rich behavior it brings when dealing with
            # "with_polymorphic" mappers. "aliased"
            # and "from_joinpoint" take care of most of
            # the chaining and aliasing for us.

            first = i == 0
            middle = i < len(to_join) - 1
            second_to_last = i == len(to_join) - 2
            last = i == len(to_join) - 1

            if first:
                attr = getattr(left_alias, key)
                if last and effective_entity is not self.mapper:
                    attr = attr.of_type(effective_entity)
            else:
                if last and effective_entity is not self.mapper:
                    attr = getattr(parent_alias, key).\
                        of_type(effective_entity)
                else:
                    attr = key

            if second_to_last:
                q = q.join(parent_alias, attr, from_joinpoint=True)
            else:
                q = q.join(attr, aliased=middle, from_joinpoint=True)
        return q

    def _setup_options(self, q, subq_path, orig_query, effective_entity):
        """Carry loader options from the original query onto the subquery."""
        # propagate loader options etc. to the new query.
        # these will fire relative to subq_path.
        q = q._with_current_path(subq_path)
        q = q._conditional_options(*orig_query._with_options)
        if orig_query._populate_existing:
            q._populate_existing = orig_query._populate_existing

        return q

    def _setup_outermost_orderby(self, q):
        """Apply the relationship's order_by to the outermost query,
        adapted through the eager join."""
        if self.parent_property.order_by:
            # if there's an ORDER BY, alias it the same
            # way joinedloader does, but we have to pull out
            # the "eagerjoin" from the query.
            # this really only picks up the "secondary" table
            # right now.
            eagerjoin = q._from_obj[0]
            eager_order_by = \
                eagerjoin._target_adapter.\
                copy_and_process(
                    util.to_list(
                        self.parent_property.order_by
                    )
                )
            q = q.order_by(*eager_order_by)
        return q

    class _SubqCollections(object):
        """Given a :class:`.Query` used to emit the "subquery load",
        provide a load interface that executes the query at the
        first moment a value is needed.
        """

        # lazily-populated mapping; None until first access
        _data = None

        def __init__(self, subq):
            self.subq = subq

        def get(self, key, default):
            if self._data is None:
                self._load()
            return self._data.get(key, default)

        def _load(self):
            # group subquery rows by their trailing (key) columns; the
            # leading column of each row is the related object.
            self._data = dict(
                (k, [vv[0] for vv in v])
                for k, v in itertools.groupby(
                    self.subq,
                    lambda x: x[1:]
                )
            )

        def loader(self, state, dict_, row):
            if self._data is None:
                self._load()

    def create_row_processor(self, context, path, loadopt,
                             mapper, row, adapter):
        """Return per-row callables populating the attribute from the
        shared _SubqCollections result, or no-ops when no subquery was
        set up for this path."""
        if not self.parent.class_manager[self.key].impl.supports_population:
            raise sa_exc.InvalidRequestError(
                "'%s' does not support object "
                "population - eager loading cannot be applied." %
                self)

        path = path[self.parent_property]

        subq = path.get(context.attributes, 'subquery')

        if subq is None:
            return None, None, None

        local_cols = self.parent_property.local_columns

        # cache the loaded collections in the context
        # so that inheriting mappers don't re-load when they
        # call upon create_row_processor again
        collections = path.get(context.attributes, "collections")
        if collections is None:
            collections = self._SubqCollections(subq)
            path.set(context.attributes, 'collections', collections)

        if adapter:
            local_cols = [adapter.columns[c] for c in local_cols]

        if self.uselist:
            return self._create_collection_loader(collections, local_cols)
        else:
            return self._create_scalar_loader(collections, local_cols)

    def _create_collection_loader(self, collections, local_cols):
        """Row-processor factory for uselist=True relationships."""
        def load_collection_from_subq(state, dict_, row):
            collection = collections.get(
                tuple([row[col] for col in local_cols]),
                ()
            )
            state.get_impl(self.key).\
                set_committed_value(state, dict_, collection)

        return load_collection_from_subq, None, None, collections.loader

    def _create_scalar_loader(self, collections, local_cols):
        """Row-processor factory for uselist=False relationships;
        warns if more than one row matched."""
        def load_scalar_from_subq(state, dict_, row):
            collection = collections.get(
                tuple([row[col] for col in local_cols]),
                (None,)
            )
            if len(collection) > 1:
                util.warn(
                    "Multiple rows returned with "
                    "uselist=False for eagerly-loaded attribute '%s' "
                    % self)

            scalar = collection[0]
            state.get_impl(self.key).\
                set_committed_value(state, dict_, scalar)

        return load_scalar_from_subq, None, None, collections.loader
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    using joined eager loading.
    """

    def __init__(self, parent):
        super(JoinedLoader, self).__init__(parent)
        # depth limit for self-referential eager chains
        self.join_depth = self.parent_property.join_depth

    def init_class_attribute(self, mapper):
        # class-level instrumentation falls back to plain lazy loading
        self.parent_property.\
            _get_strategy_by_cls(LazyLoader).init_class_attribute(mapper)

    def setup_query(self, context, entity, path, loadopt, adapter, \
                    column_collection=None, parentmapper=None,
                    allow_innerjoin=True,
                    **kwargs):
        """Add a left outer join to the statement thats being constructed."""

        if not context.query._enable_eagerloads:
            return

        path = path[self.parent_property]

        with_polymorphic = None

        user_defined_adapter = self._init_user_defined_eager_proc(
            loadopt, context) if loadopt else False

        if user_defined_adapter is not False:
            # user supplied contains_eager() alias; adapt to it
            clauses, adapter, add_to_collection = \
                self._setup_query_on_user_defined_adapter(
                    context, entity, path, adapter,
                    user_defined_adapter
                )
        else:
            # if not via query option, check for
            # a cycle
            if not path.contains(context.attributes, "loader"):
                if self.join_depth:
                    if path.length / 2 > self.join_depth:
                        return
                elif path.contains_mapper(self.mapper):
                    return

            clauses, adapter, add_to_collection, \
                allow_innerjoin = self._generate_row_adapter(
                    context, entity, path, loadopt, adapter,
                    column_collection, parentmapper, allow_innerjoin
                )

        with_poly_info = path.get(
            context.attributes,
            "path_with_polymorphic",
            None
        )
        if with_poly_info is not None:
            with_polymorphic = with_poly_info.with_polymorphic_mappers
        else:
            with_polymorphic = None

        path = path[self.mapper]

        # set up the target's columns/eager loaders within our adapted
        # context
        for value in self.mapper._iterate_polymorphic_properties(
                mappers=with_polymorphic):
            value.setup(
                context,
                entity,
                path,
                clauses,
                parentmapper=self.mapper,
                column_collection=add_to_collection,
                allow_innerjoin=allow_innerjoin)

        if with_poly_info is not None and \
                None in set(context.secondary_columns):
            raise sa_exc.InvalidRequestError(
                "Detected unaliased columns when generating joined "
                "load. Make sure to use aliased=True or flat=True "
                "when using joined loading with with_polymorphic()."
            )

    def _init_user_defined_eager_proc(self, loadopt, context):
        """Return (and memoize on the path) the row adapter for a
        user-defined contains_eager() alias, or False if the option
        does not apply."""

        # check if the opt applies at all
        if "eager_from_alias" not in loadopt.local_opts:
            # nope
            return False

        path = loadopt.path.parent

        # the option applies. check if the "user_defined_eager_row_processor"
        # has been built up.
        adapter = path.get(context.attributes,
                           "user_defined_eager_row_processor", False)
        if adapter is not False:
            # just return it
            return adapter

        # otherwise figure it out.
        alias = loadopt.local_opts["eager_from_alias"]

        root_mapper, prop = path[-2:]

        #from .mapper import Mapper
        #from .interfaces import MapperProperty
        #assert isinstance(root_mapper, Mapper)
        #assert isinstance(prop, MapperProperty)

        if alias is not None:
            if isinstance(alias, str):
                alias = prop.target.alias(alias)
            adapter = sql_util.ColumnAdapter(alias,
                                             equivalents=prop.mapper._equivalent_columns)
        else:
            if path.contains(context.attributes, "path_with_polymorphic"):
                with_poly_info = path.get(context.attributes,
                                          "path_with_polymorphic")
                adapter = orm_util.ORMAdapter(
                    with_poly_info.entity,
                    equivalents=prop.mapper._equivalent_columns)
            else:
                adapter = context.query._polymorphic_adapters.get(prop.mapper, None)
        path.set(context.attributes,
                 "user_defined_eager_row_processor",
                 adapter)

        return adapter

    def _setup_query_on_user_defined_adapter(self, context, entity,
                                             path, adapter, user_defined_adapter):
        """Wrap the user-defined adapter with the entity's own clauses
        and record it on the path for row processing."""

        # apply some more wrapping to the "user defined adapter"
        # if we are setting up the query for SQL render.
        adapter = entity._get_entity_clauses(context.query, context)

        if adapter and user_defined_adapter:
            user_defined_adapter = user_defined_adapter.wrap(adapter)
            path.set(context.attributes, "user_defined_eager_row_processor",
                     user_defined_adapter)
        elif adapter:
            user_defined_adapter = adapter
            path.set(context.attributes, "user_defined_eager_row_processor",
                     user_defined_adapter)

        add_to_collection = context.primary_columns
        return user_defined_adapter, adapter, add_to_collection

    def _generate_row_adapter(self,
                              context, entity, path, loadopt, adapter,
                              column_collection, parentmapper, allow_innerjoin
                              ):
        """Create the aliased target and ORM adapter for this join and
        queue the eager-join construction on the context."""
        with_poly_info = path.get(
            context.attributes,
            "path_with_polymorphic",
            None
        )
        if with_poly_info:
            to_adapt = with_poly_info.entity
        else:
            to_adapt = orm_util.AliasedClass(self.mapper,
                                             flat=True,
                                             use_mapper_path=True)
        clauses = orm_util.ORMAdapter(
            to_adapt,
            equivalents=self.mapper._equivalent_columns,
            adapt_required=True)
        assert clauses.aliased_class is not None

        if self.parent_property.direction != interfaces.MANYTOONE:
            context.multi_row_eager_loaders = True

        innerjoin = allow_innerjoin and (
            loadopt.local_opts.get(
                'innerjoin', self.parent_property.innerjoin)
            if loadopt is not None
            else self.parent_property.innerjoin
        )

        if not innerjoin:
            # if this is an outer join, all eager joins from
            # here must also be outer joins
            allow_innerjoin = False

        context.create_eager_joins.append(
            (self._create_eager_join, context,
             entity, path, adapter,
             parentmapper, clauses, innerjoin)
        )

        add_to_collection = context.secondary_columns
        path.set(context.attributes, "eager_row_processor", clauses)

        return clauses, adapter, add_to_collection, allow_innerjoin

    def _create_eager_join(self, context, entity,
                           path, adapter, parentmapper,
                           clauses, innerjoin):
        """Construct the actual eager JOIN and register it (plus any
        order_by columns) on the query context."""

        if parentmapper is None:
            localparent = entity.mapper
        else:
            localparent = parentmapper

        # whether or not the Query will wrap the selectable in a subquery,
        # and then attach eager load joins to that (i.e., in the case of
        # LIMIT/OFFSET etc.)
        should_nest_selectable = context.multi_row_eager_loaders and \
            context.query._should_nest_selectable

        entity_key = None

        if entity not in context.eager_joins and \
                not should_nest_selectable and \
                context.from_clause:
            index, clause = \
                sql_util.find_join_source(
                    context.from_clause, entity.selectable)
            if clause is not None:
                # join to an existing FROM clause on the query.
                # key it to its list index in the eager_joins dict.
                # Query._compile_context will adapt as needed and
                # append to the FROM clause of the select().
                entity_key, default_towrap = index, clause

        if entity_key is None:
            entity_key, default_towrap = entity, entity.selectable

        towrap = context.eager_joins.setdefault(entity_key, default_towrap)

        if adapter:
            if getattr(adapter, 'aliased_class', None):
                onclause = getattr(
                    adapter.aliased_class, self.key,
                    self.parent_property)
            else:
                onclause = getattr(
                    orm_util.AliasedClass(
                        self.parent,
                        adapter.selectable,
                        use_mapper_path=True
                    ),
                    self.key, self.parent_property
                )

        else:
            onclause = self.parent_property

        assert clauses.aliased_class is not None

        context.eager_joins[entity_key] = eagerjoin = \
            orm_util.join(
                towrap,
                clauses.aliased_class,
                onclause,
                isouter=not innerjoin
            )

        # send a hint to the Query as to where it may "splice" this join
        eagerjoin.stop_on = entity.selectable

        if self.parent_property.secondary is None and \
                not parentmapper:
            # for parentclause that is the non-eager end of the join,
            # ensure all the parent cols in the primaryjoin are actually
            # in the
            # columns clause (i.e. are not deferred), so that aliasing applied
            # by the Query propagates those columns outward.
            # This has the effect
            # of "undefering" those columns.
            for col in sql_util._find_columns(
                    self.parent_property.primaryjoin):
                if localparent.mapped_table.c.contains_column(col):
                    if adapter:
                        col = adapter.columns[col]
                    context.primary_columns.append(col)

        if self.parent_property.order_by:
            context.eager_order_by += \
                eagerjoin._target_adapter.\
                copy_and_process(
                    util.to_list(
                        self.parent_property.order_by
                    )
                )

    def _create_eager_adapter(self, context, row, adapter, path, loadopt):
        """Return the row adapter to use for this eager load, or False
        to degrade to the lazy loader for this row."""
        user_defined_adapter = self._init_user_defined_eager_proc(
            loadopt, context) if loadopt else False

        if user_defined_adapter is not False:
            decorator = user_defined_adapter
            # user defined eagerloads are part of the "primary"
            # portion of the load.
            # the adapters applied to the Query should be honored.
            if context.adapter and decorator:
                decorator = decorator.wrap(context.adapter)
            elif context.adapter:
                decorator = context.adapter
        else:
            decorator = path.get(context.attributes, "eager_row_processor")
            if decorator is None:
                return False

        try:
            self.mapper.identity_key_from_row(row, decorator)
            return decorator
        except KeyError:
            # no identity key - dont return a row
            # processor, will cause a degrade to lazy
            return False

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        """Return row-processing callables that populate the attribute
        from the joined rows; falls back to the lazy loader's processors
        when no eager adapter could be established."""
        if not self.parent.class_manager[self.key].impl.supports_population:
            raise sa_exc.InvalidRequestError(
                "'%s' does not support object "
                "population - eager loading cannot be applied." %
                self)

        our_path = path[self.parent_property]

        eager_adapter = self._create_eager_adapter(
            context,
            row,
            adapter, our_path, loadopt)

        if eager_adapter is not False:
            key = self.key

            _instance = loading.instance_processor(
                self.mapper,
                context,
                our_path[self.mapper],
                eager_adapter)

            if not self.uselist:
                return self._create_scalar_loader(context, key, _instance)
            else:
                return self._create_collection_loader(context, key, _instance)
        else:
            return self.parent_property.\
                _get_strategy_by_cls(LazyLoader).\
                create_row_processor(
                    context, path, loadopt,
                    mapper, row, adapter)

    def _create_collection_loader(self, context, key, _instance):
        """Row-processor factory for uselist=True relationships."""
        def load_collection_from_joined_new_row(state, dict_, row):
            collection = attributes.init_state_collection(
                state, dict_, key)
            result_list = util.UniqueAppender(collection,
                                              'append_without_event')
            context.attributes[(state, key)] = result_list
            _instance(row, result_list)

        def load_collection_from_joined_existing_row(state, dict_, row):
            if (state, key) in context.attributes:
                result_list = context.attributes[(state, key)]
            else:
                # appender_key can be absent from context.attributes
                # with isnew=False when self-referential eager loading
                # is used; the same instance may be present in two
                # distinct sets of result columns
                collection = attributes.init_state_collection(state,
                                                              dict_, key)
                result_list = util.UniqueAppender(
                    collection,
                    'append_without_event')
                context.attributes[(state, key)] = result_list
            _instance(row, result_list)

        def load_collection_from_joined_exec(state, dict_, row):
            _instance(row, None)

        return load_collection_from_joined_new_row, \
            load_collection_from_joined_existing_row, \
            None, load_collection_from_joined_exec

    def _create_scalar_loader(self, context, key, _instance):
        """Row-processor factory for uselist=False relationships;
        warns if multiple rows target the same attribute."""
        def load_scalar_from_joined_new_row(state, dict_, row):
            # set a scalar object instance directly on the parent
            # object, bypassing InstrumentedAttribute event handlers.
            dict_[key] = _instance(row, None)

        def load_scalar_from_joined_existing_row(state, dict_, row):
            # call _instance on the row, even though the object has
            # been created, so that we further descend into properties
            existing = _instance(row, None)
            if existing is not None \
                    and key in dict_ \
                    and existing is not dict_[key]:
                util.warn(
                    "Multiple rows returned with "
                    "uselist=False for eagerly-loaded attribute '%s' "
                    % self)

        def load_scalar_from_joined_exec(state, dict_, row):
            _instance(row, None)

        return load_scalar_from_joined_new_row, \
            load_scalar_from_joined_existing_row, \
            None, load_scalar_from_joined_exec
def single_parent_validator(desc, prop):
    """Attach append/set listeners to *desc* that enforce the
    single_parent contract: an instance may not be associated with a
    second parent via this relationship."""

    def _validate(state, value, oldvalue, initiator):
        # Only inspect real values assigned through this property.
        if value is None or initiator.key != prop.key:
            return value
        child_state = attributes.instance_state(value)
        if initiator.hasparent(child_state) and oldvalue is not value:
            raise sa_exc.InvalidRequestError(
                "Instance %s is already associated with an instance "
                "of %s via its %s attribute, and is only allowed a "
                "single parent." %
                (orm_util.instance_str(value), state.class_, prop)
            )
        return value

    def append(state, value, initiator):
        return _validate(state, value, None, initiator)

    def set_(state, value, oldvalue, initiator):
        return _validate(state, value, oldvalue, initiator)

    for event_name, handler in (('append', append), ('set', set_)):
        event.listen(desc, event_name, handler, raw=True, retval=True,
                     active_history=True)
|
|
import configparser
import json
import os
import unittest
import uuid
import pandas as pd
from TM1py import Subset
from TM1py.Objects import Process, Dimension, Hierarchy, Cube
from TM1py.Services import TM1Service
from TM1py.Utils import TIObfuscator
from TM1py.Utils import Utils, MDXUtils
from TM1py.Utils.MDXUtils import DimensionSelection, read_dimension_composition_from_mdx, \
read_dimension_composition_from_mdx_set_or_tuple, read_dimension_composition_from_mdx_set, \
read_dimension_composition_from_mdx_tuple, split_mdx, _find_case_and_space_insensitive_first_occurrence
from TM1py.Utils.Utils import dimension_hierarchy_element_tuple_from_unique_name
# TM1 connection parameters are read from config.ini next to this file;
# the [tm1srv01] section must describe a reachable TM1 instance.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))

# Prefix applied to every TM1 object this module creates, so test
# artifacts are easy to identify and clean up on the server.
PREFIX = "TM1py_Tests_Utils_"

# MDX skeleton including a WHERE (title/context) clause.
MDX_TEMPLATE = """
SELECT
{rows} ON ROWS,
{columns} ON COLUMNS
FROM {cube}
WHERE {where}
"""

# MDX skeleton without a WHERE clause.
MDX_TEMPLATE_SHORT = """
SELECT
{rows} ON ROWS,
{columns} ON COLUMNS
FROM {cube}
"""
class TestMDXUtils(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Connect to TM1 and define, in memory, four ten-element
    dimensions plus a cube spanning them.

    The objects are only *defined* here; they are created on the
    server fresh for each test in setUp(). The four dimension-building
    stanzas were identical except for name/prefix, so they are
    factored into a local helper.
    """
    # Connect to TM1
    cls.tm1 = TM1Service(**config['tm1srv01'])

    def build_dimension(dimension_name, element_prefix):
        # One flat hierarchy holding ten numeric elements,
        # e.g. "A 0" .. "A 9".
        element_names = [element_prefix + " " + str(i) for i in range(10)]
        dimension = Dimension(dimension_name)
        hierarchy = Hierarchy(dimension_name, dimension_name)
        for element_name in element_names:
            hierarchy.add_element(element_name, 'Numeric')
        dimension.add_hierarchy(hierarchy)
        return element_names, dimension

    # Build 4 Dimensions
    cls.dim1_name = PREFIX + "Dimension1"
    cls.dim1_element_names, cls.dim1 = build_dimension(cls.dim1_name, "A")
    cls.dim2_name = PREFIX + "Dimension2"
    cls.dim2_element_names, cls.dim2 = build_dimension(cls.dim2_name, "B")
    cls.dim3_name = PREFIX + "Dimension3"
    cls.dim3_element_names, cls.dim3 = build_dimension(cls.dim3_name, "C")
    cls.dim4_name = PREFIX + "Dimension4"
    cls.dim4_element_names, cls.dim4 = build_dimension(cls.dim4_name, "D")

    # Define cube with 4 dimensions
    cls.cube_name = PREFIX + "Cube"
    cls.cube = Cube(
        name=cls.cube_name,
        dimensions=[cls.dim1_name, cls.dim2_name, cls.dim3_name, cls.dim4_name])
def setUp(self):
    """Drop any leftover objects, then (re)create the dimensions, the
    cube, and a one-element subset on dimension 4."""
    cubes = self.tm1.cubes
    dimensions = self.tm1.dimensions

    if cubes.exists(self.cube_name):
        cubes.delete(self.cube_name)
    for dimension in (self.dim1, self.dim2, self.dim3, self.dim4):
        if dimensions.exists(dimension.name):
            dimensions.delete(dimension.name)
        dimensions.create(dimension)
    cubes.create(self.cube)

    # Build Subset
    self.dim4_subset_Name = PREFIX + "Subset"
    head_subset = Subset(
        subset_name=self.dim4_subset_Name,
        dimension_name=self.dim4_name,
        hierarchy_name=self.dim4_name,
        expression="HEAD([{}].Members, 1)".format(self.dim4_name))
    dimensions.subsets.create(head_subset)
def tearDown(self):
    """Remove the cube and all four dimensions from the server."""
    self.tm1.cubes.delete(self.cube_name)
    for dimension_name in (self.dim1_name, self.dim2_name,
                           self.dim3_name, self.dim4_name):
        self.tm1.dimensions.delete(dimension_name)
def test_construct_mdx(self):
    """Full MDX with rows, columns and a WHERE context addresses one
    cell per row/column combination (10 * 10 * 10 = 1000)."""
    row_selections = [
        DimensionSelection(dimension_name=self.dim1_name),
        DimensionSelection(dimension_name=self.dim2_name,
                           elements=self.dim2_element_names)]
    column_selections = [DimensionSelection(
        dimension_name=self.dim3_name,
        expression="TM1SubsetAll([{}])".format(self.dim3_name))]
    context_selections = {self.dim4_name: self.dim4_element_names[0]}

    mdx = MDXUtils.construct_mdx(
        cube_name=self.cube_name,
        rows=row_selections,
        columns=column_selections,
        contexts=context_selections,
        suppress=None)
    cellset = self.tm1.cubes.cells.execute_mdx(mdx)
    self.assertEqual(len(cellset.keys()), 1000)
def test_construct_mdx_no_titles(self):
    """MDX without a WHERE clause still addresses 1000 cells, since
    dimension 4 is restricted to a single element via its subset."""
    row_selections = [
        DimensionSelection(dimension_name=self.dim1_name),
        DimensionSelection(dimension_name=self.dim2_name,
                           elements=self.dim2_element_names)]
    column_selections = [
        DimensionSelection(
            dimension_name=self.dim3_name,
            expression="TM1SubsetAll([{}])".format(self.dim3_name)),
        DimensionSelection(
            dimension_name=self.dim4_name,
            subset=self.dim4_subset_Name)]

    mdx = MDXUtils.construct_mdx(
        cube_name=self.cube_name,
        rows=row_selections,
        columns=column_selections,
        suppress=None)
    cellset = self.tm1.cubes.cells.execute_mdx(mdx)
    self.assertEqual(len(cellset.keys()), 1000)
def test_construct_mdx_suppress_zeroes(self):
    """With zero suppression active, fewer than the full 1000 cells
    come back (the cube is mostly empty)."""
    row_selections = [
        DimensionSelection(dimension_name=self.dim1_name),
        DimensionSelection(dimension_name=self.dim2_name,
                           elements=self.dim2_element_names)]
    column_selections = [
        DimensionSelection(
            dimension_name=self.dim3_name,
            expression="TM1SubsetAll([{}])".format(self.dim3_name)),
        DimensionSelection(
            dimension_name=self.dim4_name,
            subset=self.dim4_subset_Name)]

    mdx = MDXUtils.construct_mdx(
        cube_name=self.cube_name,
        rows=row_selections,
        columns=column_selections,
        suppress="BOTH")
    cellset = self.tm1.cubes.cells.execute_mdx(mdx)
    self.assertLess(len(cellset.keys()), 1000)
def test_determine_selection_type(self):
    """determine_selection_type maps each way of specifying a selection
    onto the right constant, yields None when nothing is given, and
    raises ValueError for ambiguous combinations."""
    # valid combinations, exercised via keyword and positional calls
    valid_cases = [
        ((["e1", "e2"], None, None), DimensionSelection.ITERABLE),
        ((None, "something", None), DimensionSelection.SUBSET),
        ((None, None, "{[d1].[e1]}"), DimensionSelection.EXPRESSION),
        ((None, None, None), None),
    ]
    for (elements, subset, expression), expected in valid_cases:
        self.assertEqual(
            DimensionSelection.determine_selection_type(
                elements=elements, subset=subset, expression=expression),
            expected)
        self.assertEqual(
            DimensionSelection.determine_selection_type(
                elements, subset, expression),
            expected)

    # omitted arguments fall back to their defaults
    self.assertEqual(
        DimensionSelection.determine_selection_type(["e1", "e2"]),
        DimensionSelection.ITERABLE)
    self.assertEqual(
        DimensionSelection.determine_selection_type(),
        None)

    # specifying more than one selection style at once is ambiguous
    for bad_args in ((["e2"], "subset1", "{[d1].[e1]}"),
                     (["e2"], "subset1"),
                     (["e2"], None, "subset1")):
        self.assertRaises(
            ValueError,
            DimensionSelection.determine_selection_type, *bad_args)
def test_curly_braces(self):
    """curly_braces wraps a string in braces exactly once, regardless
    of which braces are already present."""
    for raw in ("something", "something}", "{something", "{something}"):
        self.assertEqual(MDXUtils.curly_braces(raw), "{something}")
def test_build_pandas_multiindex_dataframe_from_cellset(self):
    """Round trip: cellset -> multi-index DataFrame -> cellset."""
    row_selections = [
        DimensionSelection(dimension_name=self.dim1_name),
        DimensionSelection(dimension_name=self.dim2_name,
                           elements=self.dim2_element_names)]
    column_selections = [
        DimensionSelection(
            dimension_name=self.dim3_name,
            expression="TM1SubsetAll([{}])".format(self.dim3_name)),
        DimensionSelection(
            dimension_name=self.dim4_name,
            subset=self.dim4_subset_Name)]
    mdx = MDXUtils.construct_mdx(
        cube_name=self.cube_name,
        rows=row_selections,
        columns=column_selections,
        suppress=None)
    cellset = self.tm1.cubes.cells.execute_mdx(mdx)

    # cellset -> DataFrame: the index carries the dimensions, leaving
    # one value column for 1000 cells.
    df = Utils.build_pandas_dataframe_from_cellset(cellset, multiindex=True)
    self.assertIsInstance(df, pd.DataFrame)
    self.assertEqual(df.shape, (1000, 1))

    # DataFrame -> cellset
    cellset = Utils.build_cellset_from_pandas_dataframe(df)
    self.assertEqual(len(cellset.keys()), 1000)
    self.assertIsInstance(cellset, Utils.CaseAndSpaceInsensitiveTuplesDict)
def test_build_pandas_dataframe_from_cellset(self):
    """Round trip: cellset -> flat DataFrame -> cellset."""
    row_selections = [
        DimensionSelection(dimension_name=self.dim1_name),
        DimensionSelection(dimension_name=self.dim2_name,
                           elements=self.dim2_element_names)]
    column_selections = [
        DimensionSelection(
            dimension_name=self.dim3_name,
            expression="TM1SubsetAll([{}])".format(self.dim3_name)),
        DimensionSelection(
            dimension_name=self.dim4_name,
            subset=self.dim4_subset_Name)]
    mdx = MDXUtils.construct_mdx(
        cube_name=self.cube_name,
        rows=row_selections,
        columns=column_selections,
        suppress=None)
    cellset = self.tm1.cubes.cells.execute_mdx(mdx)

    # cellset -> DataFrame: four dimension columns plus one value
    # column for 1000 cells.
    df = Utils.build_pandas_dataframe_from_cellset(cellset, multiindex=False)
    self.assertEqual(df.shape, (1000, 5))
    self.assertIsInstance(df, pd.DataFrame)

    # DataFrame -> cellset
    cellset = Utils.build_cellset_from_pandas_dataframe(df)
    self.assertEqual(len(cellset.keys()), 1000)
    self.assertIsInstance(cellset, Utils.CaseAndSpaceInsensitiveTuplesDict)
def test_build_pandas_dataframe_empty_cellset(self):
    """A fully suppressed (empty) cellset cannot be converted:
    build_pandas_dataframe_from_cellset raises ValueError for both
    multiindex modes."""
    # write a zero so suppression removes the only addressed cell
    self.tm1.cubes.cells.write_value(
        value=0,
        cube_name=self.cube_name,
        element_tuple=(self.dim1_element_names[0], self.dim2_element_names[0],
                       self.dim3_element_names[0], self.dim4_element_names[0]),
        dimensions=(self.dim1_name, self.dim2_name, self.dim3_name, self.dim4_name))
    row_selections = [
        DimensionSelection(dimension_name=self.dim1_name,
                           elements=(self.dim1_element_names[0],)),
        DimensionSelection(dimension_name=self.dim2_name,
                           elements=(self.dim2_element_names[0],))]
    column_selections = [
        DimensionSelection(dimension_name=self.dim3_name,
                           elements=(self.dim3_element_names[0],)),
        DimensionSelection(dimension_name=self.dim4_name,
                           elements=(self.dim4_element_names[0],))]
    mdx = MDXUtils.construct_mdx(
        cube_name=self.cube_name,
        rows=row_selections,
        columns=column_selections,
        suppress="Both")
    empty_cellset = self.tm1.cubes.cells.execute_mdx(mdx)

    for multiindex in (True, False):
        self.assertRaises(
            ValueError,
            Utils.build_pandas_dataframe_from_cellset,
            empty_cellset, multiindex)
@unittest.skip("Not deterministic. Needs improvement.")
def test_read_cube_name_from_mdx(self):
    """read_cube_name_from_mdx should recover the cube name
    (upper-cased, spaces stripped) from every view MDX on the server.

    Fix: use assertEqual; assertEquals is a deprecated alias removed
    in newer Python versions.
    """
    all_cube_names = self.tm1.cubes.get_all_names()
    for cube_name in all_cube_names:
        private_views, public_views = self.tm1.cubes.views.get_all(cube_name)
        for view in private_views + public_views:
            mdx = view.MDX
            self.assertEqual(
                cube_name.upper().replace(" ", ""),
                MDXUtils.read_cube_name_from_mdx(mdx))
def test_dimension_hierarchy_element_tuple_from_unique_name(self):
    """Unique element names with two parts, a repeated hierarchy, or an alternate hierarchy must split correctly."""
    cases = (
        ("[d1].[e1]", "d1", "d1", "e1"),
        ("[d1].[d1].[e1]", "d1", "d1", "e1"),
        ("[d1].[leaves].[e1]", "d1", "leaves", "e1"),
    )
    for unique_element_name, expected_dim, expected_hier, expected_elem in cases:
        dimension, hierarchy, element = dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)
        self.assertEqual(dimension, expected_dim)
        self.assertEqual(hierarchy, expected_hier)
        self.assertEqual(element, expected_elem)
def test_read_dimension_composition_from_mdx_simple1(self):
    """Two dimensions on rows, one on columns, one in the WHERE slicer."""
    mdx = MDX_TEMPLATE.format(
        rows="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(self.dim1_name, self.dim2_name),
        columns="{{ [{}].MEMBERS }}".format(self.dim3_name),
        cube="[{}]".format(self.cube_name),
        where="([{}].[{}])".format(self.dim4_name, self.dim4_element_names[0])
    )
    cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
    self.assertEqual(cube, self.cube_name)
    self.assertEqual(rows, [self.dim1_name, self.dim2_name])
    self.assertEqual(columns, [self.dim3_name])
    # The WHERE dimension is reported as a title dimension.
    self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_simple2(self):
    """One dimension on rows, two on columns, one in the WHERE slicer."""
    mdx = MDX_TEMPLATE.format(
        rows="{{ [{}].MEMBERS }}".format(self.dim3_name),
        columns="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(self.dim1_name, self.dim2_name),
        cube="[{}]".format(self.cube_name),
        where="( [{}].[{}] )".format(self.dim4_name, self.dim4_element_names[0])
    )
    cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
    self.assertEqual(cube, self.cube_name)
    self.assertEqual(rows, [self.dim3_name])
    self.assertEqual(columns, [self.dim1_name, self.dim2_name])
    self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_simple3(self):
    """Single-dimension rows and columns with a two-member WHERE slicer."""
    mdx = MDX_TEMPLATE.format(
        rows="{[" + self.dim3_name + "].MEMBERS}",
        columns="{[" + self.dim1_name + "].MEMBERS}",
        cube="[{}]".format(self.cube_name),
        where="([{}].[{}], [{}].[{}])".format(self.dim4_name, self.dim4_element_names[0], self.dim2_name,
                                              self.dim2_element_names[0])
    )
    cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
    self.assertEqual(cube, self.cube_name)
    self.assertEqual(rows, [self.dim3_name])
    self.assertEqual(columns, [self.dim1_name])
    # Both WHERE dimensions show up as titles, in order of appearance.
    self.assertEqual(titles, [self.dim4_name, self.dim2_name])
def test_read_dimension_composition_from_mdx_without_titles(self):
    """An MDX without a WHERE clause must yield rows and columns but no title dimensions."""
    mdx = MDX_TEMPLATE_SHORT.format(
        rows="{[" + self.dim1_name + "].MEMBERS} * {[" + self.dim2_name + "].MEMBERS}",
        columns="{[" + self.dim3_name + "].MEMBERS} * {[" + self.dim4_name + "].MEMBERS}",
        cube="[{}]".format(self.cube_name)
    )
    cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
    self.assertEqual(cube, self.cube_name)
    self.assertEqual(rows, [self.dim1_name, self.dim2_name])
    self.assertEqual(columns, [self.dim3_name, self.dim4_name])
    # Bug fix: `titles` was unpacked but never verified; without a WHERE clause it
    # must be empty (mirrors test_read_dimension_composition_from_mdx_asynchronous_multi).
    self.assertEqual(titles, [])
def test_read_dimension_composition_from_mdx_asynchronous_single(self):
    """Rows given as a single explicit tuple must still yield both member dimensions."""
    mdx = MDX_TEMPLATE.format(
        rows="{([" + self.dim1_name + "].[" + self.dim1_element_names[0] + "], [" + self.dim2_name + "].[" +
             self.dim2_element_names[0] + "])}",
        columns="{[" + self.dim3_name + "].MEMBERS}",
        cube="[{}]".format(self.cube_name),
        where="([" + self.dim4_name + "].[" + self.dim4_element_names[0] + "])"
    )
    cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
    self.assertEqual(cube, self.cube_name)
    self.assertEqual(rows, [self.dim1_name, self.dim2_name])
    self.assertEqual(columns, [self.dim3_name])
    self.assertEqual(titles, [self.dim4_name])
def test_read_dimension_composition_from_mdx_asynchronous_multi(self):
    """Multiple explicit tuples on both axes and no WHERE clause: titles must come back empty."""
    mdx = MDX_TEMPLATE_SHORT.format(
        rows="{([" + self.dim1_name + "].[" + self.dim1_element_names[0] + "], [" + self.dim2_name + "].[" +
             self.dim2_element_names[0] + "]),([" + self.dim1_name + "].[" + self.dim1_element_names[
                 1] + "], [" + self.dim2_name + "].[" +
             self.dim2_element_names[1] + "]) }",
        columns="{([" + self.dim3_name + "].[" + self.dim3_element_names[0] + "], [" + self.dim4_name + "].[" +
                self.dim4_element_names[0] + "]),([" + self.dim3_name + "].[" + self.dim3_element_names[
                    1] + "], [" + self.dim4_name + "].[" +
                self.dim4_element_names[1] + "]) }",
        cube="[{}]".format(self.cube_name)
    )
    cube, rows, columns, titles = read_dimension_composition_from_mdx(mdx=mdx)
    self.assertEqual(cube, self.cube_name)
    self.assertEqual(rows, [self.dim1_name, self.dim2_name])
    self.assertEqual(columns, [self.dim3_name, self.dim4_name])
    # No WHERE clause, hence no title dimensions.
    self.assertEqual(titles, [])
def test_read_dimension_composition_from_mdx_set_or_tuple(self):
    """Dimension names must be extracted from both set-style and tuple-style MDX fragments."""
    cases = (
        ("{[dim1].[element1]} * {[dim2].[element2]}", ["dim1", "dim2"]),
        ("{[dim1].[element1], [dim1].[element2]}", ["dim1"]),
        ("{[dim1].Members}", ["dim1"]),
        ("{Tm1SubsetAll([dim1])}", ["dim1"]),
        ("{([dim1].[element1], [dim2].[element2])}", ["dim1", "dim2"]),
        ("{([dim1].[element1])}", ["dim1"]),
        ("{([dim1].[element1], [dim2].[element2]), ([dim1].[element8], [dim2].[element5])}", ["dim1", "dim2"]),
    )
    for mdx_fragment, expected_dimensions in cases:
        dimensions = read_dimension_composition_from_mdx_set_or_tuple(mdx_fragment)
        self.assertEqual(dimensions, expected_dimensions)
def test_read_dimension_composition_from_mdx_set(self):
    """Dimension names must be extracted from the various set notations."""
    set_cases = (
        ("{[dim1].[element1]} * {[dim2].[element2]}", ["dim1", "dim2"]),
        ("{[dim1].[element1], [dim1].[element2]}", ["dim1"]),
        ("{[dim1].Members}", ["dim1"]),
        ("{Tm1SubsetAll([dim1])}", ["dim1"]),
    )
    for mdx_set, expected_dimensions in set_cases:
        self.assertEqual(read_dimension_composition_from_mdx_set(mdx_set), expected_dimensions)
def test_read_dimension_composition_from_mdx_tuple(self):
    """Dimension names must be extracted from single- and multi-tuple notations."""
    tuple_cases = (
        ("{([dim1].[element1], [dim2].[element2])}", ["dim1", "dim2"]),
        ("{([dim1].[element1])}", ["dim1"]),
        ("{([dim1].[element1], [dim2].[element2]), ([dim1].[element8], [dim2].[element5])}", ["dim1", "dim2"]),
    )
    for mdx_tuple, expected_dimensions in tuple_cases:
        self.assertEqual(read_dimension_composition_from_mdx_tuple(mdx_tuple), expected_dimensions)
def test_split_mdx_sets(self):
    """split_mdx must separate a set-based MDX into rows, columns, FROM and WHERE sections."""
    # NOTE(review): '[elem1]' looks like a typo for '[{elem1}]' (the elem1 kwarg below is
    # never substituted); the test still passes because the identical string is used on
    # both sides of the comparison.
    rows = "{{ [{dim1}].[elem1] , [{dim2}].[{elem2}] }}".format(
        dim1=self.dim1_name,
        elem1=self.dim1_element_names[0],
        dim2=self.dim2_name,
        elem2=self.dim2_element_names[0]
    )
    columns = "{{ [{}].MEMBERS }}".format(self.dim3_name)
    cube = "[{}]".format(self.cube_name)
    where = "([{}].[{}])".format(self.dim4_name, self.dim4_element_names[0])
    mdx = MDX_TEMPLATE.format(
        rows=rows,
        columns=columns,
        cube=cube,
        where=where
    )
    mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
    # split_mdx returns each section with all spaces removed.
    self.assertEqual(rows.replace(" ", ""), mdx_rows)
    self.assertEqual(columns.replace(" ", ""), mdx_columns)
    self.assertEqual(cube.replace(" ", ""), mdx_from)
    self.assertEqual(where.replace(" ", ""), mdx_where)
def test_split_mdx_tuples_without_where(self):
    """split_mdx must handle tuple-based row/column definitions when no WHERE clause is present."""
    # NOTE(review): the '.' between '[{elem3}]' and '[{dim2}]' looks like a typo for ',',
    # and elem3 is filled from dim2's elements despite being labelled as a dim1 member;
    # the test still passes because the same string is compared against itself.
    rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
        dim1=self.dim1_name,
        elem1=self.dim1_element_names[0],
        dim2=self.dim2_name,
        elem2=self.dim2_element_names[0],
        elem3=self.dim2_element_names[1],
        elem4=self.dim2_element_names[1]
    )
    columns = "{{([{dim3}].[{elem1}], [{dim4}].[{elem2}])}}".format(
        dim3=self.dim3_name,
        elem1=self.dim3_element_names[0],
        dim4=self.dim4_name,
        elem2=self.dim4_element_names[0]
    )
    cube = "[{}]".format(self.cube_name)
    mdx = MDX_TEMPLATE_SHORT.format(
        rows=rows,
        columns=columns,
        cube=cube
    )
    mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
    self.assertEqual(rows.replace(" ", ""), mdx_rows)
    self.assertEqual(columns.replace(" ", ""), mdx_columns)
    self.assertEqual(cube.replace(" ", ""), mdx_from)
def test_split_mdx_tuples_with_where(self):
    """split_mdx must handle tuple-based rows/columns together with a WHERE slicer."""
    # NOTE(review): same suspected '.'-for-',' typo as in test_split_mdx_tuples_without_where;
    # harmless here because the identical string is used on both sides of the comparison.
    rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
        dim1=self.dim1_name,
        elem1=self.dim1_element_names[0],
        dim2=self.dim2_name,
        elem2=self.dim2_element_names[0],
        elem3=self.dim2_element_names[1],
        elem4=self.dim2_element_names[1]
    )
    columns = "{{ ( [{dim3}].[{elem1}] ) }}".format(
        dim3=self.dim3_name,
        elem1=self.dim3_element_names[0]
    )
    cube = "[{}]".format(self.cube_name)
    where = "( [{dim4}].[{elem1}] )".format(
        dim4=self.dim4_name,
        elem1=self.dim4_element_names[0]
    )
    mdx = MDX_TEMPLATE.format(
        rows=rows,
        columns=columns,
        cube=cube,
        where=where
    )
    mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
    self.assertEqual(rows.replace(" ", ""), mdx_rows)
    self.assertEqual(columns.replace(" ", ""), mdx_columns)
    self.assertEqual(cube.replace(" ", ""), mdx_from)
    self.assertEqual(where.replace(" ", ""), mdx_where)
def test_split_mdx_sets_and_tuples(self):
    """split_mdx must handle tuple rows combined with a Tm1SubsetAll set on columns."""
    rows = "{{ ( [{dim1}].[{elem1}], [{dim2}].[{elem2}] ) , ( [{dim1}].[{elem3}]. [{dim2}].[{elem4}] ) }}".format(
        dim1=self.dim1_name,
        elem1=self.dim1_element_names[0],
        dim2=self.dim2_name,
        elem2=self.dim2_element_names[0],
        elem3=self.dim2_element_names[1],
        elem4=self.dim2_element_names[1]
    )
    # NOTE(review): the elem1 kwarg below is unused by the format string; str.format
    # silently ignores extra keyword arguments.
    columns = "{{ Tm1SubsetAll ( [{dim3}] ) }}".format(
        dim3=self.dim3_name,
        elem1=self.dim3_element_names[0]
    )
    cube = "[{}]".format(self.cube_name)
    where = "( [{dim4}].[{elem2}] )".format(
        dim4=self.dim4_name,
        elem2=self.dim4_element_names[0]
    )
    mdx = MDX_TEMPLATE.format(
        rows=rows,
        columns=columns,
        cube=cube,
        where=where
    )
    mdx_rows, mdx_columns, mdx_from, mdx_where = split_mdx(mdx)
    self.assertEqual(rows.replace(" ", ""), mdx_rows)
    self.assertEqual(columns.replace(" ", ""), mdx_columns)
    self.assertEqual(cube.replace(" ", ""), mdx_from)
    self.assertEqual(where.replace(" ", ""), mdx_where)
def test_find_case_and_space_insensitive_first_occurrence(self):
    """The helper must return the span between the two patterns plus the remainder, spaces removed."""
    mdx = MDX_TEMPLATE.format(
        rows="{{ [{}].MEMBERS }}".format(self.dim3_name),
        columns="{{ [{}].MEMBERS }} * {{ [{}].MEMBERS }}".format(self.dim1_name, self.dim2_name),
        cube="[{}]".format(self.cube_name),
        where="( [{}].[{}] )".format(self.dim4_name, self.dim4_element_names[0]))
    selection, rest = _find_case_and_space_insensitive_first_occurrence(
        text=mdx,
        pattern_start="ROWS,",
        pattern_end="}ON COLUMNS")
    # NOTE(review): the expected literals hard-code the fixture names
    # (TM1py_Tests_Utils_* and element 'D0') — they must stay in sync with the
    # class-level setUp values.
    self.assertEqual(
        "ROWS,{[TM1py_Tests_Utils_Dimension1].MEMBERS}*{[TM1py_Tests_Utils_Dimension2].MEMBERS}",
        selection)
    self.assertEqual(
        "FROM[TM1py_Tests_Utils_Cube]WHERE([TM1py_Tests_Utils_Dimension4].[D0])",
        rest)
def test_extract_unique_name_from_members(self):
    """Unique names come from the nested 'Element' entry when available, else from the member itself."""
    expected_pair = ["[Dimension1].[Dimension1].[Element 790]", "[Dimension2].[Dimension2].[Element 541]"]
    cases = (
        # single member, both levels populated
        ([{'UniqueName': '[Dimension3].[Dimension3].[Element 592]',
           'Element': {'UniqueName': '[Dimension3].[Dimension3].[Element 592]'}}],
         ["[Dimension3].[Dimension3].[Element 592]"]),
        # two members, both levels populated
        ([{'UniqueName': '[Dimension1].[Dimension1].[Element 790]',
           'Element': {'UniqueName': '[Dimension1].[Dimension1].[Element 790]'}},
          {'UniqueName': '[Dimension2].[Dimension2].[Element 541]',
           'Element': {'UniqueName': '[Dimension2].[Dimension2].[Element 541]'}}],
         expected_pair),
        # top-level UniqueName empty -> fall back to the nested Element
        ([{'UniqueName': '',
           'Element': {'UniqueName': '[Dimension1].[Dimension1].[Element 790]'}},
          {'UniqueName': '',
           'Element': {'UniqueName': '[Dimension2].[Dimension2].[Element 541]'}}],
         expected_pair),
        # nested Element missing -> use the top-level UniqueName
        ([{'UniqueName': '[Dimension1].[Dimension1].[Element 790]',
           'Element': None},
          {'UniqueName': '[Dimension2].[Dimension2].[Element 541]',
           'Element': None}],
         expected_pair),
    )
    for members, expected in cases:
        self.assertEqual(Utils.extract_unique_names_from_members(members), expected)
def test_extract_axes_from_cellset(self):
    """Row, column and title axes must be recoverable from a raw cellset JSON payload."""
    # Uses a canned cellset response checked into the test resources directory.
    with open(os.path.join("resources", "raw_cellset.json")) as file:
        raw_cellset_as_dict = json.load(file)
    row_axis, column_axis, title_axis = Utils.extract_axes_from_cellset(raw_cellset_as_dict=raw_cellset_as_dict)
    self.assertIn("[City].[City].[NYC]", json.dumps(row_axis))
    self.assertIn("[City].[City].[Chicago]", json.dumps(row_axis))
    self.assertIn("[Date].[Date].[2017-11-26]", json.dumps(column_axis))
    self.assertIn("[Date].[Date].[2017-11-27]", json.dumps(column_axis))
    self.assertIn("[Version].[Version].[Actual]", json.dumps(title_axis))
@classmethod
def tearDownClass(cls):
    # Close the TM1 session opened in setUpClass.
    cls.tm1.logout()
class TestTIObfuscatorMethods(unittest.TestCase):
    """Integration tests for TIObfuscator against a live TM1 instance.

    setUpClass provisions uniquely named processes, a dimension and a cube;
    tearDownClass removes everything again.
    """

    @classmethod
    def setUpClass(cls):
        """Create the processes, dimension, cube and bedrock helpers the tests obfuscate."""
        # Namings: uuid4 names avoid collisions with existing server objects.
        cls.expand_process_name = str(uuid.uuid4())
        cls.expand_process_name_obf = str(uuid.uuid4())
        cls.process_name = str(uuid.uuid4())
        cls.process_name_obf = str(uuid.uuid4())
        cls.dimension_name = str(uuid.uuid4())
        cls.dimension_name_cloned = str(uuid.uuid4())
        cls.cube_name = str(uuid.uuid4())
        cls.cube_name_cloned = str(uuid.uuid4())
        # Connect to TM1
        cls.tm1 = TM1Service(**config['tm1srv01'])
        # create a simple process with prolog and epilog
        prolog = "\r\nSaveDataAll;\r\nsText='abcABC';\r\n"
        epilog = "SaveDataAll;"
        cls.process = Process(
            name=cls.process_name,
            prolog_procedure=prolog,
            epilog_procedure=epilog)
        # create the process in TM1 (recreate if it already exists)
        if cls.tm1.processes.exists(cls.process.name):
            cls.tm1.processes.delete(cls.process.name)
        cls.tm1.processes.create(cls.process)
        # create a process that uses EXPAND
        prolog = "\r\nnRevenue = 20;\r\nsRevenue = EXPAND('%nrevenue%');\r\nIF(sRevenue @ <> '20.000');\r\n" \
                 "ProcessBreak;\r\nENDIF;"
        cls.expand_process = Process(
            name=cls.expand_process_name,
            prolog_procedure=prolog)
        # create the EXPAND process in TM1 (recreate if it already exists)
        if cls.tm1.processes.exists(cls.expand_process.name):
            cls.tm1.processes.delete(cls.expand_process.name)
        cls.tm1.processes.create(cls.expand_process)
        # create dimension that we clone through obfuscated bedrock as part of the test
        if not cls.tm1.dimensions.exists(cls.dimension_name):
            d = Dimension(cls.dimension_name)
            h = Hierarchy(cls.dimension_name, cls.dimension_name)
            h.add_element('Total Years', 'Consolidated')
            h.add_element('No Year', 'Numeric')
            for year in range(1989, 2040, 1):
                h.add_element(str(year), 'Numeric')
                h.add_edge('Total Years', str(year), 1)
            d.add_hierarchy(h)
            cls.tm1.dimensions.create(d)
            # Create 2 Attributes through TI
            ti_statements = ["AttrInsert('{}','','Previous Year', 'S')".format(cls.dimension_name),
                             "AttrInsert('{}','','Next Year', 'S');".format(cls.dimension_name)]
            ti = ';'.join(ti_statements)
            cls.tm1.processes.execute_ti_code(lines_prolog=ti)
        # create }ElementAttribute values
        cellset = {}
        for year in range(1989, 2040, 1):
            cellset[(str(year), 'Previous Year')] = year - 1
            cellset[(str(year), 'Next Year')] = year + 1
        cls.tm1.cubes.cells.write_values("}ElementAttributes_" + cls.dimension_name, cellset)
        # create a simple cube to be cloned through bedrock
        if not cls.tm1.cubes.exists(cls.cube_name):
            cube = Cube(cls.cube_name, ["}Dimensions", "}Cubes"], "[]=S:'TM1py';")
            cls.tm1.cubes.create(cube)
        # create bedrocks if they don't exist
        for bedrock in ("Bedrock.Dim.Clone", "Bedrock.Cube.Clone"):
            if not cls.tm1.processes.exists(bedrock):
                with open(os.path.join("resources", bedrock + ".json"), "r") as file:
                    process = Process.from_json(file.read())
                cls.tm1.processes.create(process)

    def test_split_into_statements(self):
        """Comment lines are removed first; the remaining code splits into 4 statements."""
        code = "sText1 = 'abcdefgh';\r\n" \
               " nElem = 2;\r\n" \
               " # dasjd; dasjdas '' qdawdas\r\n" \
               "# daskldlaskjdla aksdlas;das \r\n" \
               " # dasdwad\r\n" \
               "sText2 = 'dasjnd;jkas''dasdas'';dasdas';\r\n" \
               "SaveDataAll;"
        code = TIObfuscator.remove_comment_lines(code)
        statements = TIObfuscator.split_into_statements(code)
        self.assertEqual(len(statements), 4)

    def test_expand(self):
        """An obfuscated process that uses EXPAND must still execute successfully."""
        if self.tm1.processes.exists(self.expand_process_name_obf):
            self.tm1.processes.delete(self.expand_process_name_obf)
        process = self.tm1.processes.get(self.expand_process_name)
        process_obf = TIObfuscator.obfuscate_process(process, self.expand_process_name_obf)
        self.tm1.processes.create(process_obf)
        self.tm1.processes.execute(process_obf.name, {})

    def test_remove_generated_code(self):
        """Everything between the generated-statements markers is stripped; user code survives."""
        code = "#****Begin: Generated Statements***\r\n" \
               "DIMENSIONELEMENTINSERT('Employee','',V1,'s');\r\n" \
               "DIMENSIONELEMENTINSERT('Employee','',V2,'s');\r\n" \
               "DIMENSIONELEMENTINSERT('Employee','',V3,'s');\r\n" \
               "DIMENSIONELEMENTINSERT('Employee','',V4,'s');\r\n" \
               "#****End: Generated Statements****\r\n" \
               "\r\n" \
               "sText = 'test';"
        code = TIObfuscator.remove_generated_code(code)
        self.assertNotIn("#****Begin", code)
        self.assertNotIn("DIMENSIONELEMENTINSERT", code)
        self.assertNotIn("#****End", code)
        self.assertIn("sText = 'test';", code)

    def test_obfuscate_code(self):
        """An obfuscated copy of the simple process can be created in TM1."""
        if self.tm1.processes.exists(self.process_name_obf):
            self.tm1.processes.delete(self.process_name_obf)
        process_obf = TIObfuscator.obfuscate_process(self.process, self.process_name_obf)
        self.tm1.processes.create(process_obf)

    def test_bedrock_clone_dim(self):
        """The obfuscated bedrock dimension-clone process must still run end to end."""
        if self.tm1.processes.exists("Bedrock.Dim.Clone.Obf"):
            self.tm1.processes.delete("Bedrock.Dim.Clone.Obf")
        p = self.tm1.processes.get("Bedrock.Dim.Clone")
        p_obf = TIObfuscator.obfuscate_process(
            process=p,
            new_name='Bedrock.Dim.Clone.Obf')
        self.tm1.processes.create(p_obf)
        # call obfuscated process
        parameters = {
            "Parameters":
                [
                    {"Name": "pSourceDim", "Value": self.dimension_name},
                    {"Name": "pTargetDim", "Value": self.dimension_name_cloned},
                    {"Name": "pAttr", "Value": "1"}
                ]
        }
        self.tm1.processes.execute("Bedrock.Dim.Clone.Obf", parameters)

    def test_bedrock_clone_cube(self):
        """The obfuscated bedrock cube-clone process must still run end to end."""
        if self.tm1.processes.exists("Bedrock.Cube.Clone.Obf"):
            self.tm1.processes.delete("Bedrock.Cube.Clone.Obf")
        p = self.tm1.processes.get("Bedrock.Cube.Clone")
        p_obf = TIObfuscator.obfuscate_process(process=p, new_name='Bedrock.Cube.Clone.Obf')
        self.tm1.processes.create(p_obf)
        # call obfuscated process
        parameters = {
            "Parameters":
                [
                    {"Name": "pSourceCube", "Value": self.cube_name},
                    {"Name": "pTargetCube", "Value": self.cube_name_cloned},
                    {"Name": "pIncludeRules", "Value": "1"},
                    {"Name": "pIncludeData", "Value": "1"},
                    {"Name": "pDebug", "Value": "1"}
                ]
        }
        self.tm1.processes.execute("Bedrock.Cube.Clone.Obf", parameters)

    @classmethod
    def tearDownClass(cls):
        """Remove every process, dimension and cube created for these tests, then log out."""
        # delete all this stuff
        cls.tm1.processes.delete(cls.expand_process_name)
        cls.tm1.processes.delete(cls.expand_process_name_obf)
        cls.tm1.processes.delete(cls.process_name)
        cls.tm1.processes.delete(cls.process_name_obf)
        cls.tm1.processes.delete("Bedrock.Dim.Clone.Obf")
        cls.tm1.processes.delete("Bedrock.Cube.Clone.Obf")
        cls.tm1.dimensions.delete(cls.dimension_name)
        cls.tm1.dimensions.delete(cls.dimension_name_cloned)
        cls.tm1.cubes.delete(cls.cube_name)
        cls.tm1.cubes.delete(cls.cube_name_cloned)
        cls.tm1.logout()
# Run this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
"""Test for certbot_nginx._internal.nginxparser."""
import copy
import operator
import tempfile
import unittest
from pyparsing import ParseException
from certbot_nginx._internal.nginxparser import dump
from certbot_nginx._internal.nginxparser import dumps
from certbot_nginx._internal.nginxparser import load
from certbot_nginx._internal.nginxparser import loads
from certbot_nginx._internal.nginxparser import RawNginxParser
from certbot_nginx._internal.nginxparser import UnspacedList
import test_util as util
# Shortcut for grabbing the first element of a parse result.
FIRST = operator.itemgetter(0)
class TestRawNginxParser(unittest.TestCase):
    """Test the raw low-level Nginx config parser."""

    def test_assignments(self):
        """Single directives parse into [name, space, value] token lists."""
        parsed = RawNginxParser.assignment.parseString('root /test;').asList()
        self.assertEqual(parsed, ['root', ' ', '/test'])
        parsed = RawNginxParser.assignment.parseString('root /test;foo bar;').asList()
        # Bug fix: the original passed ['foo', ' ', 'bar'] as a third positional
        # argument to assertEqual, where it is interpreted as the failure *message*
        # and never compared. parseString only consumes the leading assignment, so
        # the parse result is just the 'root' directive.
        self.assertEqual(parsed, ['root', ' ', '/test'])

    def test_blocks(self):
        """Blocks parse into a [name-tokens, contents] pair."""
        parsed = RawNginxParser.block.parseString('foo {}').asList()
        self.assertEqual(parsed, [['foo', ' '], []])
        parsed = RawNginxParser.block.parseString('location /foo{}').asList()
        self.assertEqual(parsed, [['location', ' ', '/foo'], []])
        parsed = RawNginxParser.block.parseString('foo { bar foo ; }').asList()
        self.assertEqual(parsed, [['foo', ' '], [[' ', 'bar', ' ', 'foo', ' '], ' ']])

    def test_nested_blocks(self):
        """A block inside a block keeps its own [name, contents] structure."""
        parsed = RawNginxParser.block.parseString('foo { bar {} }').asList()
        block, content = parsed
        self.assertEqual(FIRST(content), [[' ', 'bar', ' '], []])
        self.assertEqual(FIRST(block), 'foo')

    def test_dump_as_string(self):
        """dumps() reproduces directives, nesting and the stored whitespace tokens."""
        dumped = dumps(UnspacedList([
            ['user', ' ', 'www-data'],
            [['\n', 'server', ' '], [
                ['\n ', 'listen', ' ', '80'],
                ['\n ', 'server_name', ' ', 'foo.com'],
                ['\n ', 'root', ' ', '/home/ubuntu/sites/foo/'],
                [['\n\n ', 'location', ' ', '/status', ' '], [
                    ['\n ', 'check_status', ''],
                    [['\n\n ', 'types', ' '],
                     [['\n ', 'image/jpeg', ' ', 'jpg']]],
                ]]
            ]]]))
        self.assertEqual(dumped.split('\n'),
                         'user www-data;\n'
                         'server {\n'
                         ' listen 80;\n'
                         ' server_name foo.com;\n'
                         ' root /home/ubuntu/sites/foo/;\n'
                         '\n'
                         ' location /status {\n'
                         ' check_status;\n'
                         '\n'
                         ' types {\n'
                         ' image/jpeg jpg;}}}'.split('\n'))

    def test_parse_from_file(self):
        """A representative config file parses into the expected directive tree."""
        with open(util.get_data_filename('foo.conf')) as handle:
            parsed = util.filter_comments(load(handle))
            self.assertEqual(
                parsed,
                [['user', 'www-data'],
                 [['http'],
                  [[['server'], [
                      ['listen', '*:80', 'default_server', 'ssl'],
                      ['server_name', '*.www.foo.com', '*.www.example.com'],
                      ['root', '/home/ubuntu/sites/foo/'],
                      [['location', '/status'], [
                          [['types'], [['image/jpeg', 'jpg']]],
                      ]],
                      [['location', '~', r'case_sensitive\.php$'], [
                          ['index', 'index.php'],
                          ['root', '/var/root'],
                      ]],
                      [['location', '~*', r'case_insensitive\.php$'], []],
                      [['location', '=', r'exact_match\.php$'], []],
                      [['location', '^~', r'ignore_regex\.php$'], []]
                  ]]]]]
            )

    def test_parse_from_file2(self):
        """Edge cases: if-blocks and directives with complicated quoted headers."""
        with open(util.get_data_filename('edge_cases.conf')) as handle:
            parsed = util.filter_comments(load(handle))
            self.assertEqual(
                parsed,
                [[['server'], [['server_name', 'simple']]],
                 [['server'],
                  [['server_name', 'with.if'],
                   [['location', '~', '^/services/.+$'],
                    [[['if', '($request_filename', '~*', '\\.(ttf|woff)$)'],
                      [['add_header', 'Access-Control-Allow-Origin', '"*"']]]]]]],
                 [['server'],
                  [['server_name', 'with.complicated.headers'],
                   [['location', '~*', '\\.(?:gif|jpe?g|png)$'],
                    [['add_header', 'Pragma', 'public'],
                     ['add_header',
                      'Cache-Control', '\'public, must-revalidate, proxy-revalidate\'',
                      '"test,;{}"', 'foo'],
                     ['blah', '"hello;world"'],
                     ['try_files', '$uri', '@rewrites']]]]]])

    def test_parse_from_file3(self):
        """Multiline quoted directive values survive parsing intact."""
        with open(util.get_data_filename('multiline_quotes.conf')) as handle:
            parsed = util.filter_comments(load(handle))
            self.assertEqual(
                parsed,
                [[['http'],
                  [[['server'],
                    [['listen', '*:443'],
                     [['location', '/'],
                      [['body_filter_by_lua',
                        '\'ngx.ctx.buffered = (ngx.ctx.buffered or "")'
                        ' .. string.sub(ngx.arg[1], 1, 1000)\n'
                        ' '
                        'if ngx.arg[2] then\n'
                        ' '
                        'ngx.var.resp_body = ngx.ctx.buffered\n'
                        ' end\'']]]]]]]])

    def test_abort_on_parse_failure(self):
        """A syntactically broken config must raise ParseException, not parse partially."""
        with open(util.get_data_filename('broken.conf')) as handle:
            self.assertRaises(ParseException, load, handle)

    def test_dump_as_file(self):
        """A parsed config with an appended server block round-trips through dump/load."""
        with open(util.get_data_filename('nginx.conf')) as handle:
            parsed = load(handle)
        parsed[-1][-1].append(UnspacedList([['server'],
                                            [['listen', ' ', '443', ' ', 'ssl'],
                                             ['server_name', ' ', 'localhost'],
                                             ['ssl_certificate', ' ', 'cert.pem'],
                                             ['ssl_certificate_key', ' ', 'cert.key'],
                                             ['ssl_session_cache', ' ', 'shared:SSL:1m'],
                                             ['ssl_session_timeout', ' ', '5m'],
                                             ['ssl_ciphers', ' ', 'HIGH:!aNULL:!MD5'],
                                             [['location', ' ', '/'],
                                              [['root', ' ', 'html'],
                                               ['index', ' ', 'index.html', ' ', 'index.htm']]]]]))
        with tempfile.TemporaryFile(mode='w+t') as f:
            dump(parsed, f)
            f.seek(0)
            parsed_new = load(f)
        self.assertEqual(parsed, parsed_new)

    def test_comments(self):
        """Comments survive a dump/load round-trip as ['#', text] nodes."""
        with open(util.get_data_filename('minimalistic_comments.conf')) as handle:
            parsed = load(handle)
        with tempfile.TemporaryFile(mode='w+t') as f:
            dump(parsed, f)
            f.seek(0)
            parsed_new = load(f)
        self.assertEqual(parsed, parsed_new)
        self.assertEqual(parsed_new, [
            ['#', " Use bar.conf when it's a full moon!"],
            ['include', 'foo.conf'],
            ['#', ' Kilroy was here'],
            ['check_status'],
            [['server'],
             [['#', ''],
              ['#', " Don't forget to open up your firewall!"],
              ['#', ''],
              ['listen', '1234'],
              ['#', ' listen 80;']]],
        ])

    def test_issue_518(self):
        """An if-block condition containing a quoted regex parses correctly (certbot #518)."""
        parsed = loads('if ($http_accept ~* "webp") { set $webp "true"; }')
        self.assertEqual(parsed, [
            [['if', '($http_accept', '~*', '"webp")'],
             [['set', '$webp', '"true"']]]
        ])

    def test_comment_in_block(self):
        """A comment containing a brace inside a block does not confuse the parser."""
        parsed = loads("""http {
# server{
}""")
        self.assertEqual(parsed, [
            [['http'],
             [['#', ' server{']]]
        ])

    def test_access_log(self):
        """Directive values containing commas/colons stay a single token (certbot #3798)."""
        parsed = loads('access_log syslog:server=unix:/dev/log,facility=auth,'
                       'tag=nginx_post,severity=info custom;')
        self.assertEqual(parsed, [
            ['access_log',
             'syslog:server=unix:/dev/log,facility=auth,tag=nginx_post,severity=info',
             'custom']
        ])

    def test_add_header(self):
        """Comma-separated header values stay a single token (certbot #3798)."""
        parsed = loads('add_header Cache-Control no-cache,no-store,must-revalidate,max-age=0;')
        self.assertEqual(parsed, [
            ['add_header', 'Cache-Control', 'no-cache,no-store,must-revalidate,max-age=0']
        ])

    def test_map_then_assignment_in_block(self):
        """A map block followed by a plain directive parses correctly (certbot #3798)."""
        test_str = """http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
"~Opera Mini" 1;
*.example.com 1;
}
one;
}"""
        parsed = loads(test_str)
        self.assertEqual(parsed, [
            [['http'], [
                [['map', '$http_upgrade', '$connection_upgrade'], [
                    ['default', 'upgrade'],
                    ["''", 'close'],
                    ['"~Opera Mini"', '1'],
                    ['*.example.com', '1']
                ]],
                ['one']
            ]]
        ])

    def test_variable_name(self):
        """Both $var and ${var} syntaxes stay inside a single token."""
        parsed = loads('try_files /typo3temp/tx_ncstaticfilecache/'
                       '$host${request_uri}index.html @nocache;')
        self.assertEqual(parsed, [
            ['try_files',
             '/typo3temp/tx_ncstaticfilecache/$host${request_uri}index.html',
             '@nocache']
        ])

    def test_weird_blocks(self):
        """if-blocks, regex locations and ${}-variables all parse without errors."""
        test = r"""
if ($http_user_agent ~ MSIE) {
rewrite ^(.*)$ /msie/$1 break;
}
if ($http_cookie ~* "id=([^;]+)(?:;|$)") {
set $id $1;
}
if ($request_method = POST) {
return 405;
}
if ($request_method) {
return 403;
}
if ($args ~ post=140){
rewrite ^ http://example.com/;
}
location ~ ^/users/(.+\.(?:gif|jpe?g|png))$ {
alias /data/w3/images/$1;
}
proxy_set_header X-Origin-URI ${scheme}://${http_host}/$request_uri;
"""
        parsed = loads(test)
        self.assertEqual(parsed, [[['if', '($http_user_agent', '~', 'MSIE)'],
                                   [['rewrite', '^(.*)$', '/msie/$1', 'break']]],
                                  [['if', '($http_cookie', '~*', '"id=([^;]+)(?:;|$)")'], [['set', '$id', '$1']]],
                                  [['if', '($request_method', '=', 'POST)'], [['return', '405']]],
                                  [['if', '($request_method)'],
                                   [['return', '403']]], [['if', '($args', '~', 'post=140)'],
                                                          [['rewrite', '^', 'http://example.com/']]],
                                  [['location', '~', '^/users/(.+\\.(?:gif|jpe?g|png))$'],
                                   [['alias', '/data/w3/images/$1']]],
                                  ['proxy_set_header', 'X-Origin-URI', '${scheme}://${http_host}/$request_uri']]
                         )

    def test_edge_cases(self):
        """Escaped quotes, stray parens, line continuations, variables and empty input."""
        # quotes
        parsed = loads(r'"hello\""; # blah "heh heh"')
        self.assertEqual(parsed, [['"hello\\""'], ['#', ' blah "heh heh"']])
        # if with comment
        parsed = loads("""if ($http_cookie ~* "id=([^;]+)(?:;|$)") { # blah )
}""")
        self.assertEqual(parsed, [[['if', '($http_cookie', '~*', '"id=([^;]+)(?:;|$)")'],
                                   [['#', ' blah )']]]])
        # end paren
        test = """
one"test";
("two");
"test")red;
"test")"blue";
"test")"three;
(one"test")one;
one";
one"test;
one"test"one;
"""
        parsed = loads(test)
        self.assertEqual(parsed, [
            ['one"test"'],
            ['("two")'],
            ['"test")red'],
            ['"test")"blue"'],
            ['"test")"three'],
            ['(one"test")one'],
            ['one"'],
            ['one"test'],
            ['one"test"one']
        ])
        self.assertRaises(ParseException, loads, r'"test"one;')  # fails
        self.assertRaises(ParseException, loads, r'"test;')  # fails
        # newlines
        test = """
server_name foo.example.com bar.example.com \
baz.example.com qux.example.com;
server_name foo.example.com bar.example.com
baz.example.com qux.example.com;
"""
        parsed = loads(test)
        self.assertEqual(parsed, [
            ['server_name', 'foo.example.com', 'bar.example.com',
             'baz.example.com', 'qux.example.com'],
            ['server_name', 'foo.example.com', 'bar.example.com',
             'baz.example.com', 'qux.example.com']
        ])
        # variable weirdness
        parsed = loads("directive $var ${var} $ ${};")
        self.assertEqual(parsed, [['directive', '$var', '${var}', '$', '${}']])
        self.assertRaises(ParseException, loads, "server {server_name test.com};")
        self.assertEqual(loads("blag${dfgdfg};"), [['blag${dfgdfg}']])
        self.assertRaises(ParseException, loads, "blag${dfgdf{g};")
        # empty file
        parsed = loads("")
        self.assertEqual(parsed, [])
class TestUnspacedList(unittest.TestCase):
    """Test the UnspacedList data structure"""

    def setUp(self):
        # Raw "spaced" fixtures; UnspacedList hides the whitespace entries
        # while keeping them in its .spaced backing list.
        self.a = ["\n ", "things", " ", "quirk"]
        self.b = ["y", " "]
        self.l = self.a[:]
        self.l2 = self.b[:]
        self.ul = UnspacedList(self.l)
        self.ul2 = UnspacedList(self.l2)

    def test_construction(self):
        # Whitespace entries are filtered from the visible list.
        self.assertEqual(self.ul, ["things", "quirk"])
        self.assertEqual(self.ul2, ["y"])

    def test_append(self):
        ul3 = copy.deepcopy(self.ul)
        ul3.append("wise")
        self.assertEqual(ul3, ["things", "quirk", "wise"])
        # The spaced backing list must grow in step.
        self.assertEqual(ul3.spaced, self.a + ["wise"])

    def test_add(self):
        ul3 = self.ul + self.ul2
        self.assertEqual(ul3, ["things", "quirk", "y"])
        self.assertEqual(ul3.spaced, self.a + self.b)
        # Addition must not mutate the left operand.
        self.assertEqual(self.ul.spaced, self.a)
        # Adding a plain list works the same as adding an UnspacedList.
        ul3 = self.ul + self.l2
        self.assertEqual(ul3, ["things", "quirk", "y"])
        self.assertEqual(ul3.spaced, self.a + self.b)

    def test_extend(self):
        ul3 = copy.deepcopy(self.ul)
        ul3.extend(self.ul2)
        self.assertEqual(ul3, ["things", "quirk", "y"])
        self.assertEqual(ul3.spaced, self.a + self.b)
        # The source list must be left untouched.
        self.assertEqual(self.ul.spaced, self.a)

    def test_set(self):
        ul3 = copy.deepcopy(self.ul)
        ul3[0] = "zither"
        l = ["\n ", "zather", "zest"]
        # Assigning an UnspacedList stores its raw spaced form in .spaced.
        ul3[1] = UnspacedList(l)
        self.assertEqual(ul3, ["zither", ["zather", "zest"]])
        self.assertEqual(ul3.spaced, [self.a[0], "zither", " ", l])

    def test_get(self):
        # Out-of-range access (positive and negative) must raise IndexError.
        self.assertRaises(IndexError, self.ul2.__getitem__, 2)
        self.assertRaises(IndexError, self.ul2.__getitem__, -3)

    def test_insert(self):
        x = UnspacedList(
            [['\n ', 'listen', ' ', '69.50.225.155:9000'],
             ['\n ', 'listen', ' ', '127.0.0.1'],
             ['\n ', 'server_name', ' ', '.example.com'],
             ['\n ', 'server_name', ' ', 'example.*'], '\n',
             ['listen', ' ', '5001', ' ', 'ssl']])
        # Insert past the visible entries; the stray '\n' only exists in .spaced.
        x.insert(5, "FROGZ")
        self.assertEqual(x,
                         [['listen', '69.50.225.155:9000'], ['listen', '127.0.0.1'],
                          ['server_name', '.example.com'], ['server_name', 'example.*'],
                          ['listen', '5001', 'ssl'], 'FROGZ'])
        self.assertEqual(x.spaced,
                         [['\n ', 'listen', ' ', '69.50.225.155:9000'],
                          ['\n ', 'listen', ' ', '127.0.0.1'],
                          ['\n ', 'server_name', ' ', '.example.com'],
                          ['\n ', 'server_name', ' ', 'example.*'], '\n',
                          ['listen', ' ', '5001', ' ', 'ssl'],
                          'FROGZ'])

    def test_rawlists(self):
        # Mixed mutation operations keep the visible list consistent.
        ul3 = copy.deepcopy(self.ul)
        ul3.insert(0, "some")
        ul3.append("why")
        ul3.extend(["did", "whether"])
        del ul3[2]
        self.assertEqual(ul3, ["some", "things", "why", "did", "whether"])

    def test_is_dirty(self):
        self.assertEqual(False, self.ul2.is_dirty())
        ul3 = UnspacedList([])
        ul3.append(self.ul)
        # Appending marks the container dirty, not the appended list.
        self.assertEqual(False, self.ul.is_dirty())
        self.assertEqual(True, ul3.is_dirty())
        ul4 = UnspacedList([[1], [2, 3, 4]])
        self.assertEqual(False, ul4.is_dirty())
        # Mutating a nested plain list must propagate dirtiness.
        ul4[1][2] = 5
        self.assertEqual(True, ul4.is_dirty())
# Run this test module directly.
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
|
|
import argparse
from . import RedmineCliException
from arguments import Arguments as A
from formatter import BaseFormatter, ListFormatter, ResourceFormatter, UpdateFormatter
from redminelib.resultsets import ResourceSet
# Pagination/ordering CLI arguments shared by every *list* command below.
BASE_LIST_COMMAND_ARGS = [
    A('--limit', type=int, help='Limit', default=100),
    A('--offset', type=int, help='Offset'),
    A('--order', type=str, help='Order field. field or field:desc', default='id')
]
def int_or_string(value):
    """argparse type: coerce an all-digit string to int, pass anything else through."""
    if value.isdigit():
        return int(value)
    return value
class BaseCommand(object):
    """Base class for redmine-cli subcommands.

    Subclasses declare:
      * ``name`` (and optionally ``redmine_name``) -- the callable looked up
        on the python-redmine resource manager,
      * ``formatter_class`` -- how results are rendered,
      * ``arguments`` / ``params_map`` -- CLI options and their mapping onto
        keyword arguments of the redmine call.
    """
    formatter_class = BaseFormatter
    # Maps parsed CLI argument names to keyword-argument names of the redmine call.
    params_map = {}
    def __init__(self, resource):
        self.resource = resource
        self.redmine = resource.redminecli.redmine
        self.config = resource.redminecli.config
    def get_formatter(self, *args, **kwargs):
        """Instantiate this command's formatter."""
        return self.formatter_class(self, *args, **kwargs)
    def get_command_params(self):
        """Build the keyword arguments for the redmine call from parsed CLI args.

        Arguments the user did not supply (value is None) are skipped so the
        redmine library applies its own defaults.
        """
        result = {}
        # .items() instead of the Python-2-only .iteritems(): works on 2 and 3.
        for args_param, command_param in self.params_map.items():
            value = self.config.get_arg(args_param)
            if value is None:
                continue
            result[command_param] = value
        return result
    def get_command_args(self):
        """Positional arguments for the redmine call; subclasses override."""
        return []
    def run(self):
        """Resolve the redmine resource and callable, invoke it and print the result.

        Raises RedmineCliException when the resource or its callable is missing.
        """
        formatter = self.get_formatter(orderby=self.config.get_arg('order'))
        redmine_resource_name = getattr(self.resource, 'redmine_name', self.resource.name)
        redmine_resource = getattr(self.redmine, redmine_resource_name, None)
        if not redmine_resource:
            raise RedmineCliException('Redmine has no resource %s' % redmine_resource_name)
        command_name = getattr(self, 'redmine_name', self.name)
        func = getattr(redmine_resource, command_name, None)
        if not func or not callable(func):
            raise RedmineCliException('Redmine resource %s has no callable %s' % (redmine_resource_name, command_name))
        result = func(*self.get_command_args(), **self.get_command_params())
        if isinstance(result, ResourceSet):
            # Restrict the fetched fields to what the formatter will display.
            result = list(result.values(*formatter.values))
        formatter.prepare_result(result)
        formatter.print_result(result)
class ProjectListCommand(BaseCommand):
    """`project list`: lists projects via redmine.project.all()."""
    formatter_class = ListFormatter
    name = 'list'
    redmine_name = 'all'
    description = 'Projects list'
    arguments = BASE_LIST_COMMAND_ARGS
    params_map = {
        'limit': 'limit',
        'offset': 'offset'
    }
def assigned_type(value):
    """argparse type for --assigned/--tracker: a numeric id or the literal 'me'."""
    if value == 'me':
        return value
    if value.isdigit():
        return int(value)
    raise argparse.ArgumentTypeError('%s is not valid value for assigned' % value)
def status_type(value):
    """argparse type for --status: 'open', 'closed', '*' or a numeric status id."""
    if value in ('open', 'closed', '*'):
        return value
    if value.isdigit():
        return int(value)
    raise argparse.ArgumentTypeError('%s is not valid value for status' % value)
class IssueListCommand(BaseCommand):
    """`issue list`: filters issues via redmine.issue.filter()."""
    formatter_class = ListFormatter
    name = 'list'
    redmine_name = 'filter'
    description = 'Issue list'
    arguments = [
        A('--project', type=int_or_string, help='Project id or project identifier'),
        A('--query', type=int, help='Query id'),
        A('--status', type=status_type, help='Status: open, closed, * or status id'),
        A('--assigned', type=assigned_type, help='Assigned to: me or user id'),
        A('--tracker', type=assigned_type, help='Tracker id')
    ] + BASE_LIST_COMMAND_ARGS
    # CLI option name -> python-redmine filter() keyword argument.
    params_map = {
        'limit': 'limit',
        'offset': 'offset',
        'order': 'sort',
        'project': 'project_id',
        'tracker': 'tracker_id',
        'query': 'query_id',
        'assigned': 'assigned_to_id',
        'status': 'status_id'
    }
class IssueShowCommand(BaseCommand):
    """`issue show <id>`: displays one issue via redmine.issue.get()."""
    formatter_class = ResourceFormatter
    name = 'show'
    redmine_name = 'get'
    description = 'Show issue details'
    arguments = [
        A('issue_id', type=int, help='Issue id')
    ]
    def get_command_args(self):
        """The issue id is passed positionally to redmine.issue.get()."""
        return [self.config.get_arg('issue_id')]
# Issue attribute options shared by the create and update commands.
BASE_ISSUE_PROPS_ARGS = [
    A('--tracker', type=int, help='Tracker id'),
    A('--description', help='Description'),
    A('--notes', help='Add journal note'),
    A('--private_notes', action='store_true', help='Notes are private'),
    A('--status', type=int, help='Status id'),
    A('--priority', type=int, help='Priority id'),
    A('--category', type=int, help='Category id'),
    A('--version', type=int, help='Version id'),
    A('--private', action='store_true', help='Issue is private'),
    A('--assigned', type=int, help='User id'),
    A('--parent_issue', type=int, help='Parent issue id'),
    A('--done_ratio', type=int, help='Issue done ratio')
]
# CLI option name -> redmine keyword argument, shared by issue create/update.
# NOTE(review): 'category' maps to 'category' while the other id-valued options
# map to '*_id' keywords -- confirm against python-redmine whether this should
# be 'category_id'.
BASE_ISSUE_PROPS_MAP = {
    'project': 'project_id',
    'subject': 'subject',
    'tracker': 'tracker_id',
    'description': 'description',
    'notes': 'notes',
    'private_notes': 'private_notes',
    'status': 'status_id',
    'priority': 'priority_id',
    'category': 'category',
    'version': 'fixed_version_id',
    'private': 'is_private',
    'assigned': 'assigned_to_id',
    'parent_issue': 'parent_issue_id',
    'done_ratio': 'done_ratio'
}
class IssueUpdateCommand(BaseCommand):
    """`issue update <id>`: updates issue attributes via redmine.issue.update()."""
    formatter_class = UpdateFormatter
    name = 'update'
    description = 'Update issue'
    arguments = [
        A('issue_id', type=int, help='Issue id'),
        A('--project', type=int, help='Project id'),
        A('--subject', help='Subject'),
    ] + BASE_ISSUE_PROPS_ARGS
    def get_command_args(self):
        """The issue id is passed positionally; everything else as keywords."""
        return [self.config.get_arg('issue_id')]
    params_map = BASE_ISSUE_PROPS_MAP
class IssueCreateCommand(BaseCommand):
    """`issue create <project> <subject>`: creates an issue via redmine.issue.create()."""
    formatter_class = ResourceFormatter
    name = 'create'
    description = 'Create issue'
    arguments = [
        A('project', type=int_or_string, help='Project id or identifier'),
        A('subject', help='Subject'),
    ] + BASE_ISSUE_PROPS_ARGS
    params_map = BASE_ISSUE_PROPS_MAP
    def get_formatter(self, *args, **kwargs):
        """Render the created issue with the same layout as `issue show`."""
        kwargs.update(base_key='%s_%s' % (self.resource.name, IssueShowCommand.name))
        return super(IssueCreateCommand, self).get_formatter(*args, **kwargs)
class UserListCommand(BaseCommand):
    """`user list`: filters users via redmine.user.filter()."""
    formatter_class = ListFormatter
    name = 'list'
    redmine_name = 'filter'
    description = 'User list'
    arguments = [
        A('--status', type=int, help='User status. 0 - anonymous, 1 - active (default), 2 - registered, 3 - locked'),
        A('--name', help='Filter users on their login, firstname, lastname and mail. If the pattern contains a space, it will also return users whose firstname match the first word or lastname match the second word.'),
        A('--group', type=int, help='Group id')
    ] + BASE_LIST_COMMAND_ARGS
    params_map = {
        'limit': 'limit',
        'offset': 'offset',
        'status': 'status',
        'name': 'name',
        'group': 'group_id'
    }
class VersionListCommand(BaseCommand):
    """`version list <project>`: lists a project's versions via redmine.version.filter()."""
    formatter_class = ListFormatter
    name = 'list'
    redmine_name = 'filter'
    description = 'Version list'
    arguments = [
        A('project', type=int_or_string, help='Project id or identifier')
    ] + BASE_LIST_COMMAND_ARGS
    params_map = {
        'limit': 'limit',
        'offset': 'offset',
        'project': 'project_id'
    }
class IssueStatusList(BaseCommand):
    """`issue_status list`: lists issue statuses via redmine.issue_status.all()."""
    formatter_class = ListFormatter
    name = 'list'
    redmine_name = 'all'
    description = 'Issue status list'
    arguments = BASE_LIST_COMMAND_ARGS
    params_map = {
        'limit': 'limit',
        'offset': 'offset',
    }
|
|
from array import array
import sys
from py.test import raises
from pyvx import vx
import pyvx
# Python 3 removed the `unicode` builtin; alias it to str so the type
# assertions below work on both Python 2 and 3.
if sys.version_info > (3,):
    unicode = str
class TestVX(object):
    """Exercises the pyvx OpenVX C-binding (`pyvx.vx`) object by object:
    contexts, images, kernels, graphs, nodes, parameters, scalars, delays,
    logging, LUTs, distributions, thresholds, matrices, convolutions,
    pyramids, remaps, arrays, user kernels, loadable modules and backend
    selection. Each test creates its own context and releases it at the end.
    """
    def test_context(self):
        """Context creation, attribute query/set, hints/directives, release."""
        c = vx.CreateContext()
        assert vx.GetStatus(vx.reference(c)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(c), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_CONTEXT)
        s, v = vx.QueryContext(c, vx.CONTEXT_ATTRIBUTE_VENDOR_ID, 'vx_uint16')
        assert s == vx.SUCCESS
        assert isinstance(v, int)
        s, v = vx.QueryContext(c, vx.CONTEXT_ATTRIBUTE_IMPLEMENTATION, 'vx_char[VX_MAX_IMPLEMENTATION_NAME]', str)
        assert s == vx.SUCCESS
        assert isinstance(v, unicode)
        s = vx.SetContextAttribute(c, vx.CONTEXT_ATTRIBUTE_IMMEDIATE_BORDER_MODE,
                                   vx.border_mode_t(vx.BORDER_MODE_CONSTANT, 42))
        assert s == vx.SUCCESS
        s, v = vx.QueryContext(c, vx.CONTEXT_ATTRIBUTE_IMMEDIATE_BORDER_MODE, 'vx_border_mode_t')
        assert s == vx.SUCCESS
        assert v.mode == vx.BORDER_MODE_CONSTANT
        assert v.constant_value == 42
        vx.Hint(vx.reference(c), vx.HINT_SERIALIZE)
        vx.Directive(vx.reference(c), vx.DIRECTIVE_DISABLE_LOGGING)
        assert vx.GetContext(vx.reference(c)) == c
        vx.RegisterUserStruct(c, 42)
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_image(self):
        """Image creation variants (plain, ROI, uniform, from handle) and patch access."""
        c = vx.CreateContext()
        img = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_RGB)
        assert vx.GetStatus(vx.reference(img)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(img), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_IMAGE)
        roi = vx.CreateImageFromROI(img, vx.rectangle_t(10, 10, 100, 100))
        assert vx.GetStatus(vx.reference(roi)) == vx.SUCCESS
        assert vx.ReleaseImage(roi) == vx.SUCCESS
        roi = None
        const = vx.CreateUniformImage(c, 640, 480, vx.DF_IMAGE_S16, 7, 'vx_int16')
        assert vx.GetStatus(vx.reference(const)) == vx.SUCCESS
        const = vx.CreateUniformImage(c, 640, 480, vx.DF_IMAGE_RGB, (7, 8, 9), 'vx_uint8[]')
        assert vx.GetStatus(vx.reference(const)) == vx.SUCCESS
        addr = vx.imagepatch_addressing_t(640, 480, 1, 640, vx.SCALE_UNITY, vx.SCALE_UNITY, 1, 1)
        data = array('B', [0x42]) * (640 * 480)
        # Both tuple-wrapped and bare (addr, data) forms must be accepted.
        hand = vx.CreateImageFromHandle(c, vx.DF_IMAGE_U8, (addr,), (data,), vx.IMPORT_TYPE_HOST)
        assert vx.GetStatus(vx.reference(hand)) == vx.SUCCESS
        hand = vx.CreateImageFromHandle(c, vx.DF_IMAGE_U8, addr, data, vx.IMPORT_TYPE_HOST)
        assert vx.GetStatus(vx.reference(hand)) == vx.SUCCESS
        hand = vx.CreateImageFromHandle(c, vx.DF_IMAGE_RGB, (addr, addr, addr), (data, data, data), vx.IMPORT_TYPE_HOST)
        assert vx.GetStatus(vx.reference(hand)) == vx.SUCCESS
        assert vx.QueryImage(img, vx.IMAGE_ATTRIBUTE_WIDTH, 'vx_uint32') == (vx.SUCCESS, 640)
        assert vx.SetImageAttribute(img, vx.IMAGE_ATTRIBUTE_SPACE, vx.COLOR_SPACE_BT601_525, 'vx_enum') == vx.SUCCESS
        assert vx.QueryImage(img, vx.IMAGE_ATTRIBUTE_SPACE, 'vx_enum') == (vx.SUCCESS, vx.COLOR_SPACE_BT601_525)
        assert vx.GetContext(vx.reference(img)) == c
        r = vx.rectangle_t(10, 20, 30, 40)
        s = vx.ComputeImagePatchSize(img, r, 0)
        # Write through an accessed patch, then read it back after commit.
        status, addr, ptr = vx.AccessImagePatch(img, r, 0, None, None, vx.READ_AND_WRITE)
        assert status == vx.SUCCESS
        assert addr.dim_x == addr.dim_y == 20
        ptr[0] = b'H'
        assert vx.CommitImagePatch(img, r, 0, addr, ptr) == vx.SUCCESS
        status, addr, ptr = vx.AccessImagePatch(img, r, 0, None, None, vx.READ_AND_WRITE)
        assert status == vx.SUCCESS
        assert ptr[0] == b'H'
        pixel = vx.FormatImagePatchAddress1d(ptr, 0, addr)
        assert pixel[0] == b'H'
        assert vx.CommitImagePatch(img, r, 0, addr, ptr) == vx.SUCCESS
        assert 7 not in data
        # Access the handle-backed image via a caller-supplied buffer.
        addr = vx.imagepatch_addressing_t(20, 20, 1, 20, vx.SCALE_UNITY, vx.SCALE_UNITY, 1, 1)
        rdata = array('B', [0]) * (20 * 20)
        status, addr, ptr = vx.AccessImagePatch(hand, r, 0, addr, rdata, vx.READ_AND_WRITE)
        assert rdata[1] == 0x42
        rdata[1] = 7
        pixel = vx.FormatImagePatchAddress1d(ptr, 1, addr)
        assert pixel[0] == b'\x07'
        pixel = vx.FormatImagePatchAddress2d(ptr, 0, 0, addr)
        assert pixel[0] == b'\x42'
        assert vx.CommitImagePatch(hand, r, 0, addr, ptr) == vx.SUCCESS
        # Commit must propagate back into the original host buffer.
        assert data[11 + 20*640] == 7
        status, r = vx.GetValidRegionImage(const)
        assert status == vx.SUCCESS
        assert r.end_x == 640
        assert r.end_y == 480
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_kernel(self):
        """Kernel lookup by name and enum, parameter access, extra-kernel loading."""
        c = vx.CreateContext()
        kernel = vx.GetKernelByName(c, b"org.khronos.openvx.sobel_3x3")
        assert vx.GetStatus(vx.reference(kernel)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(kernel), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_KERNEL)
        k = vx.GetKernelByEnum(c, vx.KERNEL_SOBEL_3x3)
        assert kernel == k
        s, i = vx.QueryKernel(kernel, vx.KERNEL_ATTRIBUTE_ENUM, 'vx_enum')
        assert i == vx.KERNEL_SOBEL_3x3
        vx.ReleaseKernel(k)
        param = vx.GetKernelParameterByIndex(kernel, 0)
        assert vx.GetStatus(vx.reference(param)) == vx.SUCCESS
        assert vx.LoadKernels(c, "openvx-extras") == vx.SUCCESS
        k = vx.GetKernelByName(c, b"org.khronos.extra.edge_trace")
        assert vx.GetStatus(vx.reference(k)) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_graph(self):
        """Graph build/verify/process cycle, graph parameters and node callbacks."""
        c = vx.CreateContext()
        g = vx.CreateGraph(c)
        assert vx.GetStatus(vx.reference(g)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(g), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_GRAPH)
        assert vx.IsGraphVerified(g) == vx.false_e
        assert vx.QueryGraph(g, vx.GRAPH_ATTRIBUTE_NUMNODES, 'vx_uint32') == (vx.SUCCESS, 0)
        img = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_U8)
        dx = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_S16)
        dy = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_S16)
        node = vx.Sobel3x3Node(g, img, dx, dy)
        assert vx.VerifyGraph(g) == vx.SUCCESS
        assert vx.ProcessGraph(g) == vx.SUCCESS
        assert vx.ScheduleGraph(g) == vx.SUCCESS
        assert vx.WaitGraph(g) == vx.SUCCESS
        p = vx.GetParameterByIndex(node, 0)
        assert vx.AddParameterToGraph(g, p) == vx.SUCCESS
        p2 = vx.GetGraphParameterByIndex(g, 0)
        # Binding an S16 image to the U8 input must fail verification.
        assert vx.SetGraphParameterByIndex(g, 0, vx.reference(dx)) == vx.SUCCESS
        assert vx.VerifyGraph(g) != vx.SUCCESS
        assert vx.SetGraphParameterByIndex(g, 0, vx.reference(img)) == vx.SUCCESS
        assert vx.VerifyGraph(g) == vx.SUCCESS
        assert vx.IsGraphVerified(g) == vx.true_e
        def callback(node):
            callback.called = True
            return vx.SUCCESS
        assert vx.AssignNodeCallback(node, callback) == vx.SUCCESS
        assert vx.ProcessGraph(g) == vx.SUCCESS
        assert callback.called
        def callback(node):
            raise TypeError('Escaping from callback')
            return vx.SUCCESS
        # A callback is already assigned; it must be cleared before reassigning.
        assert vx.AssignNodeCallback(node, callback) != vx.SUCCESS
        assert vx.AssignNodeCallback(node, None) == vx.SUCCESS
        assert vx.AssignNodeCallback(node, callback) == vx.SUCCESS
        assert vx.VerifyGraph(g) == vx.SUCCESS
        # The raising callback must surface as a failed ProcessGraph.
        assert vx.ProcessGraph(g) != vx.SUCCESS
        img = vx.CreateVirtualImage(g, 640, 480, vx.DF_IMAGE_RGB)
        assert vx.GetStatus(vx.reference(img)) == vx.SUCCESS
        assert vx.ReleaseGraph(g) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_node(self):
        """Generic node creation, attribute set/query, release and removal."""
        c = vx.CreateContext()
        g = vx.CreateGraph(c)
        k = vx.GetKernelByEnum(c, vx.KERNEL_SOBEL_3x3)
        assert vx.GetStatus(vx.reference(k)) == vx.SUCCESS
        node = vx.CreateGenericNode(g, k)
        assert vx.GetStatus(vx.reference(node)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(node), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_NODE)
        s = vx.SetNodeAttribute(node, vx.NODE_ATTRIBUTE_BORDER_MODE,
                                vx.border_mode_t(vx.BORDER_MODE_CONSTANT, 42))
        assert s == vx.SUCCESS
        s, v = vx.QueryNode(node, vx.NODE_ATTRIBUTE_BORDER_MODE, 'vx_border_mode_t')
        assert v.mode == vx.BORDER_MODE_CONSTANT
        assert v.constant_value == 42
        assert vx.ReleaseNode(node) == vx.SUCCESS
        node = vx.CreateGenericNode(g, k)
        assert vx.RemoveNode(node) == vx.SUCCESS
        assert vx.ReleaseGraph(g) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_parameter(self):
        """Parameter query and rebinding by index and by reference."""
        c = vx.CreateContext()
        g = vx.CreateGraph(c)
        img = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_U8)
        dx = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_S16)
        dy = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_S16)
        node = vx.Sobel3x3Node(g, img, dx, dy)
        param = vx.GetParameterByIndex(node, 0)
        assert vx.GetStatus(vx.reference(param)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(param), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_PARAMETER)
        s, v = vx.QueryParameter(param, vx.PARAMETER_ATTRIBUTE_REF, "vx_reference")
        assert s == vx.SUCCESS
        assert v == img
        assert vx.SetParameterByIndex(node, 0, vx.reference(dx)) == vx.SUCCESS
        s, v = vx.QueryParameter(param, vx.PARAMETER_ATTRIBUTE_REF, "vx_reference")
        assert s == vx.SUCCESS
        assert v == dx
        assert vx.SetParameterByReference(param, vx.reference(dy)) == vx.SUCCESS
        s, v = vx.QueryParameter(param, vx.PARAMETER_ATTRIBUTE_REF, "vx_reference")
        assert s == vx.SUCCESS
        assert v == dy
        assert vx.ReleaseParameter(param) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_scalar(self):
        """Scalar creation, type query, read and write."""
        c = vx.CreateContext()
        scalar = vx.CreateScalar(c, vx.TYPE_INT16, 7)
        assert vx.GetStatus(vx.reference(scalar)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(scalar), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_SCALAR)
        assert vx.QueryScalar(scalar, vx.SCALAR_ATTRIBUTE_TYPE, "vx_enum") == (vx.SUCCESS, vx.TYPE_INT16)
        assert vx.ReadScalarValue(scalar) == (vx.SUCCESS, 7)
        assert vx.WriteScalarValue(scalar, 42) == vx.SUCCESS
        assert vx.ReadScalarValue(scalar) == (vx.SUCCESS, 42)
        assert vx.ReleaseScalar(scalar) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_reference(self):
        """reference() rejects plain structs that are not vx objects."""
        with raises(TypeError):
            vx.reference(vx.imagepatch_addressing_t())
    def test_delay(self):
        """Delay slot queries and AgeDelay rotation of bound references."""
        c = vx.CreateContext()
        img = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_RGB)
        delay = vx.CreateDelay(c, vx.reference(img), 3)
        assert vx.GetStatus(vx.reference(delay)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(delay), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_DELAY)
        assert vx.QueryDelay(delay, vx.DELAY_ATTRIBUTE_SLOTS, 'vx_size') == (vx.SUCCESS, 3)
        ref0 = vx.GetReferenceFromDelay(delay, 0)
        ref1 = vx.GetReferenceFromDelay(delay, 1)
        ref2 = vx.GetReferenceFromDelay(delay, 2)
        g = vx.CreateGraph(c)
        node = vx.Sobel3x3Node(g, vx.from_reference(ref0), vx.from_reference(ref1), vx.from_reference(ref2))
        param = vx.GetParameterByIndex(node, 1)
        s, v = vx.QueryParameter(param, vx.PARAMETER_ATTRIBUTE_REF, "vx_reference")
        assert s == vx.SUCCESS
        assert v == ref1
        # Aging the delay shifts each bound parameter to the previous slot.
        assert vx.AgeDelay(delay) == vx.SUCCESS
        s, v = vx.QueryParameter(param, vx.PARAMETER_ATTRIBUTE_REF, "vx_reference")
        assert s == vx.SUCCESS
        assert v == ref0
        assert vx.ReleaseDelay(delay) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_log(self):
        """Registered log callbacks receive AddLogEntry messages."""
        c = vx.CreateContext()
        def callback(context, ref, status, string):
            assert status == vx.FAILURE
            assert string == b"Test"
            callback.called = True
        vx.RegisterLogCallback(c, callback, vx.false_e)
        vx.AddLogEntry(vx.reference(c), vx.FAILURE, b"Test")
        assert callback.called
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_lut(self):
        """LUT access/commit via both library-owned and caller-owned buffers."""
        c = vx.CreateContext()
        lut = vx.CreateLUT(c, vx.TYPE_UINT8, 256)
        assert vx.GetStatus(vx.reference(lut)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(lut), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_LUT)
        assert vx.QueryLUT(lut, vx.LUT_ATTRIBUTE_COUNT, 'vx_size') == (vx.SUCCESS, 256)
        s, data = vx.AccessLUT(lut, None, vx.READ_AND_WRITE)
        assert s == vx.SUCCESS
        data[1] = b'H'
        assert vx.CommitLUT(lut, data) == vx.SUCCESS
        s, data = vx.AccessLUT(lut, None, vx.READ_ONLY)
        assert data[1] == b'H'
        assert vx.CommitLUT(lut, data) == vx.SUCCESS
        data = array('B', [0]) * 256
        vx.AccessLUT(lut, data, vx.READ_ONLY)
        assert data[1] == ord('H')
        assert vx.CommitLUT(lut, data) == vx.SUCCESS
        assert vx.ReleaseLUT(lut) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_distribution(self):
        """Distribution access/commit with both buffer ownership modes."""
        c = vx.CreateContext()
        distribution = vx.CreateDistribution(c, 8, 0, 16)
        assert vx.GetStatus(vx.reference(distribution)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(distribution), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_DISTRIBUTION)
        assert vx.QueryDistribution(distribution, vx.DISTRIBUTION_ATTRIBUTE_BINS, 'vx_size') == (vx.SUCCESS, 8)
        s, data = vx.AccessDistribution(distribution, None, vx.READ_AND_WRITE)
        assert s == vx.SUCCESS
        data[:4] = b'HHHH'
        assert vx.CommitDistribution(distribution, data) == vx.SUCCESS
        s, data = vx.AccessDistribution(distribution, None, vx.READ_ONLY)
        assert data[:4] == b'HHHH'
        assert vx.CommitDistribution(distribution, data) == vx.SUCCESS
        data = array('I', [0]) * 256
        vx.AccessDistribution(distribution, data, vx.READ_ONLY)
        # First 32-bit bin holds the four 'H' bytes written above.
        assert data[0] == sum(ord('H') * i for i in [2**24 + 2**16 + 2**8 + 1])
        assert vx.CommitDistribution(distribution, data) == vx.SUCCESS
        assert vx.ReleaseDistribution(distribution) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_threshold(self):
        """Threshold creation, type query and value set/get."""
        c = vx.CreateContext()
        threshold = vx.CreateThreshold(c, vx.THRESHOLD_TYPE_BINARY, vx.TYPE_UINT8)
        assert vx.GetStatus(vx.reference(threshold)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(threshold), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_THRESHOLD)
        assert vx.QueryThreshold(threshold, vx.THRESHOLD_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.THRESHOLD_TYPE_BINARY)
        assert vx.SetThresholdAttribute(threshold, vx.THRESHOLD_ATTRIBUTE_THRESHOLD_VALUE, 7, 'vx_int32') == vx.SUCCESS
        assert vx.QueryThreshold(threshold, vx.THRESHOLD_ATTRIBUTE_THRESHOLD_VALUE, 'vx_int32') == (vx.SUCCESS, 7)
        assert vx.ReleaseThreshold(threshold) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_matrix(self):
        """Matrix write/read round-trip through array buffers."""
        c = vx.CreateContext()
        matrix = vx.CreateMatrix(c, vx.TYPE_INT32, 3, 3)
        assert vx.GetStatus(vx.reference(matrix)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(matrix), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_MATRIX)
        assert vx.QueryMatrix(matrix, vx.MATRIX_ATTRIBUTE_COLUMNS, 'vx_size') == (vx.SUCCESS, 3)
        data = array('i', [42]) * 9
        assert vx.WriteMatrix(matrix, data) == vx.SUCCESS
        data = array('i', [0]) * 9
        assert vx.ReadMatrix(matrix, data) == vx.SUCCESS
        assert data[0] == 42
        assert vx.ReleaseMatrix(matrix) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_convolution(self):
        """Convolution coefficient round-trip and scale attribute."""
        c = vx.CreateContext()
        convolution = vx.CreateConvolution(c, 3, 3)
        assert vx.GetStatus(vx.reference(convolution)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(convolution), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_CONVOLUTION)
        assert vx.QueryConvolution(convolution, vx.CONVOLUTION_ATTRIBUTE_ROWS, 'vx_size') == (vx.SUCCESS, 3)
        data = array('i', [42]) * 9
        assert vx.WriteConvolutionCoefficients(convolution, data) == vx.SUCCESS
        data = array('i', [0]) * 9
        assert vx.ReadConvolutionCoefficients(convolution, data) == vx.SUCCESS
        assert data[0] == 42
        assert vx.SetConvolutionAttribute(convolution, vx.CONVOLUTION_ATTRIBUTE_SCALE, 8, 'vx_uint32') == vx.SUCCESS
        assert vx.QueryConvolution(convolution, vx.CONVOLUTION_ATTRIBUTE_SCALE, 'vx_uint32') == (vx.SUCCESS, 8)
        assert vx.ReleaseConvolution(convolution) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_pyramid(self):
        """Pyramid levels and virtual pyramid creation."""
        c = vx.CreateContext()
        pyramid = vx.CreatePyramid(c, 4, vx.SCALE_PYRAMID_HALF, 640, 480, vx.DF_IMAGE_U8)
        assert vx.GetStatus(vx.reference(pyramid)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(pyramid), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_PYRAMID)
        assert vx.QueryPyramid(pyramid, vx.PYRAMID_ATTRIBUTE_WIDTH, 'vx_uint32') == (vx.SUCCESS, 640)
        img = vx.GetPyramidLevel(pyramid, 1)
        assert img
        # Level 1 of a half-scale pyramid is half the base width.
        assert vx.QueryImage(img, vx.IMAGE_ATTRIBUTE_WIDTH, 'vx_uint32') == (vx.SUCCESS, 320)
        g = vx.CreateGraph(c)
        p = vx.CreateVirtualPyramid(g, 3, vx.SCALE_PYRAMID_HALF, 0, 0, vx.DF_IMAGE_VIRT)
        assert vx.ReleasePyramid(p) == vx.SUCCESS
        assert vx.ReleasePyramid(pyramid) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_remap(self):
        """Remap attribute query and point set/get round-trip."""
        c = vx.CreateContext()
        remap = vx.CreateRemap(c, 640, 480, 320, 240)
        assert vx.GetStatus(vx.reference(remap)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(remap), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_REMAP)
        assert vx.QueryRemap(remap, vx.REMAP_ATTRIBUTE_DESTINATION_HEIGHT, 'vx_uint32') == (vx.SUCCESS, 240)
        assert vx.SetRemapPoint(remap, 10, 15, 20.5, 30.5) == vx.SUCCESS
        assert vx.GetRemapPoint(remap, 10, 15) == (vx.SUCCESS, 20.5, 30.5)
        assert vx.ReleaseRemap(remap) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_array(self):
        """Array item add/truncate and ranged access with strides."""
        c = vx.CreateContext()
        arr = vx.CreateArray(c, vx.TYPE_COORDINATES2D, 64)
        assert vx.GetStatus(vx.reference(arr)) == vx.SUCCESS
        assert vx.QueryReference(vx.reference(arr), vx.REF_ATTRIBUTE_TYPE, 'vx_enum') == (vx.SUCCESS, vx.TYPE_ARRAY)
        assert vx.QueryArray(arr, vx.ARRAY_ATTRIBUTE_CAPACITY, 'vx_size') == (vx.SUCCESS, 64)
        s, size = vx.QueryArray(arr, vx.ARRAY_ATTRIBUTE_ITEMSIZE, 'vx_size')
        data = array('B', [0]) * size * 10
        d = vx.ArrayItem('vx_coordinates2d_t', data, 0, size)
        d.x, d.y, d[1].x, d[1].y = 1, 2, 3, 4
        assert vx.AddArrayItems(arr, 10, data, size) == vx.SUCCESS
        assert vx.AddArrayItems(arr, 10, d, size) == vx.SUCCESS
        assert vx.QueryArray(arr, vx.ARRAY_ATTRIBUTE_NUMITEMS, 'vx_size') == (vx.SUCCESS, 20)
        assert vx.TruncateArray(arr, 15) == vx.SUCCESS
        assert vx.QueryArray(arr, vx.ARRAY_ATTRIBUTE_NUMITEMS, 'vx_size') == (vx.SUCCESS, 15)
        status, stride, ptr = vx.AccessArrayRange(arr, 0, 14, None, None, vx.READ_AND_WRITE)
        assert status == vx.SUCCESS
        d0 = vx.ArrayItem('vx_coordinates2d_t', ptr, 0, stride)
        d1 = vx.ArrayItem('vx_coordinates2d_t', ptr, 1, stride)
        assert (d0.x, d0.y, d1.x, d1.y) == (1, 2, 3, 4)
        d1.y = 42
        assert vx.CommitArrayRange(arr, 0, 14, ptr) == vx.SUCCESS
        # Re-access with a caller buffer and a doubled stride.
        data = array('B', [0]) * size * 15 * 2
        status, stride, ptr = vx.AccessArrayRange(arr, 0, 14, size*2, data, vx.READ_AND_WRITE)
        assert stride == size*2
        d = vx.ArrayItem('vx_coordinates2d_t', data, 0, size)
        assert (d.x, d.y, d[2].x, d[2].y) == (1, 2, 3, 42)
        assert vx.CommitArrayRange(arr, 0, 14, ptr) == vx.SUCCESS
        g = vx.CreateGraph(c)
        a = vx.CreateVirtualArray(g, vx.TYPE_KEYPOINT, 64)
        assert vx.ReleaseArray(a) == vx.SUCCESS
        assert vx.ReleaseArray(arr) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_user_kernel(self):
        """Full user-kernel lifecycle: register, validate, run, invalid input, remove."""
        c = vx.CreateContext()
        def func(node, parameters, num):
            # Kernel body: swap byte 0 and byte 100 between input and output.
            func.called = True
            assert num == 2
            input, output = vx.from_reference(parameters[0]), vx.from_reference(parameters[0])
            _, r = vx.GetValidRegionImage(input)
            _, addr, indata = vx.AccessImagePatch(input, r, 0, None, None, vx.READ_ONLY)
            _, addr, outdata = vx.AccessImagePatch(output, r, 0, None, None, vx.WRITE_ONLY)
            outdata[0], outdata[100] = indata[100], indata[0]
            vx.CommitImagePatch(output, r, 0, addr, outdata)
            vx.CommitImagePatch(input, r, 0, addr, indata)
            return vx.SUCCESS
        def validate_input(node, index):
            # Input validator: only U8 images are accepted.
            validate_input.called = True
            assert index == 0
            param = vx.GetParameterByIndex(node, index)
            image = vx.QueryParameter(param, vx.PARAMETER_ATTRIBUTE_REF, 'vx_image')[1]
            if vx.QueryImage(image, vx.IMAGE_ATTRIBUTE_FORMAT, 'vx_df_image') == (vx.SUCCESS, vx.DF_IMAGE_U8):
                s = vx.SUCCESS
            else:
                s = vx.ERROR_INVALID_VALUE
            vx.ReleaseImage(image)
            vx.ReleaseParameter(param)
            return s
        def validate_output(node, index, meta):
            # Output validator: output mirrors the input's dimensions, U8 format.
            validate_output.called = True
            assert index == 1
            param0 = vx.GetParameterByIndex(node, 0)
            input = vx.QueryParameter(param0, vx.PARAMETER_ATTRIBUTE_REF, 'vx_image')[1]
            param1 = vx.GetParameterByIndex(node, index)
            output = vx.QueryParameter(param1, vx.PARAMETER_ATTRIBUTE_REF, 'vx_image')[1]
            _, width = vx.QueryImage(input, vx.IMAGE_ATTRIBUTE_WIDTH, 'vx_uint32')
            _, height = vx.QueryImage(input, vx.IMAGE_ATTRIBUTE_HEIGHT, 'vx_uint32')
            vx.SetMetaFormatAttribute(meta, vx.IMAGE_ATTRIBUTE_WIDTH, width, 'vx_uint32')
            vx.SetMetaFormatAttribute(meta, vx.IMAGE_ATTRIBUTE_HEIGHT, height, 'vx_uint32')
            vx.SetMetaFormatAttribute(meta, vx.IMAGE_ATTRIBUTE_FORMAT, vx.DF_IMAGE_U8, 'vx_df_image')
            vx.ReleaseImage(input)
            vx.ReleaseImage(output)
            vx.ReleaseParameter(param0)
            vx.ReleaseParameter(param1)
            return vx.SUCCESS
        enum = vx.KERNEL_BASE(vx.ID_DEFAULT, 7) + 1
        kernel = vx.AddKernel(c, b"org.test.hello", enum, func, 2, validate_input, validate_output, None, None)
        assert vx.GetStatus(vx.reference(kernel)) == vx.SUCCESS
        assert vx.AddParameterToKernel(kernel, 0, vx.INPUT, vx.TYPE_IMAGE, vx.PARAMETER_STATE_REQUIRED) == vx.SUCCESS
        assert vx.AddParameterToKernel(kernel, 1, vx.OUTPUT, vx.TYPE_IMAGE, vx.PARAMETER_STATE_REQUIRED) == vx.SUCCESS
        assert vx.SetKernelAttribute(kernel, vx.KERNEL_ATTRIBUTE_LOCAL_DATA_SIZE, 42, 'vx_size') == vx.SUCCESS
        assert vx.FinalizeKernel(kernel) == vx.SUCCESS
        g = vx.CreateGraph(c)
        img = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_U8)
        _, r = vx.GetValidRegionImage(img)
        assert r.start_x == 0
        assert r.end_x == 640
        _, addr, data = vx.AccessImagePatch(img, r, 0, None, None, vx.WRITE_ONLY)
        data[0], data[100] = b'H', b'I'
        assert vx.CommitImagePatch(img, r, 0, addr, data) == vx.SUCCESS
        virt = vx.CreateVirtualImage(g, 0, 0, vx.DF_IMAGE_VIRT)
        # virt = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_U8)
        node = vx.CreateGenericNode(g, kernel)
        vx.SetParameterByIndex(node, 0, vx.reference(img))
        vx.SetParameterByIndex(node, 1, vx.reference(virt))
        assert vx.VerifyGraph(g) == vx.SUCCESS
        assert validate_input.called
        assert validate_output.called
        # Verification resolved the virtual image to the input's dimensions.
        assert vx.QueryImage(virt, vx.IMAGE_ATTRIBUTE_WIDTH, 'vx_uint32') == (vx.SUCCESS, 640)
        assert vx.QueryImage(virt, vx.IMAGE_ATTRIBUTE_HEIGHT, 'vx_uint32') == (vx.SUCCESS, 480)
        assert vx.ProcessGraph(g) == vx.SUCCESS
        assert func.called == True
        _, addr, data = vx.AccessImagePatch(virt, r, 0, None, None, vx.READ_ONLY)
        assert data[0], data[100] == 'IH'
        vx.CommitImagePatch(virt, r, 0, addr, data)
        # A U16 input must be rejected by validate_input.
        img2 = vx.CreateImage(c, 640, 480, vx.DF_IMAGE_U16)
        vx.SetParameterByIndex(node, 0, vx.reference(img2))
        assert vx.VerifyGraph(g) == vx.ERROR_INVALID_VALUE
        enum = vx.KERNEL_BASE(vx.ID_DEFAULT, 7) + 2
        kernel = vx.AddKernel(c, b"org.test.hello2", enum, func, 2, validate_input, validate_output, None, None)
        assert vx.RemoveKernel(kernel) == vx.SUCCESS
        assert vx.ReleaseGraph(g) == vx.SUCCESS
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_module(self):
        """Loading an external kernel module makes its kernels resolvable."""
        c = vx.CreateContext()
        assert vx.LoadKernels(c, "test.module") == vx.SUCCESS
        kernel = vx.GetKernelByName(c, b"org.test.module")
        assert vx.GetStatus(vx.reference(kernel)) == vx.SUCCESS
        assert vx.QueryKernel(kernel, vx.KERNEL_ATTRIBUTE_PARAMETERS, 'vx_uint32') == (vx.SUCCESS, 1)
        assert vx.ReleaseContext(c) == vx.SUCCESS
    def test_use_backend(self):
        """Backend switching rebinds pyvx.vx; unknown backends raise ImportError."""
        for l in range(2):
            pyvx.use_backend("mock_backend")
            from pyvx import vx
            assert vx.CreateContext() == 42
            from test import mock_backend2
            pyvx.use_backend(mock_backend2)
            from pyvx import vx
            assert vx.CreateContext() == 7
            with raises(ImportError):
                pyvx.use_backend("none_excistant")
            pyvx.use_backend("_default")
            from pyvx import vx
            assert not isinstance(vx.CreateContext(), int)
|
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import pkg_resources
import sys
import tempfile
from pkg_resources import (
Distribution,
DistributionNotFound,
EggMetadata)
from twitter.common.lang import Compatibility
from twitter.common.dirutil import chmod_plus_x
from twitter.common.dirutil.chroot import Chroot
from twitter.common.python.importer import EggZipImporter
from twitter.common.python.interpreter import PythonIdentity
from twitter.common.python.marshaller import CodeMarshaller
from twitter.common.python.pex_info import PexInfo
from twitter.common.python.pex import PEX
from twitter.common.python.util import DistributionHelper
BOOTSTRAP_ENVIRONMENT = b"""
import os
import sys
__entry_point__ = None
if '__file__' in locals() and __file__ is not None:
__entry_point__ = os.path.dirname(__file__)
elif '__loader__' in locals():
from zipimport import zipimporter
from pkgutil import ImpLoader
#if isinstance(__loader__, (builtin_zipimport.zipimporter, EggZipImporter)):
if hasattr(__loader__, 'archive'):
__entry_point__ = __loader__.archive
elif isinstance(__loader__, ImpLoader):
__entry_point__ = os.path.dirname(__loader__.get_filename())
if __entry_point__ is None:
sys.stderr.write('Could not launch python executable!\\n')
sys.exit(2)
sys.path[0] = os.path.abspath(sys.path[0])
sys.path.insert(0, os.path.abspath(os.path.join(__entry_point__, '.bootstrap')))
from twitter.common.python.importer import monkeypatch
monkeypatch()
del monkeypatch
from twitter.common.python.pex import PEX
PEX(__entry_point__).execute()
"""
class PEXBuilder(object):
  """Incrementally assembles a PEX inside a chroot: sources, resources, egg
  dependencies, the PEX-INFO manifest and the bootstrap __main__."""
  class InvalidDependency(Exception): pass
  class InvalidExecutableSpecification(Exception): pass

  # Chroot subdirectory holding linked egg dependencies.
  DEPENDENCY_DIR = ".deps"
  # Chroot subdirectory holding the bootstrap runtime.
  BOOTSTRAP_DIR = ".bootstrap"

  def __init__(self, path=None):
    """Build in *path*, or in a fresh temp directory (not auto-cleaned)."""
    self._chroot = Chroot(path or tempfile.mkdtemp())
    self._pex_info = PexInfo.default()
    self._frozen = False
  def chroot(self):
    """Return the underlying Chroot being populated."""
    return self._chroot

  def path(self):
    """Return the filesystem path of the chroot."""
    return self.chroot().path()

  def info(self):
    """Return the mutable PexInfo manifest for this build."""
    return self._pex_info
  def add_source(self, filename, env_filename):
    """Link *filename* into the chroot as *env_filename* under the 'source'
    label; .py files are additionally pre-compiled to a sibling .pyc."""
    self._chroot.link(filename, env_filename, "source")
    if filename.endswith('.py'):
      env_filename_pyc = os.path.splitext(env_filename)[0] + '.pyc'
      # with PEX.timed('Compiling %s' % env_filename_pyc):
      with open(filename) as fp:
        pyc_object = CodeMarshaller.from_py(fp.read(), env_filename)
      self._chroot.write(pyc_object.to_pyc(), env_filename_pyc, 'source')
def add_resource(self, filename, env_filename):
self._chroot.link(filename, env_filename, "resource")
def add_requirement(self, req, dynamic=False, repo=None):
self._pex_info.add_requirement(req, repo=repo, dynamic=dynamic)
def add_dependency_file(self, filename, env_filename):
# TODO(wickman) This is broken. The build cache abstraction just breaks down here.
if filename.endswith('.egg'):
self.add_egg(filename)
else:
self._chroot.link(filename, os.path.join(PEXBuilder.DEPENDENCY_DIR, env_filename))
def add_egg(self, egg):
"""
helper for add_distribution
"""
metadata = EggMetadata(EggZipImporter(egg))
dist = Distribution.from_filename(egg, metadata)
self.add_distribution(dist)
self.add_requirement(dist.as_requirement(), dynamic=False, repo=None)
def add_distribution(self, dist):
if not dist.location.endswith('.egg'):
raise PEXBuilder.InvalidDependency('Non-egg dependencies not yet supported.')
self._chroot.link(dist.location,
os.path.join(PEXBuilder.DEPENDENCY_DIR, os.path.basename(dist.location)))
def set_executable(self, filename, env_filename=None):
if env_filename is None:
env_filename = os.path.basename(filename)
if self._chroot.get("executable"):
raise PEXBuilder.InvalidExecutableSpecification(
"Setting executable on a PEXBuilder that already has one!")
self._chroot.link(filename, env_filename, "executable")
entry_point = env_filename
entry_point.replace(os.path.sep, '.')
self._pex_info.entry_point = entry_point.rpartition('.')[0]
def _prepare_inits(self):
relative_digest = self._chroot.get("source")
init_digest = set()
for path in relative_digest:
split_path = path.split(os.path.sep)
for k in range(1, len(split_path)):
sub_path = os.path.sep.join(split_path[0:k] + ['__init__.py'])
if sub_path not in relative_digest and sub_path not in init_digest:
self._chroot.touch(sub_path)
init_digest.add(sub_path)
def _prepare_manifest(self):
self._chroot.write(self._pex_info.dump().encode('utf-8'), PexInfo.PATH, label='manifest')
def _prepare_main(self):
self._chroot.write(BOOTSTRAP_ENVIRONMENT, '__main__.py', label='main')
def _prepare_bootstrap(self):
"""
Write enough of distribute and pip into the .pex .bootstrap directory so that
we can be fully self-contained.
"""
bare_env = pkg_resources.Environment()
pip_req = pkg_resources.Requirement.parse('pip>=1.1')
distribute_req = pkg_resources.Requirement.parse('distribute>=0.6.24')
pip_dist = distribute_dist = None
for dist in DistributionHelper.all_distributions(sys.path):
if dist in pip_req and bare_env.can_add(dist):
pip_dist = dist
if dist in distribute_req and bare_env.can_add(dist):
distribute_dist = dist
if pip_dist and distribute_dist:
break
if not pip_dist:
raise DistributionNotFound('Could not find pip!')
if not distribute_dist:
raise DistributionNotFound('Could not find distribute!')
PEX.debug('Writing .bootstrap library.')
for fn, content in DistributionHelper.walk_data(pip_dist):
if fn.startswith('pip/'):
# PEX.debug('BOOTSTRAP: Writing %s' % fn)
self._chroot.write(content, os.path.join(self.BOOTSTRAP_DIR, fn), 'resource')
for fn, content in DistributionHelper.walk_data(distribute_dist):
if fn.startswith('pkg_resources.py') or fn.startswith('setuptools'):
# PEX.debug('BOOTSTRAP: Writing %s' % fn)
self._chroot.write(content, os.path.join(self.BOOTSTRAP_DIR, fn), 'resource')
libraries = (
'twitter.common.dirutil',
'twitter.common.collections',
'twitter.common.contextutil',
'twitter.common.lang',
'twitter.common.python'
)
for name in libraries:
dirname = name.replace('.', '/')
provider = pkg_resources.get_provider(name)
if not isinstance(provider, pkg_resources.DefaultProvider):
mod = __import__(name, fromlist=['wutttt'])
provider = pkg_resources.ZipProvider(mod)
for fn in provider.resource_listdir(''):
if fn.endswith('.py'):
# PEX.debug('BOOTSTRAP: Writing %s' % os.path.join(dirname, fn))
self._chroot.write(provider.get_resource_string(name, fn),
os.path.join(self.BOOTSTRAP_DIR, dirname, fn), 'resource')
for initdir in ('twitter', 'twitter/common'):
self._chroot.write(
b"__import__('pkg_resources').declare_namespace(__name__)",
os.path.join(self.BOOTSTRAP_DIR, initdir, '__init__.py'),
'resource')
def freeze(self):
if self._frozen:
return
self._prepare_inits()
self._prepare_manifest()
self._prepare_bootstrap()
self._prepare_main()
self._frozen = True
def build(self, filename):
self.freeze()
try:
os.unlink(filename + '~')
print('WARNING: Previous binary unexpectedly exists, cleaning: %s' % (filename + '~'))
except OSError:
# The expectation is that the file does not exist, so continue
pass
with open(filename + '~', 'ab') as pexfile:
assert os.path.getsize(pexfile.name) == 0
# TODO(wickman) Make this tunable
pexfile.write(Compatibility.to_bytes('%s\n' % PythonIdentity.get().hashbang()))
self._chroot.zip(filename + '~', mode='a')
if os.path.exists(filename):
os.unlink(filename)
os.rename(filename + '~', filename)
chmod_plus_x(filename)
|
|
"""
homeassistant.components.api
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides a Rest API for Home Assistant.
"""
import re
import logging
import threading
import json
import homeassistant as ha
from homeassistant.helpers.state import TrackStates
import homeassistant.remote as rem
from homeassistant.const import (
URL_API, URL_API_STATES, URL_API_EVENTS, URL_API_SERVICES, URL_API_STREAM,
URL_API_EVENT_FORWARD, URL_API_STATES_ENTITY, URL_API_COMPONENTS,
URL_API_CONFIG, URL_API_BOOTSTRAP,
EVENT_TIME_CHANGED, EVENT_HOMEASSISTANT_STOP, MATCH_ALL,
HTTP_OK, HTTP_CREATED, HTTP_BAD_REQUEST, HTTP_NOT_FOUND,
HTTP_UNPROCESSABLE_ENTITY)
# Component registration metadata: this component is named 'api' and
# requires the 'http' component to be set up first.
DOMAIN = 'api'
DEPENDENCIES = ['http']

# Payload and cadence of the keep-alive pings sent on the event stream.
STREAM_PING_PAYLOAD = "ping"
STREAM_PING_INTERVAL = 50  # seconds

_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """ Register the API with the HTTP interface.

    Wires every REST endpoint (validation, event stream, config, bootstrap,
    states, events, services, event forwarding, components) to its handler.
    Returns False when the required 'http' component is not loaded.
    """
    if 'http' not in hass.config.components:
        _LOGGER.error('Dependency http is not loaded')
        return False

    # /api - for validation purposes
    hass.http.register_path('GET', URL_API, _handle_get_api)

    # /api/stream
    hass.http.register_path('GET', URL_API_STREAM, _handle_get_api_stream)

    # /api/config
    hass.http.register_path('GET', URL_API_CONFIG, _handle_get_api_config)

    # /api/bootstrap
    hass.http.register_path(
        'GET', URL_API_BOOTSTRAP, _handle_get_api_bootstrap)

    # /states
    hass.http.register_path('GET', URL_API_STATES, _handle_get_api_states)
    hass.http.register_path(
        'GET', re.compile(r'/api/states/(?P<entity_id>[a-zA-Z\._0-9]+)'),
        _handle_get_api_states_entity)
    # POST and PUT are handled identically for state updates.
    hass.http.register_path(
        'POST', re.compile(r'/api/states/(?P<entity_id>[a-zA-Z\._0-9]+)'),
        _handle_post_state_entity)
    hass.http.register_path(
        'PUT', re.compile(r'/api/states/(?P<entity_id>[a-zA-Z\._0-9]+)'),
        _handle_post_state_entity)

    # /events
    hass.http.register_path('GET', URL_API_EVENTS, _handle_get_api_events)
    hass.http.register_path(
        'POST', re.compile(r'/api/events/(?P<event_type>[a-zA-Z\._0-9]+)'),
        _handle_api_post_events_event)

    # /services
    hass.http.register_path('GET', URL_API_SERVICES, _handle_get_api_services)
    hass.http.register_path(
        'POST',
        re.compile((r'/api/services/'
                    r'(?P<domain>[a-zA-Z\._0-9]+)/'
                    r'(?P<service>[a-zA-Z\._0-9]+)')),
        _handle_post_api_services_domain_service)

    # /event_forwarding
    hass.http.register_path(
        'POST', URL_API_EVENT_FORWARD, _handle_post_api_event_forward)
    hass.http.register_path(
        'DELETE', URL_API_EVENT_FORWARD, _handle_delete_api_event_forward)

    # /components
    hass.http.register_path(
        'GET', URL_API_COMPONENTS, _handle_get_api_components)

    return True
def _handle_get_api(handler, path_match, data):
""" Renders the debug interface. """
handler.write_json_message("API running.")
def _handle_get_api_stream(handler, path_match, data):
    """ Provide a streaming interface for the event bus.

    Implements a server-sent-events style endpoint: every bus event except
    time updates is serialized to JSON and pushed to the client, with a
    periodic ping so broken connections are detected and cleaned up.
    """
    gracefully_closed = False
    hass = handler.server.hass
    wfile = handler.wfile
    # Serializes writes coming from the ping loop and the bus listener.
    write_lock = threading.Lock()
    # Set when the stream should terminate (write error or HA shutdown).
    block = threading.Event()

    def write_message(payload):
        """ Writes a message to the output. """
        with write_lock:
            # SSE framing: "data: <payload>" terminated by a blank line.
            msg = "data: {}\n\n".format(payload)
            try:
                wfile.write(msg.encode("UTF-8"))
                wfile.flush()
            except IOError:
                # Client went away; signal the main loop to stop.
                block.set()

    def forward_events(event):
        """ Forwards events to the open request. """
        nonlocal gracefully_closed
        # Time-changed events fire constantly and are not streamed.
        if block.is_set() or event.event_type == EVENT_TIME_CHANGED:
            return
        elif event.event_type == EVENT_HOMEASSISTANT_STOP:
            gracefully_closed = True
            block.set()
            return
        write_message(json.dumps(event, cls=rem.JSONEncoder))

    handler.send_response(HTTP_OK)
    handler.send_header('Content-type', 'text/event-stream')
    handler.end_headers()

    hass.bus.listen(MATCH_ALL, forward_events)

    # Ping loop: wakes early when the stream is flagged as closed.
    while True:
        write_message(STREAM_PING_PAYLOAD)
        block.wait(STREAM_PING_INTERVAL)
        if block.is_set():
            break

    if not gracefully_closed:
        _LOGGER.info("Found broken event stream to %s, cleaning up",
                     handler.client_address[0])

    hass.bus.remove_listener(MATCH_ALL, forward_events)
def _handle_get_api_config(handler, path_match, data):
""" Returns the Home Assistant config. """
handler.write_json(handler.server.hass.config.as_dict())
def _handle_get_api_bootstrap(handler, path_match, data):
    """Return everything a client needs to bootstrap: config, states,
    event listeners and services, in one payload."""
    hass = handler.server.hass
    payload = {
        'config': hass.config.as_dict(),
        'states': hass.states.all(),
        'events': _events_json(hass),
        'services': _services_json(hass),
    }
    handler.write_json(payload)
def _handle_get_api_states(handler, path_match, data):
""" Returns a dict containing all entity ids and their state. """
handler.write_json(handler.server.hass.states.all())
def _handle_get_api_states_entity(handler, path_match, data):
    """Return the state of a single entity, or 404 if it is unknown."""
    entity_id = path_match.group('entity_id')
    state = handler.server.hass.states.get(entity_id)
    if not state:
        handler.write_json_message("State does not exist.", HTTP_NOT_FOUND)
    else:
        handler.write_json(state)
def _handle_post_state_entity(handler, path_match, data):
    """ Handles updating the state of an entity.

    This handles the following paths:
    /api/states/<entity_id>
    """
    entity_id = path_match.group('entity_id')

    if 'state' not in data:
        handler.write_json_message("state not specified", HTTP_BAD_REQUEST)
        return
    new_state = data['state']
    attributes = data.get('attributes')

    hass = handler.server.hass
    is_new_state = hass.states.get(entity_id) is None

    # Persist, then read back the canonical State object.
    hass.states.set(entity_id, new_state, attributes)
    state = hass.states.get(entity_id)

    handler.write_json(
        state.as_dict(),
        status_code=HTTP_CREATED if is_new_state else HTTP_OK,
        location=URL_API_STATES_ENTITY.format(entity_id))
def _handle_get_api_events(handler, path_match, data):
    """Return an overview of all event listeners."""
    hass = handler.server.hass
    handler.write_json(_events_json(hass))
def _handle_api_post_events_event(handler, path_match, event_data):
    """ Handles firing of an event.

    This handles the following paths:
    /api/events/<event_type>

    Events fired from /api are treated as remote events.
    """
    event_type = path_match.group('event_type')

    if event_data is not None and not isinstance(event_data, dict):
        handler.write_json_message(
            "event_data should be an object", HTTP_UNPROCESSABLE_ENTITY)
        # Bug fix: abort here.  The original fell through and fired the
        # event anyway, right after reporting the payload as invalid.
        return

    event_origin = ha.EventOrigin.remote

    # Special case handling for event STATE_CHANGED
    # We will try to convert state dicts back to State objects
    if event_type == ha.EVENT_STATE_CHANGED and event_data:
        for key in ('old_state', 'new_state'):
            state = ha.State.from_dict(event_data.get(key))
            if state:
                event_data[key] = state

    handler.server.hass.bus.fire(event_type, event_data, event_origin)

    handler.write_json_message("Event {} fired.".format(event_type))
def _handle_get_api_services(handler, path_match, data):
    """Return an overview of all registered services."""
    hass = handler.server.hass
    handler.write_json(_services_json(hass))
# pylint: disable=invalid-name
def _handle_post_api_services_domain_service(handler, path_match, data):
    """ Handles calling a service.

    This handles the following paths:
    /api/services/<domain>/<service>

    Responds with the list of states that changed while the service ran.
    """
    domain = path_match.group('domain')
    service = path_match.group('service')
    hass = handler.server.hass

    with TrackStates(hass) as changed_states:
        hass.services.call(domain, service, data, True)

    handler.write_json(changed_states)
# pylint: disable=invalid-name
def _handle_post_api_event_forward(handler, path_match, data):
    """ Handles adding an event forwarding target. """
    if 'host' not in data or 'api_password' not in data:
        handler.write_json_message(
            "No host or api_password received.", HTTP_BAD_REQUEST)
        return
    host = data['host']
    api_password = data['api_password']

    port = None
    if 'port' in data:
        try:
            port = int(data['port'])
        except ValueError:
            handler.write_json_message(
                "Invalid value received for port", HTTP_UNPROCESSABLE_ENTITY)
            return

    api = rem.API(host, api_password, port)
    if not api.validate_api():
        handler.write_json_message(
            "Unable to validate API", HTTP_UNPROCESSABLE_ENTITY)
        return

    server = handler.server
    # Lazily create the forwarder on first use.
    if server.event_forwarder is None:
        server.event_forwarder = rem.EventForwarder(server.hass)
    server.event_forwarder.connect(api)

    handler.write_json_message("Event forwarding setup.")
def _handle_delete_api_event_forward(handler, path_match, data):
    """ Handles deleting an event forwarding target. """
    if 'host' not in data:
        handler.write_json_message("No host received.", HTTP_BAD_REQUEST)
        return
    host = data['host']

    port = None
    if 'port' in data:
        try:
            port = int(data['port'])
        except ValueError:
            handler.write_json_message(
                "Invalid value received for port", HTTP_UNPROCESSABLE_ENTITY)
            return

    forwarder = handler.server.event_forwarder
    if forwarder is not None:
        forwarder.disconnect(rem.API(host, None, port))

    handler.write_json_message("Event forwarding cancelled.")
def _handle_get_api_components(handler, path_match, data):
""" Returns all the loaded components. """
handler.write_json(handler.server.hass.config.components)
def _services_json(hass):
""" Generate services data to JSONify. """
return [{"domain": key, "services": value}
for key, value in hass.services.services.items()]
def _events_json(hass):
""" Generate event data to JSONify. """
return [{"event": key, "listener_count": value}
for key, value in hass.bus.listeners.items()]
|
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2019
import os
import re
import tarfile
from pilot.common.exception import FileHandlingFailure, PilotException
from pilot.util.filehandling import write_file, mkdirs, rmdirs
import logging
# Module-level logger shared by all DBRelease helpers below.
logger = logging.getLogger(__name__)
def extract_version(name):
    """
    Try to extract the version from the DBRelease string.

    :param name: DBRelease (string).
    :return: version (string), empty when no version can be found.
    """
    # Try a three-component version first, then fall back to four
    # components (same order as the original sequential checks).
    for pattern in (r'DBRelease-(\d+\.\d+\.\d+)\.tar\.gz',
                    r'DBRelease-(\d+\.\d+\.\d+\.\d+)\.tar\.gz'):
        found = re.search(pattern, name)
        if found:
            return found.group(1)
    return ""
def get_dbrelease_version(jobpars):
    """
    Get the DBRelease version from the job parameters.

    :param jobpars: job parameters (string).
    :return: DBRelease version (string).
    """
    # The version is embedded in the job parameters as a DBRelease tarball name.
    return extract_version(jobpars)
def get_dbrelease_dir():
    """
    Return the proper DBRelease directory.

    Uses $VO_ATLAS_SW_DIR/database/DBRelease when VO_ATLAS_SW_DIR is set,
    otherwise $OSG_APP/database/DBRelease.

    :return: path to DBRelease (string).
    """
    if 'VO_ATLAS_SW_DIR' in os.environ:
        path = os.path.expandvars('$VO_ATLAS_SW_DIR/database/DBRelease')
    else:
        path = os.path.expandvars('$OSG_APP/database/DBRelease')
    # Bug fix: when OSG_APP is undefined, expandvars() leaves the literal
    # '$OSG_APP' in place, so the check must include the '$' (the original
    # tested startswith('OSG_APP'), which could never match).
    if path == "" or path.startswith('$OSG_APP'):
        logger.warning("note: the DBRelease database directory is not available (will not attempt to skip DBRelease stage-in)")
    else:
        if os.path.exists(path):
            logger.info("local DBRelease path verified: %s (will attempt to skip DBRelease stage-in)" % path)
        else:
            logger.warning("note: local DBRelease path does not exist: %s (will not attempt to skip DBRelease stage-in)" % path)
    return path
def is_dbrelease_available(version):
    """
    Check whether a given DBRelease file is already available.

    :param version: DBRelease version (string).
    :return: Boolean (True is DBRelease is locally available).
    """
    # An explicit override forces a real stage-in.
    if 'ATLAS_DBREL_DWNLD' in os.environ:
        logger.info("ATLAS_DBREL_DWNLD is set: do not skip DBRelease stage-in")
        return False

    # get the local path to the DBRelease directory
    path = get_dbrelease_dir()
    if not path or not os.path.exists(path):
        logger.warning('no such DBRelease path: %s' % path)
        return False

    # get the list of available DBRelease directories
    releases = os.listdir(path)
    if not releases:
        logger.warning("empty DBRelease directory list: %s" % path)
        return False

    # is the required DBRelease version available?
    if version in releases:
        logger.info("found version %s in path %s (%d releases found)" % (version, path, len(releases)))
        return True
    logger.warning("did not find version %s in path %s (%d releases found)" % (version, path, len(releases)))
    return False
def create_setup_file(version, path):
    """
    Create the DBRelease setup file.

    Writes a small Python script to ``path`` that exports the DBRelease
    environment variables pointing at the local release directory.

    :param version: DBRelease version (string).
    :param path: path to local DBReleases (string).
    :return: Boolean (True if DBRelease setup file was successfully created).
    """
    status = False

    # get the DBRelease directory
    d = get_dbrelease_dir()
    if d != "" and version != "":
        # create the python code string to be written to file
        txt = "import os\n"
        txt += "os.environ['DBRELEASE'] = '%s'\n" % version
        txt += "os.environ['DATAPATH'] = '%s/%s:' + os.environ['DATAPATH']\n" % (d, version)
        txt += "os.environ['DBRELEASE_REQUIRED'] = '%s'\n" % version
        txt += "os.environ['DBRELEASE_REQUESTED'] = '%s'\n" % version
        txt += "os.environ['CORAL_DBLOOKUP_PATH'] = '%s/%s/XMLConfig'\n" % (d, version)

        try:
            status = write_file(path, txt)
        except FileHandlingFailure as e:
            logger.warning('failed to create DBRelease setup file: %s' % e)
        else:
            logger.info("Created setup file with the following content:.................................\n%s" % txt)
            logger.info("...............................................................................")
    else:
        logger.warning('failed to create %s for DBRelease version=%s and directory=%s' % (path, version, d))

    return status
def create_dbrelease(version, path):
    """
    Create the DBRelease file only containing a setup file.

    Builds DBRelease/<version>/setup.py under ``path``, packs it (plus a
    DBRelease/current symlink) into DBRelease-<version>.tar.gz, then removes
    the scratch directories again.

    :param version: DBRelease version (string).
    :param path: path to DBRelease (string).
    :return: Boolean (True is DBRelease file was successfully created).
    """
    status = False

    # create the DBRelease and version directories
    dbrelease_path = os.path.join(path, 'DBRelease')
    _path = os.path.join(dbrelease_path, version)
    try:
        mkdirs(_path, chmod=None)
    except PilotException as e:
        logger.warning('failed to create directories for DBRelease: %s' % e)
    else:
        logger.debug('created directories: %s' % _path)

        # create the setup file in the DBRelease directory
        version_path = os.path.join(dbrelease_path, version)
        setup_filename = "setup.py"
        _path = os.path.join(version_path, setup_filename)
        if create_setup_file(version, _path):
            logger.info("created DBRelease setup file: %s" % _path)

            # now create a new DBRelease tarball
            filename = os.path.join(path, "DBRelease-%s.tar.gz" % version)
            logger.info("creating file: %s" % filename)
            try:
                tar = tarfile.open(filename, "w:gz")
            except Exception as e:
                logger.warning("could not create DBRelease tar file: %s" % e)
            else:
                if tar:
                    # add the setup file to the tar file
                    tar.add("%s/DBRelease/%s/%s" % (path, version, setup_filename))

                    # create the symbolic link DBRelease/current -> 12.2.1
                    try:
                        _link = os.path.join(path, "DBRelease/current")
                        os.symlink(version, _link)
                    except Exception as e:
                        logger.warning("failed to create symbolic link %s: %s" % (_link, e))
                    else:
                        # NOTE(review): success is logged at warning level
                        # here; probably intended to be info/debug - confirm.
                        logger.warning("created symbolic link: %s" % _link)

                        # add the symbolic link to the tar file
                        tar.add(_link)

                        # done with the tar archive
                        tar.close()

                        logger.info("created new DBRelease tar file: %s" % filename)
                        status = True
                else:
                    logger.warning("failed to open DBRelease tar file")

            # clean up
            if rmdirs(dbrelease_path):
                logger.debug("cleaned up directories in path: %s" % dbrelease_path)
        else:
            logger.warning("failed to create DBRelease setup file")

            # clean up
            if rmdirs(dbrelease_path):
                logger.debug("cleaned up directories in path: %s" % dbrelease_path)

    return status
|
|
import unittest
import sys
import rpy2.rinterface as ri
# Initialize the embedded R engine; required before any rinterface call.
ri.initr()
def evalr(string):
    """Parse and evaluate `string` as R code, returning the resulting SEXP."""
    expression = ri.baseenv["parse"](text=ri.StrSexpVector((string, )))
    return ri.baseenv["eval"](expression)
def floatEqual(x, y, epsilon = 0.00000001):
    """Return True when x and y differ by less than epsilon."""
    delta = x - y
    if delta < 0:
        delta = -delta
    return delta < epsilon
class WrapperSexpVectorTestCase(unittest.TestCase):
    """Smoke tests for the typed SexpVector wrapper constructors."""

    def testInt(self):
        sexp = ri.IntSexpVector([1, ])
        isInteger = ri.globalenv.get("is.integer")
        ok = isInteger(sexp)[0]
        self.assertTrue(ok)

    def testFloat(self):
        # Bug fix: build a FloatSexpVector here.  The original constructed
        # an IntSexpVector, which also satisfies is.numeric in R, so the
        # float wrapper was never actually exercised.
        sexp = ri.FloatSexpVector([1.0, ])
        isNumeric = ri.globalenv.get("is.numeric")
        ok = isNumeric(sexp)[0]
        self.assertTrue(ok)

    def testStr(self):
        sexp = ri.StrSexpVector(["a", ])
        isStr = ri.globalenv.get("is.character")
        ok = isStr(sexp)[0]
        self.assertTrue(ok)

    def testBool(self):
        sexp = ri.BoolSexpVector([True, ])
        isBool = ri.globalenv.get("is.logical")
        ok = isBool(sexp)[0]
        self.assertTrue(ok)

    def testComplex(self):
        sexp = ri.ComplexSexpVector([1+2j, ])
        is_complex = ri.globalenv.get("is.complex")
        ok = is_complex(sexp)[0]
        self.assertTrue(ok)
class NAValuesTestCase(unittest.TestCase):
    """Tests for the R missing-value (NA) singleton types."""

    def testRtoNAInteger(self):
        na_int = ri.NAIntegerType()
        r_na_int = evalr("NA_integer_")[0]
        self.assertTrue(r_na_int is na_int)

    def testNAIntegertoR(self):
        na_int = ri.NAIntegerType()
        self.assertEquals(True, ri.baseenv["is.na"](na_int)[0])

    def testNAIntegerBinaryfunc(self):
        # Arithmetic on NA propagates NA.
        na_int = ri.NAIntegerType()
        self.assertTrue((na_int + 2) is na_int)

    def testNAIntegerInVector(self):
        na_int = ri.NAIntegerType()
        x = ri.IntSexpVector((1, na_int, 2))
        self.assertTrue(x[1] is na_int)
        self.assertEquals(1, x[0])
        self.assertEquals(2, x[2])

    def testNAIntegerRepr(self):
        na_int = ri.NAIntegerType()
        self.assertEquals("NA_integer_", repr(na_int))

    def testRtoNALogical(self):
        na_lgl = ri.NALogicalType()
        r_na_lgl = evalr("NA")[0]
        self.assertTrue(r_na_lgl is na_lgl)

    def testNALogicaltoR(self):
        na_lgl = ri.NALogicalType()
        self.assertEquals(True, ri.baseenv["is.na"](na_lgl)[0])

    def testNALogicalInVector(self):
        na_bool = ri.NALogicalType()
        x = ri.BoolSexpVector((True, na_bool, False))
        self.assertTrue(x[1] is na_bool)
        self.assertEquals(True, x[0])
        self.assertEquals(False, x[2])

    def testNALogicalRepr(self):
        # Bug fix: this method was also named testNAIntegerRepr, silently
        # overriding the integer variant above so it never ran.
        na_bool = ri.NALogicalType()
        self.assertEquals("NA", repr(na_bool))

    def testRtoNAReal(self):
        na_real = ri.NARealType()
        r_na_real = evalr("NA_real_")[0]
        self.assertTrue(r_na_real is na_real)

    def testNARealtoR(self):
        na_real = ri.NARealType()
        self.assertEquals(True, ri.baseenv["is.na"](na_real)[0])

    def testNARealBinaryfunc(self):
        na_real = ri.NARealType()
        self.assertTrue((na_real + 2.0) is na_real)

    def testNARealInVector(self):
        na_float = ri.NARealType()
        x = ri.FloatSexpVector((1.1, na_float, 2.2))
        self.assertTrue(x[1] is na_float)
        self.assertEquals(1.1, x[0])
        self.assertEquals(2.2, x[2])

    def testNARealRepr(self):
        na_float = ri.NARealType()
        self.assertEquals("NA_real_", repr(na_float))

    def testRtoNACharacter(self):
        na_character = ri.NACharacterType()
        r_na_character = evalr("NA_character_")[0]
        self.assertTrue(r_na_character is na_character)

    def testNACharactertoR(self):
        na_character = ri.NACharacterType()
        self.assertEquals(True, ri.baseenv["is.na"](ri.StrSexpVector((na_character, )))[0])

    def testNACharacterInVector(self):
        na_str = ri.NACharacterType()
        x = ri.StrSexpVector(("ab", na_str, "cd"))
        self.assertTrue(x[1] is na_str)
        self.assertEquals("ab", x[0])
        self.assertEquals("cd", x[2])

    def testNACharacterRepr(self):
        na_str = ri.NACharacterType()
        self.assertEquals("NA_character_", repr(na_str))
class SexpVectorTestCase(unittest.TestCase):
def testMissinfType(self):
self.assertRaises(ValueError, ri.SexpVector, [2, ])
#FIXME: end and initializing again causes currently a lot a trouble...
def testNewWithoutInit(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
self.assertTrue(False) # cannot be tested with Python < 2.6
return None
import multiprocessing
def foo(queue):
import rpy2.rinterface as rinterface
rinterface.endr(1)
try:
tmp = ri.SexpVector([1,2], ri.INTSXP)
res = (False, None)
except RuntimeError, re:
res = (True, re)
except Exception, e:
res = (False, e)
queue.put(res)
q = multiprocessing.Queue()
p = multiprocessing.Process(target = foo, args = (q,))
p.start()
res = q.get()
p.join()
self.assertTrue(res[0])
def testNewBool(self):
sexp = ri.SexpVector([True, ], ri.LGLSXP)
isLogical = ri.globalenv.get("is.logical")
ok = isLogical(sexp)[0]
self.assertTrue(ok)
self.assertTrue(sexp[0])
sexp = ri.SexpVector(["a", ], ri.LGLSXP)
isLogical = ri.globalenv.get("is.logical")
ok = isLogical(sexp)[0]
self.assertTrue(ok)
self.assertTrue(sexp[0])
def testNewInt(self):
sexp = ri.SexpVector([1, ], ri.INTSXP)
isInteger = ri.globalenv.get("is.integer")
ok = isInteger(sexp)[0]
self.assertTrue(ok)
sexp = ri.SexpVector(["a", ], ri.INTSXP)
isNA = ri.globalenv.get("is.na")
ok = isNA(sexp)[0]
self.assertTrue(ok)
def testNewReal(self):
sexp = ri.SexpVector([1.0, ], ri.REALSXP)
isNumeric = ri.globalenv.get("is.numeric")
ok = isNumeric(sexp)[0]
self.assertTrue(ok)
sexp = ri.SexpVector(["a", ], ri.REALSXP)
isNA = ri.globalenv.get("is.na")
ok = isNA(sexp)[0]
self.assertTrue(ok)
def testNewComplex(self):
sexp = ri.SexpVector([1.0 + 1.0j, ], ri.CPLXSXP)
isComplex = ri.globalenv.get("is.complex")
ok = isComplex(sexp)[0]
self.assertTrue(ok)
def testNewString(self):
sexp = ri.SexpVector(["abc", ], ri.STRSXP)
isCharacter = ri.globalenv.get("is.character")
ok = isCharacter(sexp)[0]
self.assertTrue(ok)
sexp = ri.SexpVector([1, ], ri.STRSXP)
isCharacter = ri.globalenv.get("is.character")
ok = isCharacter(sexp)[0]
self.assertTrue(ok)
def testNewUnicode(self):
sexp = ri.SexpVector([u'abc', ], ri.STRSXP)
isCharacter = ri.globalenv.get("is.character")
ok = isCharacter(sexp)[0]
self.assertTrue(ok)
self.assertEquals('abc', sexp[0])
def testNewList(self):
vec = ri.ListSexpVector([1,'b',3,'d',5])
ok = ri.baseenv["is.list"](vec)[0]
self.assertTrue(ok)
self.assertEquals(5, len(vec))
self.assertEquals(1, vec[0][0])
self.assertEquals('b', vec[1][0])
def testNewVector(self):
sexp_char = ri.SexpVector(["abc", ],
ri.STRSXP)
sexp_int = ri.SexpVector([1, ],
ri.INTSXP)
sexp = ri.SexpVector([sexp_char, sexp_int],
ri.VECSXP)
isList = ri.globalenv.get("is.list")
ok = isList(sexp)[0]
self.assertTrue(ok)
self.assertEquals(2, len(sexp))
def testNew_InvalidType_NotAType(self):
self.assertRaises(ValueError, ri.SexpVector, [1, ], -1)
self.assertRaises(ValueError, ri.SexpVector, [1, ], 250)
def testNew_InvalidType_NotAVectorType(self):
self.assertRaises(ValueError, ri.SexpVector, [1, ], ri.ENVSXP)
def testNew_InvalidType_NotASequence(self):
self.assertRaises(ValueError, ri.SexpVector, 1, ri.INTSXP)
def testGetItem(self):
letters_R = ri.globalenv.get("letters")
self.assertTrue(isinstance(letters_R, ri.SexpVector))
letters = (('a', 0), ('b', 1), ('c', 2),
('x', 23), ('y', 24), ('z', 25))
for l, i in letters:
self.assertTrue(letters_R[i] == l)
Rlist = ri.globalenv.get("list")
seq_R = ri.globalenv.get("seq")
mySeq = seq_R(ri.SexpVector([0, ], ri.INTSXP),
ri.SexpVector([10, ], ri.INTSXP))
myList = Rlist(s=mySeq, l=letters_R)
idem = ri.globalenv.get("identical")
self.assertTrue(idem(mySeq, myList[0]))
self.assertTrue(idem(letters_R, myList[1]))
letters_R = ri.globalenv.get("letters")
self.assertEquals('z', letters_R[-1])
def testGetItemLang(self):
formula = ri.baseenv.get('formula')
f = formula(ri.StrSexpVector(['y ~ x', ]))
y = f[0]
self.assertEquals(ri.SYMSXP, y.typeof)
def testGetItemExpression(self):
expression = ri.baseenv.get('expression')
e = expression(ri.StrSexpVector(['a', ]),
ri.StrSexpVector(['b', ]))
y = e[0]
self.assertEquals(ri.STRSXP, y.typeof)
def testGetItemPairList(self):
pairlist = ri.baseenv.get('pairlist')
pl = pairlist(a = ri.StrSexpVector([1, ]))
y = pl[0]
self.assertEquals(ri.LISTSXP, y.typeof)
def testGetItemNegativeOutOfBound(self):
letters_R = ri.globalenv.get("letters")
self.assertRaises(IndexError, letters_R.__getitem__,
-100)
def testGetItemOutOfBound(self):
myVec = ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP)
self.assertRaises(IndexError, myVec.__getitem__, 10)
if (sys.maxint > ri.R_LEN_T_MAX):
self.assertRaises(IndexError, myVec.__getitem__,
ri.R_LEN_T_MAX+1)
def testGetSliceFloat(self):
vec = ri.FloatSexpVector([1.0,2.0,3.0])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1.0, vec[0])
self.assertEquals(2.0, vec[1])
def testGetSliceInt(self):
vec = ri.IntSexpVector([1,2,3])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1, vec[0])
self.assertEquals(2, vec[1])
def testGetSliceIntNegative(self):
vec = ri.IntSexpVector([1,2,3])
vec = vec[-2:-1]
self.assertEquals(1, len(vec))
self.assertEquals(2, vec[0])
def testGetSliceBool(self):
vec = ri.BoolSexpVector([True,False,True])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(True, vec[0])
self.assertEquals(False, vec[1])
def testGetSliceStr(self):
vec = ri.StrSexpVector(['a','b','c'])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals('a', vec[0])
self.assertEquals('b', vec[1])
def testGetSliceComplex(self):
vec = ri.ComplexSexpVector([1+2j,2+3j,3+4j])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1+2j, vec[0])
self.assertEquals(2+3j, vec[1])
def testGetSliceList(self):
vec = ri.ListSexpVector([1,'b',True])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1, vec[0][0])
self.assertEquals('b', vec[1][0])
def testAssignItemDifferentType(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
self.assertRaises(ValueError, myVec.__setitem__, 0,
ri.SexpVector(["a", ], ri.STRSXP))
def testAssignItemOutOfBound(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
self.assertRaises(IndexError, myVec.__setitem__, 10,
ri.SexpVector([1, ], ri.INTSXP))
def testAssignItemInt(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
myVec[0] = ri.SexpVector([100, ], ri.INTSXP)
self.assertTrue(myVec[0] == 100)
myVec[3] = ri.SexpVector([100, ], ri.INTSXP)
self.assertTrue(myVec[3] == 100)
myVec[-1] = ri.SexpVector([200, ], ri.INTSXP)
self.assertTrue(myVec[5] == 200)
def testAssignItemReal(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0.0, 1.0, 2.0, 3.0, 4.0, 5.0],
ri.REALSXP))
myVec[0] = ri.SexpVector([100.0, ], ri.REALSXP)
self.assertTrue(floatEqual(myVec[0], 100.0))
myVec[3] = ri.SexpVector([100.0, ], ri.REALSXP)
self.assertTrue(floatEqual(myVec[3], 100.0))
def testAssignItemLogical(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([True, False, True, True, False],
ri.LGLSXP))
myVec[0] = ri.SexpVector([False, ], ri.LGLSXP)
self.assertFalse(myVec[0])
myVec[3] = ri.SexpVector([False, ], ri.LGLSXP)
self.assertFalse(myVec[3])
def testAssignItemComplex(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([1.0+2.0j, 2.0+2.0j, 3.0+2.0j,
4.0+2.0j, 5.0+2.0j],
ri.CPLXSXP))
myVec[0] = ri.SexpVector([100.0+200.0j, ], ri.CPLXSXP)
self.assertTrue(floatEqual(myVec[0].real, 100.0))
self.assertTrue(floatEqual(myVec[0].imag, 200.0))
myVec[3] = ri.SexpVector([100.0+200.0j, ], ri.CPLXSXP)
self.assertTrue(floatEqual(myVec[3].real, 100.0))
self.assertTrue(floatEqual(myVec[3].imag, 200.0))
def testAssignItemList(self):
myVec = ri.SexpVector([ri.StrSexpVector(["a", ]),
ri.IntSexpVector([1, ]),
ri.IntSexpVector([3, ])],
ri.VECSXP)
myVec[0] = ri.SexpVector([ri.FloatSexpVector([100.0, ]), ],
ri.VECSXP)
self.assertTrue(floatEqual(myVec[0][0][0], 100.0))
myVec[2] = ri.SexpVector([ri.StrSexpVector(["a", ]), ],
ri.VECSXP)
self.assertTrue(myVec[2][0][0] == "a")
def testAssignItemString(self):
letters_R = ri.SexpVector("abcdefghij", ri.STRSXP)
self.assertRaises(ValueError, letters_R.__setitem__, 0,
ri.SexpVector([1, ],
ri.INTSXP))
letters_R[0] = ri.SexpVector(["z", ], ri.STRSXP)
self.assertTrue(letters_R[0] == "z")
def testSetSliceFloat(self):
vec = ri.FloatSexpVector([1.0,2.0,3.0])
vec[0:2] = ri.FloatSexpVector([11.0, 12.0])
self.assertEquals(3, len(vec))
self.assertEquals(11.0, vec[0])
self.assertEquals(12.0, vec[1])
self.assertEquals(3.0, vec[2])
def testSetSliceInt(self):
    """Slice assignment on an int vector updates only the targeted range."""
    vec = ri.IntSexpVector([1, 2, 3])
    vec[0:2] = ri.IntSexpVector([11, 12])
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(3, len(vec))
    self.assertEqual(11, vec[0])
    self.assertEqual(12, vec[1])
def testSetSliceIntNegative(self):
    """Negative slice indices are honoured by slice assignment."""
    vec = ri.IntSexpVector([1, 2, 3])
    vec[-2:-1] = ri.IntSexpVector([33, ])
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(3, len(vec))
    self.assertEqual(33, vec[1])
def testSetSliceBool(self):
    """Slice assignment on a logical vector updates only the targeted range."""
    vec = ri.BoolSexpVector([True, False, True])
    vec[0:2] = ri.BoolSexpVector([False, False])
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(3, len(vec))
    self.assertEqual(False, vec[0])
    self.assertEqual(False, vec[1])
def testSetSliceStr(self):
    """Slice assignment on a string vector updates only the targeted range."""
    vec = ri.StrSexpVector(['a', 'b', 'c'])
    vec[0:2] = ri.StrSexpVector(['d', 'e'])
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(3, len(vec))
    self.assertEqual('d', vec[0])
    self.assertEqual('e', vec[1])
def testSetSliceComplex(self):
    """Slice assignment on a complex vector updates only the targeted range."""
    vec = ri.ComplexSexpVector([1+2j, 2+3j, 3+4j])
    vec[0:2] = ri.ComplexSexpVector([11+2j, 12+3j])
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(3, len(vec))
    self.assertEqual(11+2j, vec[0])
    self.assertEqual(12+3j, vec[1])
def testSetSliceList(self):
    """Slice assignment on an R list updates only the targeted range."""
    vec = ri.ListSexpVector([1, 'b', True])
    vec[0:2] = ri.ListSexpVector([False, 2])
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(3, len(vec))
    self.assertEqual(False, vec[0][0])
    self.assertEqual(2, vec[1][0])
def testMissingRPreserveObjectBug(self):
    """Regression: R's gc() must not collect a vector still owned by Python."""
    rgc = ri.baseenv['gc']
    xx = range(100000)
    x = ri.SexpVector(xx, ri.INTSXP)
    rgc()
    # FIX: deprecated assertEquals -> assertEqual (removed in Python 3.12).
    self.assertEqual(0, x[0])
def suite():
    """Aggregate all SexpVector-related test cases into a single suite."""
    loader = unittest.TestLoader()
    result = loader.loadTestsFromTestCase(SexpVectorTestCase)
    result.addTest(loader.loadTestsFromTestCase(WrapperSexpVectorTestCase))
    result.addTest(loader.loadTestsFromTestCase(NAValuesTestCase))
    return result
if __name__ == '__main__':
    # Run the aggregated suite with per-test reporting.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def placeholder_inputs(batch_size):
    """Create the image/label placeholder pair fed by the training loop.

    The placeholder shapes match the full image and label tensors except
    that the leading dimension is ``batch_size`` rather than the whole
    train/test set size.

    Args:
      batch_size: The batch size baked into both placeholders.

    Returns:
      images_placeholder: Images placeholder.
      labels_placeholder: Labels placeholder.
    """
    images_ph = tf.placeholder(tf.float32,
                               shape=(batch_size, mnist.IMAGE_PIXELS))
    labels_ph = tf.placeholder(tf.int32, shape=(batch_size))
    return images_ph, labels_ph
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Build the feed_dict for one training step.

    Args:
      data_set: The set of images and labels, from input_data.read_data_sets()
      images_pl: The images placeholder, from placeholder_inputs().
      labels_pl: The labels placeholder, from placeholder_inputs().

    Returns:
      A dict mapping each placeholder to the next batch of values.
    """
    # Pull the next `batch_size` examples (fake data if requested).
    batch_images, batch_labels = data_set.next_batch(FLAGS.batch_size,
                                                     FLAGS.fake_data)
    return {
        images_pl: batch_images,
        labels_pl: batch_labels,
    }
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
    """Run one full-epoch evaluation and print precision@1.

    Args:
      sess: The session in which the model has been trained.
      eval_correct: The Tensor that returns the number of correct predictions.
      images_placeholder: The images placeholder.
      labels_placeholder: The labels placeholder.
      data_set: The set of images and labels to evaluate, from
        input_data.read_data_sets().
    """
    # Only whole batches are evaluated, so the tail of the epoch is dropped.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    true_count = 0  # running number of correct predictions
    for _ in xrange(steps_per_epoch):
        feed = fill_feed_dict(data_set,
                              images_placeholder,
                              labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed)
    precision = true_count / num_examples
    print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))
def run_training():
    """Train MNIST for a number of steps.

    Builds the inference/loss/training graph, then runs the training loop,
    periodically writing summaries, saving checkpoints and evaluating on
    the train/validation/test splits.
    """
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder,
                                 FLAGS.hidden1,
                                 FLAGS.hidden2)
        # Add to the Graph the Ops for loss calculation.
        loss = mnist.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = mnist.training(loss, FLAGS.learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()
        # Add the variable initializer Op.
        # FIX: tf.initialize_all_variables() is deprecated; use the
        # replacement, consistent with the tf.summary API already used above.
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Instantiate a FileWriter to output summaries and the Graph.
        # FIX: tf.train.SummaryWriter is deprecated -> tf.summary.FileWriter.
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
        # And then after everything is built:
        # Run the Op to initialize the variables.
        sess.run(init)
        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train,
                                       images_placeholder,
                                       labels_placeholder)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.test)
def main(_):
    """Entry point for tf.app.run(): start from a clean log dir, then train."""
    log_dir = FLAGS.log_dir
    if tf.gfile.Exists(log_dir):
        # Wipe previous event files/checkpoints so runs do not mix.
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    run_training()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # (flag, keyword arguments) pairs, registered in the original order.
    flag_specs = [
        ('--learning_rate',
         dict(type=float, default=0.01, help='Initial learning rate.')),
        ('--max_steps',
         dict(type=int, default=2000, help='Number of steps to run trainer.')),
        ('--hidden1',
         dict(type=int, default=128, help='Number of units in hidden layer 1.')),
        ('--hidden2',
         dict(type=int, default=32, help='Number of units in hidden layer 2.')),
        ('--batch_size',
         dict(type=int, default=100,
              help='Batch size. Must divide evenly into the dataset sizes.')),
        ('--input_data_dir',
         dict(type=str, default='/tmp/tensorflow/mnist/input_data',
              help='Directory to put the input data.')),
        ('--log_dir',
         dict(type=str,
              default='/tmp/tensorflow/mnist/logs/fully_connected_feed',
              help='Directory to put the log data.')),
        ('--fake_data',
         dict(default=False, action='store_true',
              help='If true, uses fake data for unit testing.')),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
    FLAGS = parser.parse_args()
    tf.app.run()
|
|
import wx
import sys
try:
from wx import glcanvas
haveGLCanvas = True
except ImportError:
haveGLCanvas = False
try:
# The Python OpenGL package can be found at
# http://PyOpenGL.sourceforge.net/
from OpenGL.GL import *
from OpenGL.GLUT import *
haveOpenGL = True
except ImportError:
haveOpenGL = False
from jumeg.tsvgl.plot2d.jumeg_tsv_plot2d_ogl import JuMEG_TSV_PLOT2D_OGL
class JuMEG_TSV_PLOT2D_CanvasBase(glcanvas.GLCanvas):
    """Base wx GLCanvas: owns the GL context, wires paint/size/key events
    and defers the actual rendering to subclass hooks InitGL()/OnDraw()."""

    def __init__(self, parent):
        attribList = (glcanvas.WX_GL_RGBA,          # RGBA
                      glcanvas.WX_GL_DOUBLEBUFFER,  # Double Buffered
                      glcanvas.WX_GL_DEPTH_SIZE, 16)
        glcanvas.GLCanvas.__init__(self, parent, -1, attribList=attribList,
                                   style=wx.DEFAULT_FRAME_STYLE)
        # re-entrancy guards used by OnPaint/OnDraw
        self.is_initGL = False
        self.is_on_draw = False
        self.is_on_paint = False
        self.is_on_size = False
        self.init = False
        self.context = glcanvas.GLContext(self)
        # initial mouse position
        self.lastx = self.x = 30
        self.lasty = self.y = 30
        self.size = None
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        # self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
        # self.Bind(wx.EVT_LEFT_UP, self.OnMouseUp)
        # self.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.Bind(wx.EVT_CHAR, self.OnKeyDown)

    def OnEraseBackground(self, event):
        pass  # Do nothing, to avoid flashing on MSW.

    def OnSize(self, event):
        wx.CallAfter(self.DoSetViewport)
        event.Skip()

    def DoSetViewport(self):
        size = self.size = self.GetClientSize()
        self.SetCurrent(self.context)
        glViewport(0, 0, size.width, size.height)

    def OnPaint(self, event):
        # Skip if a draw is already in progress (guard against re-entrancy).
        if self.is_on_draw:
            return
        dc = wx.PaintDC(self)
        self.SetCurrent(self.context)
        if not self.is_initGL:
            self.InitGL()
        self.is_on_paint = True
        self.OnDraw(size_mm=dc.GetSizeMM())
        self.is_on_paint = False

    def InitGL(self):
        # FIX: was named ``initGL`` while OnPaint calls ``self.InitGL()``,
        # so the dummy could never be reached and a subclass missing the
        # override raised AttributeError.
        print(" ToDo dummy def InitGL overwrite")

    # backward-compatible alias for the old (mis)spelled hook name
    initGL = InitGL

    def OnDraw(self, size_mm=None):
        # FIX: OnPaint passes a ``size_mm`` keyword; the dummy must accept it
        # or every unoverridden paint raised TypeError.
        # FIX: py2-only ``print"..."`` statement -> py2/py3 function call.
        print(" ToDo dummy def OnDraw overwrite")

    def OnKeyDown(self, e):
        key = e.GetKeyCode()
        # print"GLCanvas EVT OnKeyDown: " + str(key)
        # --- escape to quit
        if key == wx.WXK_ESCAPE:
            self.click_on_exit(e)

    """
    def OnMouseDown(self, evt):
        self.CaptureMouse()
        self.x, self.y = self.lastx, self.lasty = evt.GetPosition()
    def OnMouseUp(self, evt):
        self.ReleaseMouse()
    def OnMouseMotion(self, evt):
        if evt.Dragging() and evt.LeftIsDown():
            self.lastx, self.lasty = self.x, self.y
            self.x, self.y = evt.GetPosition()
            self.Refresh(False)
    """
class JuMEG_TSV_PLOT2D_WX(JuMEG_TSV_PLOT2D_CanvasBase):
    """Concrete GL canvas: maps key presses onto plot scroll actions and
    delegates rendering to a JuMEG_TSV_PLOT2D_OGL instance."""

    def __init__(self, parent=None, *args, **kwargs):
        super(JuMEG_TSV_PLOT2D_WX, self).__init__(parent)  # , *args, **kwargs)

    def OnKeyDown(self, evt):
        """Translate key codes into plot actions (time/channel scrolling)."""
        action = None
        if not self.is_initGL:
            # FIX: was ``evt.skip()`` -- wx.Event has no ``skip`` method
            # (AttributeError at runtime); also return early so keys are
            # not processed before GL is initialised.
            evt.Skip()
            return
        key = evt.GetKeyCode()
        # --- scroll time fast by window (Ctrl held)
        if wx.GetKeyState(wx.WXK_CONTROL) == True:
            if key == (wx.WXK_LEFT):
                action = "FAST_REWIND"
            elif key == (wx.WXK_RIGHT):
                action = "FAST_FORWARD"
            elif key == (wx.WXK_HOME):
                action = "START"
            elif key == (wx.WXK_END):
                action = "END"
        # ----
        elif key == (wx.WXK_F11):
            action = "TIME_DISPLAY_ALL"
        elif key == (wx.WXK_F12):
            action = "CHANNELS_DISPLAY_ALL"
        # --- scroll time by scroll step
        elif key == wx.WXK_LEFT:
            action = "REWIND"
        elif key == wx.WXK_RIGHT:
            action = "FORWARD"
        # --- scroll channels
        elif key == wx.WXK_UP:
            action = "UP"
        elif key == wx.WXK_DOWN:
            action = "DOWN"
        elif key == wx.WXK_PAGEUP:
            action = "PAGEUP"
        elif key == wx.WXK_PAGEDOWN:
            action = "PAGEDOWN"
        elif key == wx.WXK_HOME:
            action = "TOP"
        elif key == wx.WXK_END:
            action = "BOTTOM"
        # ---
        if action:
            self.plot2d.opt.action(action)
            self.update()
        else:
            evt.Skip()

    def InitGL(self):
        """One-time OpenGL/GLUT setup; creates the OGL plot renderer."""
        self.is_initGL = False
        glutInit(sys.argv)
        self.SetCurrent()
        glMatrixMode(GL_PROJECTION)
        glShadeModel(GL_SMOOTH)
        self.plot2d = JuMEG_TSV_PLOT2D_OGL()
        self.plot2d.size_in_pixel = self.GetClientSize()
        self.plot2d.init_glwindow()
        self.is_initGL = True
        return self.is_initGL

    def OnDraw(self, size_mm=None):
        """Render the plot; guarded against re-entrant paint events."""
        if self.is_on_draw:
            return
        self.is_on_draw = True
        if self.is_initGL:
            self.SetCurrent()
        else:
            self.InitGL()
        self.plot2d.size_in_pixel = self.GetClientSize()
        self.plot2d.size_in_mm = size_mm
        # FIX: py2-only print statements -> py2/py3-compatible calls
        # (identical output for a single argument).
        print(" ---> " + self.__class__.__name__ + " OnDraw -> plot size")
        print(self.plot2d.size_in_pixel)
        print(self.plot2d.size_in_mm)
        self.plot2d.display()
        self.SwapBuffers()
        self.is_on_draw = False

    def update(self, raw=None):  # ,do_scroll_channels=True,do_scroll_time=True):
        """Refresh plot data; pass ``raw`` to (re)load a new recording."""
        if self.is_initGL:
            self.SetCurrent()
            if raw:
                self.plot2d.init_raw_data(raw=raw)
            elif self.plot2d.data_is_init:
                self.plot2d.update_data()  # do_scroll_channels=True,do_scroll_time=True,)
            # self.plot_axis.range_max = self.plot2d.timepoints[-1]
            # self.plot_axis.range_min = self.plot2d.timepoints[0]
            if self.plot2d.opt.do_scroll:
                self.Refresh()
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Str, Bool, List, Instance, Event
from traitsui.api import View, ListEditor, InstanceEditor, UItem, VGroup, HGroup, VSplit
# ============= standard library imports ========================
import random
import struct
import time
import yaml
from traits.api import Str, Bool, List, Instance, Event
from traitsui.api import View, ListEditor, InstanceEditor, UItem, VGroup, HGroup, VSplit
# ============= local library imports ==========================
from pychron.core.helpers.filetools import unique_path2
from pychron.dashboard.conditional import DashboardConditional
from pychron.dashboard.process_value import ProcessValue
from pychron.globals import globalv
from pychron.graph.stream_graph import StreamStackedGraph
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.loggable import Loggable
from pychron.paths import paths
class DashboardDevice(Loggable):
    """Dashboard wrapper around a hardware device.

    Periodically polls a set of ProcessValues from the wrapped
    ``hardware_device``, streams them onto a stacked graph, optionally
    records them to disk and fires trait events when values change or a
    conditional trips.
    """
    name = Str
    use = Bool
    values = List  # list of ProcessValue instances managed by this device
    hardware_device = Instance(ICoreDevice)
    update_value_event = Event  # fired with (name, value, units) on change
    conditional_event = Event  # fired with 'severity|script|emails|msg'
    graph = Instance(StreamStackedGraph)

    @property
    def value_keys(self):
        """Tags of all process values (used as column keys)."""
        return [pv.tag for pv in self.values]

    @property
    def units(self):
        """Units string for each process value, in ``values`` order."""
        return [pv.units for pv in self.values]

    @property
    def current_values(self):
        """Most recent reading for each process value, in ``values`` order."""
        return [pv.last_value for pv in self.values]

    def setup_graph(self):
        """Create one stacked stream plot per process value (24 h window)."""
        self.graph = g = StreamStackedGraph()
        for i, vi in enumerate(self.values):
            vi.plotid = i
            p = g.new_plot()
            if i == 0:
                p.padding_bottom = 25
            p.padding_right = 10
            g.new_series(plotid=i)
            g.set_y_title(vi.display_name, plotid=i)
            g.set_scan_width(24 * 60 * 60, plotid=i)
            g.set_data_limits(24 * 60 * 60, plotid=i)

    def trigger(self):
        """
        trigger a new value if appropriate

        'on_change' values are force-polled only after their timeout;
        numeric periods are polled whenever the period has elapsed.
        """
        for value in self.values:
            if not value.enabled:
                continue
            st = time.time()
            dt = st - value.last_time
            if value.period == 'on_change':
                if value.timeout and dt > value.timeout:
                    self.debug('Force trigger. timeout={}'.format(value.timeout))
                    self._trigger(value, force=True)
            elif dt > value.period:
                self._trigger(value)

    def _trigger(self, value, **kw):
        """Poll ``value`` once via its device function and push the result.

        Falls back to a random value when the device returns None and
        simulation mode is on. Any exception is logged, never raised.
        """
        try:
            self.debug('triggering value device={} value={} func={}'.format(self.hardware_device.name,
                                                                            value.name,
                                                                            value.func_name))
            nv = None
            func = getattr(self.hardware_device, value.func_name)
            if func is not None:
                nv = func(**kw)
            if nv is None and globalv.dashboard_simulation:
                nv = random.random()
            if nv is not None:
                self._push_value(value, nv)
        except BaseException:
            import traceback
            print(self.hardware_device, self.hardware_device.name, value.func_name)
            self.debug(traceback.format_exc())
            # value.use_pv = False

    def add_value(self, name, tag, func_name, period, enabled, threshold, units, timeout, record, bindname):
        """Create and register a ProcessValue.

        For 'on_change' values, binds a trait listener on the hardware
        device's ``bindname`` trait so changes are pushed immediately.
        Returns the new ProcessValue.
        """
        pv = ProcessValue(name=name,
                          tag=tag,
                          func_name=func_name,
                          period=period,
                          enabled=enabled,
                          timeout=float(timeout),
                          units=units,
                          change_threshold=threshold,
                          record=record)
        if period == 'on_change':
            if self.hardware_device:
                if bindname:
                    self.debug('bind to {}'.format(bindname))
                    if hasattr(self.hardware_device, bindname):
                        # pv is captured per-call, so each listener pushes
                        # into its own ProcessValue.
                        self.hardware_device.on_trait_change(lambda a, b, c, d: self._handle_change(pv, a, b, c, d),
                                                             bindname)
                    else:
                        self.debug('{} has not attribute "{}"'.format(self.hardware_device, bindname))
                        # else:
                        # self.warning('need to set bindname for {}'.format(self.name, name))
                        # self._device.on_trait_change(lambda new: self._push_value(pv, new), n)
        self.values.append(pv)
        return pv

    def add_conditional(self, pv, severity, **kw):
        """Attach a DashboardConditional of the given severity to ``pv``."""
        cond = DashboardConditional(severity=severity, **kw)
        pv.conditionals.append(cond)

    def _handle_change(self, pv, obj, name, old, new):
        # trait-change callback for 'on_change' process values
        self.debug('handle change {} {}'.format(name, new))
        self._push_value(pv, new)

    def _push_value(self, pv, new):
        """Record a new reading: timestamp, graph, optional file, conditionals."""
        if pv.enabled:
            pv.last_time = time.time()
            try:
                v = float(new)
            except (ValueError, TypeError) as e:
                self.warning('failed to push value pv.name={}, value={}, error={}'.format(pv.name, new, e))
                return
            tripped = pv.is_different(v)
            if tripped:
                # notify listeners only on a significant change
                self.update_value_event = (pv.name, new, pv.units)
                # self.update_value_event = '{} {}'.format(pv.tag, new)
            self.graph.record(v, plotid=pv.plotid)
            if pv.record:
                self._record(pv, v)
            self._check_conditional(pv, new)

    def _record(self, pv, v):
        """Append ``time,value`` to the pv's CSV file, creating it lazily."""
        path = pv.path
        if not path:
            path, _ = unique_path2(paths.device_scan_dir, pv.name)
            pv.path = path
            self.info('Saving {} to {}'.format(pv.name, path))
        with open(path, 'a') as wfile:
            wfile.write('{},{}\n'.format(time.time(), v))

    def _check_conditional(self, pv, new):
        """Evaluate pv's conditionals; fire conditional_event on first trip."""
        conds = pv.conditionals
        if conds:
            if pv.flag:
                # a tripped flag latches until cleared elsewhere
                self.debug('not checking conditionals. already tripped')
            else:
                for cond in conds:
                    self.debug('checking conditional {}.{}.{}, value={}'.format(self.name, pv.name, cond.teststr, new))
                    if cond.check(new):
                        pv.flag = cond.severity
                        self.debug('conditional triggered. severity={}'.format(cond.severity))
                        msg = '{}.{}.{} is True. value={}'.format(self.name, pv.name, cond.teststr, new)
                        self.conditional_event = '{}|{}|{}|{}'.format(cond.severity,
                                                                      cond.script,
                                                                      cond.emails, msg)

    def dump_meta(self):
        """Return a YAML document describing each process value's settings."""
        d = []
        for pv in self.values:
            dd = dict(((a, getattr(pv, a))
                       for a in ('name', 'tag', 'enabled', 'func_name', 'period', 'timeout')))
            d.append(dd)
        return yaml.dump(d)

    def get_scan_fmt(self):
        """struct format for one scan row: big-endian, 2 floats per value."""
        n = len(self.values) * 2
        fmt = '>{}'.format('f' * n)
        return fmt

    def append_scan_blob(self, blob=None, fmt=None):
        """Append the current (time, value) pairs to a packed scan blob.

        With an existing ``blob``, unpacks it column-wise, appends the new
        readings and repacks; otherwise starts a fresh blob.
        NOTE(review): joining struct.pack output with '' assumes Python 2
        byte strings -- under Python 3 this would need b''.join; confirm
        the target runtime.
        """
        new_args = [a for v in self.values
                    for a in (v.last_time, v.last_value)]
        if blob:
            step = 4 * fmt.count('f')  # 4 bytes per packed float
            args = zip(*[struct.unpack(fmt, blob[i:i + step]) for i in range(0, len(blob), step)])
            ns = []
            for blobv, lastv in zip(args, new_args):
                blobv = list(blobv)
                blobv.append(lastv)
                ns.append(blobv)
            blob = ''.join([struct.pack(fmt, *v) for v in zip(*ns)])
        else:
            fmt = '>{}'.format('f' * len(new_args))
            blob = struct.pack(fmt, *new_args)
        return blob

    def traits_view(self):
        """TraitsUI layout: enable checkbox + name, value editors, graph."""
        hgrp = HGroup(UItem('use'), UItem('name', style='readonly'))
        dgrp = VGroup(UItem('values',
                            editor=ListEditor(editor=InstanceEditor(),
                                              style='custom',
                                              mutable=False), ),
                      show_border=True,
                      enabled_when='use')
        ggrp = UItem('graph', style='custom')
        v = View(VGroup(hgrp,
                        VSplit(dgrp,
                               ggrp)))
        return v
# ============= EOF =============================================
|
|
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, reverse_lazy, resolve, get_callable,
get_resolver, NoReverseMatch, Resolver404, ResolverMatch, RegexURLResolver,
RegexURLPattern)
from django.http import HttpRequest, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase
from django.utils import six
from . import urlconf_outer, middleware, views
# Fixture consumed by the resolve() tests: each row is the input URL path
# followed by every field expected on the resulting ResolverMatch.
resolve_test_data = (
    # These entries are in the format: (path, url_name, app_name, namespace, view_func, args, kwargs)
    # Simple case
    ('/normal/42/37/', 'normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/view_class/42/37/', 'view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/normal/42/37/', 'inc-normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/view_class/42/37/', 'inc-view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    # Unnamed args are dropped if you have *any* kwargs in a pattern
    ('/mixed_args/42/37/', 'mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
    ('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
    # Unnamed views will be resolved to the function/class name
    ('/unnamed/normal/42/37/', 'urlpatterns_reverse.views.empty_view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/unnamed/view_class/42/37/', 'urlpatterns_reverse.views.ViewClass', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    # If you have no kwargs, you get an args list.
    ('/no_kwargs/42/37/', 'no-kwargs', None, '', views.empty_view, ('42','37'), {}),
    ('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', views.empty_view, ('42','37'), {}),
    # Namespaces
    ('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    # Nested namespaces
    ('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    # Namespaces capturing variables
    ('/inc70/', 'inner-nothing', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '70'}),
    ('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', views.empty_view, tuple(), {'outer':'78', 'extra':'foobar'}),
)
# Fixture consumed by URLPatternReverse.test_urlpattern_reverse: each row is
# (url name or dotted view path, expected URL -- or the NoReverseMatch class
# when reversing must fail -- positional args, keyword args).
test_data = (
    ('places', '/places/3/', [3], {}),
    ('places', '/places/3/', ['3'], {}),
    ('places', NoReverseMatch, ['a'], {}),
    ('places', NoReverseMatch, [], {}),
    ('places?', '/place/', [], {}),
    ('places+', '/places/', [], {}),
    ('places*', '/place/', [], {}),
    ('places2?', '/', [], {}),
    ('places2+', '/places/', [], {}),
    ('places2*', '/', [], {}),
    ('places3', '/places/4/', [4], {}),
    ('places3', '/places/harlem/', ['harlem'], {}),
    ('places3', NoReverseMatch, ['harlem64'], {}),
    ('places4', '/places/3/', [], {'id': 3}),
    ('people', NoReverseMatch, [], {}),
    ('people', '/people/adrian/', ['adrian'], {}),
    ('people', '/people/adrian/', [], {'name': 'adrian'}),
    ('people', NoReverseMatch, ['name with spaces'], {}),
    ('people', NoReverseMatch, [], {'name': 'name with spaces'}),
    ('people2', '/people/name/', [], {}),
    ('people2a', '/people/name/fred/', ['fred'], {}),
    ('people_backref', '/people/nate-nate/', ['nate'], {}),
    ('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
    ('optional', '/optional/fred/', [], {'name': 'fred'}),
    ('optional', '/optional/fred/', ['fred'], {}),
    ('hardcoded', '/hardcoded/', [], {}),
    ('hardcoded2', '/hardcoded/doc.pdf', [], {}),
    ('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
    ('people3', NoReverseMatch, [], {'state': 'il'}),
    ('people3', NoReverseMatch, [], {'name': 'adrian'}),
    ('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
    ('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
    ('people6', '/people//adrian/', ['adrian'], {}),
    ('range', '/character_set/a/', [], {}),
    ('range2', '/character_set/x/', [], {}),
    ('price', '/price/$10/', ['10'], {}),
    ('price2', '/price/$10/', ['10'], {}),
    ('price3', '/price/$10/', ['10'], {}),
    ('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
    ('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
    ('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
    ('special', r'/special_chars/%2B%5C%24%2A/', [r'+\$*'], {}),
    ('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
    ('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
    ('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
    ('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
    ('special', NoReverseMatch, [''], {}),
    ('mixed', '/john/0/', [], {'name': 'john'}),
    ('repeats', '/repeats/a/', [], {}),
    ('repeats2', '/repeats/aa/', [], {}),
    ('repeats3', '/repeats/aa/', [], {}),
    ('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
    ('test', '/test/1', [], {}),
    ('test2', '/test/2', [], {}),
    ('inner-nothing', '/outer/42/', [], {'outer': '42'}),
    ('inner-nothing', '/outer/42/', ['42'], {}),
    ('inner-nothing', NoReverseMatch, ['foo'], {}),
    ('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
    ('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
    ('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
    ('disjunction', NoReverseMatch, ['foo'], {}),
    ('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
    ('extra-places', '/e-places/10/', ['10'], {}),
    ('extra-people', '/e-people/fred/', ['fred'], {}),
    ('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
    ('part', '/part/one/', [], {'value': 'one'}),
    ('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/part2/one/', [], {'value': 'one'}),
    ('part2', '/part2/', [], {}),
    ('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
    # Regression for #9038
    # These views are resolved by method name. Each method is deployed twice -
    # once with an explicit argument, and once using the default value on
    # the method. This is potentially ambiguous, as you have to pick the
    # correct view for the arguments provided.
    ('kwargs_view', '/arg_view/', [], {}),
    ('kwargs_view', '/arg_view/10/', [], {'arg1':10}),
    ('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
    ('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1':10}),
    ('non_path_include', '/includes/non_path_include/', [], {}),
    # Tests for #13154
    ('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
    ('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
    ('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
    ('defaults', NoReverseMatch, [], {'arg2': 1}),
)
class NoURLPatternsTests(TestCase):
    urls = 'urlpatterns_reverse.no_urls'

    def test_no_urls_exception(self):
        """
        RegexURLResolver should raise an exception when no urlpatterns exist.
        """
        resolver = RegexURLResolver(r'^$', self.urls)
        expected_msg = ("The included urlconf urlpatterns_reverse.no_urls "
                        "doesn't have any patterns in it")
        self.assertRaisesMessage(ImproperlyConfigured, expected_msg,
                                 getattr, resolver, 'url_patterns')
class URLPatternReverse(TestCase):
    urls = 'urlpatterns_reverse.urls'

    def test_urlpattern_reverse(self):
        # Drive reverse() through every fixture row; rows whose expected
        # value is the NoReverseMatch class must raise it.
        for name, expected, args, kwargs in test_data:
            try:
                url = reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.assertEqual(expected, NoReverseMatch)
            else:
                self.assertEqual(url, expected)

    def test_reverse_none(self):
        # Reversing None should raise an error, not return the last un-named view.
        self.assertRaises(NoReverseMatch, reverse, None)

    def test_prefix_braces(self):
        url = reverse('non_path_include', prefix='/{{invalid}}/')
        self.assertEqual(url, '/%7B%7Binvalid%7D%7D/includes/non_path_include/')

    def test_prefix_parenthesis(self):
        url = reverse('non_path_include', prefix='/bogus)/')
        self.assertEqual(url, '/bogus%29/includes/non_path_include/')

    def test_prefix_format_char(self):
        url = reverse('non_path_include', prefix='/bump%20map/')
        self.assertEqual(url, '/bump%2520map/includes/non_path_include/')

    def test_non_urlsafe_prefix_with_args(self):
        # Regression for #20022
        url = reverse('places', args=[1], prefix='/~me/')
        self.assertEqual(url, '/%7Eme/places/1/')

    def test_patterns_reported(self):
        # Regression for #17076
        try:
            # this url exists, but requires an argument
            reverse("people", args=[])
        except NoReverseMatch as e:
            pattern_description = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
            self.assertIn(pattern_description, str(e))
        else:
            # we can't use .assertRaises, since we want to inspect the
            # exception
            self.fail("Expected a NoReverseMatch, but none occurred.")
class ResolverTests(unittest.TestCase):
def test_resolver_repr(self):
    """
    Test repr of RegexURLResolver, especially when urlconf_name is a list
    (#17892).
    """
    # A sub-resolver inside a namespaced urlconf has a list urlconf_name.
    root = get_resolver('urlpatterns_reverse.namespace_urls')
    nested = root.namespace_dict['test-ns1'][1]
    self.assertIn('<RegexURLPattern list>', repr(nested))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
try:
match = resolver.resolve(proxy_url)
except TypeError:
self.fail('Failed to coerce lazy object to text')
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
self.assertTrue('tried' in e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
self.assertIsInstance(t, e['type']), str('%s is not an instance of %s') % (t, e['type'])
if 'name' in e:
if not e['name']:
self.assertTrue(t.name is None, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
class ReverseLazyTest(TestCase):
    """reverse_lazy() results must work anywhere a plain URL string does."""
    urls = 'urlpatterns_reverse.reverse_lazy_urls'
    def test_redirect_with_lazy_reverse(self):
        # A redirect target built with reverse_lazy resolves at request time.
        response = self.client.get('/redirect/')
        self.assertRedirects(response, "/redirected_to/", status_code=301)
    def test_user_permission_with_lazy_reverse(self):
        # Anonymous request bounces to login; authenticated request succeeds.
        user = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
        response = self.client.get('/login_required_view/')
        self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
        self.client.login(username='alfred', password='testpw')
        response = self.client.get('/login_required_view/')
        self.assertEqual(response.status_code, 200)
class ReverseShortcutTests(TestCase):
    """Tests for the redirect() shortcut's four target forms: an object with
    get_absolute_url, a view name, a literal URL, and a view callable."""
    urls = 'urlpatterns_reverse.urls'
    def test_redirect_to_object(self):
        # We don't really need a model; just something with a get_absolute_url
        class FakeObj(object):
            def get_absolute_url(self):
                return "/hi-there/"
        res = redirect(FakeObj())
        self.assertIsInstance(res, HttpResponseRedirect)
        self.assertEqual(res.url, '/hi-there/')
        res = redirect(FakeObj(), permanent=True)
        self.assertIsInstance(res, HttpResponsePermanentRedirect)
        self.assertEqual(res.url, '/hi-there/')
    def test_redirect_to_view_name(self):
        # View names reverse with positional and keyword arguments alike.
        res = redirect('hardcoded2')
        self.assertEqual(res.url, '/hardcoded/doc.pdf')
        res = redirect('places', 1)
        self.assertEqual(res.url, '/places/1/')
        res = redirect('headlines', year='2008', month='02', day='17')
        self.assertEqual(res.url, '/headlines/2008.02.17/')
        self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
    def test_redirect_to_url(self):
        # Literal paths and absolute URLs pass through untouched.
        res = redirect('/foo/')
        self.assertEqual(res.url, '/foo/')
        res = redirect('http://example.com/')
        self.assertEqual(res.url, 'http://example.com/')
    def test_redirect_view_object(self):
        # A view callable is reversed like a name; bad kwargs still raise.
        from .views import absolute_kwargs_view
        res = redirect(absolute_kwargs_view)
        self.assertEqual(res.url, '/absolute_arg_view/')
        self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
class NamespaceTests(TestCase):
    """Tests for reversing URLs through instance/application namespaces:
    ambiguity errors, nested and embedded namespaces, current_app defaults,
    and namespace prefixes that capture URL variables."""
    urls = 'urlpatterns_reverse.namespace_urls'
    def test_ambiguous_object(self):
        "Names deployed via dynamic URL objects that require namespaces can't be resolved"
        self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
        self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37,42])
        self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1':42, 'arg2':37})
    def test_ambiguous_urlpattern(self):
        "Names deployed via dynamic URL objects that require namespaces can't be resolved"
        self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
        self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37,42])
        self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1':42, 'arg2':37})
    def test_non_existent_namespace(self):
        "Non-existent namespaces raise errors"
        self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
        self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
    def test_normal_name(self):
        "Normal lookups work as expected"
        self.assertEqual('/normal/', reverse('normal-view'))
        self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37,42]))
        self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/+%5C$*/', reverse('special-view'))
    def test_simple_included_name(self):
        "Normal lookups work on names included from other patterns"
        self.assertEqual('/included/normal/', reverse('inc-normal-view'))
        self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37,42]))
        self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
    def test_namespace_object(self):
        "Dynamic URL objects can be found using a namespace"
        self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
        self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37,42]))
        self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
    def test_embedded_namespace_object(self):
        "Namespaces can be installed anywhere in the URL pattern tree"
        self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
        self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37,42]))
        self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
    def test_namespace_pattern(self):
        "Namespaces can be applied to include()'d urlpatterns"
        self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
        self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37,42]))
        self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
    def test_namespace_pattern_with_variable_prefix(self):
        "When using a include with namespaces when there is a regex variable in front of it"
        self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42}))
        self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
        self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42, 'arg1': 37, 'arg2': 4}))
        self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
        self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer':42}))
        self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
    def test_multiple_namespace_pattern(self):
        "Namespaces can be embedded"
        self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
        self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37,42]))
        self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
    def test_nested_namespace_pattern(self):
        "Namespaces can be nested"
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37,42]))
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
    def test_app_lookup_object(self):
        "A default application namespace can be used for lookup"
        self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
        self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42]))
        self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
    def test_app_lookup_object_with_default(self):
        "A default application namespace is sensitive to the 'current' app can be used for lookup"
        self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
        self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42], current_app='test-ns3'))
        self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='test-ns3'))
        self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
    def test_app_lookup_object_without_default(self):
        "An application namespace without a default is sensitive to the 'current' app can be used for lookup"
        self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
        self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42]))
        self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
        self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
        self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42], current_app='other-ns1'))
        self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='other-ns1'))
        self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
    def test_special_chars_namespace(self):
        # Namespace labels containing URL-unsafe characters are escaped.
        self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
        self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37,42]))
        self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
    def test_namespaces_with_variables(self):
        "Namespace prefixes can capture variables: see #15900"
        self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
        self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer':'78', 'extra':'foobar'}))
        self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
        self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78','foobar']))
class RequestURLconfTests(TestCase):
    """Tests for per-request URLconf overrides installed by middleware.

    setUp/tearDown save and restore ROOT_URLCONF and MIDDLEWARE_CLASSES so
    each test can append its own middleware to the global settings.
    """
    def setUp(self):
        # Snapshot the settings this suite mutates.
        self.root_urlconf = settings.ROOT_URLCONF
        self.middleware_classes = settings.MIDDLEWARE_CLASSES
        settings.ROOT_URLCONF = urlconf_outer.__name__
    def tearDown(self):
        # Restore the snapshot taken in setUp.
        settings.ROOT_URLCONF = self.root_urlconf
        settings.MIDDLEWARE_CLASSES = self.middleware_classes
    def test_urlconf(self):
        # Without overrides, only the outer URLconf's routes resolve.
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:/test/me/,'
                                           b'inner:/inner_urlconf/second_test/')
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 200)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 404)
    def test_urlconf_overridden(self):
        # Middleware swaps in the inner URLconf: outer routes now 404.
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
        )
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:,inner:/second_test/')
    def test_urlconf_overridden_with_null(self):
        # Setting request.urlconf to None must be rejected loudly.
        settings.MIDDLEWARE_CLASSES += (
            '%s.NullChangeURLconfMiddleware' % middleware.__name__,
        )
        self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
    def test_reverse_inner_in_response_middleware(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a response middleware.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
        )
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'/second_test/')
    def test_reverse_outer_in_response_middleware(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a response middleware.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
        )
        message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')
    def test_reverse_inner_in_streaming(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a streaming response.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInStreaming' % middleware.__name__,
        )
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(b''.join(response), b'/second_test/')
    def test_reverse_outer_in_streaming(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a streaming response.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInStreaming' % middleware.__name__,
        )
        message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')
        # NOTE(review): this line re-issues the request *after* the
        # assertRaises context; for a streaming response the reverse only
        # fires when the body is consumed, so it looks like both lines were
        # meant to be inside the `with` block — confirm against upstream.
        b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(TestCase):
    """Tests for handler400, handler404 and handler500"""
    def setUp(self):
        # Two URLconfs: one naming handlers as dotted strings, one as
        # callables; both should resolve to the same (view, kwargs) pair.
        from django.core.urlresolvers import RegexURLResolver
        urlconf = 'urlpatterns_reverse.urls_error_handlers'
        urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
        self.resolver = RegexURLResolver(r'^$', urlconf)
        self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
    def test_named_handlers(self):
        from .views import empty_view
        handler = (empty_view, {})
        self.assertEqual(self.resolver.resolve400(), handler)
        self.assertEqual(self.resolver.resolve404(), handler)
        self.assertEqual(self.resolver.resolve500(), handler)
    # NOTE: method name has a typo ("handers"); kept as-is because renaming
    # would change the discovered test identifier.
    def test_callable_handers(self):
        from .views import empty_view
        handler = (empty_view, {})
        self.assertEqual(self.callable_resolver.resolve400(), handler)
        self.assertEqual(self.callable_resolver.resolve404(), handler)
        self.assertEqual(self.callable_resolver.resolve500(), handler)
class DefaultErrorHandlerTests(TestCase):
    """Default 404/500 handlers must kick in when urls.py declares none."""
    urls = 'urlpatterns_reverse.urls_without_full_import'
    def test_default_handler(self):
        "If the urls.py doesn't specify handlers, the defaults are used"
        try:
            response = self.client.get('/test/')
            self.assertEqual(response.status_code, 404)
        except AttributeError:
            self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
        try:
            # The view raises ValueError; the default 500 handler must not
            # itself blow up with an AttributeError.
            self.assertRaises(ValueError, self.client.get, '/bad_view/')
        except AttributeError:
            self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
class NoRootUrlConfTests(TestCase):
    """Tests for handler404 and handler500 if urlconf is None"""
    urls = None
    def test_no_handler_exception(self):
        # With no URLconf at all, any request must fail loudly.
        self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ResolverMatchTests(TestCase):
    """ResolverMatch objects: attribute access, legacy tuple unpacking,
    and attachment to the request object."""
    urls = 'urlpatterns_reverse.namespace_urls'
    def test_urlpattern_resolve(self):
        for path, name, app_name, namespace, func, args, kwargs in resolve_test_data:
            # Test legacy support for extracting "function, args, kwargs"
            match_func, match_args, match_kwargs = resolve(path)
            self.assertEqual(match_func, func)
            self.assertEqual(match_args, args)
            self.assertEqual(match_kwargs, kwargs)
            # Test ResolverMatch capabilities.
            match = resolve(path)
            self.assertEqual(match.__class__, ResolverMatch)
            self.assertEqual(match.url_name, name)
            self.assertEqual(match.args, args)
            self.assertEqual(match.kwargs, kwargs)
            self.assertEqual(match.app_name, app_name)
            self.assertEqual(match.namespace, namespace)
            self.assertEqual(match.func, func)
            # ... and for legacy purposes:
            self.assertEqual(match[0], func)
            self.assertEqual(match[1], args)
            self.assertEqual(match[2], kwargs)
    def test_resolver_match_on_request(self):
        # After a request, the match is exposed on the response.
        response = self.client.get('/resolver_match/')
        resolver_match = response.resolver_match
        self.assertEqual(resolver_match.url_name, 'test-resolver-match')
    def test_resolver_match_on_request_before_resolution(self):
        # A raw, unresolved request carries no match.
        request = HttpRequest()
        self.assertIsNone(request.resolver_match)
class ErroneousViewTests(TestCase):
    """Broken view references must raise precise, distinguishable errors."""
    urls = 'urlpatterns_reverse.erroneous_urls'
    def test_erroneous_resolve(self):
        # Import failures vs. missing/uncallable views raise different types.
        self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
        self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
        self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
        self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
        self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable/')
    def test_erroneous_reverse(self):
        """
        Ensure that a useful exception is raised when a regex is invalid in the
        URLConf.
        Refs #6170.
        """
        # The regex error will be hit before NoReverseMatch can be raised
        self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(TestCase):
    """get_callable() must distinguish a missing view from a broken module."""
    def test_view_loading(self):
        # A missing view (identified by an AttributeError) should raise
        # ViewDoesNotExist, ...
        six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*",
            get_callable,
            'urlpatterns_reverse.views.i_should_not_exist')
        # ... but if the AttributeError is caused by something else don't
        # swallow it.
        self.assertRaises(AttributeError, get_callable,
            'urlpatterns_reverse.views_broken.i_am_broken')
|
|
#!/usr/bin/env python
"""
Based on Matt Weber es2graphite
https://github.com/mattweber/es2graphite
"""
import re
import sys
import json
import time
import pickle
import struct
import socket
import thread
import urllib2
import argparse
from pprint import pprint
from datetime import datetime
# node-id -> node-name cache, filled in from _nodes/stats responses.
NODES = {}
# Cluster name, discovered from the first _nodes/stats call.
CLUSTER_NAME = ''
# Map textual cluster status / shard state to numeric gauges for graphite.
STATUS = {'red': 0, 'yellow': 1, 'green': 2}
SHARD_STATE = {'CREATED': 0, 'RECOVERING': 1, 'STARTED': 2, 'RELOCATED': 3, 'CLOSED': 4}
# Round-robin pointer into args.es; starts at -1 so the first call uses index 0.
HOST_IDX = -1
def log(what, force=False):
    """Pretty-print *what* when --verbose is enabled or *force* is True."""
    verbose_enabled = args.verbose
    if verbose_enabled or force:
        pprint(what)
def get_es_host():
    """Return the next elasticsearch host:port, round-robin over args.es."""
    global HOST_IDX
    HOST_IDX = (HOST_IDX + 1) % len(args.es) # round-robin
    return args.es[HOST_IDX]
def normalize(what):
    """Turn a stat name, or a nested tuple/list of stat names, into a
    dotted, lowercase, underscore-separated graphite metric path.

    Strings are lowercased and every run of non-word characters becomes a
    single underscore; sequences are joined recursively with '.'.
    """
    if not isinstance(what, (list, tuple)):
        # BUG FIX: use a raw string for the regex so '\W' is an explicit
        # regex escape rather than relying on Python passing the unknown
        # string escape through (deprecated behaviour).
        return re.sub(r'\W+', '_', what.strip().lower()).encode('utf-8')
    elif len(what) == 1:
        return normalize(what[0])
    else:
        return '%s.%s' % (normalize(what[0]), normalize(what[1:]))
def add_metric(metrics, prefix, stat, val, timestamp):
    """Append a (metric_path, (timestamp, value)) tuple to *metrics*.

    Booleans are coerced to 0/1; only numeric values are emitted directly,
    while textual 'status'/'state' values are mapped to numbers via the
    STATUS / SHARD_STATE lookup tables. Anything else is silently dropped.
    """
    if isinstance(val, bool):
        val = int(val)
    # The translog 'id' is an identifier, not a gauge -- skip it.
    if prefix[-1] == 'translog' and stat == 'id':
        return
    elif isinstance(val, (int, long, float)) and stat != 'timestamp':
        metrics.append((normalize((prefix, stat)), (timestamp, val)))
    elif stat == 'status' and val in STATUS:
        metrics.append((normalize((prefix, stat)), (timestamp, STATUS[val])))
    elif stat == 'state' and val in SHARD_STATE:
        metrics.append((normalize((prefix, stat)), (timestamp, SHARD_STATE[val])))
def process_node_stats(prefix, stats):
    """Flatten a _nodes/stats response into graphite metric tuples.

    Side effects: records the cluster name in CLUSTER_NAME and caches each
    node's id -> name mapping in NODES for later shard attribution.
    """
    metrics = []
    global CLUSTER_NAME
    CLUSTER_NAME = stats['cluster_name']
    for node_id in stats['nodes']:
        node_stats = stats['nodes'][node_id]
        NODES[node_id] = node_stats['name']
        process_section(int(time.time()), metrics, (prefix, CLUSTER_NAME, NODES[node_id]), node_stats)
    return metrics
def process_cluster_health(prefix, health):
    """Flatten a _cluster/health response into graphite metric tuples."""
    now = int(time.time())
    collected = []
    process_section(now, collected, (prefix, CLUSTER_NAME), health)
    return collected
def process_indices_status(prefix, status):
    """Flatten the per-index part of a _status response into metric tuples."""
    now = int(time.time())
    collected = []
    process_section(now, collected, (prefix, CLUSTER_NAME, 'indices'), status['indices'])
    return collected
def process_indices_stats(prefix, stats):
    """Flatten a _stats response: the '_all' rollup plus per-index sections."""
    collected = []
    process_section(int(time.time()), collected, (prefix, CLUSTER_NAME, 'indices', '_all'), stats['_all'])
    process_section(int(time.time()), collected, (prefix, CLUSTER_NAME, 'indices'), stats['indices'])
    return collected
def process_segments_status(prefix, status):
    """Flatten the per-index part of a _segments response into metric tuples."""
    now = int(time.time())
    collected = []
    process_section(now, collected, (prefix, CLUSTER_NAME, 'indices'), status['indices'])
    return collected
def process_section(timestamp, metrics, prefix, section):
    """Recursively walk a JSON stats *section*, appending metric tuples.

    Dicts recurse with the key appended to *prefix*; lists get special
    handling for fs disks, os load averages and shard lists, and otherwise
    recurse by element index. A 'timestamp' key inside the section overrides
    the caller-supplied timestamp for that subtree.
    """
    for stat in section:
        stat_val = section[stat]
        if 'timestamp' in section:
            timestamp = int(section['timestamp'] / 1000) # es has epoch in ms, graphite needs seconds
        if isinstance(stat_val, dict):
            process_section(timestamp, metrics, (prefix, stat), stat_val)
        elif isinstance(stat_val, list):
            # One entry per mounted disk, keyed by its mount point.
            if prefix[-1] == 'fs' and stat == 'data':
                for disk in stat_val:
                    mount = disk['mount']
                    process_section(timestamp, metrics, (prefix, stat, mount), disk)
            # load_average is a fixed [1min, 5min, 15min] triple.
            elif prefix[-1] == 'os' and stat == 'load_average':
                add_metric(metrics, prefix, (stat, '1min_avg'), stat_val[0], timestamp)
                add_metric(metrics, prefix, (stat, '5min_avg'), stat_val[1], timestamp)
                add_metric(metrics, prefix, (stat, '15min_avg'), stat_val[2], timestamp)
            # Shard lists are keyed by numeric shard id; attribute each copy
            # to the node hosting it (looked up in the NODES cache).
            elif prefix[-1] == 'shards' and re.match('\d+', stat) is not None:
                for shard in stat_val:
                    shard_node = NODES[shard['routing']['node']]
                    process_section(timestamp, metrics, (prefix, stat, shard_node), shard)
            else:
                # Generic list: recurse with the element index as the key.
                for stat_idx, sub_stat_val in enumerate(stat_val):
                    if isinstance(sub_stat_val, dict):
                        process_section(timestamp, metrics, (prefix, stat, str(stat_idx)), sub_stat_val)
                    else:
                        add_metric(metrics, prefix, (stat, str(stat_idx)), sub_stat_val, timestamp)
        else:
            add_metric(metrics, prefix, stat, stat_val, timestamp)
def send_to_graphite(metrics):
    """Ship *metrics* to graphite via the pickle protocol, or just print
    them when --debug is set.

    The payload is a pickled list of (path, (timestamp, value)) tuples,
    length-prefixed with a network-order unsigned long as graphite's
    pickle receiver expects.
    """
    if args.debug:
        for m, mval in metrics:
            log('%s %s = %s' % (mval[0], m, mval[1]), True)
    else:
        payload = pickle.dumps(metrics)
        header = struct.pack('!L', len(payload))
        log("sending {} bytes, {} data points".format(len(header)+len(payload), len(metrics)))
        sock = socket.socket()
        # BUG FIX: close the socket even when connect()/sendall() raises;
        # previously an exception leaked the file descriptor.
        try:
            sock.connect((args.graphite_host, args.graphite_port))
            sock.sendall('%s%s' % (header, payload))
        finally:
            sock.close()
def get_metrics():
    """Run one collection pass: poll the ES REST endpoints enabled by the
    command-line flags, flatten each response and push it to graphite.

    Always collects node stats and cluster health; _status, _stats and
    _segments are gated behind --status, --indices/--shard-stats and
    --segments respectively. Each request goes to the next host in the
    round-robin rotation.
    """
    dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    node_stats_url = 'http://%s/_nodes/stats?all=true' % get_es_host()
    log('%s: GET %s' % (dt, node_stats_url))
    node_stats_data = urllib2.urlopen(node_stats_url).read()
    node_stats = json.loads(node_stats_data)
    node_stats_metrics = process_node_stats(args.prefix, node_stats)
    send_to_graphite(node_stats_metrics)
    cluster_health_url = 'http://%s/_cluster/health?level=%s' % (get_es_host(), args.health_level)
    log('%s: GET %s' % (dt, cluster_health_url))
    cluster_health_data = urllib2.urlopen(cluster_health_url).read()
    cluster_health = json.loads(cluster_health_data)
    cluster_health_metrics = process_cluster_health(args.prefix, cluster_health)
    send_to_graphite(cluster_health_metrics)
    if args.status:
        indices_status_url = 'http://%s/_status' % get_es_host()
        log('%s: GET %s' % (dt, indices_status_url))
        indices_status_data = urllib2.urlopen(indices_status_url).read()
        indices_status = json.loads(indices_status_data)
        indices_status_metrics = process_indices_status(args.prefix, indices_status)
        send_to_graphite(indices_status_metrics)
    if args.indices or args.shard_stats:
        indices_stats_url = 'http://%s/_stats?all=true' % get_es_host()
        # Shard-level detail is requested by adding level=shards.
        if args.shard_stats:
            indices_stats_url = '%s&level=shards' % indices_stats_url
        log('%s: GET %s' % (dt, indices_stats_url))
        indices_stats_data = urllib2.urlopen(indices_stats_url).read()
        indices_stats = json.loads(indices_stats_data)
        indices_stats_metrics = process_indices_stats(args.prefix, indices_stats)
        send_to_graphite(indices_stats_metrics)
    if args.segments:
        segments_status_url = 'http://%s/_segments' % get_es_host()
        log('%s: GET %s' % (dt, segments_status_url))
        segments_status_data = urllib2.urlopen(segments_status_url).read()
        segments_status = json.loads(segments_status_data)
        segments_status_metrics = process_segments_status(args.prefix, segments_status)
        send_to_graphite(segments_status_metrics)
# Entry point: parse CLI flags, then fire one collection pass per interval.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Send elasticsearch metrics to graphite')
    parser.add_argument('-p', '--prefix', default='services', help='graphite metric prefix. Default: %(default)s')
    parser.add_argument('-g', '--graphite-host', default='localhost', help='graphite hostname. Default: %(default)s')
    parser.add_argument('-o', '--graphite-port', default=2004, type=int, help='graphite pickle protocol port. Default: %(default)s')
    parser.add_argument('-i', '--interval', default=60, type=int, help='interval in seconds. Default: %(default)s')
    parser.add_argument('--health-level', choices=['cluster', 'indices', 'shards'], default='cluster', help='The level of health metrics. Default: %(default)s')
    parser.add_argument('--indices', action='store_true', help='Collect indice level metrics.')
    parser.add_argument('--shard-stats', action='store_true', help='Collect shard level stats metrics.')
    parser.add_argument('--segments', action='store_true', help='Collect low-level segment metrics.')
    parser.add_argument('--status', action='store_true', help='Collect low-level metrics for each index.')
    parser.add_argument('-d', '--debug', action='store_true', help='Print metrics, don\'t send to graphite')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
    parser.add_argument('es', nargs='+', help='elasticsearch host:port', metavar='ES_HOST')
    args = parser.parse_args()
    # NOTE(review): a new thread is spawned every interval with no join or
    # limit; if a collection pass takes longer than --interval, threads pile
    # up unboundedly -- confirm whether that is acceptable.
    while True:
        thread.start_new_thread(get_metrics, ())
        time.sleep(args.interval)
|
|
"""topology.py test suite."""
import unittest
import numpy as np
import topology as t
class AtomOffsetInvariantTestCase(unittest.TestCase):
    """Invariant: offsets_by_atom is atom_offsets reshaped to (num_atoms, 3)
    per-atom rows, for both Molecule and Polymer topologies."""
    def test_molecule(self):
        top = t.Molecule('water', ['H', 'O', 'H'])
        # Each atom owns a 3-vector of offsets...
        for idx in range(top.num_atoms):
            self.assertEqual(top.offsets_by_atom[idx].shape, (3,))
        # ...and flattening them reproduces the flat offset list.
        offsets = top.atom_offsets
        reshaped = top.offsets_by_atom.reshape(len(offsets))
        self.assertEqual(list(reshaped), list(offsets))
    def test_polymer(self):
        ala = t.Molecule('ALA', ['N', 'CA', 'CB', 'C', 'O'])
        gly = t.Molecule('GLY', ['N', 'CA', 'C', 'O'])
        # BUG FIX: removed a dead local `class Polymer(t.Polymer)` that was
        # defined here but never instantiated -- the test always used
        # t.Polymer directly, so the subclass had no effect.
        top = t.Polymer('polymer', [ala, gly])
        for idx in range(top.num_atoms):
            self.assertEqual(top.offsets_by_atom[idx].shape, (3,))
        offsets = top.atom_offsets
        reshaped = top.offsets_by_atom.reshape(len(offsets))
        self.assertEqual(list(reshaped), list(offsets))
class MoleculeAtomTestCase(unittest.TestCase):
    """Atom selection and coordinate get/set on a single Molecule.

    Note: this suite is Python 2 code -- it relies on range() returning a
    list (e.g. `range(a) + range(b)` concatenation below).
    """
    # Degrees of freedom per atom; subclasses override (see 1D variant).
    ndof = 3
    def test_num_atoms(self):
        top = t.Molecule('water', ['H', 'O', 'H'], ndof=self.ndof)
        self.assertEqual(top.num_atoms, 3)
    def test_default_offsets(self):
        # Default offsets are simply 0..num_atoms*ndof-1.
        top = t.Molecule('water', ['H', 'O', 'H'], ndof=self.ndof)
        self.assertEqual(list(top.atom_offsets), range(3 * self.ndof))
    def test_get_atoms(self):
        top = t.Molecule('water', ['H', 'O', 'H'], ndof=self.ndof)
        h_atoms = top.get_atoms('H')
        o_atoms = top.get_atoms('O')
        self.assertEqual(len(h_atoms), 2)
        self.assertEqual(len(o_atoms), 1)
    def test_get_atom_coords(self):
        # Selections pick out the ndof-sized slices for atoms 0, 2 (H) and 1 (O).
        top = t.Molecule('water', ['H', 'O', 'H'], ndof=self.ndof)
        h_atoms = top.get_atoms('H')
        o_atoms = top.get_atoms('O')
        x = np.array(range(self.ndof * top.num_atoms))
        h_x = h_atoms.get_coords(x)
        o_x = o_atoms.get_coords(x)
        expected_h_x = np.array(range(0 * self.ndof, 1 * self.ndof) + range(2 * self.ndof, 3 * self.ndof))
        expected_o_x = np.array(range(1 * self.ndof, 2 * self.ndof))
        self.assertEqual(list(expected_h_x), list(h_x))
        self.assertEqual(list(expected_o_x), list(o_x))
    def test_set_atom_coords(self):
        # set_coords followed by get_coords must round-trip the values.
        top = t.Molecule('water', ['H', 'O', 'H'], ndof=self.ndof)
        h_atoms = top.get_atoms('H')
        o_atoms = top.get_atoms('O')
        x = np.array(range(self.ndof * top.num_atoms))
        # Set the coordinates to something crazy
        expected_h_x = np.array(range(9 * self.ndof, 11 * self.ndof))
        expected_o_x = np.array(range(9 * self.ndof, 10 * self.ndof))
        h_atoms.set_coords(x, expected_h_x)
        o_atoms.set_coords(x, expected_o_x)
        h_x = h_atoms.get_coords(x)
        o_x = o_atoms.get_coords(x)
        self.assertEqual(list(expected_h_x), list(h_x))
        self.assertEqual(list(expected_o_x), list(o_x))
class MoleculeAtom1DTestCase(MoleculeAtomTestCase):
    """Re-run the Molecule atom tests with one degree of freedom per atom."""
    ndof = 1
class PolymerAtomTestCase(unittest.TestCase):
def make_top(self):
ala = t.Molecule('ALA', ['N', 'CA', 'CB', 'C', 'O'])
gly = t.Molecule('GLY', ['N', 'CA', 'C', 'O'])
return t.Polymer('poly', [ala, gly])
def test_num_atoms(self):
top = self.make_top()
self.assertEqual(top.num_atoms, 9)
def test_get_atom_num_atoms(self):
top = self.make_top()
ca_atoms = top.get_atoms('C')
cb_atoms = top.get_atoms('CB')
self.assertEqual(len(ca_atoms), 2)
self.assertEqual(len(cb_atoms), 1)
def test_get_atom_num_monomers(self):
top = self.make_top()
ca_atoms = top.get_atoms('C')
cb_atoms = top.get_atoms('CB')
self.assertEqual(ca_atoms.num_monomers, 2)
self.assertEqual(cb_atoms.num_monomers, 2)
def test_get_atom_coords(self):
top = self.make_top()
c_atoms = top.get_atoms('C')
cb_atoms = top.get_atoms('CB')
x = np.array(range(3 * top.num_atoms))
c_x = c_atoms.get_coords(x)
cb_x = cb_atoms.get_coords(x)
expected_c_x = np.array(range(3 * 3, 3 * 4) + range(3 * 5 + 3 * 2, 3 * 5 + 3*3))
expected_cb_x = np.array(range(3 * 2, 3 * 3))
self.assertEqual(list(c_x), list(expected_c_x))
self.assertEqual(list(cb_x), list(expected_cb_x))
def test_get_regex_atom_coords(self):
top = self.make_top()
c_atoms = top.regex_get_atoms('^C$')
no_atoms = top.regex_get_atoms('^(N|O)$')
x = np.array(range(3 * top.num_atoms))
c_x = c_atoms.get_coords(x)
no_x = no_atoms.get_coords(x)
expected_c_x = np.array(range(3 * 3, 3 * 4) + range(3 * 5 + 3 * 2, 3 * 5 + 3*3))
expected_no_x = np.array(range(3 * 0, 3 * 1) + range(3 * 4, 3 * 5)
+ range(3 * 5 + 3 * 0, 3 * 5 + 3 * 1)
+ range(3 * 5 + 3 * 3, 3 * 5 + 3 * 4))
self.assertEqual(list(c_x), list(expected_c_x))
self.assertEqual(list(no_x), list(expected_no_x))
def test_get_atomset_coords(self):
top = self.make_top()
c_atoms = top.get_atomset(['C'])
no_atoms = top.get_atomset(['N', 'O'])
x = np.array(range(3 * top.num_atoms))
c_x = c_atoms.get_coords(x)
no_x = no_atoms.get_coords(x)
expected_c_x = np.array(range(3 * 3, 3 * 4) + range(3 * 5 + 3 * 2, 3 * 5 + 3*3))
expected_no_x = np.array(range(3 * 0, 3 * 1) + range(3 * 4, 3 * 5)
+ range(3 * 5 + 3 * 0, 3 * 5 + 3 * 1)
+ range(3 * 5 + 3 * 3, 3 * 5 + 3 * 4))
self.assertEqual(list(c_x), list(expected_c_x))
self.assertEqual(list(no_x), list(expected_no_x))
    def test_get_regex_not_atom_coords(self):
        # regex_get_other_atoms returns the complement: everything that does
        # NOT match the pattern.  Expected slices are the coordinate ranges
        # with the matching atoms' slots removed.  (Py2 list-range `+`.)
        top = self.make_top()
        not_c_atoms = top.regex_get_other_atoms('^C$')
        not_no_atoms = top.regex_get_other_atoms('^(N|O)$')
        x = np.array(range(3 * top.num_atoms))
        not_c_x = not_c_atoms.get_coords(x)
        not_no_x = not_no_atoms.get_coords(x)
        expected_not_c_x = np.array(range(3 * 0, 3 * 3) + range(3 * 4, 3 * 7) + range(3 * 8, 3 * 9))
        expected_not_no_x = np.array(range(3 * 1, 3 * 4) + range(3 * 6, 3 * 8))
        self.assertEqual(list(not_c_x), list(expected_not_c_x))
        self.assertEqual(list(not_no_x), list(expected_not_no_x))
    def test_set_atom_coords(self):
        # Round-trip: write new values into the C/CB coordinate slots of x,
        # then read them back through the same selections.
        top = self.make_top()
        c_atoms = top.get_atoms('C')
        cb_atoms = top.get_atoms('CB')
        x = np.array(range(3 * top.num_atoms))
        # Two C atoms -> 6 coordinate slots; one CB atom -> 3 slots.
        expected_c_x = np.array(range(6, 12))
        expected_cb_x = np.array(range(10,13))
        c_atoms.set_coords(x, expected_c_x)
        cb_atoms.set_coords(x, expected_cb_x)
        c_x = c_atoms.get_coords(x)
        cb_x = cb_atoms.get_coords(x)
        self.assertEqual(list(expected_c_x), list(c_x))
        self.assertEqual(list(expected_cb_x), list(cb_x))
class PolymerMonomerTestCase(unittest.TestCase):
    """Monomer-level access (lookup, offsets, coordinates) on a polymer.

    Uses an ALA-GLY polymer: ALA contributes 5 atoms, GLY 4, with 3
    coordinates per atom.  NOTE: written for Python 2 -- range() returns a
    list, so `range(a) + range(b)` concatenates expected index slices.
    """

    def make_top(self):
        # ALA: atoms 0-4; GLY: atoms 5-8 -> 9 atoms / 27 coordinates total.
        ala = t.Molecule('ALA', ['N', 'CA', 'CB', 'C', 'O'])
        gly = t.Molecule('GLY', ['N', 'CA', 'C', 'O'])
        return t.Polymer('poly', [ala, gly])

    def test_monomer_number(self):
        top = self.make_top()
        self.assertEqual(top.num_monomers, 2)

    def test_default_atom_offsets(self):
        # By default each coordinate maps to itself: offsets 0..3*(5+4)-1.
        top = self.make_top()
        self.assertEqual(list(top.atom_offsets), range(3 * (5 + 4)))

    def test_default_target_offsets(self):
        top = self.make_top()
        self.assertEqual(list(top.target_offsets), range(3 * (5 + 4)))

    def test_get_monomer_by_index(self):
        top = self.make_top()
        ala = top.get_monomer_by_index(0)
        gly = top.get_monomer_by_index(1)
        self.assertEqual(ala.name, 'ALA')
        self.assertEqual(gly.name, 'GLY')

    def test_monomer_atom_offsets(self):
        # Per-monomer offsets are contiguous slices of the polymer's offsets.
        top = self.make_top()
        ala = top.get_monomer_by_index(0)
        gly = top.get_monomer_by_index(1)
        self.assertEqual(list(ala.atom_offsets), range(3 * 5))
        self.assertEqual(list(gly.atom_offsets), range(3 * 5, 3 * 5 + 3 * 4))

    def test_get_monomer(self):
        top = self.make_top()
        ala = top.get_monomer('ALA')
        self.assertEqual(ala.num_atoms, 5)
        self.assertEqual(ala.atoms, ['N', 'CA', 'CB', 'C', 'O'])
        gly = top.get_monomer('GLY')
        self.assertEqual(gly.num_atoms, 4)
        self.assertEqual(gly.atoms, ['N', 'CA', 'C', 'O'])
        self.assertEqual(top.num_monomers, 2)

    def test_get_monomer_coords(self):
        top = self.make_top()
        x = np.array(range(3 * top.num_atoms))
        ala = top.get_monomer('ALA')
        expected_ala_x = np.array(range(3*5))
        ala_x = ala.get_coords(x)
        self.assertEqual(list(ala_x), list(expected_ala_x))
        gly = top.get_monomer('GLY')
        expected_gly_x = np.array(range(3 * 5, 3 * 5 + 3 * 4))
        gly_x = gly.get_coords(x)
        self.assertEqual(list(gly_x), list(expected_gly_x))

    def test_set_monomer_coords(self):
        # Write new values into each monomer's slots, then read them back.
        top = self.make_top()
        x = np.array(range(3 * top.num_atoms))
        ala = top.get_monomer('ALA')
        expected_ala_x = np.array(range(20, 35))
        ala.set_coords(x, expected_ala_x)
        ala_x = ala.get_coords(x)
        self.assertEqual(list(ala_x), list(expected_ala_x))
        gly = top.get_monomer('GLY')
        expected_gly_x = np.array(range(30, 30 + 3 * 4))
        gly.set_coords(x, expected_gly_x)
        gly_x = gly.get_coords(x)
        self.assertEqual(list(gly_x), list(expected_gly_x))

    def test_multi_get_monomer_coords(self):
        # With sequence ALA-GLY-ALA a lookup by name selects *both* ALA
        # copies (atoms 0-4 and atoms 9-13).
        ala = t.Molecule('ALA', ['N', 'CA', 'CB', 'C', 'O'])
        gly = t.Molecule('GLY', ['N', 'CA', 'C', 'O'])
        top = t.Polymer('poly', [ala, gly, ala])
        x = np.array(range(3 * top.num_atoms))
        ala = top.get_monomer('ALA')
        expected_ala_x = np.array(range(3*5) + range(3*9,3*(9+5)))
        ala_x = ala.get_coords(x)
        self.assertEqual(list(ala_x), list(expected_ala_x))

    def test_multi_set_monomer_coords(self):
        # Setting coords on a multi-copy monomer selection writes 30 values
        # (2 ALA copies x 5 atoms x 3 coords) across both copies.
        ala = t.Molecule('ALA', ['N', 'CA', 'CB', 'C', 'O'])
        gly = t.Molecule('GLY', ['N', 'CA', 'C', 'O'])
        top = t.Polymer('poly', [ala, gly, ala])
        x = np.array(range(3 * top.num_atoms))
        ala = top.get_monomer('ALA')
        expected_ala_x = np.array(range(100,130))
        ala.set_coords(x, expected_ala_x)
        ala_x = ala.get_coords(x)
        self.assertEqual(len(list(ala_x)), len(list(expected_ala_x)))
        self.assertEqual(list(ala_x), list(expected_ala_x))
class MoleculeLiftTestCase(unittest.TestCase):
    """Lifting coordinates between molecules with renamed or missing atoms.

    lift_topology maps a "second" molecule's coordinate layout onto a
    "first" molecule's layout; lift_coords then rearranges/embeds second's
    coordinates into first's frame.  NOTE: Python 2 code -- range() returns
    a list (so `+` concatenates) and dict.iteritems() is used.
    """

    def test_reorder_get_namemap(self):
        # Same atom count but C/O/G in first correspond to CB/C/O in second;
        # a namemap translating second's names back to first's should make
        # the lifted coordinates identical to first's.
        first_ala = t.Molecule('FirstALA', ['N', 'CA', 'C', 'O', 'G'])
        second_ala = t.Molecule('SecondALA', ['N', 'CA', 'C', 'O', 'CB'])
        first_coords = np.array(range(3 * first_ala.num_atoms))
        second_coords = np.array(range(0 * 3, 2 * 3) + range(3 * 3, 5 * 3) + range(2 * 3, 3 * 3))
        self.assertEqual(list(first_ala.get_atoms('C').get_coords(first_coords)), list(second_ala.get_atoms('CB').get_coords(second_coords)))
        first_to_second = {'C':'CB',
                           'O': 'C',
                           'G': 'O'}
        # Invert the mapping: lift_topology wants second-name -> first-name.
        second_to_first = dict((kv[1], kv[0]) for kv in first_to_second.iteritems())
        first_lift_second = first_ala.lift_topology(second_ala,
                                                    namemap=t.namedict(second_to_first))
        self.assertEqual(list(first_coords), list(first_lift_second.lift_coords(second_coords)))

    def test_lift_subset_shape(self):
        # Second molecule lacks CB; get_coords on the lifted topology keeps
        # second's (smaller) shape.
        first_ala = t.Molecule('FirstALA', ['N', 'CA', 'CB', 'C', 'O'])
        second_ala = t.Molecule('SecondALA', ['N', 'CA', 'C', 'O'])
        first_coords = np.array(range(3 * first_ala.num_atoms))
        second_coords = np.array(range(0 * 3, 2 * 3) + range(3 * 3, 5 * 3))
        self.assertEqual(list(first_ala.get_atoms('C').get_coords(first_coords)), list(second_ala.get_atoms('C').get_coords(second_coords)))
        first_lift_second = first_ala.lift_topology(second_ala)
        z = first_lift_second.get_coords(second_coords)
        self.assertEqual(z.shape, second_coords.shape)

    def test_lift_subset_coords(self):
        # get_coords on the lift should equal selecting second's atom subset
        # directly out of first's coordinates.
        first_ala = t.Molecule('FirstALA', ['N', 'CA', 'CB', 'C', 'O'])
        second_ala = t.Molecule('SecondALA', ['N', 'CA', 'C', 'O'])
        first_coords = np.array(range(3 * first_ala.num_atoms))
        second_coords = np.array(range(0 * 3, 2 * 3) + range(3 * 3, 5 * 3))
        self.assertEqual(list(first_ala.get_atoms('C').get_coords(first_coords)), list(second_ala.get_atoms('C').get_coords(second_coords)))
        first_lift_second = first_ala.lift_topology(second_ala)
        subfirst_second = first_ala.get_atomset(second_ala.atoms)
        z = first_lift_second.get_coords(second_coords)
        subfirst_coords = subfirst_second.get_coords(first_coords)
        self.assertEqual(list(z), list(subfirst_coords))

    def test_lift_subset_listshape(self):
        # lift_coords embeds second's coords into first's (larger) frame.
        first_ala = t.Molecule('FirstALA', ['N', 'CA', 'CB', 'C', 'O'])
        second_ala = t.Molecule('SecondALA', ['N', 'CA', 'C', 'O'])
        first_coords = np.array(range(3 * first_ala.num_atoms))
        second_coords = np.array(range(0 * 3, 2 * 3) + range(3 * 3, 5 * 3))
        self.assertEqual(list(first_ala.get_atoms('C').get_coords(first_coords)), list(second_ala.get_atoms('C').get_coords(second_coords)))
        first_lift_second = first_ala.lift_topology(second_ala)
        z = first_lift_second.lift_coords(second_coords)
        self.assertEqual(z.shape, first_coords.shape)

    def test_lift_subset_liftcoords(self):
        # Lifted coordinates should match scattering first's shared-atom
        # values into a zero array of first's shape (missing CB stays 0).
        first_ala = t.Molecule('FirstALA', ['N', 'CA', 'CB', 'C', 'O'])
        second_ala = t.Molecule('SecondALA', ['N', 'CA', 'C', 'O'])
        first_coords = np.array(range(3 * first_ala.num_atoms))
        second_coords = np.array(range(0 * 3, 2 * 3) + range(3 * 3, 5 * 3))
        self.assertEqual(list(first_ala.get_atoms('C').get_coords(first_coords)), list(second_ala.get_atoms('C').get_coords(second_coords)))
        first_lift_second = first_ala.lift_topology(second_ala)
        subfirst_second = first_ala.get_atomset(second_ala.atoms)
        z = first_lift_second.lift_coords(second_coords)
        subfirst_coords = np.zeros(first_coords.shape)
        subfirst_second.set_coords(subfirst_coords, subfirst_second.get_coords(first_coords))
        self.assertEqual(list(first_ala.get_atoms('C').get_coords(z)), list(first_ala.get_atoms('C').get_coords(subfirst_coords)))
        self.assertEqual(list(z), list(subfirst_coords))
class PolymerLiftTestCase(unittest.TestCase):
    """Lifting coordinates between polymers whose monomers order atoms
    differently (ALA is N-CA-CB-C-O in one, N-CA-C-O-CB in the other).

    NOTE: Python 2 code -- range() returns a list, so `+` concatenates the
    expected index slices.
    """

    def make_tops(self):
        first_monomers = t.Monomers([t.Molecule('ALA',
                                                ['N', 'CA', 'CB', 'C', 'O']),
                                     t.Molecule('GLY',
                                                ['N', 'CA', 'C', 'O'])])
        second_monomers = t.Monomers([t.Molecule('ALA',
                                                 ['N', 'CA', 'C', 'O', 'CB']),
                                      t.Molecule('GLY',
                                                 ['N', 'CA', 'C', 'O'])])
        first_polymer = t.Polymer('firstpoly', first_monomers.sequence(['ALA', 'GLY']))
        second_polymer = t.Polymer('secondpoly', second_monomers.sequence(['ALA', 'GLY']))
        return first_polymer, second_polymer

    def test_reorder(self):
        # Lifting second's coords into first's layout should leave per-atom
        # values (here the backbone C atoms) unchanged.
        first_polymer, second_polymer = self.make_tops()
        # first_coords = np.array(range(3 * 0, 3 * 9))
        second_coords = np.array(range(3 * 0, 3 * 2) + range(3 * 3, 3 * 5) + range(3 * 2, 3 * 3) + range(3 * 5, 3 * 9))
        first_lift_second = first_polymer.lift_topology(second_polymer)
        first_coords = first_lift_second.lift_coords(second_coords)
        first_c_atoms = first_polymer.get_atoms('C')
        second_c_atoms = second_polymer.get_atoms('C')
        first_c_x = first_c_atoms.get_coords(first_coords)
        second_c_x = second_c_atoms.get_coords(second_coords)
        self.assertEqual(list(first_c_x), list(second_c_x))

    def test_lift_subset_shape(self):
        # Lifting from a selection's contiguous sub-topology back to the full
        # polymer preserves the full coordinate shape.
        first_polymer, second_polymer = self.make_tops()
        first_coords = np.array(range(3 * 0, 3 * 9))
        # second_coords = np.array(range(3 * 0, 3 * 2) + range(3 * 3, 3 * 5) + range(3 * 2, 3 * 3) + range(3 * 5, 3 * 9))
        first_n = first_polymer.get_atoms('N')
        first_n_coords = first_n.get_coords(first_coords)
        self.assertEqual(first_n_coords.shape, (2 * 3,))
        first_lift_n = first_polymer.lift_topology(first_n.get_contiguous_topology())
        self.assertEqual(first_lift_n.shape, first_coords.shape)
        first_lift_n_liftcoords = first_lift_n.lift_coords(first_coords)
        self.assertEqual(first_lift_n_liftcoords.shape, first_coords.shape)
class MoleculeChangeNDOFTestCase(unittest.TestCase):
    """change_ndof: re-deriving a topology with a different number of
    degrees of freedom per atom (3 -> 1 here) rescales all offsets.

    NOTE: Python 2 code -- range() returns a list, so `+` concatenates.
    """

    # Degrees of freedom per atom before and after the change.
    initial_ndof = 3
    final_ndof = 1

    def make_top(self, ndof):
        return t.Molecule('water', ['H', 'O', 'H'], ndof=ndof)

    def test_default_offsets(self):
        initial_top = self.make_top(self.initial_ndof)
        final_top = initial_top.change_ndof(self.final_ndof)
        # 3 atoms x final_ndof values each.
        self.assertEqual(list(final_top.atom_offsets), range(3 * self.final_ndof))

    def test_get_atoms(self):
        initial_top = self.make_top(self.initial_ndof)
        top = initial_top.change_ndof(self.final_ndof)
        h_atoms = top.get_atoms('H')
        o_atoms = top.get_atoms('O')
        self.assertEqual(len(h_atoms), 2)
        self.assertEqual(len(o_atoms), 1)

    def test_get_atom_coords(self):
        initial_top = self.make_top(self.initial_ndof)
        top = initial_top.change_ndof(self.final_ndof)
        h_atoms = top.get_atoms('H')
        o_atoms = top.get_atoms('O')
        x = np.array(range(self.final_ndof * top.num_atoms))
        h_x = h_atoms.get_coords(x)
        o_x = o_atoms.get_coords(x)
        # H atoms are indices 0 and 2, O is index 1, final_ndof values each.
        expected_h_x = np.array(range(0 * self.final_ndof, 1 * self.final_ndof) + range(2 * self.final_ndof, 3 * self.final_ndof))
        expected_o_x = np.array(range(1 * self.final_ndof, 2 * self.final_ndof))
        self.assertEqual(list(expected_h_x), list(h_x))
        self.assertEqual(list(expected_o_x), list(o_x))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
|
from __future__ import absolute_import
from collections import defaultdict
import functools
import itertools
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
    """An insertion-ordered mapping of requirement name -> requirement.

    Keys keep the order in which they were first added; re-assigning an
    existing key updates its value without changing that order.
    """

    def __init__(self):
        self._keys = []
        self._dict = {}

    def keys(self):
        """Return the keys in first-insertion order."""
        return self._keys

    def values(self):
        """Return the stored values, ordered to match keys()."""
        return [self._dict[key] for key in self._keys]

    def __contains__(self, item):
        return item in self._keys

    def __setitem__(self, key, value):
        # Record the key's position only the first time it is seen.
        if key not in self._keys:
            self._keys.append(key)
        self._dict[key] = value

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        pairs = ('%r: %r' % (key, self[key]) for key in self.keys())
        return 'Requirements({%s})' % ', '.join(pairs)
class DistAbstraction(object):
    """Abstracts out the wheel vs non-wheel prepare_files logic.

    The requirements for anything installable are as follows:
     - we must be able to determine the requirement name
       (or we can't correctly handle the non-upgrade case).
     - we must be able to generate a list of run-time dependencies
       without installing any additional packages (or we would
       have to either burn time by doing temporary isolated installs
       or alternatively violate pips 'don't start installing unless
       all requirements are available' rule - neither of which are
       desirable).
     - for packages with setup requirements, we must also be able
       to determine their requirements without installing additional
       packages (for the same reason as run-time dependencies)
     - we must be able to create a Distribution object exposing the
       above metadata.
    """

    def __init__(self, req_to_install):
        # The InstallRequirement this abstraction wraps.
        self.req_to_install = req_to_install

    def dist(self, finder):
        """Return a setuptools Dist object."""
        raise NotImplementedError(self.dist)

    def prep_for_dist(self):
        """Ensure that we can get a Dist for this requirement."""
        # BUG FIX: this previously raised NotImplementedError(self.dist),
        # misreporting *which* method subclasses failed to implement.
        raise NotImplementedError(self.prep_for_dist)
def make_abstract_dist(req_to_install):
    """Factory to make an abstract dist object.

    Preconditions: Either an editable req with a source_dir, or satisfied_by
    or a wheel link, or a non-editable req with a source_dir.

    :return: A concrete DistAbstraction.
    """
    # Editable requirements are always handled as sdists; only a
    # non-editable requirement with a wheel link gets wheel handling.
    if not req_to_install.editable and (
            req_to_install.link and req_to_install.link.is_wheel):
        return IsWheel(req_to_install)
    return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
    """Dist abstraction for a requirement backed by an unpacked wheel."""

    def dist(self, finder):
        found = list(pkg_resources.find_distributions(
            self.req_to_install.source_dir))
        return found[0]

    def prep_for_dist(self):
        # FIXME:https://github.com/pypa/pip/issues/1112
        pass
class IsSDist(DistAbstraction):
    """Dist abstraction for a source distribution (or editable checkout)."""

    def dist(self, finder):
        dist = self.req_to_install.get_dist()
        # FIXME: shouldn't be globally added:
        if dist.has_metadata('dependency_links.txt'):
            dep_links = dist.get_metadata_lines('dependency_links.txt')
            finder.add_dependency_links(dep_links)
        return dist

    def prep_for_dist(self):
        """Generate egg-info metadata and sanity-check the unpacked source."""
        self.req_to_install.run_egg_info()
        self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
    """Dist abstraction for a requirement already satisfied on the system."""

    def dist(self, finder):
        return self.req_to_install.satisfied_by

    def prep_for_dist(self):
        # Nothing to prepare: the distribution is already installed.
        pass
class RequirementSet(object):
    """The full set of requirements for one resolve/download/install run.

    Tracks named and unnamed requirements, their inter-dependencies, what
    was downloaded or installed, and drives the prepare / install /
    uninstall / cleanup phases.
    """

    def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
                 ignore_installed=False, as_egg=False, target_dir=None,
                 ignore_dependencies=False, force_reinstall=False,
                 use_user_site=False, session=None, pycompile=True,
                 isolated=False, wheel_download_dir=None,
                 wheel_cache=None):
        """Create a RequirementSet.

        :param wheel_download_dir: Where still-packed .whl files should be
            written to. If None they are written to the download_dir
            parameter. Separate to download_dir to permit only keeping wheel
            archives for pip wheel.
        :param download_dir: Where still packed archives should be written to.
            If None they are not saved, and are deleted immediately after
            unpacking.
        :param wheel_cache: The pip wheel cache, for passing to
            InstallRequirement.
        """
        if session is None:
            raise TypeError(
                "RequirementSet() missing 1 required keyword argument: "
                "'session'"
            )
        self.build_dir = build_dir
        self.src_dir = src_dir
        # XXX: download_dir and wheel_download_dir overlap semantically and may
        # be combined if we're willing to have non-wheel archives present in
        # the wheelhouse output by 'pip wheel'.
        self.download_dir = download_dir
        self.upgrade = upgrade
        self.ignore_installed = ignore_installed
        self.force_reinstall = force_reinstall
        self.requirements = Requirements()
        # Mapping of alias: real_name
        self.requirement_aliases = {}
        self.unnamed_requirements = []
        self.ignore_dependencies = ignore_dependencies
        self.successfully_downloaded = []
        self.successfully_installed = []
        self.reqs_to_cleanup = []
        self.as_egg = as_egg
        self.use_user_site = use_user_site
        self.target_dir = target_dir  # set from --target option
        self.session = session
        self.pycompile = pycompile
        self.isolated = isolated
        if wheel_download_dir:
            wheel_download_dir = normalize_path(wheel_download_dir)
        self.wheel_download_dir = wheel_download_dir
        self._wheel_cache = wheel_cache
        # Maps from install_req -> dependencies_of_install_req
        self._dependencies = defaultdict(list)

    def __str__(self):
        # Only the user-supplied (top-level) requirements, not inferred deps.
        reqs = [req for req in self.requirements.values()
                if not req.comes_from]
        reqs.sort(key=lambda req: req.name.lower())
        return ' '.join([str(req.req) for req in reqs])

    def __repr__(self):
        reqs = [req for req in self.requirements.values()]
        reqs.sort(key=lambda req: req.name.lower())
        reqs_str = ', '.join([str(req.req) for req in reqs])
        return ('<%s object; %d requirement(s): %s>'
                % (self.__class__.__name__, len(reqs), reqs_str))

    def add_requirement(self, install_req, parent_req_name=None):
        """Add install_req as a requirement to install.

        :param parent_req_name: The name of the requirement that needed this
            added. The name is used because when multiple unnamed requirements
            resolve to the same name, we could otherwise end up with dependency
            links that point outside the Requirements set. parent_req must
            already be added. Note that None implies that this is a user
            supplied requirement, vs an inferred one.
        :return: Additional requirements to scan. That is either [] if
            the requirement is not applicable, or [install_req] if the
            requirement is applicable and has just been added.
        """
        name = install_req.name
        if not install_req.match_markers():
            logger.warning("Ignoring %s: markers %r don't match your "
                           "environment", install_req.name,
                           install_req.markers)
            return []

        install_req.as_egg = self.as_egg
        install_req.use_user_site = self.use_user_site
        install_req.target_dir = self.target_dir
        install_req.pycompile = self.pycompile
        if not name:
            # url or path requirement w/o an egg fragment
            self.unnamed_requirements.append(install_req)
            return [install_req]
        else:
            try:
                existing_req = self.get_requirement(name)
            except KeyError:
                existing_req = None
            if (parent_req_name is None and existing_req and not
                    existing_req.constraint):
                raise InstallationError(
                    'Double requirement given: %s (already in %s, name=%r)'
                    % (install_req, existing_req, name))
            if not existing_req:
                # Add requirement
                self.requirements[name] = install_req
                # FIXME: what about other normalizations? E.g., _ vs. -?
                if name.lower() != name:
                    self.requirement_aliases[name.lower()] = name
                result = [install_req]
            else:
                if not existing_req.constraint:
                    # No need to scan, we've already encountered this for
                    # scanning.
                    result = []
                elif not install_req.constraint:
                    # A constraint entry is being "upgraded" to a real
                    # requirement; only version constraints can do this.
                    if (install_req.link and not (existing_req.link and
                            install_req.link.path == existing_req.link.path)):
                        self.reqs_to_cleanup.append(install_req)
                        raise InstallationError(
                            "Could not satisfy constraints for '%s': "
                            "installation from path or url cannot be "
                            "constrained to a version" % name)
                    # If we're now installing a constraint, mark the existing
                    # object for real installation.
                    existing_req.constraint = False
                    # And now we need to scan this.
                    result = [existing_req]
                # Canonicalise to the already-added object for the backref
                # check below.
                install_req = existing_req
            if parent_req_name:
                parent_req = self.get_requirement(parent_req_name)
                self._dependencies[parent_req].append(install_req)
            return result

    def has_requirement(self, project_name):
        """Return True if a non-constraint requirement with this name exists."""
        name = project_name.lower()
        if (name in self.requirements and
                not self.requirements[name].constraint or
                name in self.requirement_aliases and
                not self.requirements[self.requirement_aliases[name]].constraint):
            return True
        return False

    @property
    def has_requirements(self):
        """Truthy if any real (non-constraint) or unnamed requirements exist."""
        return list(req for req in self.requirements.values() if not
                    req.constraint) or self.unnamed_requirements

    @property
    def is_download(self):
        # NOTE(review): this property has side effects -- it expands '~' in
        # download_dir in place and raises if the directory does not exist.
        if self.download_dir:
            self.download_dir = expanduser(self.download_dir)
            if os.path.exists(self.download_dir):
                return True
            else:
                logger.critical('Could not find download directory')
                raise InstallationError(
                    "Could not find or access download directory '%s'"
                    % display_path(self.download_dir))
        return False

    def get_requirement(self, project_name):
        """Look up a requirement by exact, lowercased, or aliased name.

        :raises KeyError: if no requirement with that name was added.
        """
        for name in project_name, project_name.lower():
            if name in self.requirements:
                return self.requirements[name]
            if name in self.requirement_aliases:
                return self.requirements[self.requirement_aliases[name]]
        raise KeyError("No project with the name %r" % project_name)

    def uninstall(self, auto_confirm=False):
        """Uninstall (and commit) every non-constraint requirement."""
        for req in self.requirements.values():
            if req.constraint:
                continue
            req.uninstall(auto_confirm=auto_confirm)
            req.commit_uninstall()

    def _walk_req_to_install(self, handler):
        """Call handler for all pending reqs.

        :param handler: Handle a single requirement. Should take a requirement
            to install. Can optionally return an iterable of additional
            InstallRequirements to cover.
        """
        # The list() here is to avoid potential mutate-while-iterating bugs.
        discovered_reqs = []
        reqs = itertools.chain(
            list(self.unnamed_requirements), list(self.requirements.values()),
            discovered_reqs)
        for req_to_install in reqs:
            more_reqs = handler(req_to_install)
            if more_reqs:
                # Appending to discovered_reqs while the chain is being
                # consumed feeds the new reqs into this same loop.
                discovered_reqs.extend(more_reqs)

    def prepare_files(self, finder):
        """
        Prepare process. Create temp directories, download and/or unpack files.
        """
        # make the wheelhouse
        if self.wheel_download_dir:
            ensure_dir(self.wheel_download_dir)

        self._walk_req_to_install(
            functools.partial(self._prepare_file, finder))

    def _check_skip_installed(self, req_to_install, finder):
        """Check if req_to_install should be skipped.

        This will check if the req is installed, and whether we should upgrade
        or reinstall it, taking into account all the relevant user options.

        After calling this req_to_install will only have satisfied_by set to
        None if the req_to_install is to be upgraded/reinstalled etc. Any
        other value will be a dist recording the current thing installed that
        satisfies the requirement.

        Note that for vcs urls and the like we can't assess skipping in this
        routine - we simply identify that we need to pull the thing down,
        then later on it is pulled down and introspected to assess upgrade/
        reinstalls etc.

        :return: A text reason for why it was skipped, or None.
        """
        # Check whether to upgrade/reinstall this req or not.
        req_to_install.check_if_exists()
        if req_to_install.satisfied_by:
            skip_reason = 'satisfied (use --upgrade to upgrade)'
            if self.upgrade:
                best_installed = False
                # For link based requirements we have to pull the
                # tree down and inspect to assess the version #, so
                # its handled way down.
                if not (self.force_reinstall or req_to_install.link):
                    try:
                        finder.find_requirement(req_to_install, self.upgrade)
                    except BestVersionAlreadyInstalled:
                        skip_reason = 'up-to-date'
                        best_installed = True
                    except DistributionNotFound:
                        # No distribution found, so we squash the
                        # error - it will be raised later when we
                        # re-try later to do the install.
                        # Why don't we just raise here?
                        pass

                if not best_installed:
                    # don't uninstall conflict if user install and
                    # conflict is not user install
                    if not (self.use_user_site and not
                            dist_in_usersite(req_to_install.satisfied_by)):
                        req_to_install.conflicts_with = \
                            req_to_install.satisfied_by
                    req_to_install.satisfied_by = None
            return skip_reason
        else:
            return None

    def _prepare_file(self, finder, req_to_install):
        """Prepare a single requirements files.

        :return: A list of addition InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        if req_to_install.constraint or req_to_install.prepared:
            return []

        # Guard against preparing the same requirement twice.
        req_to_install.prepared = True

        if req_to_install.editable:
            logger.info('Obtaining %s', req_to_install)
        else:
            # satisfied_by is only evaluated by calling _check_skip_installed,
            # so it must be None here.
            assert req_to_install.satisfied_by is None
            if not self.ignore_installed:
                skip_reason = self._check_skip_installed(
                    req_to_install, finder)

            if req_to_install.satisfied_by:
                assert skip_reason is not None, (
                    '_check_skip_installed returned None but '
                    'req_to_install.satisfied_by is set to %r'
                    % (req_to_install.satisfied_by,))
                logger.info(
                    'Requirement already %s: %s', skip_reason,
                    req_to_install)
            else:
                if (req_to_install.link and
                        req_to_install.link.scheme == 'file'):
                    path = url_to_path(req_to_install.link.url)
                    logger.info('Processing %s', display_path(path))
                else:
                    logger.info('Collecting %s', req_to_install)

        with indent_log():
            # ################################ #
            # # vcs update or unpack archive # #
            # ################################ #
            if req_to_install.editable:
                req_to_install.ensure_has_source_dir(self.src_dir)
                req_to_install.update_editable(not self.is_download)
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    req_to_install.archive(self.download_dir)
            elif req_to_install.satisfied_by:
                abstract_dist = Installed(req_to_install)
            else:
                # @@ if filesystem packages are not marked
                # editable in a req, a non deterministic error
                # occurs when the script attempts to unpack the
                # build directory
                req_to_install.ensure_has_source_dir(self.build_dir)
                # If a checkout exists, it's unwise to keep going. version
                # inconsistencies are logged later, but do not fail the
                # installation.
                # FIXME: this won't upgrade when there's an existing
                # package unpacked in `req_to_install.source_dir`
                if os.path.exists(
                        os.path.join(req_to_install.source_dir, 'setup.py')):
                    raise PreviousBuildDirError(
                        "pip can't proceed with requirements '%s' due to a"
                        " pre-existing build directory (%s). This is "
                        "likely due to a previous installation that failed"
                        ". pip is being responsible and not assuming it "
                        "can delete this. Please delete it and try again."
                        % (req_to_install, req_to_install.source_dir)
                    )
                req_to_install.populate_link(finder, self.upgrade)
                # We can't hit this spot and have populate_link return None.
                # req_to_install.satisfied_by is None here (because we're
                # guarded) and upgrade has no impact except when satisfied_by
                # is not None.
                # Then inside find_requirement existing_applicable -> False
                # If no new versions are found, DistributionNotFound is raised,
                # otherwise a result is guaranteed.
                assert req_to_install.link
                try:
                    download_dir = self.download_dir
                    # We always delete unpacked sdists after pip ran.
                    autodelete_unpacked = True
                    if req_to_install.link.is_wheel \
                            and self.wheel_download_dir:
                        # when doing 'pip wheel` we download wheels to a
                        # dedicated dir.
                        download_dir = self.wheel_download_dir
                    if req_to_install.link.is_wheel:
                        if download_dir:
                            # When downloading, we only unpack wheels to get
                            # metadata.
                            autodelete_unpacked = True
                        else:
                            # When installing a wheel, we use the unpacked
                            # wheel.
                            autodelete_unpacked = False
                    unpack_url(
                        req_to_install.link, req_to_install.source_dir,
                        download_dir, autodelete_unpacked,
                        session=self.session)
                except requests.HTTPError as exc:
                    logger.critical(
                        'Could not install requirement %s because '
                        'of error %s',
                        req_to_install,
                        exc,
                    )
                    raise InstallationError(
                        'Could not install requirement %s because '
                        'of HTTP error %s for URL %s' %
                        (req_to_install, exc, req_to_install.link)
                    )
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    # Make a .zip of the source_dir we already created.
                    if req_to_install.link.scheme in vcs.all_schemes:
                        req_to_install.archive(self.download_dir)
                # req_to_install.req is only avail after unpack for URL
                # pkgs repeat check_if_exists to uninstall-on-upgrade
                # (#14)
                if not self.ignore_installed:
                    req_to_install.check_if_exists()
                if req_to_install.satisfied_by:
                    if self.upgrade or self.ignore_installed:
                        # don't uninstall conflict if user install and
                        # conflict is not user install
                        if not (self.use_user_site and not
                                dist_in_usersite(
                                    req_to_install.satisfied_by)):
                            req_to_install.conflicts_with = \
                                req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                    else:
                        logger.info(
                            'Requirement already satisfied (use '
                            '--upgrade to upgrade): %s',
                            req_to_install,
                        )

            # ###################### #
            # # parse dependencies # #
            # ###################### #
            dist = abstract_dist.dist(finder)
            more_reqs = []

            def add_req(subreq):
                # Wrap a dependency of req_to_install and queue it for
                # scanning, recording the parent for the dependency graph.
                sub_install_req = InstallRequirement(
                    str(subreq),
                    req_to_install,
                    isolated=self.isolated,
                    wheel_cache=self._wheel_cache,
                )
                more_reqs.extend(self.add_requirement(
                    sub_install_req, req_to_install.name))

            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not self.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                self.add_requirement(req_to_install, None)

            if not self.ignore_dependencies:
                if (req_to_install.extras):
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        '%s does not provide the extra \'%s\'',
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq)

            # cleanup tmp src
            self.reqs_to_cleanup.append(req_to_install)

            if not req_to_install.editable and not req_to_install.satisfied_by:
                # XXX: --no-install leads this to report 'Successfully
                # downloaded' for only non-editable reqs, even though we took
                # action on them.
                self.successfully_downloaded.append(req_to_install)

        return more_reqs

    def cleanup_files(self):
        """Clean up files, remove builds."""
        logger.debug('Cleaning up...')
        with indent_log():
            for req in self.reqs_to_cleanup:
                req.remove_temporary_source()

    def _to_install(self):
        """Create the installation order.

        The installation order is topological - requirements are installed
        before the requiring thing. We break cycles at an arbitrary point,
        and make no other guarantees.
        """
        # The current implementation, which we may change at any point
        # installs the user specified things in the order given, except when
        # dependencies must come earlier to achieve topological order.
        order = []
        ordered_reqs = set()

        def schedule(req):
            # Depth-first walk; ordered_reqs doubles as the cycle breaker.
            if req.satisfied_by or req in ordered_reqs:
                return
            if req.constraint:
                return
            ordered_reqs.add(req)
            for dep in self._dependencies[req]:
                schedule(dep)
            order.append(req)
        for install_req in self.requirements.values():
            schedule(install_req)
        return order

    def install(self, install_options, global_options=(), *args, **kwargs):
        """
        Install everything in this set (after having downloaded and unpacked
        the packages)
        """
        to_install = self._to_install()

        if to_install:
            logger.info(
                'Installing collected packages: %s',
                ', '.join([req.name for req in to_install]),
            )

        with indent_log():
            for requirement in to_install:
                if requirement.conflicts_with:
                    logger.info(
                        'Found existing installation: %s',
                        requirement.conflicts_with,
                    )
                    with indent_log():
                        requirement.uninstall(auto_confirm=True)
                try:
                    requirement.install(
                        install_options,
                        global_options,
                        *args,
                        **kwargs
                    )
                except:
                    # NOTE(review): deliberately bare -- any failure (incl.
                    # KeyboardInterrupt) must roll back the uninstall before
                    # re-raising.
                    # if install did not succeed, rollback previous uninstall
                    if (requirement.conflicts_with and not
                            requirement.install_succeeded):
                        requirement.rollback_uninstall()
                    raise
                else:
                    if (requirement.conflicts_with and
                            requirement.install_succeeded):
                        requirement.commit_uninstall()
                requirement.remove_temporary_source()

        self.successfully_installed = to_install
|
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import os
import pickle
import copy
import sys
import html
from utils import TextLoader
from model import Model
def main():
    """Parse command-line options and hand them to sample_main."""
    assert sys.version_info >= (3, 3), \
        "Must be run in Python 3.3 or later. You are running {}".format(sys.version)

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--save_dir', type=str, default='models/reddit',
                            help='model directory to store checkpointed models')
    arg_parser.add_argument('-n', type=int, default=500,
                            help='number of characters to sample')
    arg_parser.add_argument('--prime', type=str, default=' ',
                            help='prime text')
    arg_parser.add_argument('--beam_width', type=int, default=2,
                            help='Width of the beam for beam search, default 2')
    arg_parser.add_argument('--temperature', type=float, default=1.0,
                            help='sampling temperature'
                                 '(lower is more conservative, default is 1.0, which is neutral)')
    arg_parser.add_argument('--topn', type=int, default=-1,
                            help='at each step, choose from only this many most likely characters;'
                                 'set to <0 to disable top-n filtering.')
    arg_parser.add_argument('--relevance', type=float, default=-1.,
                            help='amount of "relevance masking/MMI (disabled by default):"'
                                 'higher is more pressure, 0.4 is probably as high as it can go without'
                                 'noticeably degrading coherence;'
                                 'set to <0 to disable relevance masking')
    sample_main(arg_parser.parse_args())
def get_paths(input_path):
    """Resolve *input_path* (a checkpoint file or a save directory) into
    (model_path, config.pkl path, chars_vocab.pkl path).

    Raises ValueError when the path does not exist or the directory holds
    no checkpoint.
    """
    if os.path.isfile(input_path):
        # A specific checkpoint file was passed; its directory is the save dir.
        model_path = input_path
        save_dir = os.path.dirname(model_path)
    elif os.path.exists(input_path):
        # A checkpoint directory was passed; ask TF for its latest checkpoint.
        save_dir = input_path
        checkpoint = tf.train.get_checkpoint_state(save_dir)
        if not checkpoint:
            raise ValueError('Checkpoint not found in {}.'.format(save_dir))
        model_path = checkpoint.model_checkpoint_path
    else:
        raise ValueError('save_dir is not a valid path.')
    config_path = os.path.join(save_dir, 'config.pkl')
    vocab_path = os.path.join(save_dir, 'chars_vocab.pkl')
    return model_path, config_path, vocab_path
def sample_main(args):
    """Load the trained model named by args.save_dir and start the chatbot.

    Reads the training-time hyperparameters (config.pkl) and the character
    vocabulary (chars_vocab.pkl) saved next to the checkpoint, rebuilds the
    net in inference mode, restores the weights into a TF session and hands
    control to chatbot().
    """
    model_path, config_path, vocab_path = get_paths(args.save_dir)
    # Arguments passed to sample.py direct us to a saved model.
    # Load the separate arguments by which that model was previously trained.
    # That's saved_args. Use those to load the model.
    with open(config_path, 'rb') as f:
        saved_args = pickle.load(f)
    # Separately load chars and vocab from the save directory.
    with open(vocab_path, 'rb') as f:
        chars, vocab = pickle.load(f)
    # Create the model from the saved arguments, in inference mode.
    print("Creating model...")
    # One batch row per beam: the beam search forwards all beams at once.
    saved_args.batch_size = args.beam_width
    net = Model(saved_args, True)
    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all up front.
    config.gpu_options.allow_growth = True
    # Make tensorflow less verbose; filter out info (1+) and warnings (2+) but not errors (3).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(net.save_variables_list())
        # Restore the saved variables, replacing the initialized values.
        print("Restoring weights...")
        saver.restore(sess, model_path)
        chatbot(net, sess, chars, vocab, args.n, args.beam_width,
                args.relevance, args.temperature, args.topn)
def initial_state(net, sess):
    """Return a freshly initialized (zeroed) hidden state for the model."""
    zero_state = net.zero_state
    return sess.run(zero_state)
def forward_text(net, sess, states, relevance, vocab, prime_text=None):
    """Feed each character of *prime_text* through the model, advancing states.

    When relevance masking is active (relevance > 0), *states* is a two-element
    list [primary net state, mask net state]; the mask net state is
    re-initialized whenever a newline is consumed. Otherwise *states* is a
    single net state. Returns the updated states.
    """
    if prime_text is not None:
        for char in prime_text:
            if relevance > 0.:
                # Automatically forward the primary net.
                _, states[0] = net.forward_model(sess, states[0], vocab[char])
                # If the character is a newline, reset the mask net state; else,
                # forward it. (BUG FIX: compare the character itself, not its
                # vocab index -- vocab[char] is an int and could never equal
                # '\n', so the mask state was never being reset.)
                if char == '\n':
                    states[1] = initial_state(net, sess)
                else:
                    _, states[1] = net.forward_model(sess, states[1], vocab[char])
            else:
                _, states = net.forward_model(sess, states, vocab[char])
    return states
def sanitize_text(vocab, text): # Strip out characters that are not part of the net's vocab.
    """Drop every character of *text* that has no entry in *vocab*."""
    kept = [ch for ch in text if ch in vocab]
    return ''.join(kept)
def initial_state_with_relevance_masking(net, sess, relevance):
    """Return a [primary, mask] state pair when relevance masking is enabled
    (relevance > 0), otherwise a single fresh model state."""
    if relevance > 0.:
        return [initial_state(net, sess), initial_state(net, sess)]
    return initial_state(net, sess)
def possibly_escaped_char(raw_chars):
    """Return the newest character to print, decoding HTML escapes in place.

    Normally just returns raw_chars[-1]. If the latest character is ';' and it
    completes a short HTML entity (e.g. '&amp;'), return a backspace sequence
    that visually rewrites the entity as its decoded character, padded with
    spaces/backspaces to erase the leftover columns.
    """
    if raw_chars[-1] != ';':
        return raw_chars[-1]
    # Scan backwards (at most 9 chars) for the '&' that would open an entity.
    for distance, ch in enumerate(reversed(raw_chars[:-1])):
        if ch == ';' or distance > 8:
            break
        if ch == '&':
            escape_seq = "".join(raw_chars[-(distance + 2):])
            decoded = html.unescape(escape_seq)
            overhang = len(escape_seq) - len(decoded) - 1
            return ("\b" * (len(escape_seq) - 1) + decoded +
                    " " * overhang + "\b" * overhang)
    return raw_chars[-1]
def chatbot(net, sess, chars, vocab, max_length, beam_width, relevance, temperature, topn):
    """Interactive chat loop: read user lines, sample replies via beam search.

    Each user line is either an in-chat '--' command (handled by
    process_user_command) or conversation text. Conversation text is fed to
    the model along with the '> ' prompt framing, and the reply is generated
    token by token, echoed as it appears, and fed back into the model state.
    Loops forever (terminated externally, e.g. with Ctrl-C).
    """
    states = initial_state_with_relevance_masking(net, sess, relevance)
    while True:
        user_input = input('\n> ')
        user_command_entered, reset, states, relevance, temperature, topn, beam_width = process_user_command(
            user_input, states, relevance, temperature, topn, beam_width)
        if reset: states = initial_state_with_relevance_masking(net, sess, relevance)
        if not user_command_entered:
            states = forward_text(net, sess, states, relevance, vocab, sanitize_text(vocab, "> " + user_input + "\n>"))
            computer_response_generator = beam_search_generator(sess=sess, net=net,
                initial_state=copy.deepcopy(states), initial_sample=vocab[' '],
                early_term_token=vocab['\n'], beam_width=beam_width, forward_model_fn=forward_with_mask,
                forward_args={'relevance':relevance, 'mask_reset_token':vocab['\n'], 'forbidden_token':vocab['>'],
                            'temperature':temperature, 'topn':topn})
            out_chars = []
            for i, char_token in enumerate(computer_response_generator):
                out_chars.append(chars[char_token])
                # possibly_escaped_char may emit backspaces to rewrite an HTML
                # escape sequence that has just completed on screen.
                print(possibly_escaped_char(out_chars), end='', flush=True)
                states = forward_text(net, sess, states, relevance, vocab, chars[char_token])
                if i >= max_length: break
            states = forward_text(net, sess, states, relevance, vocab, sanitize_text(vocab, "\n> "))
def process_user_command(user_input, states, relevance, temperature, topn, beam_width):
    """Interpret an in-chat '--' command and return updated sampler settings.

    Returns the tuple (user_command_entered, reset, states, relevance,
    temperature, topn, beam_width). Unrecognized input leaves every value
    unchanged with user_command_entered False; a malformed numeric argument
    is reported and otherwise ignored.
    """
    user_command_entered = False
    reset = False

    def argument_after(prefix):
        # Everything following the command keyword (and its trailing space).
        return user_input[len(prefix):]

    try:
        if user_input.startswith('--temperature '):
            user_command_entered = True
            temperature = max(0.001, float(argument_after('--temperature ')))
            print("[Temperature set to {}]".format(temperature))
        elif user_input.startswith('--relevance '):
            user_command_entered = True
            new_relevance = float(argument_after('--relevance '))
            # Toggling relevance masking changes the state's shape: a
            # [primary, mask] pair when enabled, a single state when disabled.
            if relevance <= 0. and new_relevance > 0.:
                states = [states, copy.deepcopy(states)]
            elif relevance > 0. and new_relevance <= 0.:
                states = states[0]
            relevance = new_relevance
            print("[Relevance disabled]" if relevance <= 0. else "[Relevance set to {}]".format(relevance))
        elif user_input.startswith('--topn '):
            user_command_entered = True
            topn = int(argument_after('--topn '))
            print("[Top-n filtering disabled]" if topn <= 0 else "[Top-n filtering set to {}]".format(topn))
        elif user_input.startswith('--beam_width '):
            user_command_entered = True
            beam_width = max(1, int(argument_after('--beam_width ')))
            print("[Beam width set to {}]".format(beam_width))
        elif user_input.startswith('--reset'):
            user_command_entered = True
            reset = True
            print("[Model state reset]")
    except ValueError:
        print("[Value error with provided argument.]")
    return user_command_entered, reset, states, relevance, temperature, topn, beam_width
def consensus_length(beam_outputs, early_term_token):
    """Return (length, early_term) for the prefix all beams agree on.

    *length* is how many leading tokens of beam_outputs[0] are shared by every
    beam; *early_term* is True when the agreed prefix ends at
    early_term_token (generation should stop). Robust to empty beams (the
    original raised NameError via an unbound loop variable in that case).
    """
    if not beam_outputs or not beam_outputs[0]:
        return 0, False
    l = 0
    for l in range(len(beam_outputs[0])):
        # Agreement on the early-termination token ends the search just
        # before it (the token itself is not yielded).
        if l > 0 and beam_outputs[0][l-1] == early_term_token:
            return l-1, True
        for b in beam_outputs[1:]:
            if beam_outputs[0][l] != b[l]:
                return l, False
    return l, False
def scale_prediction(prediction, temperature):
    """Rescale a probability distribution by sampling temperature.

    Temperature 1.0 returns the input unchanged; lower values sharpen the
    distribution, higher values flatten it. Computed as a log-softmax for
    numerical stability.
    """
    if temperature == 1.0:
        return prediction # Temperature 1.0 makes no change
    # log(0) entries produce -inf with a divide warning; silence it, since
    # exp() maps -inf straight back to probability 0.
    np.seterr(divide='ignore')
    log_scaled = np.log(prediction) / temperature
    log_scaled = log_scaled - np.logaddexp.reduce(log_scaled)
    np.seterr(divide='warn')
    return np.exp(log_scaled)
def forward_with_mask(sess, net, states, input_sample, forward_args):
    """Forward one token through the model and return (prob, states).

    forward_args keys:
        relevance: weight of the MMI/relevance mask; <= 0 disables masking.
        mask_reset_token: token (newline) that resets the mask net state.
        forbidden_token: token whose probability is forced to zero ('>').
        temperature: sampling temperature applied via scale_prediction().
        topn: if > 0, zero out all but the topn most likely tokens.
    """
    # forward_args is a dictionary containing arguments for generating probabilities.
    relevance = forward_args['relevance']
    mask_reset_token = forward_args['mask_reset_token']
    forbidden_token = forward_args['forbidden_token']
    temperature = forward_args['temperature']
    topn = forward_args['topn']
    if relevance <= 0.:
        # No relevance masking.
        prob, states = net.forward_model(sess, states, input_sample)
    else:
        # states should be a 2-length list: [primary net state, mask net state].
        if input_sample == mask_reset_token:
            # Reset the mask probs when reaching mask_reset_token (newline).
            states[1] = initial_state(net, sess)
        primary_prob, states[0] = net.forward_model(sess, states[0], input_sample)
        primary_prob /= sum(primary_prob)
        mask_prob, states[1] = net.forward_model(sess, states[1], input_sample)
        mask_prob /= sum(mask_prob)
        # MMI-style combination in log space: penalize tokens the mask net
        # finds likely regardless of context, scaled by `relevance`.
        prob = np.exp(np.log(primary_prob) - relevance * np.log(mask_prob))
    # Mask out the forbidden token (">") to prevent the bot from deciding the chat is over)
    prob[forbidden_token] = 0
    # Normalize probabilities so they sum to 1.
    prob = prob / sum(prob)
    # Apply temperature.
    prob = scale_prediction(prob, temperature)
    # Apply top-n filtering if enabled
    if topn > 0:
        prob[np.argsort(prob)[:-topn]] = 0
        prob = prob / sum(prob)
    return prob, states
def beam_search_generator(sess, net, initial_state, initial_sample,
        early_term_token, beam_width, forward_model_fn, forward_args):
    '''Run beam search! Yield consensus tokens sequentially, as a generator;
    return when reaching early_term_token (newline).
    Args:
        sess: tensorflow session reference
        net: tensorflow net graph (must be compatible with the forward_net function)
        initial_state: initial hidden state of the net
        initial_sample: single token (excluding any seed/priming material)
            to start the generation
        early_term_token: stop when the beam reaches consensus on this token
            (but do not return this token).
        beam_width: how many beams to track
        forward_model_fn: function to forward the model, must be of the form:
            probability_output, beam_state =
                    forward_model_fn(sess, net, beam_state, beam_sample, forward_args)
            (Note: probability_output has to be a valid probability distribution!)
        forward_args: opaque dictionary passed through to forward_model_fn.
    Returns: a generator to yield a sequence of beam-sampled tokens.'''
    # Store state, outputs and probabilities for up to args.beam_width beams.
    # Initialize with just the one starting entry; it will branch to fill the beam
    # in the first step.
    beam_states = [initial_state] # Stores the best activation states
    beam_outputs = [[initial_sample]] # Stores the best generated output sequences so far.
    beam_probs = [1.] # Stores the cumulative normalized probabilities of the beams so far.
    while True:
        # Keep a running list of the best beam branches for next step.
        # Don't actually copy any big data structures yet, just keep references
        # to existing beam state entries, and then clone them as necessary
        # at the end of the generation step.
        new_beam_indices = []
        new_beam_probs = []
        new_beam_samples = []
        # Iterate through the beam entries.
        for beam_index, beam_state in enumerate(beam_states):
            beam_prob = beam_probs[beam_index]
            beam_sample = beam_outputs[beam_index][-1]
            # Forward the model.
            prediction, beam_states[beam_index] = forward_model_fn(
                    sess, net, beam_state, beam_sample, forward_args)
            # Sample best_tokens from the probability distribution.
            # Sample from the scaled probability distribution beam_width choices
            # (but not more than the number of positive probabilities in scaled_prediction).
            count = min(beam_width, sum(1 if p > 0. else 0 for p in prediction))
            best_tokens = np.random.choice(len(prediction), size=count,
                                            replace=False, p=prediction)
            for token in best_tokens:
                prob = prediction[token] * beam_prob
                if len(new_beam_indices) < beam_width:
                    # If we don't have enough new_beam_indices, we automatically qualify.
                    new_beam_indices.append(beam_index)
                    new_beam_probs.append(prob)
                    new_beam_samples.append(token)
                else:
                    # Sample a low-probability beam to possibly replace.
                    # (Stochastic eviction: beams with lower cumulative
                    # probability are more likely to be chosen for removal.)
                    np_new_beam_probs = np.array(new_beam_probs)
                    inverse_probs = -np_new_beam_probs + max(np_new_beam_probs) + min(np_new_beam_probs)
                    inverse_probs = inverse_probs / sum(inverse_probs)
                    sampled_beam_index = np.random.choice(beam_width, p=inverse_probs)
                    if new_beam_probs[sampled_beam_index] <= prob:
                        # Replace it.
                        new_beam_indices[sampled_beam_index] = beam_index
                        new_beam_probs[sampled_beam_index] = prob
                        new_beam_samples[sampled_beam_index] = token
        # Replace the old states with the new states, first by referencing and then by copying.
        already_referenced = [False] * beam_width
        new_beam_states = []
        new_beam_outputs = []
        for i, new_index in enumerate(new_beam_indices):
            if already_referenced[new_index]:
                new_beam = copy.deepcopy(beam_states[new_index])
            else:
                new_beam = beam_states[new_index]
                already_referenced[new_index] = True
            new_beam_states.append(new_beam)
            new_beam_outputs.append(beam_outputs[new_index] + [new_beam_samples[i]])
        # Normalize the beam probabilities so they don't drop to zero
        # NOTE(review): this relies on the probabilities being numpy scalars
        # (sum() then yields a numpy scalar whose division broadcasts over the
        # list) -- beam_probs is an ndarray from here on. Confirm
        # forward_model_fn always returns numpy output.
        beam_probs = new_beam_probs / sum(new_beam_probs)
        beam_states = new_beam_states
        beam_outputs = new_beam_outputs
        # Prune the agreed portions of the outputs
        # and yield the tokens on which the beam has reached consensus.
        l, early_term = consensus_length(beam_outputs, early_term_token)
        if l > 0:
            for token in beam_outputs[0][:l]: yield token
            beam_outputs = [output[l:] for output in beam_outputs]
        if early_term: return
# Script entry point: parse arguments and start the interactive chatbot.
if __name__ == '__main__':
    main()
|
|
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
from collections import namedtuple
from time import time, sleep
from threading import Thread, Event
try:
from math import isclose
except ImportError:
from ..compat import isclose
from . import Pin
from .data import pi_info
from ..exc import PinSetInput, PinPWMUnsupported, PinFixedPull
PinState = namedtuple('PinState', ('timestamp', 'state'))
class MockPin(Pin):
    """
    A mock pin used primarily for testing. This class does *not* support PWM.

    Instances are cached per pin number (see ``__new__``): constructing the
    same number twice yields the same object. Every state transition is
    recorded, together with the time elapsed since the previous one, in
    ``states`` so tests can assert on the pin's history.
    """
    # Registry of number -> pin instance, shared by all MockPin subclasses.
    _PINS = {}
    @classmethod
    def clear_pins(cls):
        # Forget every cached pin instance (test-isolation helper).
        cls._PINS.clear()
    @classmethod
    def pi_info(cls):
        return pi_info('a21041') # Pretend we're a Pi 2B
    def __new__(cls, number):
        if not (0 <= number < 54):
            raise ValueError('invalid pin %d specified (must be 0..53)' % number)
        try:
            old_pin = cls._PINS[number]
        except KeyError:
            # First request for this number: create, initialize and cache it.
            self = super(MockPin, cls).__new__(cls)
            cls._PINS[number] = self
            self._number = number
            self._function = 'input'
            self._state = False
            self._pull = 'floating'
            self._bounce = None
            self._edges = 'both'
            self._when_changed = None
            self.clear_states()
            return self
        # Ensure the pin class expected supports PWM (or not)
        if issubclass(cls, MockPWMPin) != isinstance(old_pin, MockPWMPin):
            raise ValueError('pin %d is already in use as a %s' % (number, old_pin.__class__.__name__))
        return old_pin
    def __repr__(self):
        return 'MOCK%d' % self._number
    @property
    def number(self):
        # GPIO number this mock represents.
        return self._number
    def close(self):
        # Detach any callback and return the pin to its default input mode.
        self.when_changed = None
        self.function = 'input'
    def _get_function(self):
        return self._function
    def _set_function(self, value):
        assert value in ('input', 'output')
        self._function = value
        if value == 'input':
            # Drive the input to the pull
            self._set_pull(self._get_pull())
    def _get_state(self):
        return self._state
    def _set_state(self, value):
        # Only output pins may be written; writing an input raises.
        if self._function == 'input':
            raise PinSetInput('cannot set state of pin %r' % self)
        assert self._function == 'output'
        assert 0 <= value <= 1
        self._change_state(bool(value))
    def _change_state(self, value):
        # Record the transition (with the elapsed time since the last one)
        # and report whether the state actually changed.
        if self._state != value:
            t = time()
            self._state = value
            self.states.append(PinState(t - self._last_change, value))
            self._last_change = t
            return True
        return False
    def _get_frequency(self):
        # Plain MockPin never has PWM running.
        return None
    def _set_frequency(self, value):
        if value is not None:
            raise PinPWMUnsupported()
    def _get_pull(self):
        return self._pull
    def _set_pull(self, value):
        assert self._function == 'input'
        assert value in ('floating', 'up', 'down')
        self._pull = value
        # Emulate the electrical effect of the pull on the input level.
        if value == 'up':
            self.drive_high()
        elif value == 'down':
            self.drive_low()
    def _get_bounce(self):
        return self._bounce
    def _set_bounce(self, value):
        # XXX Need to implement this
        self._bounce = value
    def _get_edges(self):
        return self._edges
    def _set_edges(self, value):
        assert value in ('none', 'falling', 'rising', 'both')
        self._edges = value
    def _get_when_changed(self):
        return self._when_changed
    def _set_when_changed(self, value):
        self._when_changed = value
    def drive_high(self):
        # Simulate an external signal driving the input high; fires the
        # when_changed callback when rising edges are being watched.
        assert self._function == 'input'
        if self._change_state(True):
            if self._edges in ('both', 'rising') and self._when_changed is not None:
                self._when_changed()
    def drive_low(self):
        # Simulate an external signal driving the input low; fires the
        # when_changed callback when falling edges are being watched.
        assert self._function == 'input'
        if self._change_state(False):
            if self._edges in ('both', 'falling') and self._when_changed is not None:
                self._when_changed()
    def clear_states(self):
        # Restart history recording from the current state.
        self._last_change = time()
        self.states = [PinState(0.0, self._state)]
    def assert_states(self, expected_states):
        # Tests that the pin went through the expected states (a list of values)
        for actual, expected in zip(self.states, expected_states):
            assert actual.state == expected
    def assert_states_and_times(self, expected_states):
        # Tests that the pin went through the expected states at the expected
        # times (times are compared with a tolerance of tens-of-milliseconds as
        # that's about all we can reasonably expect in a non-realtime
        # environment on a Pi 1)
        for actual, expected in zip(self.states, expected_states):
            assert isclose(actual.timestamp, expected[0], rel_tol=0.05, abs_tol=0.05)
            assert isclose(actual.state, expected[1])
class MockPulledUpPin(MockPin):
    """
    This derivative of :class:`MockPin` emulates a pin with a physical pull-up
    resistor.
    """
    def _set_pull(self, value):
        # Only the hard-wired 'up' pull is accepted; anything else would
        # fight the physical resistor.
        if value == 'up':
            return
        raise PinFixedPull('pin has a physical pull-up resistor')
class MockChargingPin(MockPin):
    """
    This derivative of :class:`MockPin` emulates a pin which, when set to
    input, waits a predetermined length of time and then drives itself high
    (as if attached to, e.g. a typical circuit using an LDR and a capacitor
    to time the charging rate).
    """
    def __init__(self, number):
        # NOTE(review): MockPin caches instances in __new__, so __init__ runs
        # again on every re-construction of the same pin number, resetting
        # charge_time and dropping the reference to any running thread --
        # confirm this is acceptable for the tests that use it.
        super(MockChargingPin, self).__init__()
        self.charge_time = 0.01 # dark charging time
        self._charge_stop = Event()
        self._charge_thread = None
    def _set_function(self, value):
        super(MockChargingPin, self)._set_function(value)
        if value == 'input':
            # (Re)start the background "charging" thread which will drive the
            # pin high after charge_time unless cancelled first.
            if self._charge_thread:
                self._charge_stop.set()
                self._charge_thread.join()
            self._charge_stop.clear()
            self._charge_thread = Thread(target=self._charge)
            self._charge_thread.start()
        elif value == 'output':
            # Switching to output cancels any pending charge.
            if self._charge_thread:
                self._charge_stop.set()
                self._charge_thread.join()
    def _charge(self):
        # Wait charge_time; if not cancelled in the meantime, drive high.
        if not self._charge_stop.wait(self.charge_time):
            try:
                self.drive_high()
            except AssertionError:
                # Charging pins are typically flipped between input and output
                # repeatedly; if another thread has already flipped us to
                # output ignore the assertion-error resulting from attempting
                # to drive the pin high
                pass
class MockTriggerPin(MockPin):
    """
    This derivative of :class:`MockPin` is intended to be used with another
    :class:`MockPin` to emulate a distance sensor. Set :attr:`echo_pin` to the
    corresponding pin instance. When this pin is driven high it will trigger
    the echo pin to drive high for the echo time.
    """
    def __init__(self, number):
        super(MockTriggerPin, self).__init__()
        # Pin that will emit the simulated echo pulse; must be assigned by
        # the caller before triggering.
        self.echo_pin = None
        self.echo_time = 0.04 # longest echo time
        self._echo_thread = None
    def _set_state(self, value):
        super(MockTriggerPin, self)._set_state(value)
        if value:
            # Rising edge: fire the echo pulse on a background thread so the
            # caller is not blocked for echo_time.
            if self._echo_thread:
                self._echo_thread.join()
            self._echo_thread = Thread(target=self._echo)
            self._echo_thread.start()
    def _echo(self):
        # Small fixed delay before the pulse, then hold high for echo_time.
        sleep(0.001)
        self.echo_pin.drive_high()
        sleep(self.echo_time)
        self.echo_pin.drive_low()
class MockPWMPin(MockPin):
    """
    This derivative of :class:`MockPin` adds PWM support.

    ``state`` holds a float duty cycle (0.0..1.0) instead of a bool, and
    ``frequency`` may be set to a number while the pin is in output mode, or
    to ``None`` to stop PWM (which also drives the duty cycle to 0).
    """
    def __init__(self, number):
        super(MockPWMPin, self).__init__()
        self._frequency = None
    def close(self):
        # Stop PWM before closing the underlying pin.
        self.frequency = None
        super(MockPWMPin, self).close()
    def _set_state(self, value):
        if self._function == 'input':
            raise PinSetInput('cannot set state of pin %r' % self)
        assert self._function == 'output'
        assert 0 <= value <= 1
        # Unlike MockPin, keep the fractional value (PWM duty cycle).
        self._change_state(float(value))
    def _get_frequency(self):
        return self._frequency
    def _set_frequency(self, value):
        if value is not None:
            assert self._function == 'output'
        # BUG FIX: record the new frequency unconditionally. The original
        # only stored it when non-None, so setting frequency to None (e.g.
        # via close()) left the old value behind and _get_frequency kept
        # reporting PWM as running.
        self._frequency = value
        if value is None:
            self._change_state(0.0)
class MockSPIClockPin(MockPin):
    """
    This derivative of :class:`MockPin` is intended to be used as the clock pin
    of a mock SPI device. It is not intended for direct construction in tests;
    rather, construct a :class:`MockSPIDevice` with various pin numbers, and
    this class will be used for the clock pin.
    """
    def __init__(self, number):
        super(MockSPIClockPin, self).__init__()
        # Keep registrations made by an earlier construction of this
        # (cached) pin instance.
        self.spi_devices = getattr(self, 'spi_devices', [])
    def _set_state(self, value):
        super(MockSPIClockPin, self)._set_state(value)
        # Notify every attached SPI device of the clock edge.
        for device in self.spi_devices:
            device.on_clock()
class MockSPISelectPin(MockPin):
    """
    This derivative of :class:`MockPin` is intended to be used as the select
    pin of a mock SPI device. It is not intended for direct construction in
    tests; rather, construct a :class:`MockSPIDevice` with various pin numbers,
    and this class will be used for the select pin.
    """
    def __init__(self, number):
        super(MockSPISelectPin, self).__init__()
        # Keep the device registration made by an earlier construction of
        # this (cached) pin instance.
        self.spi_device = getattr(self, 'spi_device', None)
    def _set_state(self, value):
        super(MockSPISelectPin, self)._set_state(value)
        device = self.spi_device
        if device:
            device.on_select()
class MockSPIDevice(object):
    """
    Emulates the slave side of an SPI link built from mock pins.

    Subclass and override :meth:`on_start` / :meth:`on_bit` to model a
    specific device. The emulated master drives :attr:`clock_pin` (and
    optionally :attr:`select_pin`); this class samples :attr:`mosi_pin` and
    drives :attr:`miso_pin` on the appropriate clock edges.
    """
    def __init__(
            self, clock_pin, mosi_pin, miso_pin, select_pin=None,
            clock_polarity=False, clock_phase=False, lsb_first=False,
            bits_per_word=8, select_high=False):
        self.clock_pin = MockSPIClockPin(clock_pin)
        self.mosi_pin = None if mosi_pin is None else MockPin(mosi_pin)
        self.miso_pin = None if miso_pin is None else MockPin(miso_pin)
        self.select_pin = None if select_pin is None else MockSPISelectPin(select_pin)
        self.clock_polarity = clock_polarity
        self.clock_phase = clock_phase
        self.lsb_first = lsb_first
        self.bits_per_word = bits_per_word
        self.select_high = select_high
        self.rx_bit = 0
        self.rx_buf = []
        self.tx_buf = []
        # Register for clock (and select) callbacks.
        self.clock_pin.spi_devices.append(self)
        if self.select_pin is not None:
            # BUG FIX: the original assigned unconditionally and raised
            # AttributeError whenever select_pin was left at its None default.
            self.select_pin.spi_device = self
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
    def close(self):
        # De-register from the pins so the cached pin instances no longer
        # reference this device.
        if self in self.clock_pin.spi_devices:
            self.clock_pin.spi_devices.remove(self)
        if self.select_pin is not None:
            self.select_pin.spi_device = None
    def on_select(self):
        # Called by the select pin on every edge; only react when the pin
        # reaches its active level.
        if self.select_pin.state == self.select_high:
            self.on_start()
    def on_clock(self):
        # Don't do anything if this SPI device isn't currently selected
        if self.select_pin is None or self.select_pin.state == self.select_high:
            # The XOR of the clock pin's values, polarity and phase indicates
            # whether we're meant to be acting on this edge
            if self.clock_pin.state ^ self.clock_polarity ^ self.clock_phase:
                self.rx_bit += 1
                if self.mosi_pin is not None:
                    self.rx_buf.append(self.mosi_pin.state)
                if self.miso_pin is not None:
                    # Shift out the next queued tx bit, defaulting to 0 when
                    # the queue is exhausted.
                    try:
                        tx_value = self.tx_buf.pop(0)
                    except IndexError:
                        tx_value = 0
                    if tx_value:
                        self.miso_pin.drive_high()
                    else:
                        self.miso_pin.drive_low()
                self.on_bit()
    def on_start(self):
        """
        Override this in descendents to detect when the mock SPI device's
        select line is activated.
        """
        self.rx_bit = 0
        self.rx_buf = []
        self.tx_buf = []
    def on_bit(self):
        """
        Override this in descendents to react to receiving a bit.
        The :attr:`rx_bit` attribute gives the index of the bit received (this
        is reset to 0 by default by :meth:`on_select`). The :attr:`rx_buf`
        sequence gives the sequence of 1s and 0s that have been received so
        far. The :attr:`tx_buf` sequence gives the sequence of 1s and 0s to
        transmit on the next clock pulses. All these attributes can be modified
        within this method.
        The :meth:`rx_word` and :meth:`tx_word` methods can also be used to
        read and append to the buffers using integers instead of bool bits.
        """
        pass
    def rx_word(self):
        """Return the bits received so far interpreted as a single integer,
        honouring :attr:`lsb_first`."""
        result = 0
        bits = reversed(self.rx_buf) if self.lsb_first else self.rx_buf
        for bit in bits:
            result <<= 1
            result |= bit
        return result
    def tx_word(self, value, bits_per_word=None):
        """Queue *value* for transmission as *bits_per_word* bits (defaults
        to :attr:`bits_per_word`), honouring :attr:`lsb_first`."""
        if bits_per_word is None:
            bits_per_word = self.bits_per_word
        bits = [0] * bits_per_word
        for bit in range(bits_per_word):
            bits[bit] = value & 1
            value >>= 1
        # value must fit within bits_per_word bits.
        assert not value
        if not self.lsb_first:
            bits = reversed(bits)
        self.tx_buf.extend(bits)
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*** nmeta2 - Network Metadata - REST API Class and Methods
"""
This module is part of the nmeta suite running on top of Ryu SDN
controller to provide network identity and flow metadata.
.
It provides methods for RESTful API connectivity.
"""
import logging
import logging.handlers
import socket
import time
import sys
#*** Ryu Imports:
from ryu.exception import RyuException
from ryu.app.wsgi import ControllerBase, WSGIApplication
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
#*** Web API REST imports:
from webob import Response
import json
from json import JSONEncoder
#*** Universal Unique Identifier:
import uuid
from uuid import UUID
#*** Constants for REST API:
REST_RESULT = 'result'
REST_NG = 'failure'
REST_DETAILS = 'details'
NMETA_INSTANCE = 'nmeta_api_app'
LOGGER = 'logger_api_app'
# REST command template
def rest_command(func):
"""
REST API command template
"""
def _rest_command(*args, **kwargs):
"""
Run a REST command and return
appropriate response.
Keys/Values returned to this wrapper in a dictionary.
Valid Keys are:
'msg': the data to return in the message body
'location': a new location for the resource
'status': HTTP status code to return
"""
result = dict()
try:
result = func(*args, **kwargs)
except SyntaxError as e:
status = 400
details = e.msg
print "ERROR: SyntaxError in _rest_command, status ", status, \
"msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
except (ValueError, NameError) as e:
status = 400
details = e.message
print "ERROR: ValueError or NameError in _rest_command, status ", \
status, "msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
except NotFoundError as msg:
status = 404
details = str(msg)
print "ERROR: NotFoundError in _rest_command, status ", status, \
"msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
except:
#*** General exception handling...
exc_type, exc_value, exc_traceback = sys.exc_info()
status = 500
details = "exc_type=" + str(exc_type) + " exc_value=" + \
str(exc_value) + " exc_traceback=" + \
str(exc_traceback)
print "ERROR: NotFoundError in _rest_command, status ", status, \
"msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
if 'location' in result:
#*** Return an HTTP 201 with location for new resource:
msg = result['msg']
res_link = result['location']
status = 201
return Response(status=status, content_type='application/json',
location=res_link, body=json.dumps(msg))
else:
#*** No location to return:
msg = result['msg']
if 'status' in result:
status = result['status']
else:
status = 200
return Response(status=status, content_type='application/json',
body=json.dumps(msg))
#*** Return the inner function:
return _rest_command
class NotFoundError(RyuException):
    """
    Raised by REST handlers when a referenced resource does not exist;
    translated by the ``rest_command`` wrapper into an HTTP 404 response.
    """
    message = 'Error occurred talking to function <TBD>'
class RESTAPIController(ControllerBase):
"""
This class is used to control REST API access to the
nmeta data and control functions
"""
def __init__(self, req, link, data, **config):
super(RESTAPIController, self).__init__(req, link, data, **config)
self.nmeta_parent_self = data[NMETA_INSTANCE]
#*** Get the parent logger and log against that:
self.logger = data[LOGGER]
#*** Performance Note: this init gets run for every API call...
#*** Update JSON to support UUID encoding:
JSONEncoder_olddefault = JSONEncoder.default
def JSONEncoder_newdefault(self, o):
if isinstance(o, UUID):
return str(o)
return JSONEncoder_olddefault(self, o)
JSONEncoder.default = JSONEncoder_newdefault
    @rest_command
    def rest_dpae_create(self, req, **kwargs):
        """
        REST API function that creates a DPAE resource (Phase 1)
        (HTTP POST method)

        Validates the JSON request body, records the join in the DPAE
        database keyed by a freshly generated controller UUID, and returns
        the Phase 2 connection parameters (MAC addresses and Ethertype)
        with the new resource's location.
        """
        nmeta = self.nmeta_parent_self
        #*** Decode request body as JSON:
        dpae_req_body = JSON_Body(req.body)
        if dpae_req_body.error:
            return ({'status': 400, 'msg': dpae_req_body.error})
        self.logger.info("Phase 1 DPAE initiate request body=%s",
                                    dpae_req_body.json)
        #*** Validate required keys are present in JSON:
        if not dpae_req_body.validate(['hostname_dpae', 'if_name',
                    'uuid_dpae']):
            self.logger.error("Validation error %s", dpae_req_body.error)
            return ({'status': 400, 'msg': dpae_req_body.error})
        hostname_dpae = dpae_req_body['hostname_dpae']
        uuid_dpae = dpae_req_body['uuid_dpae']
        if_name = dpae_req_body['if_name']
        #*** Create a unique ID:
        hostname = socket.getfqdn()
        our_uuid = uuid.uuid1()
        #*** Record in database with controller UUID as key:
        db_data = {'_id': str(our_uuid), 'time_created': time.time(),
                   'hostname_dpae': hostname_dpae, 'uuid_dpae': uuid_dpae,
                   'if_name': if_name}
        db_result = nmeta.dbdpae.insert_one(db_data)
        self.logger.info("Phase 1 created new db record id=%s",
                             db_result.inserted_id)
        #*** Get the MAC addresses and ethertype for the DPAE to send to
        #*** in Phase2:
        dpae2ctrl_mac = str(nmeta.dpae2ctrl_mac)
        ctrl2dpae_mac = str(nmeta.ctrl2dpae_mac)
        dpae_ethertype = int(nmeta.dpae_ethertype)
        #*** Create JSON response body (our_uuid is a UUID object; it is
        #*** serialisable thanks to the JSONEncoder patch in __init__):
        json_create_response = json.dumps({'hostname_controller': hostname,
                                    'uuid_dpae': uuid_dpae,
                                    'uuid_controller': our_uuid,
                                    'dpae2ctrl_mac': dpae2ctrl_mac,
                                    'ctrl2dpae_mac': ctrl2dpae_mac,
                                    'dpae_ethertype': dpae_ethertype}
                                    )
        self.logger.info("Phase 1 DPAE join response body=%s",
                                    json_create_response)
        #*** Return response body for sending to DPAE
        #*** Include the location, which is branch where resource is created:
        result = {'msg': json_create_response, 'location': str(our_uuid)}
        return result
@rest_command
def rest_dpae_read(self, req, **kwargs):
"""
REST API function that returns DPAE resource names
(HTTP GET method)
"""
#*** We don't support this, so return a 403 Forbidden:
return ({'status': 403,
'msg': '{\"Error\": \"Listing of all DPAE is forbidden\"}'})
@rest_command
def rest_dpae_read_uuid(self, req, uri_uuid, **kwargs):
"""
REST API function that returns attributes of a DPAE resource
(HTTP GET method) for Phase 2
"""
nmeta = self.nmeta_parent_self
_results = {}
i = 0
#*** Decode request body as JSON:
dpae_req_body = JSON_Body(req.body)
if dpae_req_body.error:
return ({'status': 400, 'msg': dpae_req_body.error})
self.logger.info("Phase 2 DPAE read request body=%s",
dpae_req_body.json)
#*** Validate required keys are present in JSON:
if not dpae_req_body.validate(['hostname_dpae', 'if_name', 'uuid_dpae',
'uuid_controller']):
self.logger.error("Validation error %s", dpae_req_body.error)
return ({'status': 400, 'msg': dpae_req_body.error})
hostname_dpae = dpae_req_body['hostname_dpae']
uuid_dpae = dpae_req_body['uuid_dpae']
if_name = dpae_req_body['if_name']
uuid_controller = dpae_req_body['uuid_controller']
#*** Check that the UUID we were passed in the URI is valid:
try:
val = UUID(uri_uuid, version=1)
except ValueError:
return ({'status': 400, 'msg': '{\"Error\": \"Bad UUID in URI\"}'})
#*** Look up the UUID in the database:
db_result = nmeta.dbdpae.find_one({'_id': str(uri_uuid)})
if not db_result:
#*** Not in database:
return ({'status': 400, 'msg': '{\"Error\": \"UUID not in DB\"}'})
#*** Validate that parameters in HTTP GET JSON match DB:
if not hostname_dpae == str(db_result[u'hostname_dpae']):
return ({'status': 400, 'msg': \
'{\"Error\": \"hostname_dpae mismatch with DB value\"}'})
if not uuid_dpae == str(db_result[u'uuid_dpae']):
return ({'status': 400, 'msg': \
'{\"Error\": \"uuid_dpae mismatch with DB value\"}'})
if not uuid_controller == str(db_result['_id']):
return ({'status': 400, 'msg': \
'{\"Error\": \"uuid_controller mismatch with DB value\"}'})
if not if_name == str(db_result[u'if_name']):
return ({'status': 400, 'msg': \
'{\"Error\": \"if_name mismatch with DB value\"}'})
#*** Just return fields from DB doc that we want to return:
if 'hostname_dpae' in db_result:
_results['hostname_dpae'] = db_result[u'hostname_dpae']
if 'uuid_dpae' in db_result:
_results['uuid_dpae'] = db_result[u'uuid_dpae']
if 'uuid_controller' in db_result:
_results['uuid_controller'] = db_result[u'uuid_controller']
if 'time_created' in db_result:
_results['time_created'] = db_result[u'time_created']
if 'dpid' in db_result:
_results['dpid'] = db_result[u'dpid']
if 'switch_port' in db_result:
_results['switch_port'] = db_result[u'switch_port']
if 'lastModified' in db_result:
_results['lastModified'] = db_result[u'lastModified']
#*** Serialise JSON response body:
json_read_response = json.dumps(_results)
self.logger.info("DPAE Read response body=%s",
json_read_response)
#*** Return response body for sending to DPAE:
result = {'msg': json_read_response}
return result
@rest_command
def rest_dpae_keepalive(self, req, uri_uuid, **kwargs):
"""
REST API function that updates attributes of a DPAE resource
(HTTP PUT method). Body must include uuid_dpae corresponding
to a current uuid_controller (in URI).
Used by DPAE for keepalive messages.
"""
nmeta = self.nmeta_parent_self
_results = {}
i = 0
#*** Decode request body as JSON:
dpae_req_body = JSON_Body(req.body)
if dpae_req_body.error:
return ({'status': 400, 'msg': dpae_req_body.error})
#*** Validate required keys are present in JSON:
if not dpae_req_body.validate(['uuid_dpae',
'uuid_controller', 'keepalive', 'if_name']):
self.logger.error("Validation error %s", dpae_req_body.error)
return ({'status': 400, 'msg': dpae_req_body.error})
#*** Look up DB record for this DPAE:
uuid_dpae = dpae_req_body['uuid_dpae']
if uuid_dpae:
cursor = nmeta.dbdpae.find({'uuid_dpae': uuid_dpae})
if not cursor:
#*** Couldn't find in database so exit:
self.logger.error("DPAE update request no db doc uuid_dpae=%s"
", exiting")
return ({'status': 400, 'msg': '{\"Error\": \"Not in DB\"}'})
else:
#*** We weren't passed a uuid_dpae field so exit:
self.logger.error("DPAE update request no uuid_dpae, exiting")
return ({'status': 400, 'msg': '{\"Error\": \"No uuid_dpae\"}'})
#*** Update database record for this DPAE with keepalive lastseen time:
uuid_controller = dpae_req_body['uuid_controller']
db_result = nmeta.dbdpae.update_one(
{'_id': str(uuid_controller)},
{
'$set': {
'last_seen': time.time()
},
}
)
if db_result.matched_count != 1:
return ({'status': 400, 'msg': '{\"Error\": \"UUID not in DB\"}'})
#*** Return response body for sending to DPAE:
result = {'msg': 'Okay, got that'}
return result
@rest_command
def rest_dpae_delete(self, req, **kwargs):
"""
REST API function that deletes a DPAE resource
(HTTP DELETE method)
"""
#*** TBD
print "In rest_dpae_delete"
    @rest_command
    def rest_dpae_send_sniff_conf_pkt(self, req, uri_uuid, **kwargs):
        """
        REST API function that crafts a sniff-confirmation Ethernet
        packet (custom ethertype, JSON payload) and sends it out of the
        switch port recorded for this DPAE (HTTP POST method).
        Returns {'msg': ...} on success, or {'status': 400|500,
        'msg': ...} on validation, lookup or send failure.
        """
        nmeta = self.nmeta_parent_self
        #*** NOTE(review): _results and i are unused in this method:
        _results = {}
        i = 0
        #*** Decode request body as JSON:
        dpae_req_body = JSON_Body(req.body)
        if dpae_req_body.error:
            return ({'status': 400, 'msg': dpae_req_body.error})
        self.logger.debug("DPAE send sniff conf pkt request body=%s",
                            dpae_req_body.json)
        #*** Validate required keys are present in JSON:
        if not dpae_req_body.validate(['hostname_dpae', 'if_name', 'uuid_dpae',
                                            'uuid_controller']):
            self.logger.error("Validation error %s", dpae_req_body.error)
            return ({'status': 400, 'msg': dpae_req_body.error})
        #*** Check that the UUID we were passed in the URI is valid
        #*** (val is only used for validation; a bad UUID raises):
        try:
            val = UUID(uri_uuid, version=1)
        except ValueError:
            return ({'status': 400, 'msg': '{\"Error\": \"Bad UUID in URI\"}'})
        #*** Check the Controller UUID on the URI matches the one in JSON body:
        if uri_uuid != dpae_req_body['uuid_controller']:
            return ({'status': 400, 'msg': '{\"Error\": \"UUID mismatch\"}'})
        #*** Look up the UUID in the database:
        db_result = nmeta.dbdpae.find_one({'_id': str(uri_uuid)})
        if not db_result:
            #*** Not in database:
            return ({'status': 400, 'msg': '{\"Error\": \"UUID not in DB\"}'})
        #*** Validate and retrieve fields from database:
        #*** Get datapath for switch:
        if 'dpid' in db_result:
            dpid = db_result[u'dpid']
        else:
            return ({'status': 500, 'msg': '{\"Error\": \"no datapath\"}'})
        #*** Retrieve datapath object
        datapath = nmeta.switches.datapath(dpid)
        if not datapath:
            return ({'status': 500, 'msg': '{\"Error\": \"no dpid stored\"}'})
        #*** Get switch port to send packet out:
        if 'switch_port' in db_result:
            out_port = db_result[u'switch_port']
        else:
            return ({'status': 500, 'msg': '{\"Error\": \"no switch port\"}'})
        #*** Since packet didn't come in a port we set source as controller:
        in_port = datapath.ofproto.OFPP_CONTROLLER
        #*** Get packet header parameters to use from nmeta configuration:
        dpae2ctrl_mac = str(nmeta.dpae2ctrl_mac)
        ctrl2dpae_mac = str(nmeta.ctrl2dpae_mac)
        dpae_ethertype = int(nmeta.dpae_ethertype)
        #*** Create sniff confirmation packet (Ethernet header only;
        #*** payload added below):
        e = ethernet.ethernet(dst=ctrl2dpae_mac,
                              src=dpae2ctrl_mac,
                              ethertype=dpae_ethertype)
        p = packet.Packet()
        p.add_protocol(e)
        #*** Serialise JSON response body (carried as raw payload):
        packet_payload_json = json.dumps({
                    'uuid_controller': dpae_req_body['uuid_controller'],
                    'hostname_dpae': dpae_req_body['hostname_dpae'],
                    'if_name': dpae_req_body['if_name'],
                    'uuid_dpae': dpae_req_body['uuid_dpae']})
        p.add_protocol(packet_payload_json)
        p.serialize()
        data = p.data
        #*** Send confirmation packet, no queueing:
        switch = nmeta.switches[dpid]
        packet_out_result = switch.packet_out(data, in_port, out_port, 0, 1)
        #*** Check packet send result:
        if not packet_out_result:
            #*** Failed to send packet for some reason:
            return ({'status': 500, 'msg': '{\"Error\": \"Pkt send failed\"}'})
        else:
            #*** Return confirmation that we sent packet via API:
            result = {'msg': 'Phase3 Sniff conf packet sent'}
            return result
@rest_command
def rest_dpae_tc_state_update(self, req, uri_uuid, **kwargs):
"""
REST API function that sets DPAE interface TC state
(HTTP PUT method)
"""
nmeta = self.nmeta_parent_self
#*** Decode request body as JSON:
dpae_req_body = JSON_Body(req.body)
if dpae_req_body.error:
return ({'status': 400, 'msg': dpae_req_body.error})
self.logger.debug("DPAE TC State update request body=%s",
dpae_req_body.json)
#*** Validate required keys are present in JSON:
if not dpae_req_body.validate(['tc_state', 'dpae_version', 'uuid_dpae',
'uuid_controller']):
self.logger.error("Validation error %s", dpae_req_body.error)
return ({'status': 400, 'msg': dpae_req_body.error})
#*** Check version compatibility:
if not version_compare(dpae_req_body['dpae_version'],
nmeta.version):
self.logger.warning("Possible version compatibility issue. "
"DPAE_version=%s nmeta2_version=%s",
dpae_req_body['dpae_version'], nmeta.version)
#*** Check what state is being set (we only support 'run'):
tc_state = dpae_req_body['tc_state']
if tc_state:
if tc_state == 'run':
self.logger.debug("DPAE requested TC state=%s", tc_state)
else:
self.logger.error("DPAE TC unsupported state=%s", tc_state)
return ({'status': 400, 'msg': '{\"Error\": \"No tc_state\"}'})
else:
self.logger.error("DPAE did not send tc_state")
return ({'status': 400, 'msg': '{\"Error\": \"No tc_state\"}'})
#*** Check that the UUID we were passed in the URI is valid:
try:
val = UUID(uri_uuid, version=1)
except ValueError:
return ({'status': 400, 'msg': '{\"Error\": \"Bad UUID in URI\"}'})
#*** Look up the UUID in the database:
db_result = nmeta.dbdpae.find_one({'_id': str(uri_uuid)})
if not db_result:
#*** Not in database:
return ({'status': 400, 'msg': '{\"Error\": \"UUID not in DB\"}'})
#*** Validate and retrieve fields from database:
#*** Get datapath for switch:
if 'dpid' in db_result:
dpid = db_result[u'dpid']
else:
return ({'status': 500, 'msg': '{\"Error\": \"no datapath\"}'})
#*** Retrieve datapath object
datapath = nmeta.switches.datapath(dpid)
if not datapath:
return ({'status': 500, 'msg': '{\"Error\": \"no dpid stored\"}'})
#*** Get switch port to send packets out:
if 'switch_port' in db_result:
out_port = db_result[u'switch_port']
else:
return ({'status': 500, 'msg': '{\"Error\": \"no switch port\"}'})
#*** Call function to set up switch to DPAE FE:
_results = nmeta.tc_start(datapath, out_port)
#*** Add the uuid_dpae to the response:
_results['uuid_dpae'] = dpae_req_body['uuid_dpae']
#*** Encode response as JSON and send to DPAE:
json_response = json.dumps(_results)
self.logger.debug("json_response=%s", json_response)
_results_dict = {'msg': json_response}
return _results_dict
@rest_command
def rest_dpae_main_policy_read(self, req, uri_uuid, **kwargs):
"""
REST API function that retrieves main policy (for a DPAE)
(HTTP GET method)
"""
nmeta = self.nmeta_parent_self
_results = nmeta.main_policy.main_policy
_results_dict = {'msg': _results}
return _results_dict
@rest_command
def rest_dpae_tc_opt_rules_read(self, req, uri_uuid, **kwargs):
"""
REST API function that retrieves TC optimised rules (for a DPAE)
(HTTP GET method)
"""
nmeta = self.nmeta_parent_self
_results = nmeta.main_policy.optimised_rules.get_rules()
_results_dict = {'msg': _results}
return _results_dict
    @rest_command
    def rest_dpae_tc_classify_advice(self, req, uri_uuid, **kwargs):
        """
        REST API function for a DPAE to inform Controller of a
        traffic classification that it has determined
        (HTTP POST method).
        Supported tc_type values are 'id' (identity metadata) and
        'suppress' / 'treatment' / 'treatment+suppress' (flow advice);
        any other type is logged and ignored.
        """
        nmeta = self.nmeta_parent_self
        #*** Decode request body as JSON:
        dpae_req_body = JSON_Body(req.body)
        if dpae_req_body.error:
            return ({'status': 400, 'msg': dpae_req_body.error})
        self.logger.debug("TC advice body=%s",
                            dpae_req_body.json)
        #*** Validate required keys are present in JSON:
        if not dpae_req_body.validate(['type',
                                'subtype']):
            self.logger.error("Validation error %s", dpae_req_body.error)
            return ({'status': 400, 'msg': dpae_req_body.error})
        tc_type = dpae_req_body[u'type']
        #*** NOTE(review): tc_subtype is only used in the 'id' branch:
        tc_subtype = dpae_req_body[u'subtype']
        #*** Look up the UUID in the database:
        db_result = nmeta.dbdpae.find_one({'_id': str(uri_uuid)})
        if not db_result:
            #*** Not in database:
            return ({'status': 400, 'msg': '{\"Error\": \"UUID not in DB\"}'})
        #*** Retrieve the datapath for this switch from the database:
        if 'dpid' in db_result:
            dpid = db_result[u'dpid']
        else:
            return ({'status': 500, 'msg': '{\"Error\": \"no dpid\"}'})
        if tc_type == 'id':
            #*** Identity Metadata. Get fields out and update ID database:
            if not dpae_req_body.validate(['src_mac', 'detail1']):
                self.logger.error("Validation error %s", dpae_req_body.error)
                return ({'status': 400, 'msg': dpae_req_body.error})
            src_mac = dpae_req_body[u'src_mac']
            detail1 = dpae_req_body[u'detail1']
            #*** Call a function to process the identity classification
            #*** advice:
            nmeta.tc_advice_id(dpid, tc_type, tc_subtype, src_mac, detail1)
        elif tc_type == 'treatment+suppress' or tc_type == 'suppress' \
                                    or tc_type == 'treatment':
            #*** Validate fields exist and extract:
            flow_dict = {}
            if not dpae_req_body.validate(['ip_A', 'ip_B', 'proto', 'tp_A',
                                            'tp_B', 'flow_packets',
                                            'actions']):
                self.logger.error("Validation error %s", dpae_req_body.error)
                return ({'status': 400, 'msg': dpae_req_body.error})
            flow_dict['ip_A'] = dpae_req_body[u'ip_A']
            flow_dict['ip_B'] = dpae_req_body[u'ip_B']
            flow_dict['proto'] = dpae_req_body[u'proto']
            flow_dict['tp_A'] = dpae_req_body[u'tp_A']
            flow_dict['tp_B'] = dpae_req_body[u'tp_B']
            flow_dict['actions'] = dpae_req_body[u'actions']
            #*** qos_treatment is optional (validate() also sets .error
            #*** as a side effect when the key is absent):
            if dpae_req_body.validate(['qos_treatment']):
                flow_dict['qos_treatment'] = dpae_req_body[u'qos_treatment']
            if tc_type == 'treatment+suppress' or tc_type == 'suppress':
                #*** Do flow suppression.
                self.logger.debug("DPAE flow suppression type=%s "
                                    "packets_seen=%s",
                                    tc_type, dpae_req_body[u'flow_packets'])
                nmeta.switches[dpid].flowtables.add_fe_tcf_suppress(flow_dict)
            if tc_type == 'treatment+suppress' or tc_type == 'treatment':
                #*** Do traffic treatment.
                self.logger.debug("Traffic treatment type=%s "
                                    "packets_seen=%s",
                                    tc_type, dpae_req_body[u'flow_packets'])
                nmeta.switches[dpid].flowtables.add_fe_tt_advised(flow_dict)
        else:
            self.logger.info("Didn't action tc_type=%s", tc_type)
        result = {'msg': 'Thanks for letting us know!'}
        return result
@rest_command
def rest_idmac_read(self, req, **kwargs):
"""
REST API function that returns Identity (MAC) resource
(HTTP GET method)
"""
nmeta = self.nmeta_parent_self
_results = {}
i = 0
#*** Retrieve all records:
cursor = nmeta.dbidmac.find()
for document in cursor:
_results[i] = {}
_results[i]['dpid'] = document[u'dpid']
_results[i]['mac'] = document[u'mac']
_results[i]['port'] = document[u'port']
i += 1
result = {'msg': _results}
return result
class JSON_Body(object):
    """
    Represents a JSON-encoded body of an HTTP request.
    Doesn't do logging, but does set .error when things
    don't go to plan with a friendly message.
    """
    def __init__(self, req_body):
        #*** .json always holds a dict (empty until a successful decode):
        self.json = {}
        #*** .error / .error_full stay empty strings unless decode fails:
        self.error = ""
        self.error_full = ""
        self.req_body = self.decode(req_body)

    def decode(self, req_body):
        """
        Passed an allegedly JSON body and see if it
        decodes. Set error variable for exceptions.
        Returns the decoded object, or 0 on failure.
        """
        json_decode = {}
        if req_body:
            #*** Try decode as JSON (narrowed from a bare except; any
            #*** decode failure is reported via .error / .error_full):
            try:
                json_decode = json.loads(req_body)
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                self.error = '{\"Error\": \"Bad JSON\"}'
                self.error_full = '{\"Error\": \"Bad JSON\",' + \
                            '\"exc_type\":' + str(exc_type) + ',' + \
                            '\"exc_value\":' + str(exc_value) + ',' + \
                            '\"exc_traceback\":' + str(exc_traceback) + '}'
                return 0
        self.json = json_decode
        return json_decode

    def validate(self, key_list):
        """
        Passed a list of keys and check that they exist in the
        JSON. If they don't return 0 and set error to description
        of first missing key that was found
        """
        for key in key_list:
            #*** Robustness fix: check against self.json (always a dict)
            #*** rather than self.req_body, which is 0 after a failed
            #*** decode and would raise TypeError on 'in':
            if key not in self.json:
                self.error = '{\"Error\": \"No ' + key + '\"}'
                return 0
        return 1

    def __getitem__(self, key):
        """
        Passed a key and see if it exists in JSON
        object. If it does, return the value for the key.
        If not, return 0
        Example:
            foo = json_body['foo']
        """
        #*** Same robustness fix as validate(): use self.json so a
        #*** failed decode degrades to returning 0:
        if key in self.json:
            return self.json[key]
        else:
            return 0
def version_compare(version1, version2):
    """
    Compare two semantic version numbers ('major.minor.patch' strings)
    and return 1 if they have the same major version number,
    otherwise 0.
    """
    #*** Unpacking also enforces the x.y.z shape (raises ValueError
    #*** otherwise); minor/patch components are deliberately unused:
    (major1, minor1, patch1) = version1.split('.')
    #*** Bug fix: this previously split version1 a second time, so the
    #*** comparison always succeeded regardless of version2:
    (major2, minor2, patch2) = version2.split('.')
    if major1 == major2:
        return 1
    else:
        return 0
class Api(object):
    """
    This class is instantiated by nmeta.py and provides methods
    for RESTful API connectivity. It wires URL routes to methods of
    RESTAPIController via Ryu's WSGI mapper.
    """
    #*** URLs for REST API (DPAE = Data Plane Auxiliary Engine):
    url_dpae_base = '/nmeta/v2/aux/'
    url_dpae_uuid = url_dpae_base + '{uri_uuid}'
    url_dpae_uuid_sendconfpkt = url_dpae_uuid + '/send_conf_packet/'
    url_dpae_uuid_tc_state = url_dpae_uuid + '/services/tc/state/'
    url_dpae_uuid_main_policy = url_dpae_uuid + '/main_policy/'
    url_dpae_uuid_tc_opt_rules = url_dpae_uuid + '/services/tc/opt_rules/'
    url_dpae_uuid_tc_classify = url_dpae_uuid + '/services/tc/classify/'
    url_dpae_uuid_keepalive = url_dpae_uuid + '/keepalive/'
    url_idmac = '/nmeta/v2/id/mac/'
    #*** NOTE(review): the {4} repetition applies to the (\.|$) group
    #*** rather than an octet-plus-dot unit, so this does not correctly
    #*** match a dotted-quad IP address; appears unused in this class:
    IP_PATTERN = r'\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$){4}\b'
    #*** Contexts requested from Ryu (provides the WSGI application):
    _CONTEXTS = {'wsgi': WSGIApplication}

    def __init__(self, _nmeta, _config, _wsgi):
        """
        Set up syslog and/or console logging per configuration, then
        register all REST API routes with the Ryu WSGI mapper.
        """
        #*** Get logging config values from config class:
        _logging_level_s = _config.get_value \
                                    ('api_logging_level_s')
        _logging_level_c = _config.get_value \
                                    ('api_logging_level_c')
        _syslog_enabled = _config.get_value('syslog_enabled')
        _loghost = _config.get_value('loghost')
        _logport = _config.get_value('logport')
        _logfacility = _config.get_value('logfacility')
        _syslog_format = _config.get_value('syslog_format')
        _console_log_enabled = _config.get_value('console_log_enabled')
        _console_format = _config.get_value('console_format')
        #*** Set up Logging:
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        #*** Don't propagate records to the root logger (avoids
        #*** duplicate log lines):
        self.logger.propagate = False
        #*** Syslog:
        if _syslog_enabled:
            #*** Log to syslog on host specified in config.yaml:
            self.syslog_handler = logging.handlers.SysLogHandler(address=(
                                                _loghost, _logport),
                                                facility=_logfacility)
            syslog_formatter = logging.Formatter(_syslog_format)
            self.syslog_handler.setFormatter(syslog_formatter)
            self.syslog_handler.setLevel(_logging_level_s)
            #*** Add syslog log handler to logger:
            self.logger.addHandler(self.syslog_handler)
        #*** Console logging:
        if _console_log_enabled:
            #*** Log to the console:
            self.console_handler = logging.StreamHandler()
            console_formatter = logging.Formatter(_console_format)
            self.console_handler.setFormatter(console_formatter)
            self.console_handler.setLevel(_logging_level_c)
            #*** Add console log handler to logger:
            self.logger.addHandler(self.console_handler)
        #*** Set up REST API:
        wsgi = _wsgi
        self.data = {NMETA_INSTANCE: self, LOGGER: self.logger}
        mapper = wsgi.mapper
        #*** Register the RESTAPIController class:
        wsgi.register(RESTAPIController, {NMETA_INSTANCE : _nmeta,
                                        LOGGER : self.logger})
        requirements = {}
        #*** NOTE(review): most routes below reuse the route name
        #*** 'dpae'; route names should arguably be unique - confirm.
        #*** Link to function for creating a DPAE:
        mapper.connect('dpae', self.url_dpae_base,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_create',
                       conditions=dict(method=['POST']))
        #*** Link to function for reading all DPAE:
        mapper.connect('dpae', self.url_dpae_base,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_read',
                       conditions=dict(method=['GET']))
        #*** Link to function for reading a particular DPAE:
        mapper.connect('dpae_uuid', self.url_dpae_uuid,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_read_uuid',
                       conditions=dict(method=['GET']))
        #*** Link to function for deleting a particular DPAE:
        mapper.connect('dpae', self.url_dpae_base,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_delete',
                       conditions=dict(method=['DELETE']))
        #*** Link to function for requesting send of sniff conf pkt to a DPAE:
        mapper.connect('dpae', self.url_dpae_uuid_sendconfpkt,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_send_sniff_conf_pkt',
                       conditions=dict(method=['POST']))
        #*** Link to function for setting DPAE interface TC state:
        mapper.connect('dpae', self.url_dpae_uuid_tc_state,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_tc_state_update',
                       conditions=dict(method=['PUT']))
        #*** Link to function for DPAE keepalive updates:
        mapper.connect('dpae', self.url_dpae_uuid_keepalive,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_keepalive',
                       conditions=dict(method=['PUT']))
        #*** Link to function for getting main policy:
        mapper.connect('dpae', self.url_dpae_uuid_main_policy,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_main_policy_read',
                       conditions=dict(method=['GET']))
        #*** Link to function for getting optimised TC rules:
        mapper.connect('dpae', self.url_dpae_uuid_tc_opt_rules,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_tc_opt_rules_read',
                       conditions=dict(method=['GET']))
        #*** Link to function for posting a DPAE interface TC classification:
        mapper.connect('dpae', self.url_dpae_uuid_tc_classify,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_dpae_tc_classify_advice',
                       conditions=dict(method=['POST']))
        #*** Link to function for reading the IDMAC table:
        mapper.connect('general', self.url_idmac,
                       controller=RESTAPIController,
                       requirements=requirements,
                       action='rest_idmac_read',
                       conditions=dict(method=['GET']))
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from enum import Enum, IntEnum
import itertools
class Spin(IntEnum):
    """Integer labels for the two spin species of a spinful fermion."""
    UP = 0
    DOWN = 1
class SpinPairs(Enum):
    """The spin orbitals corresponding to a pair of spatial orbitals."""
    SAME = 1  # only pairs with equal spin
    ALL = 0  # every combination of spins
    DIFF = -1  # only pairs with differing spin
class HubbardLattice(metaclass=abc.ABCMeta):
    """Base class for a Hubbard model lattice.

    Subclasses must define the following properties:

        n_dofs (int): The number of degrees of freedom per site (and spin if
            applicable).
        n_sites (int): The number of sites in the lattice.
        spinless (bool): Whether or not the fermion has spin (False if so).
        edge_types (Tuple[Hashable, ...]): The types of edges that a term could
            correspond to. Examples include 'onsite', 'neighbor',
            'diagonal_neighbor', etc.
        onsite_edge_types (Sequence[Hashable]): The edge types that connect
            sites to themselves.

    And the following methods:

        site_pairs_iter(edge_type: Hashable) \\
                -> Iterable[Tuple[int, int]]: Iterable
            over pairs of sites corresponding to the given edge type.

    For 'spinful' lattices, the ``spin_indices`` ``0`` and ``1`` correspond to
    'up' and 'down', respectively.
    """

    # Modernized: abc.abstractproperty is deprecated since Python 3.3;
    # stacking @property with @abc.abstractmethod is the equivalent form.
    @property
    @abc.abstractmethod
    def n_dofs(self):
        """The number of degrees of freedom per site
        (and spin if applicable)."""

    @property
    @abc.abstractmethod
    def n_sites(self):
        """The number of sites in the lattice."""

    @property
    @abc.abstractmethod
    def spinless(self):
        """Whether or not the fermion has spin (False if so)."""

    @property
    @abc.abstractmethod
    def edge_types(self):
        """The types of edges that a term could correspond to.

        Examples include 'onsite', 'neighbor', 'diagonal_neighbor', etc.
        """

    @property
    @abc.abstractmethod
    def onsite_edge_types(self):
        """The edge types that connect sites to themselves."""

    @abc.abstractmethod
    def site_pairs_iter(self, edge_type, ordered=True):
        """Iterable over pairs of sites corresponding to the given edge type."""

    # properties

    @property
    def n_spin_values(self):
        """1 for spinless fermions, 2 (up/down) otherwise."""
        return 1 if self.spinless else 2

    @property
    def n_spin_orbitals_per_site(self):
        """Number of spin orbitals attached to each site."""
        return self.n_dofs * self.n_spin_values

    @property
    def n_spin_orbitals(self):
        """Total number of spin orbitals in the lattice."""
        return self.n_sites * self.n_spin_orbitals_per_site

    # indexing

    def site_index_offset(self, site_index):
        """Index of the first spin orbital belonging to the given site."""
        return site_index * self.n_spin_orbitals_per_site

    def dof_index_offset(self, dof_index):
        """Offset (within a site) of the first spin orbital of a dof."""
        return dof_index * self.n_spin_values

    def to_spin_orbital_index(self, site_index, dof_index, spin_index):
        """The index of the spin orbital."""
        return (self.site_index_offset(site_index) +
                self.dof_index_offset(dof_index) + spin_index)

    def from_spin_orbital_index(self, spin_orbital_index):
        """Inverse of to_spin_orbital_index: (site, dof, spin) triple."""
        site_index, offset = divmod(spin_orbital_index,
                                    self.n_spin_orbitals_per_site)
        dof_index, spin_index = divmod(offset, self.n_spin_values)
        return site_index, dof_index, spin_index

    # iteration

    @property
    def site_indices(self):
        """Range over all site indices."""
        return range(self.n_sites)

    @property
    def dof_indices(self):
        """Range over all degree-of-freedom indices."""
        return range(self.n_dofs)

    def dof_pairs_iter(self, exclude_same=False):
        """Pairs (a, b) of dofs with a <= b (strictly a < b if
        exclude_same is True; bool arithmetic shifts the inner start)."""
        return ((a, b)
                for a in range(self.n_dofs)
                for b in range(a + exclude_same, self.n_dofs))

    @property
    def spin_indices(self):
        """Range over spin values (length 1 when spinless, else 2)."""
        return range(self.n_spin_values)

    def spin_pairs_iter(self, spin_pairs=SpinPairs.ALL, ordered=True):
        """Iterate over pairs of spin indices selected by spin_pairs.

        Args:
            spin_pairs (SpinPairs): which pairs to include
                (ALL, SAME or DIFF).
            ordered (bool): whether to yield both orderings of each pair.

        Raises:
            ValueError: if spin_pairs is not a valid SpinPairs value.
        """
        if spin_pairs == SpinPairs.ALL:
            return (itertools.product(self.spin_indices, repeat=2)
                    if ordered else itertools.combinations_with_replacement(
                        self.spin_indices, 2))
        elif spin_pairs == SpinPairs.SAME:
            return ((s, s) for s in self.spin_indices)
        elif spin_pairs == SpinPairs.DIFF:
            return (itertools.permutations(self.spin_indices, 2) if ordered else
                    itertools.combinations(self.spin_indices, 2))
        raise ValueError(
            '{} not a valid SpinPairs specification.'.format(spin_pairs))

    # validation

    def validate_edge_type(self, edge_type):
        """Raise ValueError if edge_type is not in self.edge_types."""
        if edge_type not in self.edge_types:
            raise ValueError('{} not a valid edge type {}.'.format(
                edge_type, self.edge_types))

    def validate_dof(self, dof, length=None):
        """Raise ValueError if dof is out of range.

        The length parameter is unused; it is kept for interface
        compatibility with existing callers.
        """
        if not (0 <= dof < self.n_dofs):
            raise ValueError('not (0 <= {} < n_dofs = {})'.format(
                dof, self.n_dofs))

    def validate_dofs(self, dofs, length=None):
        """Validate every dof and, if length is given, that
        len(dofs) == length."""
        for dof in dofs:
            self.validate_dof(dof)
        if (length is not None) and (len(dofs) != length):
            raise ValueError('len({}) != {}'.format(dofs, length))
class HubbardSquareLattice(HubbardLattice):
    r"""A square lattice for a Hubbard model.

    Valid edge types are:

        * 'onsite'
        * 'horizontal_neighbor'
        * 'vertical_neighbor'
        * 'neighbor': union of 'horizontal_neighbor' and 'vertical_neighbor'
        * 'diagonal_neighbor'
    """

    def __init__(self,
                 x_dimension,
                 y_dimension,
                 n_dofs=1,
                 spinless=False,
                 periodic=True):
        """
        Args:
            x_dimension (int): The width of the grid.
            y_dimension (int): The height of the grid.
            n_dofs (int, optional): The number of degrees of freedom per site
                (and spin if applicable). Default is 1.
            spinless (bool, optional): If True, return a spinless Fermi-Hubbard
                model. Default is False.
            periodic (bool, optional): If True, add periodic boundary
                conditions. Default is True.
        """
        self.x_dimension = x_dimension
        self.y_dimension = y_dimension
        self._n_dofs = n_dofs
        self._spinless = spinless
        self.periodic = periodic

    @property
    def n_dofs(self):
        """The number of degrees of freedom per site."""
        return self._n_dofs

    @property
    def spinless(self):
        """Whether the fermion is spinless."""
        return self._spinless

    @property
    def n_sites(self):
        """Total number of lattice sites (width * height)."""
        return self.x_dimension * self.y_dimension

    @property
    def edge_types(self):
        """All edge types this lattice supports."""
        return ('onsite', 'neighbor', 'diagonal_neighbor',
                'horizontal_neighbor', 'vertical_neighbor')

    @property
    def onsite_edge_types(self):
        """Edge types that connect a site to itself."""
        return ('onsite',)

    def site_pairs_iter(self, edge_type, ordered=True):
        """Dispatch to the iterator matching edge_type.

        Raises:
            ValueError: if edge_type is not one of self.edge_types.
        """
        if edge_type == 'onsite':
            return ((i, i) for i in self.site_indices)
        elif edge_type == 'neighbor':
            return self.neighbors_iter(ordered)
        elif edge_type == 'horizontal_neighbor':
            return self.horizontal_neighbors_iter(ordered)
        elif edge_type == 'vertical_neighbor':
            return self.vertical_neighbors_iter(ordered)
        elif edge_type == 'diagonal_neighbor':
            return self.diagonal_neighbors_iter(ordered)
        raise ValueError('Edge type {} is not valid.'.format(edge_type))

    def __repr__(self):
        """Reconstructable representation listing all constructor args."""
        return '{}({})'.format(
            self.__class__.__name__, ', '.join(
                (('x_dimension={}'.format(self.x_dimension)),
                 ('y_dimension={}'.format(self.y_dimension)),
                 ('n_dofs={}'.format(self.n_dofs)), ('spinless={}'.format(
                     self.spinless)), ('periodic={}'.format(self.periodic)))))

    # site indexing

    def to_site_index(self, site):
        """The index of a site."""
        x, y = site
        # Row-major: x varies fastest.
        return x + y * self.x_dimension

    def from_site_index(self, site_index):
        """Inverse of to_site_index: the (x, y) coordinates of a site."""
        return divmod(site_index, self.x_dimension)[::-1]

    # neighbor counting and iteration

    def n_horizontal_neighbor_pairs(self, ordered=True):
        """Number of horizontally neighboring (unordered) pairs of sites."""
        # Boolean arithmetic: one fewer edge per row when there is no
        # wrap-around (open boundary) or when wrapping would duplicate
        # an edge (width <= 2).
        n_horizontal_edges_per_y = (
            self.x_dimension - (self.x_dimension <= 2 or not self.periodic))
        return (self.y_dimension * n_horizontal_edges_per_y *
                (2 if ordered else 1))

    def n_vertical_neighbor_pairs(self, ordered=True):
        """Number of vertically neighboring (unordered) pairs of sites."""
        n_vertical_edges_per_x = (self.y_dimension -
                                  (self.y_dimension <= 2 or not self.periodic))
        return (self.x_dimension * n_vertical_edges_per_x *
                (2 if ordered else 1))

    def n_neighbor_pairs(self, ordered=True):
        """Number of neighboring (unordered) pairs of sites."""
        return (self.n_horizontal_neighbor_pairs(ordered) +
                self.n_vertical_neighbor_pairs(ordered))

    def neighbors_iter(self, ordered=True):
        """All horizontally and vertically neighboring site pairs."""
        return itertools.chain(self.horizontal_neighbors_iter(ordered),
                               self.vertical_neighbors_iter(ordered))

    def diagonal_neighbors_iter(self, ordered=True):
        """Diagonally neighboring site pairs (both +1/-1 y offsets)."""
        # NOTE(review): despite the names, these count edges per row and
        # per column respectively (same formula as the neighbor counts).
        n_sites_per_y = (self.x_dimension -
                         (self.x_dimension <= 2 or not self.periodic))
        n_sites_per_x = (self.y_dimension -
                         (self.y_dimension <= 2 or not self.periodic))
        for x in range(n_sites_per_y):
            for y in range(n_sites_per_x):
                for dy in (-1, 1):
                    i = self.to_site_index((x, y))
                    j = self.to_site_index(((x + 1) % self.x_dimension,
                                            (y + dy) % self.y_dimension))
                    yield (i, j)
                    if ordered:
                        yield (j, i)

    def horizontal_neighbors_iter(self, ordered=True):
        """Horizontally neighboring site pairs (wrapping if periodic)."""
        n_horizontal_edges_per_y = (
            self.x_dimension - (self.x_dimension <= 2 or not self.periodic))
        for x in range(n_horizontal_edges_per_y):
            for y in range(self.y_dimension):
                i = self.to_site_index((x, y))
                j = self.to_site_index(((x + 1) % self.x_dimension, y))
                yield (i, j)
                if ordered:
                    yield (j, i)

    def vertical_neighbors_iter(self, ordered=True):
        """Vertically neighboring site pairs (wrapping if periodic)."""
        n_vertical_edges_per_x = (self.y_dimension -
                                  (self.y_dimension <= 2 or not self.periodic))
        for y in range(n_vertical_edges_per_x):
            for x in range(self.x_dimension):
                i = self.to_site_index((x, y))
                j = self.to_site_index((x, (y + 1) % self.y_dimension))
                yield (i, j)
                if ordered:
                    yield (j, i)

    # square-specific geometry

    @property
    def shape(self):
        """(width, height) of the lattice."""
        return (self.x_dimension, self.y_dimension)

    def delta_mag(self, X, Y, by_index=False):
        """The distance between sites X and Y in each dimension."""
        if by_index:
            return self.delta_mag(self.from_site_index(X),
                                  self.from_site_index(Y))
        if self.periodic:
            # Periodic distance: the shorter of the two wrap directions.
            return tuple(
                min(abs((s * (x - y)) % d)
                    for s in (-1, 1))
                for d, x, y in zip(self.shape, X, Y))
        return tuple(abs(x - xx) for x, xx in zip(X, Y))

    def manhattan_distance(self, X, Y, by_index=False):
        """Sum of per-dimension distances between X and Y."""
        return sum(self.delta_mag(X, Y, by_index))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes,
and storage repositories
"""
import re
import string
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class StorageError(Exception):
    """Error raised when SR, VDI, PBD, or VBD commands fail."""
    def __init__(self, message=None):
        # Delegate directly to Exception so .args and str() behave
        # exactly as for the base class:
        Exception.__init__(self, message)
def create_sr(session, label, params):
    """Create a XenAPI Storage Repository (SR).

    Pops 'sr_type', 'name_description' and 'id' out of params (mutating
    the caller's dict) and passes the remainder to XenAPI as the SR's
    device_config. Returns the new SR reference.

    Raises StorageError if the XenAPI call fails.
    """
    LOG.debug(_("creating sr within volume_utils"))
    # Renamed local from `type` to avoid shadowing the builtin:
    sr_type = params['sr_type']
    del params['sr_type']
    LOG.debug(_('type is = %s') % sr_type)
    if 'name_description' in params:
        desc = params['name_description']
        LOG.debug(_('name = %s') % desc)
        del params['name_description']
    else:
        desc = ''
    if 'id' in params:
        del params['id']
    LOG.debug(params)
    try:
        sr_ref = session.call_xenapi("SR.create",
                    session.get_xenapi_host(),
                    params,
                    '0', label, desc, sr_type, '', False, {})
        LOG.debug(_('Created %(label)s as %(sr_ref)s.') % locals())
        return sr_ref
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to create Storage Repository'))
def introduce_sr(session, sr_uuid, label, params):
    """Introduce an existing Storage Repository to this host.

    Pops 'sr_type', 'name_description' and 'id' out of params (mutating
    the caller's dict), introduces the SR, then creates and plugs a PBD
    and scans the SR so it is usable. Returns the SR reference.

    Raises StorageError if any XenAPI call fails.
    """
    LOG.debug(_("introducing sr within volume_utils"))
    # Renamed local from `type` to avoid shadowing the builtin:
    sr_type = params['sr_type']
    del params['sr_type']
    LOG.debug(_('type is = %s') % sr_type)
    if 'name_description' in params:
        desc = params['name_description']
        LOG.debug(_('name = %s') % desc)
        del params['name_description']
    else:
        desc = ''
    if 'id' in params:
        del params['id']
    LOG.debug(params)
    try:
        sr_ref = session.call_xenapi("SR.introduce",
                                      sr_uuid,
                                      label,
                                      desc,
                                      sr_type,
                                      '',
                                      False,
                                      params,)
        LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
        # Create and plug a PBD so the SR is reachable from this host:
        LOG.debug(_('Creating pbd for SR'))
        pbd_ref = create_pbd(session, sr_ref, params)
        LOG.debug(_('Plugging SR'))
        session.call_xenapi("PBD.plug", pbd_ref)
        session.call_xenapi("SR.scan", sr_ref)
        return sr_ref
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to introduce Storage Repository'))
def forget_sr(session, sr_uuid):
    """
    Forgets the storage repository without destroying the VDIs within.

    Raises StorageError if the SR cannot be found or forgotten.
    """
    try:
        sr_ref = session.call_xenapi("SR.get_by_uuid", sr_uuid)
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to get SR using uuid'))
    LOG.debug(_('Forgetting SR %s...') % sr_ref)
    try:
        # PBDs must be unplugged before the SR can be forgotten:
        unplug_pbds(session, sr_ref)
        sr_ref = session.call_xenapi("SR.forget", sr_ref)
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to forget Storage Repository'))
def find_sr_by_uuid(session, sr_uuid):
    """
    Return the storage repository reference matching sr_uuid,
    or None when no SR record carries that uuid.
    """
    matches = (ref for ref, rec in session.get_all_refs_and_recs('SR')
               if rec['uuid'] == sr_uuid)
    return next(matches, None)
def create_iscsi_storage(session, info, label, description):
    """
    Create an iSCSI storage repository that will be used to mount
    the volume for the specified instance.

    If an SR with this name label already exists, its reference is
    returned instead of creating a duplicate.

    Raises StorageError if the XenAPI SR.create call fails.
    """
    sr_ref = session.call_xenapi("SR.get_by_name_label", label)
    if len(sr_ref) == 0:
        LOG.debug(_('Introducing %s...'), label)
        record = {}
        if 'chapuser' in info and 'chappassword' in info:
            record = {'target': info['targetHost'],
                      'port': info['targetPort'],
                      'targetIQN': info['targetIQN'],
                      'chapuser': info['chapuser'],
                      'chappassword': info['chappassword']}
        else:
            record = {'target': info['targetHost'],
                      'port': info['targetPort'],
                      'targetIQN': info['targetIQN']}
        try:
            # Bug fix: the SR.create call was missing, so this branch
            # logged success and returned the empty lookup result:
            sr_ref = session.call_xenapi("SR.create",
                                         session.get_xenapi_host(),
                                         record,
                                         '0', label, description,
                                         'iscsi', '', False, {})
            LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
            return sr_ref
        except session.XenAPI.Failure as exc:
            LOG.exception(exc)
            raise StorageError(_('Unable to create Storage Repository'))
    else:
        return sr_ref[0]
def find_sr_from_vbd(session, vbd_ref):
    """Find the SR reference from the VBD reference.

    Raises StorageError when the VDI or SR lookup fails.
    """
    try:
        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
        sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
    # BUGFIX: 'except E, e' is Python-2-only syntax; 'as' works on 2.6+/3.x.
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
    return sr_ref
def create_pbd(session, sr_ref, params):
    """Create a PBD attaching *sr_ref* on this host with device config *params*."""
    pbd_record = {
        'host': session.get_xenapi_host(),
        'SR': sr_ref,
        'device_config': params,
    }
    return session.call_xenapi("PBD.create", pbd_record)
def unplug_pbds(session, sr_ref):
    """Unplug every PBD attached to *sr_ref*.

    Failures are logged and ignored so that a partially broken SR can
    still be cleaned up as far as possible.
    """
    pbds = []
    try:
        pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
    # BUGFIX: 'except E, e' is Python-2-only syntax; 'as' works on 2.6+/3.x.
    except session.XenAPI.Failure as exc:
        LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
                   ' for %(sr_ref)s') % locals())
    for pbd in pbds:
        try:
            session.call_xenapi("PBD.unplug", pbd)
        except session.XenAPI.Failure as exc:
            LOG.warn(_('Ignoring exception %(exc)s when unplugging'
                       ' PBD %(pbd)s') % locals())
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
    """Introduce VDI in the host.

    The VDI is located by uuid, by LUN id within the SR, or defaults to
    the SR's first VDI.  Managed VDIs are returned as-is; unmanaged ones
    are introduced via VDI.introduce.  Raises StorageError when the VDI
    cannot be found, read, or introduced.
    """
    try:
        session.call_xenapi("SR.scan", sr_ref)
        if vdi_uuid:
            LOG.debug("vdi_uuid: %s" % vdi_uuid)
            vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
        elif target_lun:
            vdi_ref = None
            vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
            for curr_ref in vdi_refs:
                curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
                if ('sm_config' in curr_rec and
                        'LUNid' in curr_rec['sm_config'] and
                        curr_rec['sm_config']['LUNid'] == str(target_lun)):
                    vdi_ref = curr_ref
                    break
            # BUGFIX: vdi_ref was previously left unbound (NameError)
            # when no VDI matched the requested LUN; fail explicitly.
            # StorageError is not caught by the XenAPI.Failure handler
            # below, so it propagates to the caller.
            if vdi_ref is None:
                raise StorageError(_('Unable to introduce VDI on SR %s')
                                   % sr_ref)
        else:
            vdi_ref = (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
    # BUGFIX: 'except E, e' is Python-2-only syntax; 'as' works on 2.6+/3.x.
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
    try:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        LOG.debug(vdi_rec)
        LOG.debug(type(vdi_rec))
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to get record'
                             ' of VDI %s on') % vdi_ref)
    if vdi_rec['managed']:
        # We do not need to introduce the vdi
        return vdi_ref
    try:
        return session.call_xenapi("VDI.introduce",
                                   vdi_rec['uuid'],
                                   vdi_rec['name_label'],
                                   vdi_rec['name_description'],
                                   vdi_rec['SR'],
                                   vdi_rec['type'],
                                   vdi_rec['sharable'],
                                   vdi_rec['read_only'],
                                   vdi_rec['other_config'],
                                   vdi_rec['location'],
                                   vdi_rec['xenstore_data'],
                                   vdi_rec['sm_config'])
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise StorageError(_('Unable to introduce VDI for SR %s')
                           % sr_ref)
def purge_sr(session, sr_ref):
    """Forget *sr_ref* unless any of its VDIs still has a VBD attached."""
    # NOTE(review): these handlers catch StorageError, but call_xenapi
    # raises session.XenAPI.Failure elsewhere in this module -- verify
    # the intended exception type before relying on these branches.
    try:
        sr_rec = session.call_xenapi("SR.get_record", sr_ref)
        vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
    # BUGFIX: 'except E, e' is Python-2-only syntax; 'as' works on 2.6+/3.x.
    except StorageError as ex:
        LOG.exception(ex)
        raise StorageError(_('Error finding vdis in SR %s') % sr_ref)
    for vdi_ref in vdi_refs:
        try:
            vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
        except StorageError as ex:
            LOG.exception(ex)
            raise StorageError(_('Unable to find vbd for vdi %s') %
                               vdi_ref)
        if len(vbd_refs) > 0:
            # A VBD still references a VDI: the SR is in use, keep it.
            return
    forget_sr(session, sr_rec['uuid'])
def get_device_number(mountpoint):
    """Translate *mountpoint* to a device number, raising on failure."""
    number = mountpoint_to_number(mountpoint)
    if number < 0:
        raise StorageError(_('Unable to obtain target information'
                             ' %(mountpoint)s') % locals())
    return number
def parse_volume_info(connection_data):
    """
    Parse device_path and mountpoint as they can be used by XenAPI.
    In particular, the mountpoint (e.g. /dev/sdc) must be translated
    into a numeric literal.
    FIXME(armando):
    As for device_path, currently cannot be used as it is,
    because it does not contain target information. As for interim
    solution, target details are passed either via Flags or obtained
    by iscsiadm. Long-term solution is to add a few more fields to the
    db in the iscsi_target table with the necessary info and modify
    the iscsi driver to set them.
    """
    volume_id = connection_data['volume_id']
    target_portal = connection_data['target_portal']
    target_host = _get_target_host(target_portal)
    target_port = _get_target_port(target_portal)
    target_iqn = connection_data['target_iqn']
    # BUGFIX: the label previously named five fields for four %s slots.
    LOG.debug('(vol_id,host,port,iqn): (%s,%s,%s,%s)',
              volume_id, target_host, target_port, target_iqn)
    if (volume_id is None or
            target_host is None or
            target_iqn is None):
        raise StorageError(_('Unable to obtain target information'
                             ' %(connection_data)s') % locals())
    volume_info = {}
    volume_info['id'] = volume_id
    volume_info['target'] = target_host
    volume_info['port'] = target_port
    volume_info['targetIQN'] = target_iqn
    if ('auth_method' in connection_data and
            connection_data['auth_method'] == 'CHAP'):
        volume_info['chapuser'] = connection_data['auth_username']
        volume_info['chappassword'] = connection_data['auth_password']
    return volume_info
def mountpoint_to_number(mountpoint):
    """Translate a mountpoint like /dev/sdc into a numeric literal.

    Returns -1 (after logging a warning) when the mountpoint cannot be
    translated.
    """
    if mountpoint.startswith('/dev/'):
        mountpoint = mountpoint[5:]
    if re.match('^[hs]d[a-p]$', mountpoint):
        return (ord(mountpoint[2:3]) - ord('a'))
    elif re.match('^x?vd[a-p]$', mountpoint):
        return (ord(mountpoint[-1]) - ord('a'))
    elif re.match('^[0-9]+$', mountpoint):
        # BUGFIX: string.atoi was deprecated and removed in Python 3;
        # the builtin int() is the direct replacement.
        return int(mountpoint, 10)
    else:
        LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
        return -1
def _get_volume_id(path_or_id):
"""Retrieve the volume id from device_path"""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
# n must contain at least the volume_id
# :volume- is for remote volumes
# -volume- is for local volumes
# see compute/manager->setup_compute_volume
volume_id = path_or_id[path_or_id.find(':volume-') + 1:]
if volume_id == path_or_id:
volume_id = path_or_id[path_or_id.find('-volume--') + 1:]
volume_id = volume_id.replace('volume--', '')
else:
volume_id = volume_id.replace('volume-', '')
volume_id = volume_id[0:volume_id.find('-')]
return int(volume_id)
def _get_target_host(iscsi_string):
"""Retrieve target host"""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
elif iscsi_string is None or CONF.target_host:
return CONF.target_host
def _get_target_port(iscsi_string):
"""Retrieve target port"""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
def _get_iqn(iscsi_string, id):
"""Retrieve target IQN"""
if iscsi_string:
return iscsi_string
elif iscsi_string is None or CONF.iqn_prefix:
volume_id = _get_volume_id(id)
return '%s:%s' % (CONF.iqn_prefix, volume_id)
|
|
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from flask_login import current_user
from dashboards.nmr_metabolomics.collection_editor.model import CollectionEditorModel
from data_tools.wrappers.analyses import get_analyses
from data_tools.wrappers.collections import get_collections
def get_options_form():
    """Build the collection editor options form.

    Pulls the current user's collections and analyses, plus any
    already-loaded editor state, and lays out the filter/join/post
    controls.  Lookup failures fall back to empty option lists so the
    page still renders for an unauthenticated or errored session.
    """
    try:
        collection_options = [
            {'label': f'{collection.id}: {collection.name}', 'value': collection.id}
            for collection in get_collections(current_user, {'kind': 'data'})
        ]
        analysis_options = [
            {'label': f'{analysis.id}: {analysis.name}', 'value': analysis.id}
            for analysis in get_analyses(current_user)
        ]
    # BUGFIX: was a bare 'except:', which also swallows SystemExit and
    # KeyboardInterrupt; keep the deliberate best-effort fallback.
    except Exception:
        collection_options = []
        analysis_options = []
    try:
        editor_data = CollectionEditorModel(load_data=True)
        label_options = [{'label': label, 'value': label} for label in editor_data.labels]
        loaded_badges = editor_data.get_collection_badges()
        collection_load_info = editor_data.get_collection_load_info()
        # Join controls only make sense with exactly two loaded collections.
        if len(loaded_badges) == 2:
            collection_ids = editor_data.unique_vals('original_collection_id')
            collection_id_options = [{'label': f'collection_id={i}', 'value': i} for i in collection_ids]
        else:
            collection_id_options = []
    except Exception:
        loaded_badges = [html.Span([dbc.Badge('None', className='badge-pill')])]
        collection_load_info = 'Loaded collections.'
        label_options = []
        collection_id_options = []
    return dbc.Form(
        [
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label('Collection IDs', html_for='collection-id'),
                                    dcc.Dropdown(options=collection_options, id='collection-id', multi=True)
                                ]
                            )
                        ]
                    ),
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(collection_load_info, html_for='loaded-display',
                                              id='collections-label'),
                                    dcc.Loading(
                                        [
                                            dbc.InputGroup(
                                                [
                                                    dbc.Button('Get', id='get-collection',
                                                               className='col-sm-2 btn-success'),
                                                    html.H4(loaded_badges, id='loaded-collections',
                                                            className='col-sm-10')
                                                ], id='loaded-display'
                                            )
                                        ]
                                    )
                                ]
                            )
                        ]
                    )
                ]
            ),
            html.H5('Filter/Join Collections'),
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(['Filter by label(s)',
                                               html.Abbr('\uFE56',
                                                         title='Only consider records satisfying conditions on these'
                                                               ' fields.')],
                                              html_for='filter-by'),
                                    dcc.Dropdown(id='filter-by', options=label_options, multi=True)
                                ]
                            )
                        ]
                    ),
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(['Filter by conditions',
                                               html.Abbr('\uFE56',
                                                         # BUGFIX: missing space produced "recordsto" in the tooltip.
                                                         title='The conditions which must be satisfied for the records'
                                                               ' to be considered.')],
                                              html_for='filter-by-value'),
                                    dcc.Dropdown(id='filter-by-value', options=[], multi=True)
                                ]
                            )
                        ]
                    )
                ]
            ),
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(['Ignore by label(s)',
                                               html.Abbr('\uFE56',
                                                         title='Exclude records satisfying conditions on these fields')],
                                              html_for='ignore-by'),
                                    dcc.Dropdown(id='ignore-by', options=label_options, multi=True)
                                ]
                            )
                        ]
                    ),
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(['Ignore by conditions',
                                               html.Abbr('\uFE56',
                                                         title='Conditions which apply to records to be excluded.')],
                                              html_for='ignore-by-value'),
                                    dcc.Dropdown(id='ignore-by-value', options=[], multi=True)
                                ]
                            )
                        ]
                    )
                ]
            ),
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(['Join on label(s)',
                                               html.Abbr('\uFE56',
                                                         title='A combination of values forming a unique key on which '
                                                               'the two collections are joined.')],
                                              html_for='join-on'),
                                    dcc.Dropdown(id='join-on', options=label_options, multi=True,
                                                 disabled=(len(loaded_badges) != 2))
                                ]
                            )
                        ]
                    ),
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label(['"Left" Collection ID',
                                               html.Abbr('\uFE56',
                                                         title='The collection id for the collection which will have '
                                                               'positive values for "x" and appear on the left side of '
                                                               'the plots')],
                                              html_for='positive-collection'),
                                    dcc.Dropdown(id='positive-collection', options=collection_id_options, multi=False,
                                                 disabled=(len(loaded_badges) != 2))
                                ]
                            )
                        ]
                    )
                ]
            ),
            html.H5('Post Collection'),
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label('Name', html_for='name-input-wrapper-wrapper'),
                                    html.Div(
                                        html.Div(
                                            dbc.Input(id='name-input'), id='name-input-wrapper'
                                        ), id='name-input-wrapper-wrapper'
                                    )
                                ]
                            )
                        ]
                    ),
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label('Analyses', html_for='analysis-select'),
                                    dcc.Dropdown(id='analysis-select', options=analysis_options, multi=True)
                                ]
                            )
                        ]
                    ),
                    dbc.Col(
                        [
                            dbc.FormGroup(
                                [
                                    dbc.Label('Post', html_for='post-button-group'),
                                    dbc.FormGroup(
                                        [
                                            dbc.Button([html.I(className='fas fa-upload'), ' Post'],
                                                       id='post-button',
                                                       className='btn btn-success')
                                        ], id='post-button-group'
                                    )
                                ]
                            )
                        ]
                    )
                ], className='form-row'
            ),
            dcc.Loading(html.Small('', id='post-message', className='form-text'))
            # will inject link when results posted
        ]
    )
def get_layout():
    """Top-level page layout for the collection editor."""
    body = dbc.Container(
        [
            html.H2('Merge/Join/Filter Collections'),
            get_options_form()
        ]
    )
    return html.Div([html.Br(), body])
|
|
#!/usr/bin/env python3
"""
Template by pypi-mobans
"""
import os
import sys
import codecs
import locale
import platform
from shutil import rmtree
from setuptools import Command, setup, find_packages
# Interpreter version flags (kept for template compatibility).
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
PY33 = sys.version_info < (3, 4)
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
# This work around is only if a project supports Python < 3.4
# Work around for locale not being set
try:
    lc = locale.getlocale()
    pf = platform.system()
    if pf != "Windows" and lc == (None, None):
        locale.setlocale(locale.LC_ALL, "C.UTF-8")
except (ValueError, UnicodeError, locale.Error):
    locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
NAME = "pyexcel"
AUTHOR = "chfw"
VERSION = "0.6.6"
EMAIL = "info@pyexcel.org"
LICENSE = "New BSD"
DESCRIPTION = (
    # BUGFIX: trailing space added -- the two literals previously
    # concatenated to "...writedata in different excel formats".
    "A wrapper library that provides one API to read, manipulate and write " +
    "data in different excel formats"
)
URL = "https://github.com/pyexcel/pyexcel"
DOWNLOAD_URL = "%s/archive/0.6.6.tar.gz" % URL
FILES = ["README.rst", "CONTRIBUTORS.rst", "CHANGELOG.rst"]
KEYWORDS = [
    "python",
    'tsv',
    # BUGFIX: missing comma after 'tsvz' fused the next entry into the
    # single keyword 'tsvzcsv' via implicit string concatenation.
    'tsvz',
    'csv',
    'csvz',
    'xls',
    'xlsx',
    'ods'
]
CLASSIFIERS = [
    "Topic :: Software Development :: Libraries",
    "Programming Language :: Python",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    'Development Status :: 3 - Alpha',
]
PYTHON_REQUIRES = ">=3.6"
INSTALL_REQUIRES = [
    "lml>=0.0.4",
    "pyexcel-io>=0.6.2",
    "texttable>=0.8.2",
]
SETUP_COMMANDS = {}
PACKAGES = find_packages(exclude=["ez_setup", "examples", "tests", "tests.*"])
EXTRAS_REQUIRE = {
    "xls": ['pyexcel-xls>=0.6.0'],
    "xlsx": ['pyexcel-xlsx>=0.6.0'],
    "ods": ['pyexcel-ods3>=0.6.0'],
}
# You do not need to read beyond this line
PUBLISH_COMMAND = "{0} setup.py sdist bdist_wheel upload -r pypi".format(sys.executable)
HERE = os.path.abspath(os.path.dirname(__file__))
GS_COMMAND = ("gease pyexcel v0.6.6 " +
              "Find 0.6.6 in changelog for more details")
NO_GS_MESSAGE = ("Automatic github release is disabled. " +
                 "Please install gease to enable it.")
UPLOAD_FAILED_MSG = (
    'Upload failed. please run "%s" yourself.' % PUBLISH_COMMAND)
class PublishCommand(Command):
    """Support setup.py upload."""

    description = "Build and publish the package on github and pypi"
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        """Clean old build artifacts, build, and publish to github/pypi."""
        self.status("Removing previous builds...")
        # BUGFIX: the original wrapped all three rmtree calls in a single
        # try, so one missing directory aborted the rest of the cleanup.
        # Remove each directory independently instead.
        for stale_dir in ("dist", "build", "pyexcel.egg-info"):
            try:
                rmtree(os.path.join(HERE, stale_dir))
            except OSError:
                pass
        self.status("Building Source and Wheel (universal) distribution...")
        run_status = True
        if has_gease():
            run_status = os.system(GS_COMMAND) == 0
        else:
            self.status(NO_GS_MESSAGE)
        if run_status:
            if os.system(PUBLISH_COMMAND) != 0:
                self.status(UPLOAD_FAILED_MSG)
        sys.exit()


# Register the custom command so 'python setup.py publish' works.
SETUP_COMMANDS.update({
    "publish": PublishCommand
})
def has_gease():
    """
    test if github release command is installed
    visit http://github.com/moremoban/gease for more info
    """
    try:
        import gease  # noqa
    except ImportError:
        return False
    return True
def read_files(*files):
    """Read files into setup, concatenated with a newline after each."""
    return "".join(read(single_file) + "\n" for single_file in files)
def read(afile):
    """Read a file into setup, with doctest blocks filtered out."""
    target = os.path.join(HERE, afile)
    with codecs.open(target, "r", "utf-8") as opened_file:
        return "".join(filter_out_test_code(opened_file))
def filter_out_test_code(file_handle):
    """Yield documentation lines, skipping '.. testcode:' blocks and any
    line containing the |version| or |today| substitutions."""
    in_test_block = False
    for line in file_handle.readlines():
        if line.startswith(".. testcode:"):
            in_test_block = True
            continue
        if in_test_block:
            # Indented or blank lines belong to the test block; the first
            # other line terminates the block and is emitted normally.
            if line.startswith(" ") or not line.strip():
                continue
            in_test_block = False
            yield line
        elif "|version|" not in line and "|today|" not in line:
            yield line
if __name__ == "__main__":
    # Hand the metadata declared above to setuptools.  The long
    # description is stitched together from the listed documentation
    # files with doctest blocks filtered out (see read_files/read).
    setup(
        test_suite="tests",
        name=NAME,
        author=AUTHOR,
        version=VERSION,
        author_email=EMAIL,
        description=DESCRIPTION,
        url=URL,
        download_url=DOWNLOAD_URL,
        long_description=read_files(*FILES),
        license=LICENSE,
        keywords=KEYWORDS,
        python_requires=PYTHON_REQUIRES,
        extras_require=EXTRAS_REQUIRE,
        tests_require=["nose"],
        install_requires=INSTALL_REQUIRES,
        packages=PACKAGES,
        include_package_data=True,
        zip_safe=False,
        classifiers=CLASSIFIERS,
        cmdclass=SETUP_COMMANDS
    )
|
|
######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
'''
Script utility for moving relevant plots and tables after calibration
is complete
VLA pipeline
August 2012, B. Kent, NRAO
November 2012, B. Butler, NRAO
'''
import os
import stat
import glob
import shutil
import copy
######################################################################
# NOTE(review): logprint, runtiming and priorcals are not defined in this
# file -- presumably injected by the EVLA pipeline startup script that
# exec's this one; confirm before running standalone.
logprint('Starting EVLA_pipe_filecollect.py', logfileout='logs/filecollect.log')
time_list = runtiming('filecollect', 'start')

caltables_dir = './final_caltables'
weblog_dir = './weblog'

# Check if directories exist - if not create them
if not os.path.exists(caltables_dir):
    os.makedirs(caltables_dir)
if not os.path.exists(weblog_dir):
    os.makedirs(weblog_dir)

# Move all png files/plots.  Moves are best-effort: a failure on one file
# is logged and must not abort the pipeline.
# BUGFIX: bare 'except:' (also swallows SystemExit/KeyboardInterrupt)
# narrowed to 'except Exception:' throughout; loop variables renamed so
# they no longer shadow the builtin 'file'.
for plot_file in glob.glob('./*.png'):
    try:
        shutil.move(plot_file, weblog_dir + '/.')
    except Exception:
        logprint('Unable to move ' + plot_file, logfileout='logs/filecollect.log')
logprint('Plots moved to ' + weblog_dir, logfileout='logs/filecollect.log')

# Listobs output
for listobs_file in glob.glob('./*.listobs'):
    try:
        shutil.move(listobs_file, weblog_dir + '/.')
    except Exception:
        logprint('Unable to move ' + listobs_file, logfileout='logs/filecollect.log')

# Move calibration tables into caltables_dir
cal_files = copy.copy(priorcals)
cal_files.extend(['switched_power.g',
                  'fluxgaincal.g',
                  'finaldelay.k',
                  'finalBPcal.b',
                  'averagephasegain.g',
                  'finalampgaincal.g',
                  'finalphasegaincal.g'])
for cal_file in cal_files:
    try:
        shutil.move(cal_file, caltables_dir + '/.')
    except Exception:
        logprint('Unable to move ' + cal_file, logfileout='logs/filecollect.log')
logprint('Final calibration tables moved to ' + caltables_dir, logfileout='logs/filecollect.log')
######################################################################
#Pickle up the timing profile list
# NOTE(review): strftime/gmtime, pickle, ms_active, SDM_name, version and
# svnrevision are not imported or defined in this file -- presumably
# provided by the EVLA pipeline execution context that exec's this
# script; confirm before running standalone.
try:
    gmt_time = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    file_time = strftime("%d%b%Y_%H%M%Sgmt", gmtime())
    #compute size of ms directory by summing every file under it
    ms_size = 0
    try:
        for (path, dirs, files) in os.walk(ms_active):
            for file in files:
                filename = os.path.join(path, file)
                ms_size += os.path.getsize(filename)
        #Size of the ms in Gigabytes
        ms_size = ms_size / (1024.0*1024.0*1024.0)
    except:
        logprint('Unable to determine size of ms on disk')
    # Profile record persisted for later pipeline statistics.
    pipeprofile = {'SDM_name':SDM_name,
                   'time_list':time_list,
                   'gmt_time':gmt_time,
                   'version':version,
                   'svnrevision':svnrevision,
                   'ms_size':ms_size}
    if os.path.exists('logs'):
        logprint('logs directory exists')
    else:
        logprint('logs directory does not exist / not in path')
    #
    # if SDM_name is a full path (as it is for automatic pipeline
    # executions, it will have '/'s in it. search for the right-most one,
    # and only use the part after that if it exists. otherwise, just use
    # the full name (since it's one done by hand).
    #
    right_index = SDM_name.rfind('/')
    pickle_filename = 'logs/profile_' + SDM_name[right_index+1:] + '_' + file_time + '.p'
    logprint ('writing pickle file: ' + pickle_filename, logfileout='logs/completion.log')
    try:
        istat = pickle.dump(pipeprofile, open(pickle_filename, 'wb'))
        logprint('Pickle dump of profile successful')
    except:
        logprint('Unable to dump pickle file')
    #Load this dict with pipeprofile = pickle.load( open( "<file.p>", "rb" ) )
    logprint ('Timing profile written to logs/timing.log', logfileout='logs/completion.log')
    logprint ('Completed on ' + gmt_time + ' with pipeline SVN revision ' + svnrevision, logfileout='logs/timing.log')
except:
    logprint ('Problem writing timing profile')
######################################################################
#Attempt to copy files to /lustre/aoc/cluster/pipeline/stats
# stats_dir = '/lustre/aoc/cluster/pipeline/stats'
# if os.path.exists(stats_dir):
# log_filename = stats_dir + '/profile_' + SDM_name[right_index+1:] + '_' + file_time + '.log'
# try:
# shutil.copyfile('logs/timing.log', log_filename)
# logprint ('Copied timing log to ' + stats_dir)
# except:
# logprint('Unable to copy timing log to ' + stats_dir)
# profile_filename = stats_dir + '/profile_' + SDM_name[right_index+1:] + '_' + file_time+ '.p'
# try:
# shutil.copyfile(pickle_filename, profile_filename)
# logprint ('Copied profile to ' + stats_dir)
# except:
# logprint('Unable to copy profile to ' + stats_dir)
# else:
# logprint (stats_dir + ' does not exist or not accessible')
#
# if this is an automatic execution, try to fix file permissions, so
# folks in the vlapipe group can clean up after things are moved into
# the Trash folder...
#
# if SDM_name_already_defined:
# #
# # if it turns out things in the rawdata or results directories are
# # improperly permissioned, uncomment the getcwd() and chdir() foo
# #
# # cwd = os.getcwd()
# # os.chdir('..')
# for path, dirs, files in os.walk('.'):
# for dir in dirs:
# full_dir_name = os.path.join(path, dir)
# st = os.stat(full_dir_name)
# if not (bool(st.st_mode & stat.S_IRGRP) and bool(st.st_mode & stat.S_IWGRP) and bool(st.st_mode & stat.S_IXGRP)):
# os.system("chmod -f g+rwx %s" % full_dir_name)
# for file in files:
# if file != 'epilogue.sh':
# full_file_name = os.path.join(path, file)
# st = os.stat(full_file_name)
# if not (bool(st.st_mode & stat.S_IRGRP) and bool(st.st_mode & stat.S_IWGRP) and bool(st.st_mode & stat.S_IXGRP)):
# os.system("chmod -f g+rwx %s" % full_file_name)
# os.chdir(cwd)
# Wrap up: log completion, stop the timing clock for this stage and
# checkpoint pipeline state (logprint/runtiming/pipeline_save come from
# the EVLA pipeline execution context).
logprint ('Finished EVLA_pipe_filecollect.py', logfileout='logs/filecollect.log')
time_list=runtiming('filecollect', 'end')
pipeline_save()
|
|
"""
@package mi.instrument.star_asimet.bulkmet.metbk_a.driver
@file marine-integrations/mi/instrument/star_aismet/bulkmet/metbk_a/driver.py
@author Bill Bollenbacher
@brief Driver for the metbk_a
Release notes:
initial version
"""
__author__ = 'Bill Bollenbacher'
__license__ = 'Apache 2.0'
import re
import time
import string
import json
import time
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, \
InstrumentProtocolException
from mi.core.time import get_timestamp_delayed
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
# newline.
NEWLINE = '\r\n'

# default timeout.
TIMEOUT = 10
SYNC_TIMEOUT = 30

AUTO_SAMPLE_SCHEDULED_JOB = 'auto_sample'

LOGGING_STATUS_REGEX = r'.*Sampling (GO|STOPPED)'
LOGGING_STATUS_COMPILED = re.compile(LOGGING_STATUS_REGEX, re.DOTALL)
LOGGING_SYNC_REGEX = r'.*Sampling GO - synchronizing...'
# BUGFIX: previously compiled LOGGING_STATUS_REGEX (copy-paste), so the
# "sync" matcher also matched 'Sampling STOPPED'.
LOGGING_SYNC_COMPILED = re.compile(LOGGING_SYNC_REGEX, re.DOTALL)
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
    """Names of jobs registered with the driver scheduler."""
    ACQUIRE_STATUS = 'acquire_status'
    CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
    """
    Instrument protocol states
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
    # Driver-specific state used while synchronizing the instrument clock.
    SYNC_CLOCK = 'PROTOCOL_STATE_SYNC_CLOCK'
class ProtocolEvent(BaseEnum):
    """
    Protocol events
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    DISCOVER = DriverEvent.DISCOVER
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    CLOCK_SYNC = DriverEvent.CLOCK_SYNC
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    # Driver-specific event (no DriverEvent equivalent).
    FLASH_STATUS = 'DRIVER_EVENT_FLASH_STATUS'
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    GET = ProtocolEvent.GET
    ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
    ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
    CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    FLASH_STATUS = ProtocolEvent.FLASH_STATUS
class Parameter(DriverParameter):
    """
    Device specific parameters.
    """
    CLOCK = 'clock'
    SAMPLE_INTERVAL = 'sample_interval'
class Prompt(BaseEnum):
    """
    Device i/o prompts returned by the instrument after commands.
    """
    CR_NL = NEWLINE
    STOPPED = "Sampling STOPPED"
    SYNC = "Sampling GO - synchronizing..."
    GO = "Sampling GO"
    FS = "bytes free\r" + NEWLINE
class Command(BaseEnum):
    """
    Instrument command strings
    """
    GET_CLOCK = "#CLOCK"
    SET_CLOCK = "#CLOCK="
    D = "#D"        # dump sample
    FS = "#FS"      # flash status
    STAT = "#STAT"  # instrument status
    GO = "#GO"      # start sampling
    STOP = "#STOP"  # stop sampling
class DataParticleType(BaseEnum):
    """
    Data particle types produced by this driver
    """
    RAW = CommonDataParticleType.RAW
    METBK_PARSED = 'metbk_parsed'
    METBK_STATUS = 'metbk_status'
###############################################################################
# Data Particles
###############################################################################
class METBK_SampleDataParticleKey(BaseEnum):
    """Value ids for the fields of a parsed METBK sample particle."""
    BAROMETRIC_PRESSURE = 'barometric_pressure'
    RELATIVE_HUMIDITY = 'relative_humidity'
    AIR_TEMPERATURE = 'air_temperature'
    LONGWAVE_IRRADIANCE = 'longwave_irradiance'
    PRECIPITATION = 'precipitation'
    SEA_SURFACE_TEMPERATURE = 'sea_surface_temperature'
    SEA_SURFACE_CONDUCTIVITY = 'sea_surface_conductivity'
    SHORTWAVE_IRRADIANCE = 'shortwave_irradiance'
    EASTWARD_WIND_VELOCITY = 'eastward_wind_velocity'
    NORTHWARD_WIND_VELOCITY = 'northward_wind_velocity'
class METBK_SampleDataParticle(DataParticle):
    """Parses an ASCII METBK sample line into ten float-valued fields."""
    _data_particle_type = DataParticleType.METBK_PARSED

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        # Ten whitespace-separated (optionally signed) decimal values;
        # anything after the tenth value up to the newline is discarded.
        SAMPLE_DATA_PATTERN = (r'(-*\d+\.\d+)' + # BPR
                               '\s*(-*\d+\.\d+)' + # RH %
                               '\s*(-*\d+\.\d+)' + # RH temp
                               '\s*(-*\d+\.\d+)' + # LWR
                               '\s*(-*\d+\.\d+)' + # PRC
                               '\s*(-*\d+\.\d+)' + # ST
                               '\s*(-*\d+\.\d+)' + # SC
                               '\s*(-*\d+\.\d+)' + # SWR
                               '\s*(-*\d+\.\d+)' + # We
                               '\s*(-*\d+\.\d+)' + # Wn
                               '.*?' + NEWLINE) # throw away batteries
        return re.compile(SAMPLE_DATA_PATTERN, re.DOTALL)

    def _build_parsed_values(self):
        """Match raw_data against regex_compiled() and build the list of
        {VALUE_ID, VALUE} dicts; raises SampleException on no match."""
        match = METBK_SampleDataParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("METBK_SampleDataParticle: No regex match of parsed sample data: [%s]", self.raw_data)
        # Groups 1..10 map one-to-one onto the particle keys, in the
        # order the instrument emits them.
        result = [{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.BAROMETRIC_PRESSURE,
                   DataParticleKey.VALUE: float(match.group(1))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.RELATIVE_HUMIDITY,
                   DataParticleKey.VALUE: float(match.group(2))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.AIR_TEMPERATURE,
                   DataParticleKey.VALUE: float(match.group(3))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.LONGWAVE_IRRADIANCE,
                   DataParticleKey.VALUE: float(match.group(4))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.PRECIPITATION,
                   DataParticleKey.VALUE: float(match.group(5))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_TEMPERATURE,
                   DataParticleKey.VALUE: float(match.group(6))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_CONDUCTIVITY,
                   DataParticleKey.VALUE: float(match.group(7))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SHORTWAVE_IRRADIANCE,
                   DataParticleKey.VALUE: float(match.group(8))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.EASTWARD_WIND_VELOCITY,
                   DataParticleKey.VALUE: float(match.group(9))},
                  {DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.NORTHWARD_WIND_VELOCITY,
                   DataParticleKey.VALUE: float(match.group(10))}]
        log.debug("METBK_SampleDataParticle._build_parsed_values: result=%s" %result)
        return result
class METBK_StatusDataParticleKey(BaseEnum):
    """Value ids for the fields of a parsed METBK status particle."""
    INSTRUMENT_MODEL = 'instrument_model'
    SERIAL_NUMBER = 'serial_number'
    CALIBRATION_DATE = 'calibration_date'
    FIRMWARE_VERSION = 'firmware_version'
    DATE_TIME_STRING = 'date_time_string'
    LOGGING_INTERVAL = 'logging_interval'
    CURRENT_TICK = 'current_tick'
    RECENT_RECORD_INTERVAL = 'recent_record_interval'
    FLASH_CARD_PRESENCE = 'flash_card_presence'
    BATTERY_VOLTAGE_MAIN = 'battery_voltage_main'
    FAILURE_MESSAGES = 'failure_messages'
    PTT_ID1 = 'ptt_id1'
    PTT_ID2 = 'ptt_id2'
    PTT_ID3 = 'ptt_id3'
    SAMPLING_STATE = 'sampling_state'
class METBK_StatusDataParticle(DataParticle):
    """Parses the multi-line METBK status report into a status particle."""
    _data_particle_type = DataParticleType.METBK_STATUS

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        STATUS_DATA_PATTERN = (r'Model:\s+(.+?)\r\n' +
                               'SerNum:\s+(.+?)\r\n' +
                               'CfgDat:\s+(.+?)\r\n' +
                               'Firmware:\s+(.+?)\r\n' +
                               'RTClock:\s+(.+?)\r\n' +
                               'Logging Interval:\s+(\d+);\s+' +
                               'Current Tick:\s+(\d+)\r\n' +
                               'R-interval:\s+(.+?)\r\n' +
                               '(.+?)\r\n' + # compact flash info
                               'Main Battery Voltage:\s+(.+?)\r\n' +
                               '(.+?)' + # module failures & PTT messages
                               '\r\nSampling\s+(\w+)\r\n')
        return re.compile(STATUS_DATA_PATTERN, re.DOTALL)

    def _build_parsed_values(self):
        """Match raw_data against regex_compiled() and build the list of
        {VALUE_ID, VALUE} dicts; raises SampleException on no match or
        when fewer than three PTT lines are present."""
        log.debug("METBK_StatusDataParticle: input = %s" %self.raw_data)
        match = METBK_StatusDataParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("METBK_StatusDataParticle: No regex match of parsed status data: [%s]", self.raw_data)
        result = [{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.INSTRUMENT_MODEL,
                   DataParticleKey.VALUE: match.group(1)},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SERIAL_NUMBER,
                   DataParticleKey.VALUE: match.group(2)},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CALIBRATION_DATE,
                   DataParticleKey.VALUE: match.group(3)},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FIRMWARE_VERSION,
                   DataParticleKey.VALUE: match.group(4)},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.DATE_TIME_STRING,
                   DataParticleKey.VALUE: match.group(5)},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.LOGGING_INTERVAL,
                   DataParticleKey.VALUE: int(match.group(6))},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CURRENT_TICK,
                   DataParticleKey.VALUE: int(match.group(7))},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.RECENT_RECORD_INTERVAL,
                   DataParticleKey.VALUE: int(match.group(8))},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FLASH_CARD_PRESENCE,
                   DataParticleKey.VALUE: match.group(9)},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.BATTERY_VOLTAGE_MAIN,
                   DataParticleKey.VALUE: float(match.group(10))},
                  {DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SAMPLING_STATE,
                   DataParticleKey.VALUE: match.group(12)}]
        # Group 11 holds optional module failure lines followed by the
        # three mandatory PTT id lines.
        lines = match.group(11).split(NEWLINE)
        length = len(lines)
        # BUGFIX: stray debug print() replaced with log.debug so parser
        # output does not leak to stdout in production.
        log.debug("length=%d; lines=%s" % (length, lines))
        if length < 3:
            raise SampleException("METBK_StatusDataParticle: Not enough PTT lines in status data: [%s]", self.raw_data)
        # grab PTT lines
        result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID1,
                       DataParticleKey.VALUE: lines[length-3]})
        result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID2,
                       DataParticleKey.VALUE: lines[length-2]})
        result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID3,
                       DataParticleKey.VALUE: lines[length-1]})
        # grab any module failure lines
        if length > 3:
            length -= 3
            failures = []
            for index in range(0, length):
                failures.append(lines[index])
            result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FAILURE_MESSAGES,
                           DataParticleKey.VALUE: failures})
        log.debug("METBK_StatusDataParticle: result = %s" %result)
        return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass.
    Subclasses SingleConnectionInstrumentDriver with connection state
    machine.  This driver is a thin shell: all instrument interaction is
    delegated to the Protocol object built by _build_protocol().
    """
    def __init__(self, evt_callback):
        """
        Driver constructor.
        @param evt_callback Driver process event callback.
        """
        # Construct superclass.
        SingleConnectionInstrumentDriver.__init__(self, evt_callback)
    ########################################################################
    # Superclass overrides for resource query.
    ########################################################################
    def get_resource_params(self):
        """
        Return list of device parameters available.
        """
        return Parameter.list()
    ########################################################################
    # Protocol builder.
    ########################################################################
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class Protocol(CommandResponseInstrumentProtocol):
    """
    Instrument protocol class for the METBK instrument.
    Subclasses CommandResponseInstrumentProtocol.

    The driver polls the instrument for samples while in AUTOSAMPLE state via
    a scheduler job (see _handler_autosample_enter), so "autosample" here is a
    driver-side behavior layered on top of the instrument's logging state.
    """
    # Raw text of the last sample chunk that produced a particle; used by
    # _extract_sample() to suppress publication of back-to-back duplicates.
    last_sample = ''
    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
        # Build protocol state machine.
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
        # Add event handlers for protocol state machine.
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE, self._handler_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC, self._handler_command_sync_clock)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_SAMPLE, self._handler_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.CLOCK_SYNC, self._handler_autosample_sync_clock)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_get)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status)
        # We setup a new state for clock sync because then we could use the state machine so the autosample scheduler
        # is disabled before we try to sync the clock. Otherwise there could be a race condition introduced when we
        # are syncing the clock and the scheduler requests a sample.
        self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.ENTER, self._handler_sync_clock_enter)
        self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.CLOCK_SYNC, self._handler_sync_clock_sync)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
        # Add build handlers for device commands.
        self._add_build_handler(Command.GET_CLOCK, self._build_simple_command)
        self._add_build_handler(Command.SET_CLOCK, self._build_set_clock_command)
        self._add_build_handler(Command.D, self._build_simple_command)
        self._add_build_handler(Command.GO, self._build_simple_command)
        self._add_build_handler(Command.STOP, self._build_simple_command)
        self._add_build_handler(Command.FS, self._build_simple_command)
        self._add_build_handler(Command.STAT, self._build_simple_command)
        # Add response handlers for device commands.
        self._add_response_handler(Command.GET_CLOCK, self._parse_clock_response)
        self._add_response_handler(Command.SET_CLOCK, self._parse_clock_response)
        self._add_response_handler(Command.FS, self._parse_fs_response)
        self._add_response_handler(Command.STAT, self._parse_common_response)
        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()
        # Chunker splits the raw byte stream into sample/status chunks.
        self._chunker = StringChunker(Protocol.sieve_function)
        self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
        self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
        # Start state machine in UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)
    @staticmethod
    def sieve_function(raw_data):
        """
        The method that splits samples and status.
        @param raw_data buffered raw bytes from the instrument.
        @retval list of (start, end) index pairs for chunks matching either
                the sample regex or the status regex.
        """
        matchers = []
        return_list = []
        matchers.append(METBK_SampleDataParticle.regex_compiled())
        matchers.append(METBK_StatusDataParticle.regex_compiled())
        for matcher in matchers:
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))
        return return_list
    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        """
        log.debug("_got_chunk: chunk=%s" %chunk)
        self._extract_sample(METBK_SampleDataParticle, METBK_SampleDataParticle.regex_compiled(), chunk, timestamp)
        self._extract_sample(METBK_StatusDataParticle, METBK_StatusDataParticle.regex_compiled(), chunk, timestamp)
    def _filter_capabilities(self, events):
        """
        Return a list of currently available capabilities.
        """
        return [x for x in events if Capability.has(x)]
    ########################################################################
    # override methods from base class.
    ########################################################################
    def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
        """
        Overridden to add duplicate sample checking. This duplicate checking should only be performed
        on sample chunks and not other chunk types, therefore the regex is performed before the string checking.
        Extract sample from a response line if present and publish parsed particle
        @param particle_class The class to instantiate for this specific
        data particle. Parameterizing this allows for simple, standard
        behavior from this routine
        @param regex The regular expression that matches a data sample
        @param line string to match for sample.
        @param timestamp port agent timestamp to include with the particle
        @param publish boolean to publish samples (default True). If True,
        two different events are published: one to notify raw data and
        the other to notify parsed data.
        @retval dict of dicts {'parsed': parsed_sample, 'raw': raw_sample} if
        the line can be parsed for a sample. Otherwise, None.
        @todo Figure out how the agent wants the results for a single poll
        and return them that way from here
        """
        sample = None
        match = regex.match(line)
        if match:
            if particle_class == METBK_SampleDataParticle:
                # check to see if there is a delta from last sample, and don't parse this sample if there isn't
                if match.group(0) == self.last_sample:
                    return
                # save this sample as last_sample for next check
                self.last_sample = match.group(0)
            particle = particle_class(line, port_timestamp=timestamp)
            parsed_sample = particle.generate()
            if publish and self._driver_event:
                self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
            sample = json.loads(parsed_sample)
            return sample
        return sample
    ########################################################################
    # implement virtual methods from base class.
    ########################################################################
    def apply_startup_params(self):
        """
        Apply sample_interval startup parameter.
        """
        config = self.get_startup_config()
        log.debug("apply_startup_params: startup config = %s" %config)
        # NOTE(review): dict.has_key() is Python 2 only; use "in" if this
        # module is ever ported to Python 3.
        if config.has_key(Parameter.SAMPLE_INTERVAL):
            log.debug("apply_startup_params: setting sample_interval to %d" %config[Parameter.SAMPLE_INTERVAL])
            self._param_dict.set_value(Parameter.SAMPLE_INTERVAL, config[Parameter.SAMPLE_INTERVAL])
    ########################################################################
    # Unknown handlers.
    ########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_unknown_exit(self, *args, **kwargs):
        """
        Exit unknown state.
        """
        pass
    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state; can only be COMMAND (instrument has no actual AUTOSAMPLE mode).
        @retval (next_state, result), (ProtocolState.COMMAND, None) if successful.
        """
        (protocol_state, agent_state) = self._discover()
        # If we are just starting up and we land in command mode then our state should
        # be idle
        if(agent_state == ResourceAgentState.COMMAND):
            agent_state = ResourceAgentState.IDLE
        log.debug("_handler_unknown_discover: state = %s", protocol_state)
        return (protocol_state, agent_state)
    ########################################################################
    # Clock Sync handlers.
    # Not much to do in this state except sync the clock then transition
    # back to autosample.  When in command mode we don't have to worry about
    # stopping the scheduler so we just sync the clock without state
    # transitions
    ########################################################################
    def _handler_sync_clock_enter(self, *args, **kwargs):
        """
        Enter sync clock state.  Immediately raises CLOCK_SYNC so the sync
        runs as soon as the state is entered.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._protocol_fsm.on_event(ProtocolEvent.CLOCK_SYNC)
    def _handler_sync_clock_sync(self, *args, **kwargs):
        """
        Sync the clock, then transition back to AUTOSAMPLE/STREAMING.
        """
        next_state = ProtocolState.AUTOSAMPLE
        next_agent_state = ResourceAgentState.STREAMING
        result = None
        self._sync_clock()
        self._async_agent_state_change(ResourceAgentState.STREAMING)
        return(next_state,(next_agent_state, result))
    ########################################################################
    # Command handlers.
    # just implemented to make DA possible, instrument has no actual command mode
    ########################################################################
    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state.
        """
        self._init_params()
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_command_exit(self, *args, **kwargs):
        """
        Exit command state.
        """
        pass
    def _handler_command_set(self, *args, **kwargs):
        """
        no writable parameters so does nothing, just implemented to make framework happy
        """
        next_state = None
        result = None
        return (next_state, result)
    def _handler_command_start_direct(self, *args, **kwargs):
        """
        Transition into the DIRECT_ACCESS protocol/agent state.
        """
        result = None
        next_state = ProtocolState.DIRECT_ACCESS
        next_agent_state = ResourceAgentState.DIRECT_ACCESS
        return (next_state, (next_agent_state, result))
    def _handler_command_start_autosample(self, *args, **kwargs):
        """
        Start instrument logging and transition to AUTOSAMPLE/STREAMING.
        """
        result = None
        next_state = ProtocolState.AUTOSAMPLE
        next_agent_state = ResourceAgentState.STREAMING
        self._start_logging()
        return (next_state, (next_agent_state, result))
    def _handler_command_sync_clock(self, *args, **kwargs):
        """
        sync clock close to a second edge
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if device respond correctly.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        next_agent_state = None
        result = None
        self._sync_clock()
        return(next_state,(next_agent_state, result))
    ########################################################################
    # autosample handlers.
    ########################################################################
    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample state  Because this is an instrument that must be
        polled we need to ensure the scheduler is added when we are in an
        autosample state.  This scheduler raises events to poll the
        instrument for data.
        """
        self._init_params()
        self._ensure_autosample_config()
        self._add_scheduler_event(AUTO_SAMPLE_SCHEDULED_JOB, ProtocolEvent.ACQUIRE_SAMPLE)
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_autosample_exit(self, *args, **kwargs):
        """
        exit autosample state.  Stop the polling scheduler job.
        """
        self._remove_scheduler(AUTO_SAMPLE_SCHEDULED_JOB)
    def _handler_autosample_stop_autosample(self, *args, **kwargs):
        """
        Stop instrument logging and transition back to COMMAND.
        """
        result = None
        next_state = ProtocolState.COMMAND
        next_agent_state = ResourceAgentState.COMMAND
        self._stop_logging()
        return (next_state, (next_agent_state, result))
    def _handler_autosample_sync_clock(self, *args, **kwargs):
        """
        sync clock close to a second edge
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if device respond correctly.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        # Delegate to the SYNC_CLOCK state so the autosample scheduler is
        # stopped before the clock is touched (see comment in __init__).
        next_state = ProtocolState.SYNC_CLOCK
        next_agent_state = ResourceAgentState.BUSY
        result = None
        return(next_state,(next_agent_state, result))
    ########################################################################
    # Direct access handlers.
    ########################################################################
    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._sent_cmds = []
    def _handler_direct_access_exit(self, *args, **kwargs):
        """
        Exit direct access state.
        """
        pass
    def _handler_direct_access_execute_direct(self, data):
        """
        Pass raw bytes straight through to the instrument.
        """
        next_state = None
        result = None
        self._do_cmd_direct(data)
        return (next_state, result)
    def _handler_direct_access_stop_direct(self):
        """
        Leave direct access; rediscover the instrument state to decide
        which state to return to.
        """
        result = None
        (next_state, next_agent_state) = self._discover()
        return (next_state, (next_agent_state, result))
    ########################################################################
    # general handlers.
    ########################################################################
    def _handler_flash_status(self, *args, **kwargs):
        """
        Acquire flash status from instrument.
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        next_agent_state = None
        result = None
        result = self._do_cmd_resp(Command.FS, expected_prompt=Prompt.FS)
        log.debug("FLASH RESULT: %s", result)
        return (next_state, (next_agent_state, result))
    def _handler_acquire_sample(self, *args, **kwargs):
        """
        Acquire sample from instrument.
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        next_agent_state = None
        result = None
        result = self._do_cmd_resp(Command.D, *args, **kwargs)
        return (next_state, (next_agent_state, result))
    def _handler_acquire_status(self, *args, **kwargs):
        """
        Acquire status from instrument.
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        next_agent_state = None
        result = None
        log.debug( "Logging status: %s", self._is_logging())
        result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
        return (next_state, (next_agent_state, result))
    ########################################################################
    # Private helpers.
    ########################################################################
    def _set_params(self, *args, **kwargs):
        """
        Overloaded from the base class, used in apply DA params.  Not needed
        here so just noop it.
        """
        pass
    def _discover(self, *args, **kwargs):
        """
        Discover current state; can only be COMMAND (instrument has no actual AUTOSAMPLE mode).
        @retval (next_state, result), (ProtocolState.COMMAND, None) if successful.
        """
        # _is_logging() returns True, False, or None (undetermined);
        # map each case to a (protocol_state, agent_state) pair.
        logging = self._is_logging()
        if(logging == True):
            protocol_state = ProtocolState.AUTOSAMPLE
            agent_state = ResourceAgentState.STREAMING
        elif(logging == False):
            protocol_state = ProtocolState.COMMAND
            agent_state = ResourceAgentState.COMMAND
        else:
            protocol_state = ProtocolState.UNKNOWN
            agent_state = ResourceAgentState.ACTIVE_UNKNOWN
        return (protocol_state, agent_state)
    def _start_logging(self):
        """
        start the instrument logging if is isn't running already.
        """
        if(not self._is_logging()):
            log.debug("Sending start logging command: %s", Command.GO)
            self._do_cmd_resp(Command.GO, expected_prompt=Prompt.GO)
    def _stop_logging(self):
        """
        stop the instrument logging if is is running.  When the instrument
        is in a syncing state we can not stop logging.  We must wait before
        we sent the stop command.
        """
        if(self._is_logging()):
            log.debug("Attempting to stop the instrument logging.")
            result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
            log.debug("Stop Command Result: %s", result)
            # If we are still logging then let's wait until we are not
            # syncing before resending the command.
            if(self._is_logging()):
                self._wait_for_sync()
                log.debug("Attempting to stop the instrument again.")
                result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
                log.debug("Stop Command Result: %s", result)
    def _wait_for_sync(self):
        """
        When the instrument is syncing internal parameters we can't stop
        logging.  So we will watch the logging status and when it is not
        synchronizing we will return.  Basically we will just block
        until we are no longer syncing.
        @raise InstrumentProtocolException when we timeout waiting for a
        transition.
        """
        timeout = time.time() + SYNC_TIMEOUT
        while(time.time() < timeout):
            result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
            match = LOGGING_SYNC_COMPILED.match(result)
            if(match):
                log.debug("We are still in sync mode.  Wait a bit and retry")
                time.sleep(2)
            else:
                log.debug("Transitioned out of sync.")
                return True
        # We timed out
        raise InstrumentProtocolException("failed to transition out of sync mode")
    def _is_logging(self):
        """
        Run the status command to determine if we are in command or autosample
        mode.
        @return: True if sampling, false if not, None if we can't determine
        """
        log.debug("_is_logging: start")
        result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
        log.debug("Checking logging status from %s", result)
        match = LOGGING_STATUS_COMPILED.match(result)
        if not match:
            log.error("Unable to determine logging status from: %s", result)
            return None
        if match.group(1) == 'GO':
            log.debug("Looks like we are logging: %s", match.group(1))
            return True
        else:
            log.debug("Looks like we are NOT logging: %s", match.group(1))
            return False
    def _ensure_autosample_config(self):
        """
        Make sure the startup config contains a scheduler entry for the
        autosample polling job, and that the scheduler itself has been
        initialized.
        """
        scheduler_config = self._get_scheduler_config()
        if (scheduler_config == None):
            log.debug("_ensure_autosample_config: adding scheduler element to _startup_config")
            self._startup_config[DriverConfigKey.SCHEDULER] = {}
            scheduler_config = self._get_scheduler_config()
        log.debug("_ensure_autosample_config: adding autosample config to _startup_config")
        # Poll interval comes from the SAMPLE_INTERVAL parameter.
        config = {DriverSchedulerConfigKey.TRIGGER: {
            DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
            DriverSchedulerConfigKey.SECONDS: self._param_dict.get(Parameter.SAMPLE_INTERVAL)}}
        self._startup_config[DriverConfigKey.SCHEDULER][AUTO_SAMPLE_SCHEDULED_JOB] = config
        if(not self._scheduler):
            self.initialize_scheduler()
    def _sync_clock(self, *args, **kwargs):
        """
        sync clock close to a second edge
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if device respond correctly.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        next_agent_state = None
        result = None
        time_format = "%Y/%m/%d %H:%M:%S"
        # get_timestamp_delayed blocks until close to a second boundary so
        # the time written to the instrument is as accurate as possible.
        str_val = get_timestamp_delayed(time_format)
        log.debug("Setting instrument clock to '%s'", str_val)
        self._do_cmd_resp(Command.SET_CLOCK, str_val, expected_prompt=Prompt.CR_NL)
    def _wakeup(self, timeout):
        """There is no wakeup sequence for this instrument"""
        pass
    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
    def _build_command_dict(self):
        """
        Populate the command dictionary with command.
        """
        self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="start autosample")
        self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="stop autosample")
        self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="synchronize clock")
        self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="acquire status")
        self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="acquire sample")
        self._cmd_dict.add(Capability.FLASH_STATUS, display_name="flash status")
    def _build_param_dict(self):
        """
        Populate the parameter dictionary with XR-420 parameters.
        For each parameter key add value formatting function for set commands.
        """
        # The parameter dictionary.
        self._param_dict = ProtocolParameterDict()
        # Add parameter handlers to parameter dictionary for instrument configuration parameters.
        self._param_dict.add(Parameter.CLOCK,
                             r'(.*)\r\n',
                             lambda match : match.group(1),
                             lambda string : str(string),
                             type=ParameterDictType.STRING,
                             display_name="clock",
                             expiration=0,
                             visibility=ParameterDictVisibility.READ_ONLY)
        # SAMPLE_INTERVAL is driver-side only (drives the autosample
        # scheduler); it is never parsed from an instrument response.
        self._param_dict.add(Parameter.SAMPLE_INTERVAL,
                             r'Not used. This parameter is not parsed from instrument response',
                             None,
                             self._int_to_string,
                             type=ParameterDictType.INT,
                             default_value=30,
                             value=30,
                             startup_param=True,
                             display_name="sample_interval",
                             visibility=ParameterDictVisibility.IMMUTABLE)
    def _update_params(self, *args, **kwargs):
        """
        Update the parameter dictionary.
        """
        log.debug("_update_params:")
        # Issue clock command and parse results.
        # This is the only parameter and it is always changing so don't bother with the 'change' event
        self._do_cmd_resp(Command.GET_CLOCK)
    def _build_set_clock_command(self, cmd, val):
        """
        Build handler for set clock command (cmd=val followed by newline).
        @param cmd the string for setting the clock (this should equal #CLOCK=).
        @param val the parameter value to set.
        @ retval The set command to be sent to the device.
        """
        cmd = '%s%s' %(cmd, val) + NEWLINE
        return cmd
    def _parse_clock_response(self, response, prompt):
        """
        Parse handler for clock command.
        @param response command response string.
        @param prompt prompt following command response.
        @throws InstrumentProtocolException if clock command misunderstood.
        """
        log.debug("_parse_clock_response: response=%s, prompt=%s" %(response, prompt))
        if prompt not in [Prompt.CR_NL]:
            raise InstrumentProtocolException('CLOCK command not recognized: %s.' % response)
        if not self._param_dict.update(response):
            raise InstrumentProtocolException('CLOCK command not parsed: %s.' % response)
        return
    def _parse_fs_response(self, response, prompt):
        """
        Parse handler for FS command.
        @param response command response string.
        @param prompt prompt following command response.
        @throws InstrumentProtocolException if FS command misunderstood.
        """
        log.debug("_parse_fs_response: response=%s, prompt=%s" %(response, prompt))
        if prompt not in [Prompt.FS]:
            raise InstrumentProtocolException('FS command not recognized: %s.' % response)
        return response
    def _parse_common_response(self, response, prompt):
        """
        Parse handler for common commands.
        @param response command response string.
        @param prompt prompt following command response.
        """
        return response
|
|
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
import _sre
from sre_constants import *
# Fail fast if the compiled _sre extension and this pure-Python front end
# disagree on the bytecode format version.
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
# Largest value that fits in a single code word of the pattern bytecode.
MAXCODE = 65535
def _compile(code, pattern, flags):
    # internal: compile a (sub)pattern.
    # Appends code words for each (op, av) node of the parsed *pattern*
    # onto *code*.  Many constructs use the "skip slot" idiom: emit a 0
    # placeholder, compile the sub-part, then back-patch the placeholder
    # with the distance to the end of the sub-part.
    emit = code.append
    for op, av in pattern:
        if op in (LITERAL, NOT_LITERAL):
            if flags & SRE_FLAG_IGNORECASE:
                # case-insensitive variant stores the lower-cased character
                emit(OPCODES[OP_IGNORE[op]])
                emit(_sre.getlower(av, flags))
            else:
                emit(OPCODES[op])
                emit(av)
        elif op is IN:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
                # fixup lower-cases every literal in the set at compile time
                def fixup(literal, flags=flags):
                    return _sre.getlower(literal, flags)
            else:
                emit(OPCODES[op])
                fixup = lambda x: x
            skip = len(code); emit(0)
            _compile_charset(av, flags, code, fixup)
            code[skip] = len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(OPCODES[ANY_ALL])
            else:
                emit(OPCODES[ANY])
        elif op in (REPEAT, MIN_REPEAT, MAX_REPEAT):
            if flags & SRE_FLAG_TEMPLATE:
                raise error, "internal: unsupported template operator"
                emit(OPCODES[REPEAT])
                skip = len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = len(code) - skip
            elif _simple(av) and op == MAX_REPEAT:
                # single fixed-width item: use the faster REPEAT_ONE opcode
                emit(OPCODES[REPEAT_ONE])
                skip = len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = len(code) - skip
            else:
                # general repeat: body followed by MAX_UNTIL/MIN_UNTIL
                emit(OPCODES[REPEAT])
                skip = len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                code[skip] = len(code) - skip
                if op == MAX_REPEAT:
                    emit(OPCODES[MAX_UNTIL])
                else:
                    emit(OPCODES[MIN_UNTIL])
        elif op is SUBPATTERN:
            # av[0] is the group number (or None for non-capturing groups);
            # capturing groups are bracketed by MARK opcodes.
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2)
            # _compile_info(code, av[1], flags)
            _compile(code, av[1], flags)
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2+1)
        elif op in (SUCCESS, FAILURE):
            emit(OPCODES[op])
        elif op in (ASSERT, ASSERT_NOT):
            emit(OPCODES[op])
            skip = len(code); emit(0)
            if av[0] >= 0:
                emit(0) # look ahead
            else:
                # look-behind: the asserted pattern must have one fixed width
                lo, hi = av[1].getwidth()
                if lo != hi:
                    raise error, "look-behind requires fixed-width pattern"
                emit(lo) # look behind
            _compile(code, av[1], flags)
            emit(OPCODES[SUCCESS])
            code[skip] = len(code) - skip
        elif op is CALL:
            emit(OPCODES[op])
            skip = len(code); emit(0)
            _compile(code, av, flags)
            emit(OPCODES[SUCCESS])
            code[skip] = len(code) - skip
        elif op is AT:
            # anchors; the opcode variant depends on MULTILINE/LOCALE/UNICODE
            emit(OPCODES[op])
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(ATCODES[av])
        elif op is BRANCH:
            # alternation: each branch ends with a JUMP to the common tail
            emit(OPCODES[op])
            tail = []
            for av in av[1]:
                skip = len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(OPCODES[JUMP])
                tail.append(len(code)); emit(0)
                code[skip] = len(code) - skip
            emit(0) # end of branch
            for tail in tail:
                code[tail] = len(code) - tail
        elif op is CATEGORY:
            emit(OPCODES[op])
            if flags & SRE_FLAG_LOCALE:
                av = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                av = CH_UNICODE[av]
            emit(CHCODES[av])
        elif op is GROUPREF:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
            else:
                emit(OPCODES[op])
            emit(av-1)
        else:
            raise ValueError, ("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None):
    # compile charset subprogram: one opcode (plus operands) per set item,
    # terminated by a FAILURE opcode.  The set is first run through
    # _optimize_charset, which may collapse it into a CHARSET bitmap.
    emit = code.append
    if not fixup:
        fixup = lambda x: x
    for op, av in _optimize_charset(charset, fixup):
        emit(OPCODES[op])
        if op is NEGATE:
            pass  # NEGATE carries no operand
        elif op is LITERAL:
            emit(fixup(av))
        elif op is RANGE:
            emit(fixup(av[0]))
            emit(fixup(av[1]))
        elif op is CHARSET:
            # av is a pre-built list of bitmap words
            code.extend(av)
        elif op is CATEGORY:
            if flags & SRE_FLAG_LOCALE:
                emit(CHCODES[CH_LOCALE[av]])
            elif flags & SRE_FLAG_UNICODE:
                emit(CHCODES[CH_UNICODE[av]])
            else:
                emit(CHCODES[av])
        else:
            raise error, "internal: unsupported set operator"
    emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup):
    # internal: optimize character set.
    # Builds a 256-entry membership map of the set's literals/ranges, then
    # re-expresses the set either as at most two LITERAL/RANGE items or as
    # a single CHARSET bitmap, whichever is smaller.  Sets containing
    # categories or characters above 255 are returned unchanged.
    out = []
    charmap = [0]*256
    try:
        for op, av in charset:
            if op is NEGATE:
                out.append((op, av))
            elif op is LITERAL:
                charmap[fixup(av)] = 1
            elif op is RANGE:
                for i in range(fixup(av[0]), fixup(av[1])+1):
                    charmap[i] = 1
            elif op is CATEGORY:
                # XXX: could append to charmap tail
                return charset # cannot compress
    except IndexError:
        # character set contains unicode characters
        return charset
    # compress character map: collect runs of consecutive set members
    i = p = n = 0
    runs = []
    for c in charmap:
        if c:
            if n == 0:
                p = i
            n = n + 1
        elif n:
            runs.append((p, n))
            n = 0
        i = i + 1
    if n:
        runs.append((p, n))
    if len(runs) <= 2:
        # use literal/range
        for p, n in runs:
            if n == 1:
                out.append((LITERAL, p))
            else:
                out.append((RANGE, (p, p+n-1)))
        if len(out) < len(charset):
            return out
    else:
        # use bitmap: pack the 256-bit map into MAXCODE-sized words
        data = []
        m = 1; v = 0
        for c in charmap:
            if c:
                v = v + m
            m = m << 1
            if m > MAXCODE:
                data.append(v)
                m = 1; v = 0
        out.append((CHARSET, data))
        return out
    return charset
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
if lo == 0 and hi == MAXREPEAT:
raise error, "nothing to repeat"
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
    # internal: compile an info block.  in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    lo, hi = pattern.getwidth()
    if lo == 0:
        return # not worth it
    # look for a literal prefix
    prefix = []
    prefix_skip = 0
    charset = [] # not used
    if not (flags & SRE_FLAG_IGNORECASE):
        # look for literal prefix: leading run of literals (descending
        # into single-alternative subpatterns)
        for op, av in pattern.data:
            if op is LITERAL:
                if len(prefix) == prefix_skip:
                    prefix_skip = prefix_skip + 1
                prefix.append(av)
            elif op is SUBPATTERN and len(av[1]) == 1:
                op, av = av[1][0]
                if op is LITERAL:
                    prefix.append(av)
                else:
                    break
            else:
                break
        # if no prefix, look for charset prefix: set of possible first
        # characters, from a leading IN node or a branch of literals
        if not prefix and pattern.data:
            op, av = pattern.data[0]
            if op is SUBPATTERN and av[1]:
                op, av = av[1][0]
                if op is LITERAL:
                    charset.append((op, av))
                elif op is BRANCH:
                    c = []
                    for p in av[1]:
                        if not p:
                            break
                        op, av = p[0]
                        if op is LITERAL:
                            c.append((op, av))
                        else:
                            break
                    else:
                        charset = c
            elif op is BRANCH:
                c = []
                for p in av[1]:
                    if not p:
                        break
                    op, av = p[0]
                    if op is LITERAL:
                        c.append((op, av))
                    else:
                        break
                else:
                    charset = c
            elif op is IN:
                charset = av
##     if prefix:
##         print "*** PREFIX", prefix, prefix_skip
##     if charset:
##         print "*** CHARSET", charset
    # add an info block
    emit = code.append
    emit(OPCODES[INFO])
    skip = len(code); emit(0)
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if len(prefix) == prefix_skip == len(pattern.data):
            # the whole pattern is one literal string
            mask = mask + SRE_INFO_LITERAL
    elif charset:
        mask = mask + SRE_INFO_CHARSET
    emit(mask)
    # pattern length (min, then max; 0 max means "unbounded/too large")
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    if hi < MAXCODE:
        emit(hi)
    else:
        emit(0)
    # add literal prefix
    if prefix:
        emit(len(prefix)) # length
        emit(prefix_skip) # skip
        code.extend(prefix)
        # generate overlap table (Knuth-Morris-Pratt style failure links)
        table = [-1] + ([0]*len(prefix))
        for i in range(len(prefix)):
            table[i+1] = table[i]+1
            while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
                table[i+1] = table[table[i+1]-1]+1
        code.extend(table[1:]) # don't store first entry
    elif charset:
        _compile_charset(charset, 0, code)
    code[skip] = len(code) - skip
# Types accepted as pattern source strings.  On interpreters without a
# separate unicode type the NameError branch leaves only str registered.
STRING_TYPES = [type("")]
try:
    STRING_TYPES.append(type(unicode("")))
except NameError:
    pass
def _code(p, flags):
    """Compile parsed pattern *p* into a flat list of opcodes."""
    effective_flags = p.pattern.flags | flags
    opcodes = []
    # Emit the optimization info block first, then the pattern itself.
    _compile_info(opcodes, p, effective_flags)
    _compile(opcodes, p.data, effective_flags)
    opcodes.append(OPCODES[SUCCESS])
    return opcodes
def compile(p, flags=0):
    """Internal: convert a pattern (source string or parsed tree) into a
    compiled _sre pattern object."""
    source = None
    if type(p) in STRING_TYPES:
        import sre_parse
        source = p
        p = sre_parse.parse(p, flags)
    code = _code(p, flags)
    # XXX: <fl> get rid of this limitation!
    assert p.pattern.groups <= 100,\
         "sorry, but this version only supports 100 named groups"
    # Build the group-name <-> group-number maps in both directions.
    groupindex = p.pattern.groupdict
    indexgroup = [None] * p.pattern.groups
    for group_name, group_num in groupindex.items():
        indexgroup[group_num] = group_name
    return _sre.compile(
        source, flags, code,
        p.pattern.groups - 1,
        groupindex, indexgroup,
    )
|
|
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Example of a PCI alias::
| [pci]
| alias = '{
| "name": "QuickAssist",
| "product_id": "0443",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| "numa_policy": "legacy"
| }'
Aliases with the same name, device_type and numa_policy are ORed::
| [pci]
| alias = '{
| "name": "QuickAssist",
| "product_id": "0442",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| }'
These two aliases define a device request meaning: vendor_id is "8086" and
product_id is "0442" or "0443".
"""
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import utils
LOG = logging.getLogger(__name__)
# Well-known tag keys used in PCI device specs; ``dev_type`` is the key
# written into specs by _get_alias_from_config below.
PCI_NET_TAG = 'physical_network'
PCI_TRUSTED_TAG = 'trusted'
PCI_DEVICE_TYPE_TAG = 'dev_type'
# Maps a VNIC type to the PCI device type it requires.
DEVICE_TYPE_FOR_VNIC_TYPE = {
    network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF
}
CONF = nova.conf.CONF
# The only accepted value for the (test-only) ``capability_type`` field.
_ALIAS_CAP_TYPE = ['pci']
# JSON schema that every entry of the ``[pci] alias`` config option must
# satisfy (see the module docstring for an example).
_ALIAS_SCHEMA = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "name": {
            "type": "string",
            "minLength": 1,
            "maxLength": 256,
        },
        # TODO(stephenfin): This isn't used anywhere outside of tests and
        # should probably be removed.
        "capability_type": {
            "type": "string",
            "enum": _ALIAS_CAP_TYPE,
        },
        "product_id": {
            "type": "string",
            "pattern": utils.PCI_VENDOR_PATTERN,
        },
        "vendor_id": {
            "type": "string",
            "pattern": utils.PCI_VENDOR_PATTERN,
        },
        "device_type": {
            "type": "string",
            "enum": list(obj_fields.PciDeviceType.ALL),
        },
        "numa_policy": {
            "type": "string",
            "enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
        },
    },
    "required": ["name"],
}
def _get_alias_from_config():
    """Parse and validate PCI aliases from the nova config.

    :returns: A dictionary where the keys are alias names and the values are
        tuples of form ``(numa_policy, specs)``. ``numa_policy`` describes
        the required NUMA affinity of the device(s), while ``specs`` is a
        list of PCI device specs.
    :raises: exception.PciInvalidAlias if an alias entry is malformed, or if
        two aliases with the same name have different device types or
        different NUMA policies.
    """
    jaliases = CONF.pci.alias
    aliases = {}  # map alias name to (numa_policy, list of specs)
    try:
        for jsonspecs in jaliases:
            spec = jsonutils.loads(jsonspecs)
            jsonschema.validate(spec, _ALIAS_SCHEMA)
            name = spec.pop('name').strip()
            numa_policy = spec.pop('numa_policy', None)
            if not numa_policy:
                numa_policy = obj_fields.PCINUMAAffinityPolicy.LEGACY
            dev_type = spec.pop('device_type', None)
            if dev_type:
                spec['dev_type'] = dev_type
            if name not in aliases:
                aliases[name] = (numa_policy, [spec])
                continue
            if aliases[name][0] != numa_policy:
                reason = _("NUMA policy mismatch for alias '%s'") % name
                raise exception.PciInvalidAlias(reason=reason)
            # Compare with .get() so an alias that omits device_type is
            # reported as a proper mismatch (or allowed to merge when both
            # omit it) instead of surfacing a bare KeyError as the reason.
            if aliases[name][1][0].get('dev_type') != spec.get('dev_type'):
                reason = _("Device type mismatch for alias '%s'") % name
                raise exception.PciInvalidAlias(reason=reason)
            aliases[name][1].append(spec)
    except exception.PciInvalidAlias:
        raise
    except jsonschema.exceptions.ValidationError as exc:
        raise exception.PciInvalidAlias(reason=exc.message)
    except Exception as exc:
        raise exception.PciInvalidAlias(reason=six.text_type(exc))
    return aliases
def _translate_alias_to_requests(alias_spec, affinity_policy=None):
    """Generate complete pci requests from pci aliases in extra_spec."""
    defined_aliases = _get_alias_from_config()
    requests = []
    # Split the whole "name_1:count_1, name_2:count_2, ..." string up front
    # so a malformed entry fails before any request object is built.
    entries = [item.split(':') for item in alias_spec.split(',')]
    for alias_name, count in entries:
        alias_name = alias_name.strip()
        if alias_name not in defined_aliases:
            raise exception.PciRequestAliasNotDefined(alias=alias_name)
        numa_policy, specs = defined_aliases[alias_name]
        # NOTE(gibi): InstancePCIRequest has a requester_id field that could
        # be filled with the flavor.flavorid but currently there is no special
        # handling for InstancePCIRequests created from the flavor. So it is
        # left empty.
        requests.append(objects.InstancePCIRequest(
            count=int(count),
            spec=specs,
            alias_name=alias_name,
            numa_policy=affinity_policy or numa_policy))
    return requests
def get_instance_pci_request_from_vif(context, instance, vif):
    """Given an Instance, return the PCI request associated
    to the PCI device related to the given VIF (if any) on the
    compute node the instance is currently running.

    A VIF is assumed to be backed by a PCI device when its 'profile'
    dict carries a 'pci_slot' entry.

    :param context: security context
    :param instance: instance object
    :param vif: network VIF model object
    :raises: raises PciRequestFromVIFNotFound if a pci device is requested
             but not found on current host
    :return: instance's PCIRequest object associated with the given VIF
             or None if no PCI device is requested
    """
    profile = vif['profile']
    pci_slot = profile.get('pci_slot') if profile else None
    if not pci_slot:
        # No PCI device behind this VIF.
        return None
    try:
        node_id = objects.ComputeNode.get_by_host_and_nodename(
            context,
            instance.host,
            instance.node).id
    except exception.NotFound:
        LOG.warning("expected to find compute node with host %s "
                    "and node %s when getting instance PCI request "
                    "from VIF", instance.host, instance.node)
        return None
    # Locate the PCIDevice with that address on the instance's current node.
    device = next(
        (dev for dev in instance.pci_devices
         if dev.compute_node_id == node_id and dev.address == pci_slot),
        None)
    if device is None:
        return None
    # Map the device back to the request that claimed it.
    for request in instance.pci_requests.requests:
        if request.request_id == device.request_id:
            return request
    raise exception.PciRequestFromVIFNotFound(
        pci_slot=pci_slot,
        node_id=node_id)
def get_pci_requests_from_flavor(flavor, affinity_policy=None):
    """Validate and return PCI requests.

    The ``pci_passthrough:alias`` extra spec describes the flavor's PCI
    requests as a comma-separated list of ``alias_name:count`` pairs, where
    each ``alias_name`` is defined in the ``pci.alias`` configuration.

    Each resulting entry is a nova.objects.InstancePCIRequest carrying:

    - 'spec': the PCI device properties requirement
    - 'count': the number of devices
    - 'alias_name' (optional): the corresponding alias definition name
    - 'numa_policy' (optional): required NUMA affinity of the devices

    For example, given the alias configuration::

        {
            'vendor_id':'8086',
            'device_id':'1502',
            'name':'alias_1'
        }

    and a flavor extra spec of ``'pci_passthrough:alias': 'alias_1:2'``,
    the returned requests are::

        [{
            'count':2,
            'specs': [{'vendor_id':'8086', 'device_id':'1502'}],
            'alias_name': 'alias_1'
        }]

    :param flavor: The flavor to be checked
    :param affinity_policy: pci numa affinity policy
    :returns: A list of PCI requests
    :rtype: nova.objects.InstancePCIRequests
    :raises: exception.PciRequestAliasNotDefined if an invalid PCI alias is
        provided
    :raises: exception.PciInvalidAlias if the configuration contains invalid
        aliases.
    """
    alias_spec = None
    if 'extra_specs' in flavor:
        extra_specs = flavor['extra_specs']
        if 'pci_passthrough:alias' in extra_specs:
            alias_spec = extra_specs['pci_passthrough:alias']
    if alias_spec is None:
        # Flavor requests no PCI devices.
        return objects.InstancePCIRequests(requests=[])
    return objects.InstancePCIRequests(
        requests=_translate_alias_to_requests(
            alias_spec, affinity_policy=affinity_policy))
|
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: audio_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('audio_spectrogram')
def audio_spectrogram(input, window_size, stride, magnitude_squared=False, name=None):
  r"""Produces a visualization of audio data over time.
  Spectrograms are a standard way of representing audio information as a series of
  slices of frequency information, one slice for each window of time. By joining
  these together into a sequence, they form a distinctive fingerprint of the sound
  over time.
  This op expects to receive audio data as an input, stored as floats in the range
  -1 to 1, together with a window width in samples, and a stride specifying how
  far to move the window between slices. From this it generates a three
  dimensional output. The lowest dimension has an amplitude value for each
  frequency during that time slice. The next dimension is time, with successive
  frequency slices. The final dimension is for the channels in the input, so a
  stereo audio input would have two here for example.
  This means the layout when converted and saved as an image is rotated 90 degrees
  clockwise from a typical spectrogram. Time is descending down the Y axis, and
  the frequency decreases from left to right.
  Each value in the result represents the square root of the sum of the real and
  imaginary parts of an FFT on the current window of samples. In this way, the
  lowest dimension represents the power of each frequency in the current window,
  and adjacent windows are concatenated in the next dimension.
  To get a more intuitive and visual look at what this operation does, you can run
  tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
  resulting spectrogram as a PNG image.
  Args:
    input: A `Tensor` of type `float32`. Float representation of audio data.
    window_size: An `int`.
      How wide the input window is in samples. For the highest efficiency
      this should be a power of two, but other values are accepted.
    stride: An `int`.
      How widely apart the center of adjacent sample windows should be.
    magnitude_squared: An optional `bool`. Defaults to `False`.
      Whether to return the squared magnitude or just the
      magnitude. Using squared magnitude can avoid extra calculations.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32`.
  """
  # NOTE(review): machine-generated op wrapper; code left byte-identical,
  # comments added only. Dispatches on graph vs. eager execution mode.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, build the op through the op-def library,
    # and record gradient metadata.
    window_size = _execute.make_int(window_size, "window_size")
    stride = _execute.make_int(stride, "stride")
    if magnitude_squared is None:
      magnitude_squared = False
    magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AudioSpectrogram", input=input, window_size=window_size,
        stride=stride, magnitude_squared=magnitude_squared, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("window_size", _op.get_attr("window_size"), "stride",
              _op.get_attr("stride"), "magnitude_squared",
              _op.get_attr("magnitude_squared"))
    _execute.record_gradient(
        "AudioSpectrogram", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: execute directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "AudioSpectrogram", name, _ctx._post_execution_callbacks, input,
        "window_size", window_size, "stride", stride, "magnitude_squared",
        magnitude_squared)
      return _result
    except _core._FallbackException:
      # Retry via the slower eager fallback path.
      return audio_spectrogram_eager_fallback(
          input, window_size=window_size, stride=stride,
          magnitude_squared=magnitude_squared, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def audio_spectrogram_eager_fallback(input, window_size, stride, magnitude_squared=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function audio_spectrogram
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attrs, convert the input to a tensor, and execute the op
  # directly (no fast path).
  window_size = _execute.make_int(window_size, "window_size")
  stride = _execute.make_int(stride, "stride")
  if magnitude_squared is None:
    magnitude_squared = False
  magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared")
  input = _ops.convert_to_tensor(input, _dtypes.float32)
  _inputs_flat = [input]
  _attrs = ("window_size", window_size, "stride", stride, "magnitude_squared",
            magnitude_squared)
  _result = _execute.execute(b"AudioSpectrogram", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "AudioSpectrogram", _inputs_flat, _attrs, _result, name)
  # Unpack the single output tensor.
  _result, = _result
  return _result
# Output names and result namedtuple for the two-output DecodeWav op.
_decode_wav_outputs = ["audio", "sample_rate"]
_DecodeWavOutput = _collections.namedtuple(
    "DecodeWav", _decode_wav_outputs)
@tf_export('decode_wav')
def decode_wav(contents, desired_channels=-1, desired_samples=-1, name=None):
  r"""Decode a 16-bit PCM WAV file to a float tensor.
  The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
  When desired_channels is set, if the input contains fewer channels than this
  then the last channel will be duplicated to give the requested number, else if
  the input has more channels than requested then the additional channels will be
  ignored.
  If desired_samples is set, then the audio will be cropped or padded with zeroes
  to the requested length.
  The first output contains a Tensor with the content of the audio samples. The
  lowest dimension will be the number of channels, and the second will be the
  number of samples. For example, a ten-sample-long stereo WAV file should give an
  output shape of [10, 2].
  Args:
    contents: A `Tensor` of type `string`.
      The WAV-encoded audio, usually from a file.
    desired_channels: An optional `int`. Defaults to `-1`.
      Number of sample channels wanted.
    desired_samples: An optional `int`. Defaults to `-1`.
      Length of audio requested.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (audio, sample_rate).
    audio: A `Tensor` of type `float32`.
    sample_rate: A `Tensor` of type `int32`.
  """
  # NOTE(review): machine-generated op wrapper; code left byte-identical,
  # comments added only. Dispatches on graph vs. eager execution mode.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs and build the op via the op-def library.
    if desired_channels is None:
      desired_channels = -1
    desired_channels = _execute.make_int(desired_channels, "desired_channels")
    if desired_samples is None:
      desired_samples = -1
    desired_samples = _execute.make_int(desired_samples, "desired_samples")
    _, _, _op = _op_def_lib._apply_op_helper(
        "DecodeWav", contents=contents, desired_channels=desired_channels,
        desired_samples=desired_samples, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("desired_channels", _op.get_attr("desired_channels"),
              "desired_samples", _op.get_attr("desired_samples"))
    _execute.record_gradient(
        "DecodeWav", _inputs_flat, _attrs, _result, name)
    # Wrap the two outputs in the (audio, sample_rate) namedtuple.
    _result = _DecodeWavOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: execute directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DecodeWav",
        name, _ctx._post_execution_callbacks, contents, "desired_channels",
        desired_channels, "desired_samples", desired_samples)
      _result = _DecodeWavOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Retry via the slower eager fallback path.
      return decode_wav_eager_fallback(
          contents, desired_channels=desired_channels,
          desired_samples=desired_samples, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def decode_wav_eager_fallback(contents, desired_channels=-1, desired_samples=-1, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function decode_wav
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attrs, convert the input to a tensor, and execute the op
  # directly (no fast path).
  if desired_channels is None:
    desired_channels = -1
  desired_channels = _execute.make_int(desired_channels, "desired_channels")
  if desired_samples is None:
    desired_samples = -1
  desired_samples = _execute.make_int(desired_samples, "desired_samples")
  contents = _ops.convert_to_tensor(contents, _dtypes.string)
  _inputs_flat = [contents]
  _attrs = ("desired_channels", desired_channels, "desired_samples",
            desired_samples)
  _result = _execute.execute(b"DecodeWav", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "DecodeWav", _inputs_flat, _attrs, _result, name)
  # Wrap the two outputs in the (audio, sample_rate) namedtuple.
  _result = _DecodeWavOutput._make(_result)
  return _result
@tf_export('encode_wav')
def encode_wav(audio, sample_rate, name=None):
  r"""Encode audio data using the WAV file format.
  This operation will generate a string suitable to be saved out to create a .wav
  audio file. It will be encoded in the 16-bit PCM format. It takes in float
  values in the range -1.0f to 1.0f, and any outside that value will be clamped to
  that range.
  `audio` is a 2-D float Tensor of shape `[length, channels]`.
  `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
  Args:
    audio: A `Tensor` of type `float32`. 2-D with shape `[length, channels]`.
    sample_rate: A `Tensor` of type `int32`.
      Scalar containing the sample frequency.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `string`.
  """
  # NOTE(review): machine-generated op wrapper; code left byte-identical,
  # comments added only. Dispatches on graph vs. eager execution mode.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op via the op-def library (no attrs to set).
    _, _, _op = _op_def_lib._apply_op_helper(
        "EncodeWav", audio=audio, sample_rate=sample_rate, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "EncodeWav", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: execute directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "EncodeWav",
        name, _ctx._post_execution_callbacks, audio, sample_rate)
      return _result
    except _core._FallbackException:
      # Retry via the slower eager fallback path.
      return encode_wav_eager_fallback(
          audio, sample_rate, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def encode_wav_eager_fallback(audio, sample_rate, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function encode_wav
  """
  _ctx = ctx if ctx else _context.context()
  # Convert inputs to tensors and execute the op directly (no fast path).
  audio = _ops.convert_to_tensor(audio, _dtypes.float32)
  sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
  _inputs_flat = [audio, sample_rate]
  _attrs = None
  _result = _execute.execute(b"EncodeWav", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "EncodeWav", _inputs_flat, _attrs, _result, name)
  # Unpack the single output tensor.
  _result, = _result
  return _result
@tf_export('mfcc')
def mfcc(spectrogram, sample_rate, upper_frequency_limit=4000, lower_frequency_limit=20, filterbank_channel_count=40, dct_coefficient_count=13, name=None):
  r"""Transforms a spectrogram into a form that's useful for speech recognition.
  Mel Frequency Cepstral Coefficients are a way of representing audio data that's
  been effective as an input feature for machine learning. They are created by
  taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
  higher frequencies that are less significant to the human ear. They have a long
  history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
  is a good resource to learn more.
  Args:
    spectrogram: A `Tensor` of type `float32`.
      Typically produced by the Spectrogram op, with magnitude_squared
      set to true.
    sample_rate: A `Tensor` of type `int32`.
      How many samples per second the source audio used.
    upper_frequency_limit: An optional `float`. Defaults to `4000`.
      The highest frequency to use when calculating the
      cepstrum.
    lower_frequency_limit: An optional `float`. Defaults to `20`.
      The lowest frequency to use when calculating the
      cepstrum.
    filterbank_channel_count: An optional `int`. Defaults to `40`.
      Resolution of the Mel bank used internally.
    dct_coefficient_count: An optional `int`. Defaults to `13`.
      How many output channels to produce per time slice.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32`.
  """
  # NOTE(review): machine-generated op wrapper; code left byte-identical,
  # comments added only. Dispatches on graph vs. eager execution mode.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs and build the op via the op-def library.
    if upper_frequency_limit is None:
      upper_frequency_limit = 4000
    upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
    if lower_frequency_limit is None:
      lower_frequency_limit = 20
    lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
    if filterbank_channel_count is None:
      filterbank_channel_count = 40
    filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
    if dct_coefficient_count is None:
      dct_coefficient_count = 13
    dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mfcc", spectrogram=spectrogram, sample_rate=sample_rate,
        upper_frequency_limit=upper_frequency_limit,
        lower_frequency_limit=lower_frequency_limit,
        filterbank_channel_count=filterbank_channel_count,
        dct_coefficient_count=dct_coefficient_count, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("upper_frequency_limit", _op.get_attr("upper_frequency_limit"),
              "lower_frequency_limit", _op.get_attr("lower_frequency_limit"),
              "filterbank_channel_count",
              _op.get_attr("filterbank_channel_count"),
              "dct_coefficient_count", _op.get_attr("dct_coefficient_count"))
    _execute.record_gradient(
        "Mfcc", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: execute directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mfcc", name,
        _ctx._post_execution_callbacks, spectrogram, sample_rate,
        "upper_frequency_limit", upper_frequency_limit,
        "lower_frequency_limit", lower_frequency_limit,
        "filterbank_channel_count", filterbank_channel_count,
        "dct_coefficient_count", dct_coefficient_count)
      return _result
    except _core._FallbackException:
      # Retry via the slower eager fallback path.
      return mfcc_eager_fallback(
          spectrogram, sample_rate,
          upper_frequency_limit=upper_frequency_limit,
          lower_frequency_limit=lower_frequency_limit,
          filterbank_channel_count=filterbank_channel_count,
          dct_coefficient_count=dct_coefficient_count, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def mfcc_eager_fallback(spectrogram, sample_rate, upper_frequency_limit=4000, lower_frequency_limit=20, filterbank_channel_count=40, dct_coefficient_count=13, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function mfcc
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attrs, convert the inputs to tensors, and execute the op
  # directly (no fast path).
  if upper_frequency_limit is None:
    upper_frequency_limit = 4000
  upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
  if lower_frequency_limit is None:
    lower_frequency_limit = 20
  lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
  if filterbank_channel_count is None:
    filterbank_channel_count = 40
  filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
  if dct_coefficient_count is None:
    dct_coefficient_count = 13
  dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
  spectrogram = _ops.convert_to_tensor(spectrogram, _dtypes.float32)
  sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
  _inputs_flat = [spectrogram, sample_rate]
  _attrs = ("upper_frequency_limit", upper_frequency_limit,
  "lower_frequency_limit", lower_frequency_limit, "filterbank_channel_count",
  filterbank_channel_count, "dct_coefficient_count", dct_coefficient_count)
  _result = _execute.execute(b"Mfcc", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Mfcc", _inputs_flat, _attrs, _result, name)
  # Unpack the single output tensor.
  _result, = _result
  return _result
def _InitOpDefLibrary(op_list_proto_bytes):
  """Build an OpDefLibrary from a serialized OpList protobuf."""
  parsed_op_list = _op_def_pb2.OpList()
  parsed_op_list.ParseFromString(op_list_proto_bytes)
  # Register the ops globally, then expose them through a library object.
  _op_def_registry.register_op_list(parsed_op_list)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_op_list)
  return library
# op {
# name: "AudioSpectrogram"
# input_arg {
# name: "input"
# type: DT_FLOAT
# }
# output_arg {
# name: "spectrogram"
# type: DT_FLOAT
# }
# attr {
# name: "window_size"
# type: "int"
# }
# attr {
# name: "stride"
# type: "int"
# }
# attr {
# name: "magnitude_squared"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "DecodeWav"
# input_arg {
# name: "contents"
# type: DT_STRING
# }
# output_arg {
# name: "audio"
# type: DT_FLOAT
# }
# output_arg {
# name: "sample_rate"
# type: DT_INT32
# }
# attr {
# name: "desired_channels"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "desired_samples"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "EncodeWav"
# input_arg {
# name: "audio"
# type: DT_FLOAT
# }
# input_arg {
# name: "sample_rate"
# type: DT_INT32
# }
# output_arg {
# name: "contents"
# type: DT_STRING
# }
# }
# op {
# name: "Mfcc"
# input_arg {
# name: "spectrogram"
# type: DT_FLOAT
# }
# input_arg {
# name: "sample_rate"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_FLOAT
# }
# attr {
# name: "upper_frequency_limit"
# type: "float"
# default_value {
# f: 4000
# }
# }
# attr {
# name: "lower_frequency_limit"
# type: "float"
# default_value {
# f: 20
# }
# }
# attr {
# name: "filterbank_channel_count"
# type: "int"
# default_value {
# i: 40
# }
# }
# attr {
# name: "dct_coefficient_count"
# type: "int"
# default_value {
# i: 13
# }
# }
# }
# Serialized OpList protobuf for the four ops above; the human-readable
# proto text is in the comment block immediately preceding this line.
_op_def_lib = _InitOpDefLibrary(b"\np\n\020AudioSpectrogram\022\t\n\005input\030\001\032\017\n\013spectrogram\030\001\"\022\n\013window_size\022\003int\"\r\n\006stride\022\003int\"\035\n\021magnitude_squared\022\004bool\032\002(\000\n\200\001\n\tDecodeWav\022\014\n\010contents\030\007\032\t\n\005audio\030\001\032\017\n\013sample_rate\030\003\"$\n\020desired_channels\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"#\n\017desired_samples\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n5\n\tEncodeWav\022\t\n\005audio\030\001\022\017\n\013sample_rate\030\003\032\014\n\010contents\030\007\n\311\001\n\004Mfcc\022\017\n\013spectrogram\030\001\022\017\n\013sample_rate\030\003\032\n\n\006output\030\001\"%\n\025upper_frequency_limit\022\005float\032\005%\000\000zE\"%\n\025lower_frequency_limit\022\005float\032\005%\000\000\240A\"#\n\030filterbank_channel_count\022\003int\032\002\030(\" \n\025dct_coefficient_count\022\003int\032\002\030\r")
|
|
#! /usr/bin/env python
# title :reversiboard.py
# description :Creates and controls a Reversi game-board
# author :andresthor
# date :05-02-2017
# python_version :3.5.2
# =============================================================================
from constants import BLACK, WHITE, EMPTY
def add(t1, t2):
    '''Return the element-wise sum of two (x, y) tuples.'''
    x1, y1 = t1
    x2, y2 = t2
    return (x1 + x2, y1 + y2)
class ReversiBoard(object):
    '''Creates a board of reversi and keeps track of pieces and legal moves.

    Tiles are addressed as 1-indexed (column, row) tuples; the underlying
    storage is a 2d list indexed [col - 1][row - 1].
    '''

    def __init__(self, size):
        self.board = self.create_board(size, size)
        self.size = size
        self.score = {BLACK: 0, WHITE: 0}
        self.turn = None    # color whose move it is
        self.last = None    # last tile played
        # Move directions in tuple form (the 8 compass directions)
        self.dirs = [(-1, 0), ( 0, -1), ( 1, 0), ( 0, 1),
                     ( 1, 1), (-1, -1), (-1, 1), ( 1, -1)]
        self.init_pieces()

    def init_pieces(self):
        '''Initializes the board with the classic setup of 2x2 pieces'''
        self.set_tiles([(4, 4), (5, 5)], WHITE)
        self.set_tiles([(4, 5), (5, 4)], BLACK)
        self.score = {BLACK: 2, WHITE: 2}
        self.turn = BLACK
        self.last = (4, 4)

    def create_board(self, cols, rows):
        '''Creates a 2d array with EMPTY slots'''
        return [[EMPTY for _ in range(rows)] for _ in range(cols)]

    def set_tiles(self, tiles, color):
        '''Takes a list of tiles (tuples) and sets them to "color"'''
        for t in tiles:
            self.set_tile(t, color)

    def set_tile(self, tile, color):
        '''Sets a single tile (tuple) to "color"; off-board tiles are ignored'''
        if self.is_on_board(tile):
            self.board[tile[0] - 1][tile[1] - 1] = color

    def switch_turns(self):
        '''Switches the active player'''
        self.turn = BLACK if self.turn is WHITE else WHITE

    def is_on_board(self, tile):
        '''Returns True if the tile is a valid (column, row) tuple'''
        col, row = tile[0] - 1, tile[1] - 1
        return 0 <= col < self.size and 0 <= row < self.size

    def valid_move(self, tile, color=None):
        '''Returns True if the move is legal (defaults to the active player)'''
        if color is None:
            color = self.turn
        return self.is_on_board(tile) and self.can_flip(tile, color)

    def can_flip(self, tile, color):
        '''Returns True if setting tile to color will cause some piece(s) to be
        flipped.
        '''
        if self.is_occupied(tile):
            return False
        return any(self.flips_in_dir(tile, d, color) for d in self.dirs)

    def flips_in_dir(self, tile, direction, color):
        '''Iterates from tile in direction, and determines if setting the tile
        to color will cause a piece to be flipped in that direction.
        Returns a list of the flippable tiles.
        '''
        flips = []
        step = add(tile, direction)
        found_ally, found_foe = False, False
        while self.is_on_board(step) and self.get_tile(step) is not EMPTY:
            value = self.get_tile(step)
            if value is color:
                found_ally = True
                # Nothing beyond an ally can be flipped by this move.
                break
            elif value is self.opposite(color):
                flips.append(step)
                found_foe = True
            step = add(step, direction)
        # Foes are only flippable when they are bracketed by an ally.
        if found_ally and found_foe:
            return flips
        return []

    def flips(self, tile, color):
        '''Checks all directions for flippable tiles, assuming tile is set to
        color. Returns a list of the flippable tiles.
        '''
        flips = []
        for d in self.dirs:
            flips += self.flips_in_dir(tile, d, color)
        return flips

    def valid_moves(self, color=None):
        '''Returns a list of legal moves for color (defaults to active player)'''
        if color is None:
            color = self.turn
        return [(i + 1, j + 1)
                for i in range(self.size)
                for j in range(self.size)
                if self.valid_move((i + 1, j + 1), color)]

    def opposite(self, color):
        '''Returns the opposite color. Assumes the only colors sent in are BLACK
        or WHITE.
        '''
        return BLACK if color is WHITE else WHITE

    def is_occupied(self, tile):
        '''Returns True if the tile is already occupied by a piece'''
        return self.get_tile(tile) is not EMPTY

    def get_tile(self, tile):
        '''Returns the value at the specified tile'''
        return self.board[tile[0] - 1][tile[1] - 1]

    def do_move(self, tile):
        '''Makes a move at the selected tile, with the active player, if the
        move is valid. Then calculates score and switches turns.
        Returns True if the move was carried out, False otherwise.
        '''
        if self.valid_move(tile):
            self.set_tile(tile, self.turn)
            self.do_flips(tile)
            self.switch_turns()
            self.last = tile
            self.calc_score()
            return True
        return False

    def do_flips(self, tile, color=None):
        '''Carries out the flips required for the specified move.'''
        if color is None:
            color = self.turn
        # Bug fix: flipped tiles must take on "color" -- the original passed
        # self.turn here, which is wrong when an explicit color is supplied.
        self.set_tiles(self.flips(tile, color), color)

    def board_full(self):
        '''Returns True if the board is full.'''
        return sum(x.count(EMPTY) for x in self.board) == 0

    def calc_score(self):
        '''Calculates the score and stores in self.score'''
        self.score[BLACK] = sum(x.count(BLACK) for x in self.board)
        self.score[WHITE] = sum(x.count(WHITE) for x in self.board)

    def ascii(self):
        '''Prints out an ASCII version of the current board.'''
        cols, rows = len(self.board), len(self.board[0])
        margin = ' ' if rows < 10 else ' '
        # Column labels are generated so boards wider than 8 columns no
        # longer index past a hard-coded a-h list.
        letters = [chr(ord('a') + i) for i in range(cols)]
        # Print top
        top = margin + ' '
        for i in range(cols):
            space = ' ' if i < 9 else ''
            top += ' ' + letters[i] + '.' + space
        print(top)
        print(margin + ' ' + cols * '+---' + '+')
        # Print rows
        for j in range(rows):
            space = ' ' if j < 9 else ''
            out = str(j + 1) + '.' + space
            for i in range(cols):
                out += '| ' + str(self.board[i][j]) + ' '
            print(out + '|')
            print(margin + space + cols * '+---' + '+')
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for compute API."""
import contextlib
import copy
import datetime
import iso8601
import mock
import mox
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import quotas as quotas_obj
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_flavor
from nova.tests.objects import test_migration
from nova.tests.objects import test_service
from nova.volume import cinder
# Sentinel image/node identifiers shared by the tests below.
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
# Fake snapshot ids consumed by _test_delete_shelved_part() to select
# which image-delete outcome the stubbed image API should simulate:
# success, ImageNotFound, ImageNotAuthorized, or an unexpected error.
SHELVED_IMAGE = 'fake-shelved-image'
SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
class _ComputeAPIUnitTestMixIn(object):
    def setUp(self):
        """Create the fake user/project ids and request context shared by
        every test in this mixin."""
        super(_ComputeAPIUnitTestMixIn, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)
def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
if not exclude_states:
exclude_states = set()
return vm_state - exclude_states
def _create_flavor(self, params=None):
flavor = {'id': 1,
'flavorid': 1,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'vcpu_weight': None,
'root_gb': 1,
'ephemeral_gb': 0,
'rxtx_factor': 1,
'swap': 0,
'deleted': 0,
'disabled': False,
'is_public': True,
}
if params:
flavor.update(params)
return flavor
    def _create_instance_obj(self, params=None, flavor=None):
        """Build an in-memory ACTIVE test Instance (no DB access).

        *flavor* defaults to _create_flavor(); its properties are copied
        into system_metadata under 'instance_type_*' keys.  *params* may
        override any field; its 'metadata'/'system_metadata' entries are
        merged into the instance rather than assigned wholesale.
        """
        if not params:
            params = {}
        if flavor is None:
            flavor = self._create_flavor()
        def make_fake_sys_meta():
            # Seed system_metadata from params (popped so the final
            # instance.update(params) cannot clobber it), then add the
            # flavor-derived instance_type_* keys.
            sys_meta = params.pop("system_metadata", {})
            for key in flavors.system_metadata_flavor_props:
                sys_meta['instance_type_%s' % key] = flavor[key]
            return sys_meta
        now = timeutils.utcnow()
        instance = objects.Instance()
        instance.metadata = {}
        instance.metadata.update(params.pop('metadata', {}))
        instance.system_metadata = make_fake_sys_meta()
        # NOTE(review): 'system_metadata' was already popped inside
        # make_fake_sys_meta(), so this second pop always yields {} and
        # the update is a no-op.
        instance.system_metadata.update(params.pop('system_metadata', {}))
        instance._context = self.context
        instance.id = 1
        instance.uuid = uuidutils.generate_uuid()
        instance.cell_name = 'api!child'
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.image_ref = FAKE_IMAGE_REF
        instance.reservation_id = 'r-fakeres'
        instance.user_id = self.user_id
        instance.project_id = self.project_id
        instance.host = 'fake_host'
        instance.node = NODENAME
        instance.instance_type_id = flavor['id']
        instance.ami_launch_index = 0
        instance.memory_mb = 0
        instance.vcpus = 0
        instance.root_gb = 0
        instance.ephemeral_gb = 0
        instance.architecture = 'x86_64'
        instance.os_type = 'Linux'
        instance.locked = False
        instance.created_at = now
        instance.updated_at = now
        instance.launched_at = now
        instance.disable_terminate = False
        instance.info_cache = objects.InstanceInfoCache()
        if params:
            instance.update(params)
        instance.obj_reset_changes()
        return instance
    def test_create_quota_exceeded_messages(self):
        """Over-quota create() reports the requested instance range.

        TooManyInstances.kwargs['req'] must read '20-40' when
        min_count < max_count and just '40' when they are equal.
        """
        image_href = "image_href"
        image_id = 0
        instance_type = self._create_flavor()
        self.mox.StubOutWithMock(self.compute_api, "_get_image")
        self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
        self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
        quotas = {'instances': 1, 'cores': 1, 'ram': 1}
        usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in
                      ['instances', 'cores', 'ram'])
        headroom = dict((res, quotas[res] -
                         (usages[res]['in_use'] + usages[res]['reserved']))
                        for res in quotas.keys())
        quota_exception = exception.OverQuota(quotas=quotas,
            usages=usages, overs=['instances'], headroom=headroom)
        # mox record phase: both create() attempts below hit the same
        # image lookup / limit check / failing reserve sequence.
        for _unused in range(2):
            self.compute_api._get_image(self.context, image_href).AndReturn(
                (image_id, {}))
            quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int))
            quota.QUOTAS.reserve(self.context, instances=40,
                                 cores=mox.IsA(int),
                                 expire=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 user_id=mox.IgnoreArg(),
                                 ram=mox.IsA(int)).AndRaise(quota_exception)
        self.mox.ReplayAll()
        for min_count, message in [(20, '20-40'), (40, '40')]:
            try:
                self.compute_api.create(self.context, instance_type,
                                        "image_href", min_count=min_count,
                                        max_count=40)
            except exception.TooManyInstances as e:
                self.assertEqual(message, e.kwargs['req'])
            else:
                self.fail("Exception not raised")
def test_specified_port_and_multiple_instances_neutronv2(self):
# Tests that if port is specified there is only one instance booting
# (i.e max_count == 1) as we can't share the same port across multiple
# instances.
self.flags(network_api_class='nova.network.neutronv2.api.API')
port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
min_count = 1
max_count = 2
requested_networks = [(None, address, port)]
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create, self.context, 'fake_flavor', 'image_id',
min_count=min_count, max_count=max_count,
requested_networks=requested_networks)
def _test_specified_ip_and_multiple_instances_helper(self,
requested_networks):
# Tests that if ip is specified there is only one instance booting
# (i.e max_count == 1)
min_count = 1
max_count = 2
self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
self.compute_api.create, self.context, "fake_flavor", 'image_id',
min_count=min_count, max_count=max_count,
requested_networks=requested_networks)
def test_specified_ip_and_multiple_instances(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
requested_networks = [(network, address)]
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
def test_specified_ip_and_multiple_instances_neutronv2(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
requested_networks = [(network, address, None)]
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
    def test_suspend(self):
        # Ensure instance can be suspended.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        # The RPC target depends on whether this mixin runs as the API
        # cell variant or the plain compute variant.
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
        # mox record phase: expected call sequence for a suspend.
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance, instance_actions.SUSPEND)
        rpcapi.suspend_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.suspend(self.context, instance)
        # vm_state stays ACTIVE until the manager acts; only the
        # task_state flips to SUSPENDING.
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertEqual(task_states.SUSPENDING,
                         instance.task_state)
def _test_suspend_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.suspend,
self.context, instance)
def test_suspend_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_suspend_fails(state)
    def test_resume(self):
        # Ensure instance can be resumed (if suspended).
        instance = self._create_instance_obj(
            params=dict(vm_state=vm_states.SUSPENDED))
        self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
        self.assertIsNone(instance.task_state)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        # Pick the RPC API matching the cell variant under test.
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'resume_instance')
        # mox record phase: expected call sequence for a resume.
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance, instance_actions.RESUME)
        rpcapi.resume_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.resume(self.context, instance)
        # Still SUSPENDED until the manager acts; task_state is RESUMING.
        self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
        self.assertEqual(task_states.RESUMING,
                         instance.task_state)
    def test_start(self):
        """start() on a STOPPED instance records the action, casts
        start_instance, and sets task_state to POWERING_ON."""
        params = dict(vm_state=vm_states.STOPPED)
        instance = self._create_instance_obj(params=params)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance, instance_actions.START)
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'start_instance')
        rpcapi.start_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.start(self.context, instance)
        self.assertEqual(task_states.POWERING_ON,
                         instance.task_state)
def test_start_invalid_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.start,
self.context, instance)
def test_start_no_host(self):
params = dict(vm_state=vm_states.STOPPED, host='')
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.start,
self.context, instance)
    def _test_stop(self, vm_state, force=False):
        """Drive stop()/force_stop() from *vm_state* and verify the
        POWERING_OFF task state and the progress reset."""
        # Make sure 'progress' gets reset
        params = dict(task_state=None, progress=99, vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        # mox record phase: expected call sequence for a stop.
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance, instance_actions.STOP)
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'stop_instance')
        rpcapi.stop_instance(self.context, instance, do_cast=True)
        self.mox.ReplayAll()
        if force:
            self.compute_api.force_stop(self.context, instance)
        else:
            self.compute_api.stop(self.context, instance)
        self.assertEqual(task_states.POWERING_OFF,
                         instance.task_state)
        self.assertEqual(0, instance.progress)
    def test_stop(self):
        """stop() succeeds from ACTIVE."""
        self._test_stop(vm_states.ACTIVE)
    def test_stop_stopped_instance_with_bypass(self):
        """force_stop() bypasses the state check even when already STOPPED."""
        self._test_stop(vm_states.STOPPED, force=True)
def _test_stop_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.ERROR]))
for state in invalid_vm_states:
self._test_stop_invalid_state(state)
def test_stop_a_stopped_inst(self):
params = {'vm_state': vm_states.STOPPED}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_no_host(self):
params = {'host': ''}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.stop,
self.context, instance)
    def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
        """Drive a successful reboot of *reboot_type* from the given
        vm_state/task_state and verify the expected RPC cast."""
        # Ensure instance can be soft rebooted.
        inst = self._create_instance_obj()
        inst.vm_state = vm_state
        inst.task_state = task_state
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(inst, 'save')
        # A save from REBOOTING is allowed so a HARD reboot can preempt
        # an in-flight soft reboot.
        inst.save(expected_task_state=[None, task_states.REBOOTING])
        self.compute_api._record_action_start(self.context, inst,
                                              instance_actions.REBOOT)
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
        rpcapi.reboot_instance(self.context, instance=inst,
                               block_device_info=None,
                               reboot_type=reboot_type)
        self.mox.ReplayAll()
        self.compute_api.reboot(self.context, inst, reboot_type)
def _test_reboot_type_fails(self, reboot_type, **updates):
inst = self._create_instance_obj()
inst.update(updates)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
self.context, inst, reboot_type)
    # State-matrix cases for _test_reboot_type()/_test_reboot_type_fails():
    # HARD reboots are allowed from ACTIVE and ERROR (even mid-REBOOTING),
    # while SOFT reboots require a launched, cleanly running ACTIVE
    # instance; everything else must raise InstanceInvalidState.
    def test_reboot_hard_active(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD')
    def test_reboot_hard_error(self):
        self._test_reboot_type(vm_states.ERROR, 'HARD')
    def test_reboot_hard_rebooting(self):
        self._test_reboot_type(vm_states.ACTIVE, 'HARD',
                               task_state=task_states.REBOOTING)
    def test_reboot_hard_rescued(self):
        self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
    def test_reboot_hard_error_not_launched(self):
        self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
                                     launched_at=None)
    def test_reboot_soft(self):
        self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
    def test_reboot_soft_error(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)
    def test_reboot_soft_paused(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)
    def test_reboot_soft_stopped(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)
    def test_reboot_soft_suspended(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)
    def test_reboot_soft_rebooting(self):
        self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
    def test_reboot_soft_rebooting_hard(self):
        self._test_reboot_type_fails('SOFT',
                                     task_state=task_states.REBOOTING_HARD)
    def test_reboot_soft_rescued(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
    def test_reboot_soft_error_not_launched(self):
        self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
                                     launched_at=None)
    def _test_delete_resizing_part(self, inst, deltas):
        """Record the extra expectations for deleting an instance caught
        mid-resize (RESIZE_FINISH): the migration is looked up and the
        quota deltas are rebased on the old flavor."""
        fake_db_migration = test_migration.fake_db_migration()
        migration = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            fake_db_migration)
        inst.instance_type_id = migration.new_instance_type_id
        old_flavor = {'vcpus': 1,
                      'memory_mb': 512}
        # Quotas are released against the old flavor's footprint.
        deltas['cores'] = -old_flavor['vcpus']
        deltas['ram'] = -old_flavor['memory_mb']
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(flavors, 'get_flavor')
        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, inst.uuid, 'post-migrating').AndReturn(migration)
        flavors.get_flavor(migration.old_instance_type_id).AndReturn(
            old_flavor)
    def _test_delete_resized_part(self, inst):
        """Record the expectations for deleting a RESIZED instance: the
        pending resize is confirmed (synchronously, cast=False) before
        the delete proceeds."""
        migration = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, inst.uuid, 'finished').AndReturn(migration)
        self.compute_api._downsize_quota_delta(self.context, inst
                                               ).AndReturn('deltas')
        fake_quotas = objects.Quotas.from_reservations(self.context,
                                                       ['rsvs'])
        self.compute_api._reserve_quota_delta(self.context, 'deltas', inst,
                                              ).AndReturn(fake_quotas)
        self.compute_api._record_action_start(
            self.context, inst, instance_actions.CONFIRM_RESIZE)
        self.compute_api.compute_rpcapi.confirm_resize(
            self.context, inst, migration,
            migration['source_compute'], fake_quotas.reservations, cast=False)
    def _test_delete_shelved_part(self, inst):
        """Record the snapshot-image cleanup expectation for deleting a
        shelved instance.  The SHELVED_IMAGE* sentinel stored in
        system_metadata selects which outcome the stubbed image API
        simulates: success, not-found, not-authorized, or an
        unexpected error."""
        image_api = self.compute_api.image_api
        self.mox.StubOutWithMock(image_api, 'delete')
        snapshot_id = inst.system_metadata.get('shelved_image_id')
        if snapshot_id == SHELVED_IMAGE:
            image_api.delete(self.context, snapshot_id).AndReturn(True)
        elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
            image_api.delete(self.context, snapshot_id).AndRaise(
                exception.ImageNotFound(image_id=snapshot_id))
        elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
            image_api.delete(self.context, snapshot_id).AndRaise(
                exception.ImageNotAuthorized(image_id=snapshot_id))
        elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
            image_api.delete(self.context, snapshot_id).AndRaise(
                test.TestingException("Unexpected error"))
    def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
        """Record the local-delete expectations used when the instance's
        compute host is down: notifications, network deallocation, and a
        direct DB destroy.  Mutates *updates* with the fields the test
        later asserts on the instance."""
        inst.info_cache.delete()
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context, inst,
            '%s.start' % delete_type)
        self.context.elevated().AndReturn(self.context)
        self.compute_api.network_api.deallocate_for_instance(
            self.context, inst)
        # soft_delete ends in SOFT_DELETED, plain delete in DELETED.
        state = ('soft' in delete_type and vm_states.SOFT_DELETED or
                 vm_states.DELETED)
        updates.update({'vm_state': state,
                        'task_state': None,
                        'terminated_at': delete_time})
        inst.save()
        updates.update({'deleted_at': delete_time,
                        'deleted': True})
        fake_inst = fake_instance.fake_db_instance(**updates)
        db.instance_destroy(self.context, inst.uuid,
                            constraint=None).AndReturn(fake_inst)
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier,
            self.context, inst, '%s.end' % delete_type,
            system_metadata=inst.system_metadata)
    def _test_delete(self, delete_type, **attrs):
        """Template for delete()/soft_delete()/force_delete() tests.

        *delete_type* names the compute API method to invoke; *attrs*
        pre-set instance fields (vm_state, task_state, host, ...) that
        steer the scenario: mid-resize, resized, shelved, or down-host
        local deletion.  See the NOTE block below for the expected
        branching.
        """
        reservations = ['fake-resv']
        inst = self._create_instance_obj()
        inst.update(attrs)
        inst._context = self.context
        deltas = {'instances': -1,
                  'cores': -inst.vcpus,
                  'ram': -inst.memory_mb}
        delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                        tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(delete_time)
        task_state = (delete_type == 'soft_delete' and
                      task_states.SOFT_DELETING or task_states.DELETING)
        updates = {'progress': 0, 'task_state': task_state}
        if delete_type == 'soft_delete':
            updates['deleted_at'] = delete_time
        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
        self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                                 'service_is_up')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(inst.info_cache, 'delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
        if (inst.vm_state in
            (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
            self._test_delete_shelved_part(inst)
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
        self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, inst.uuid).AndReturn([])
        inst.save()
        if inst.task_state == task_states.RESIZE_FINISH:
            self._test_delete_resizing_part(inst, deltas)
        quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
                             user_id=inst.user_id,
                             expire=mox.IgnoreArg(),
                             **deltas).AndReturn(reservations)
        # NOTE(comstud): This is getting messy. But what we are wanting
        # to test is:
        # If cells is enabled and we're the API cell:
        #   * Cast to cells_rpcapi.<method> with reservations=None
        #   * Commit reservations
        # Otherwise:
        #   * Check for downed host
        #   * If downed host:
        #     * Clean up instance, destroying it, sending notifications.
        #       (Tested in _test_downed_host_part())
        #     * Commit reservations
        #   * If not downed host:
        #     * Record the action start.
        #     * Cast to compute_rpcapi.<method> with the reservations
        cast = True
        commit_quotas = True
        if self.cell_type != 'api':
            if inst.vm_state == vm_states.RESIZED:
                self._test_delete_resized_part(inst)
            self.context.elevated().AndReturn(self.context)
            db.service_get_by_compute_host(
                self.context, inst.host).AndReturn(
                test_service.fake_service)
            self.compute_api.servicegroup_api.service_is_up(
                mox.IsA(objects.Service)).AndReturn(
                inst.host != 'down-host')
            if inst.host == 'down-host':
                self._test_downed_host_part(inst, updates, delete_time,
                                            delete_type)
                cast = False
            else:
                # Happens on the manager side
                commit_quotas = False
        if cast:
            if self.cell_type != 'api':
                self.compute_api._record_action_start(self.context, inst,
                                                      instance_actions.DELETE)
            if commit_quotas:
                cast_reservations = None
            else:
                cast_reservations = reservations
            if delete_type == 'soft_delete':
                rpcapi.soft_delete_instance(self.context, inst,
                                            reservations=cast_reservations)
            elif delete_type in ['delete', 'force_delete']:
                rpcapi.terminate_instance(self.context, inst, [],
                                          reservations=cast_reservations)
        if commit_quotas:
            # Local delete or when we're testing API cell.
            quota.QUOTAS.commit(self.context, reservations,
                                project_id=inst.project_id,
                                user_id=inst.user_id)
        self.mox.ReplayAll()
        getattr(self.compute_api, delete_type)(self.context, inst)
        for k, v in updates.items():
            self.assertEqual(inst[k], v)
    # Delete scenarios driven through _test_delete(): plain, soft and
    # forced deletes, resize/shelve interactions, and the down-host
    # local-deletion path.
    def test_delete(self):
        self._test_delete('delete')
    def test_delete_if_not_launched(self):
        self._test_delete('delete', launched_at=None)
    def test_delete_in_resizing(self):
        self._test_delete('delete', task_state=task_states.RESIZE_FINISH)
    def test_delete_in_resized(self):
        self._test_delete('delete', vm_state=vm_states.RESIZED)
    def test_delete_shelved(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
        self._test_delete('delete',
                          vm_state=vm_states.SHELVED,
                          system_metadata=fake_sys_meta)
    def test_delete_shelved_offloaded(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
        self._test_delete('delete',
                          vm_state=vm_states.SHELVED_OFFLOADED,
                          system_metadata=fake_sys_meta)
    def test_delete_shelved_image_not_found(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
        self._test_delete('delete',
                          vm_state=vm_states.SHELVED_OFFLOADED,
                          system_metadata=fake_sys_meta)
    def test_delete_shelved_image_not_authorized(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
        self._test_delete('delete',
                          vm_state=vm_states.SHELVED_OFFLOADED,
                          system_metadata=fake_sys_meta)
    def test_delete_shelved_exception(self):
        fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
        self._test_delete('delete',
                          vm_state=vm_states.SHELVED,
                          system_metadata=fake_sys_meta)
    def test_delete_with_down_host(self):
        self._test_delete('delete', host='down-host')
    def test_delete_soft_with_down_host(self):
        self._test_delete('soft_delete', host='down-host')
    def test_delete_soft(self):
        self._test_delete('soft_delete')
    def test_delete_forced(self):
        self._test_delete('force_delete', vm_state=vm_states.SOFT_DELETED)
    def test_delete_fast_if_host_not_set(self):
        """With no host assigned, delete() destroys the instance locally
        (or casts to the API cell) without consulting a compute
        service's liveness."""
        inst = self._create_instance_obj()
        inst.host = ''
        quotas = quotas_obj.Quotas(self.context)
        updates = {'progress': 0, 'task_state': task_states.DELETING}
        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(db, 'constraint')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
        db.block_device_mapping_get_all_by_instance(self.context,
                                                    inst.uuid,
                                                    use_slave=False).AndReturn([])
        inst.save()
        self.compute_api._create_reservations(self.context,
                                              inst, inst.task_state,
                                              inst.project_id, inst.user_id
                                              ).AndReturn(quotas)
        # API cell: cast to the child cell; otherwise: notify, destroy
        # the DB row directly, and notify again.
        if self.cell_type == 'api':
            rpcapi.terminate_instance(
                    self.context, inst,
                    mox.IsA(objects.BlockDeviceMappingList),
                    reservations=None)
        else:
            compute_utils.notify_about_instance_usage(
                    self.compute_api.notifier, self.context,
                    inst, 'delete.start')
            db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
            delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                            tzinfo=iso8601.iso8601.Utc())
            updates['deleted_at'] = delete_time
            updates['deleted'] = True
            fake_inst = fake_instance.fake_db_instance(**updates)
            db.instance_destroy(self.context, inst.uuid,
                                constraint='constraint').AndReturn(fake_inst)
            compute_utils.notify_about_instance_usage(
                    self.compute_api.notifier, self.context,
                    inst, 'delete.end',
                    system_metadata=inst.system_metadata)
        self.mox.ReplayAll()
        self.compute_api.delete(self.context, inst)
        for k, v in updates.items():
            self.assertEqual(inst[k], v)
    def test_local_delete_with_deleted_volume(self):
        """Local delete must tolerate an already-deleted volume: the
        VolumeNotFound raised by terminate_connection is swallowed and
        the BDM and instance are still destroyed."""
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 42, 'volume_id': 'volume_id',
                 'source_type': 'volume', 'destination_type': 'volume',
                 'delete_on_termination': False}))]
        # NOTE(review): 'rservations' looks like a typo for
        # 'reservations'; harmless if _local_delete() passes it
        # positionally — confirm against its call site before renaming.
        def _fake_do_delete(context, instance, bdms,
                            rservations=None, local=False):
            pass
        inst = self._create_instance_obj()
        inst._context = self.context
        self.mox.StubOutWithMock(inst, 'destroy')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(inst.info_cache, 'delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute_api.volume_api,
                                 'terminate_connection')
        self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')
        inst.info_cache.delete()
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context,
            inst, 'delete.start')
        self.context.elevated().MultipleTimes().AndReturn(self.context)
        if self.cell_type != 'api':
            self.compute_api.network_api.deallocate_for_instance(
                self.context, inst)
        self.compute_api.volume_api.terminate_connection(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
            AndRaise(exception. VolumeNotFound('volume_id'))
        bdms[0].destroy(self.context)
        inst.destroy()
        compute_utils.notify_about_instance_usage(
            self.compute_api.notifier, self.context,
            inst, 'delete.end',
            system_metadata=inst.system_metadata)
        self.mox.ReplayAll()
        self.compute_api._local_delete(self.context, inst, bdms,
                                       'delete',
                                       _fake_do_delete)
    def test_delete_disabled(self):
        """disable_terminate set: delete() is a no-op — no DB update is
        expected (nothing recorded before ReplayAll)."""
        inst = self._create_instance_obj()
        inst.disable_terminate = True
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.ReplayAll()
        self.compute_api.delete(self.context, inst)
    def test_delete_soft_rollback(self):
        """If saving the instance state fails mid soft_delete, the
        exception propagates to the caller."""
        inst = self._create_instance_obj()
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(inst, 'save')
        delete_time = datetime.datetime(1955, 11, 5)
        timeutils.set_time_override(delete_time)
        db.block_device_mapping_get_all_by_instance(
            self.context, inst.uuid, use_slave=False).AndReturn([])
        inst.save().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute_api.soft_delete, self.context, inst)
    def _test_confirm_resize(self, mig_ref_passed=False):
        """Drive confirm_resize(): the downsize quota delta is reserved,
        the migration flips to 'confirming', and the confirmation is
        cast to the source compute.  With *mig_ref_passed* the caller
        supplies the migration instead of it being looked up."""
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_mig, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'confirm_resize')
        self.context.elevated().AndReturn(self.context)
        if not mig_ref_passed:
            objects.Migration.get_by_instance_and_status(
                self.context, fake_inst['uuid'], 'finished').AndReturn(
                    fake_mig)
        self.compute_api._downsize_quota_delta(self.context,
                                               fake_inst).AndReturn('deltas')
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        self.compute_api._reserve_quota_delta(self.context, 'deltas',
                                              fake_inst).AndReturn(fake_quotas)
        def _check_mig(expected_task_state=None):
            # Verified at save() time: migration status moved first.
            self.assertEqual('confirming', fake_mig.status)
        fake_mig.save().WithSideEffects(_check_mig)
        # In the cells variants quotas are committed locally and no
        # reservations travel with the cast.
        if self.cell_type:
            fake_quotas.commit(self.context)
        self.compute_api._record_action_start(self.context, fake_inst,
                                              'confirmResize')
        self.compute_api.compute_rpcapi.confirm_resize(
            self.context, fake_inst, fake_mig, 'compute-source',
            [] if self.cell_type else fake_quotas.reservations)
        self.mox.ReplayAll()
        if mig_ref_passed:
            self.compute_api.confirm_resize(self.context, fake_inst,
                                            migration=fake_mig)
        else:
            self.compute_api.confirm_resize(self.context, fake_inst)
    # confirm_resize() may be handed the migration explicitly or look it
    # up itself; both paths share _test_confirm_resize().
    def test_confirm_resize(self):
        self._test_confirm_resize()
    def test_confirm_resize_with_migration_ref(self):
        self._test_confirm_resize(mig_ref_passed=True)
    def _test_revert_resize(self):
        """Replay the revert_resize flow against recorded mox expectations."""
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        # NOTE: mox expectations below are order-sensitive; they mirror the
        # exact call sequence inside compute_api.revert_resize().
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_reverse_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.mox.StubOutWithMock(fake_mig, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'revert_resize')
        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
            self.context, fake_inst['uuid'], 'finished').AndReturn(
                fake_mig)
        self.compute_api._reverse_upsize_quota_delta(
            self.context, fake_mig).AndReturn('deltas')
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        self.compute_api._reserve_quota_delta(self.context, 'deltas',
                                              fake_inst).AndReturn(fake_quotas)

        def _check_state(expected_task_state=None):
            # Instance task_state must be RESIZE_REVERTING before save().
            self.assertEqual(task_states.RESIZE_REVERTING,
                             fake_inst.task_state)

        fake_inst.save(expected_task_state=[None]).WithSideEffects(
            _check_state)

        def _check_mig(expected_task_state=None):
            # Migration status must be flipped to 'reverting' before save().
            self.assertEqual('reverting', fake_mig.status)

        fake_mig.save().WithSideEffects(_check_mig)
        if self.cell_type:
            # API cell commits reservations locally in a cells deployment.
            fake_quotas.commit(self.context)
        self.compute_api._record_action_start(self.context, fake_inst,
                                              'revertResize')
        self.compute_api.compute_rpcapi.revert_resize(
                self.context, fake_inst, fake_mig, 'compute-dest',
                [] if self.cell_type else fake_quotas.reservations)
        self.mox.ReplayAll()
        self.compute_api.revert_resize(self.context, fake_inst)
    def test_revert_resize(self):
        # Happy-path revert of a finished resize.
        self._test_revert_resize()
    def test_revert_resize_concurent_fail(self):
        """A concurrent task-state change during revert rolls back quotas.

        The instance save() raises UnexpectedTaskStateError (simulating a
        racing revert), and the reserved quota deltas must be rolled back.
        """
        params = dict(vm_state=vm_states.RESIZED)
        fake_inst = self._create_instance_obj(params=params)
        fake_mig = objects.Migration._from_db_object(
            self.context, objects.Migration(),
            test_migration.fake_db_migration())
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(objects.Migration,
                                 'get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_reverse_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.context.elevated().AndReturn(self.context)
        objects.Migration.get_by_instance_and_status(
                self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig)
        delta = ['delta']
        self.compute_api._reverse_upsize_quota_delta(
            self.context, fake_mig).AndReturn(delta)
        resvs = ['resvs']
        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
        self.compute_api._reserve_quota_delta(
            self.context, delta, fake_inst).AndReturn(fake_quotas)
        # Simulate another thread having already changed the task state.
        exc = exception.UnexpectedTaskStateError(
            actual=task_states.RESIZE_REVERTING, expected=None)
        fake_inst.save(expected_task_state=[None]).AndRaise(exc)
        # The reservation must be rolled back on failure.
        fake_quotas.rollback(self.context)
        self.mox.ReplayAll()
        self.assertRaises(exception.UnexpectedTaskStateError,
                          self.compute_api.revert_resize,
                          self.context,
                          fake_inst)
    def _test_resize(self, flavor_id_passed=True,
                     same_host=False, allow_same_host=False,
                     allow_mig_same_host=False,
                     project_id=None,
                     extra_kwargs=None,
                     same_flavor=False):
        """Drive resize()/migrate() through recorded mox expectations.

        :param flavor_id_passed: True exercises resize (new flavor id),
            False exercises migrate (flavor unchanged).
        :param project_id: set to exercise an instance owned by a project
            other than the (admin) context's.
        :param same_flavor: resize to the instance's current flavor.
        """
        # NOTE(review): ``same_host`` is accepted for symmetry with the
        # test names but never read in this body — TODO confirm intent.
        if extra_kwargs is None:
            extra_kwargs = {}
        self.flags(allow_resize_to_same_host=allow_same_host,
                   allow_migrate_to_same_host=allow_mig_same_host)
        params = {}
        if project_id is not None:
            # To test instance w/ different project id than context (admin)
            params['project_id'] = project_id
        fake_inst = self._create_instance_obj(params=params)
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(fake_inst, 'save')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        current_flavor = flavors.extract_flavor(fake_inst)
        if flavor_id_passed:
            new_flavor = dict(id=200, flavorid='new-flavor-id',
                              name='new_flavor', disabled=False)
            if same_flavor:
                cur_flavor = flavors.extract_flavor(fake_inst)
                new_flavor['id'] = cur_flavor['id']
            flavors.get_flavor_by_flavor_id(
                    'new-flavor-id',
                    read_deleted='no').AndReturn(new_flavor)
        else:
            new_flavor = current_flavor
        # Quota work and the RPC cast only happen when something actually
        # changes, or always in the compute cell.
        if (self.cell_type == 'compute' or
                not (flavor_id_passed and same_flavor)):
            resvs = ['resvs']
            project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                               fake_inst)
            fake_quotas = objects.Quotas.from_reservations(self.context,
                                                           resvs)
            self.compute_api._upsize_quota_delta(
                    self.context, new_flavor,
                    current_flavor).AndReturn('deltas')
            self.compute_api._reserve_quota_delta(self.context, 'deltas',
                                                  fake_inst).AndReturn(fake_quotas)

            def _check_state(expected_task_state=None):
                # State must be RESIZE_PREP with progress reset at save time.
                self.assertEqual(task_states.RESIZE_PREP,
                                 fake_inst.task_state)
                self.assertEqual(fake_inst.progress, 0)
                for key, value in extra_kwargs.items():
                    self.assertEqual(value, getattr(fake_inst, key))

            fake_inst.save(expected_task_state=[None]).WithSideEffects(
                    _check_state)
            if allow_same_host:
                filter_properties = {'ignore_hosts': []}
            else:
                filter_properties = {'ignore_hosts': [fake_inst['host']]}
            # NOTE(review): on the migrate path with same-host migration
            # disallowed the host is appended a second time; presumably this
            # mirrors the API's own behavior — TODO confirm against
            # compute_api.resize().
            if not flavor_id_passed and not allow_mig_same_host:
                filter_properties['ignore_hosts'].append(fake_inst['host'])
            expected_reservations = fake_quotas.reservations
            if self.cell_type == 'api':
                # The API cell commits quota and creates the Migration
                # record itself rather than delegating to conductor.
                fake_quotas.commit(self.context)
                expected_reservations = []
                mig = objects.Migration()

                def _get_migration():
                    return mig

                def _check_mig(ctxt):
                    self.assertEqual(fake_inst.uuid, mig.instance_uuid)
                    self.assertEqual(current_flavor['id'],
                                     mig.old_instance_type_id)
                    self.assertEqual(new_flavor['id'],
                                     mig.new_instance_type_id)
                    self.assertEqual('finished', mig.status)

                self.stubs.Set(objects, 'Migration', _get_migration)
                self.mox.StubOutWithMock(self.context, 'elevated')
                self.mox.StubOutWithMock(mig, 'create')
                self.context.elevated().AndReturn(self.context)
                mig.create(self.context).WithSideEffects(_check_mig)
            if flavor_id_passed:
                self.compute_api._record_action_start(self.context, fake_inst,
                                                      'resize')
            else:
                self.compute_api._record_action_start(self.context, fake_inst,
                                                      'migrate')
            scheduler_hint = {'filter_properties': filter_properties}
            self.compute_api.compute_task_api.resize_instance(
                    self.context, fake_inst, extra_kwargs,
                    scheduler_hint=scheduler_hint,
                    flavor=new_flavor, reservations=expected_reservations)
        self.mox.ReplayAll()
        if flavor_id_passed:
            self.compute_api.resize(self.context, fake_inst,
                                    flavor_id='new-flavor-id',
                                    **extra_kwargs)
        else:
            self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
    def _test_migrate(self, *args, **kwargs):
        # Migrate is resize without a flavor change.
        self._test_resize(*args, flavor_id_passed=False, **kwargs)
    def test_resize(self):
        # Default resize scenario (new flavor id, different host).
        self._test_resize()
    def test_resize_with_kwargs(self):
        # Extra kwargs must be applied to the instance before save().
        self._test_resize(extra_kwargs=dict(cow='moo'))
    def test_resize_same_host_and_allowed(self):
        # allow_resize_to_same_host=True -> no host in ignore_hosts.
        self._test_resize(same_host=True, allow_same_host=True)
    def test_resize_same_host_and_not_allowed(self):
        # allow_resize_to_same_host=False -> current host is ignored.
        self._test_resize(same_host=True, allow_same_host=False)
    def test_resize_different_project_id(self):
        # Admin context resizing an instance of another project.
        self._test_resize(project_id='different')
    def test_migrate(self):
        # Default migrate scenario (same flavor, different host).
        self._test_migrate()
    def test_migrate_with_kwargs(self):
        # Extra kwargs must be applied to the instance before save().
        self._test_migrate(extra_kwargs=dict(cow='moo'))
    def test_migrate_same_host_and_allowed(self):
        # allow_resize_to_same_host=True on the migrate path.
        self._test_migrate(same_host=True, allow_same_host=True)
    def test_migrate_same_host_and_not_allowed(self):
        # allow_resize_to_same_host=False on the migrate path.
        self._test_migrate(same_host=True, allow_same_host=False)
    def test_migrate_different_project_id(self):
        # Admin context migrating an instance of another project.
        self._test_migrate(project_id='different')
    def test_resize_invalid_flavor_fails(self):
        """resize() with an unknown flavor id raises FlavorNotFound."""
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
        exc = exception.FlavorNotFound(flavor_id='flavor-id')
        flavors.get_flavor_by_flavor_id('flavor-id',
                                        read_deleted='no').AndRaise(exc)
        self.mox.ReplayAll()
        self.assertRaises(exception.FlavorNotFound,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')
    def test_resize_disabled_flavor_fails(self):
        """resize() to a disabled flavor raises FlavorNotFound."""
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
        # Flavor exists but is administratively disabled.
        fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
                           disabled=True)
        flavors.get_flavor_by_flavor_id(
                'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        self.mox.ReplayAll()
        self.assertRaises(exception.FlavorNotFound,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')
    @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
    def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
        """resize() to a root_gb=0 flavor raises CannotResizeDisk."""
        fake_inst = self._create_instance_obj()
        fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
                           root_gb=0)
        get_flavor_by_flavor_id.return_value = fake_flavor
        self.assertRaises(exception.CannotResizeDisk,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')
    def test_resize_quota_exceeds_fails(self):
        """OverQuota during reservation is translated to TooManyInstances."""
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'resize_instance')
        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
        current_flavor = flavors.extract_flavor(fake_inst)
        fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
                           disabled=False)
        flavors.get_flavor_by_flavor_id(
                'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        deltas = dict(resource=0)
        self.compute_api._upsize_quota_delta(
                self.context, fake_flavor,
                current_flavor).AndReturn(deltas)
        # Build an OverQuota payload with zero headroom for 'resource'.
        usage = dict(in_use=0, reserved=0)
        quotas = {'resource': 0}
        usages = {'resource': usage}
        overs = ['resource']
        headroom = {'resource': quotas['resource'] -
                    (usages['resource']['in_use'] + usages['resource']['reserved'])}
        over_quota_args = dict(quotas=quotas,
                               usages=usages,
                               overs=overs,
                               headroom=headroom)
        self.compute_api._reserve_quota_delta(self.context, deltas,
                                              fake_inst).AndRaise(
            exception.OverQuota(**over_quota_args))
        self.mox.ReplayAll()
        self.assertRaises(exception.TooManyInstances,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')
    def test_pause(self):
        # Ensure instance can be paused.
        instance = self._create_instance_obj()
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        # In an API cell the request is forwarded via the cells RPC API.
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'pause_instance')
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance, instance_actions.PAUSE)
        rpcapi.pause_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.pause(self.context, instance)
        # vm_state is untouched; only task_state transitions to PAUSING.
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertEqual(task_states.PAUSING,
                         instance.task_state)
    def _test_pause_fails(self, vm_state):
        """pause() must reject an instance in the given vm_state."""
        params = dict(vm_state=vm_state)
        instance = self._create_instance_obj(params=params)
        self.assertIsNone(instance.task_state)
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.pause,
                          self.context, instance)
    def test_pause_fails_invalid_states(self):
        # Every vm_state except ACTIVE must be rejected by pause().
        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
        for state in invalid_vm_states:
            self._test_pause_fails(state)
    def test_unpause(self):
        # Ensure instance can be unpaused.
        params = dict(vm_state=vm_states.PAUSED)
        instance = self._create_instance_obj(params=params)
        self.assertEqual(instance.vm_state, vm_states.PAUSED)
        self.assertIsNone(instance.task_state)
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api,
                                 '_record_action_start')
        # In an API cell the request is forwarded via the cells RPC API.
        if self.cell_type == 'api':
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
        instance.save(expected_task_state=[None])
        self.compute_api._record_action_start(self.context,
                                              instance, instance_actions.UNPAUSE)
        rpcapi.unpause_instance(self.context, instance)
        self.mox.ReplayAll()
        self.compute_api.unpause(self.context, instance)
        # vm_state is untouched; only task_state transitions to UNPAUSING.
        self.assertEqual(vm_states.PAUSED, instance.vm_state)
        self.assertEqual(task_states.UNPAUSING, instance.task_state)
    def test_swap_volume_volume_api_usage(self):
        # This test ensures that volume_id arguments are passed to volume_api
        # and that volumes return to previous states in case of error.
        def fake_vol_api_begin_detaching(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            volumes[volume_id]['status'] = 'detaching'

        def fake_vol_api_roll_detaching(context, volume_id):
            # Undo begin_detaching: detaching -> in-use.
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'detaching':
                volumes[volume_id]['status'] = 'in-use'

        def fake_vol_api_reserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            self.assertEqual(volumes[volume_id]['status'], 'available')
            volumes[volume_id]['status'] = 'attaching'

        def fake_vol_api_unreserve(context, volume_id):
            # Undo reserve_volume: attaching -> available.
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'attaching':
                volumes[volume_id]['status'] = 'available'

        def fake_swap_volume_exc(context, instance, old_volume_id,
                                 new_volume_id):
            raise AttributeError  # Random exception

        # Should fail if VM state is not valid
        instance = {'vm_state': vm_states.BUILDING,
                    'launched_at': timeutils.utcnow(),
                    'locked': False,
                    'availability_zone': 'fake_az',
                    'uuid': 'fake'}
        volumes = {}
        old_volume_id = uuidutils.generate_uuid()
        volumes[old_volume_id] = {'id': old_volume_id,
                                  'display_name': 'old_volume',
                                  'attach_status': 'attached',
                                  'instance_uuid': 'fake',
                                  'size': 5,
                                  'status': 'in-use'}
        new_volume_id = uuidutils.generate_uuid()
        volumes[new_volume_id] = {'id': new_volume_id,
                                  'display_name': 'new_volume',
                                  'attach_status': 'detached',
                                  'instance_uuid': None,
                                  'size': 5,
                                  'status': 'available'}
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.swap_volume, self.context, instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None
        # Should fail if old volume is not attached
        volumes[old_volume_id]['attach_status'] = 'detached'
        self.assertRaises(exception.VolumeUnattached,
                          self.compute_api.swap_volume, self.context, instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[old_volume_id]['attach_status'] = 'attached'
        # Should fail if old volume's instance_uuid is not that of the instance
        volumes[old_volume_id]['instance_uuid'] = 'fake2'
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context, instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[old_volume_id]['instance_uuid'] = 'fake'
        # Should fail if new volume is attached
        volumes[new_volume_id]['attach_status'] = 'attached'
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context, instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[new_volume_id]['attach_status'] = 'detached'
        # Should fail if new volume is smaller than the old volume
        volumes[new_volume_id]['size'] = 4
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.swap_volume, self.context, instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        volumes[new_volume_id]['size'] = 5
        # Fail call to swap_volume
        self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
                       fake_vol_api_begin_detaching)
        self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
                       fake_vol_api_roll_detaching)
        self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
                       fake_vol_api_reserve)
        self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
                       fake_vol_api_unreserve)
        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
                       fake_swap_volume_exc)
        # On RPC failure both volumes must be rolled back to their
        # pre-swap states.
        self.assertRaises(AttributeError,
                          self.compute_api.swap_volume, self.context, instance,
                          volumes[old_volume_id], volumes[new_volume_id])
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
        # Should succeed
        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
                       lambda c, instance, old_volume_id, new_volume_id: True)
        self.compute_api.swap_volume(self.context, instance,
                                     volumes[old_volume_id],
                                     volumes[new_volume_id])
    def _test_snapshot_and_backup(self, is_snapshot=True,
                                  with_base_ref=False, min_ram=None,
                                  min_disk=None,
                                  create_fails=False):
        """Common scenario for snapshot() and backup().

        Verifies which system_metadata keys are (not) inherited into image
        properties, the image create call, the instance task-state
        transition, and the final RPC cast.

        :param is_snapshot: exercise snapshot() when True, backup() when
            False.
        :param create_fails: make the image-create call raise and assert
            the exception propagates (no save/RPC expected).
        """
        # 'cache_in_nova' is for testing non-inheritable properties
        # 'user_id' should also not be carried from sys_meta into
        # image property...since it should be set explicitly by
        # _create_image() in compute api.
        fake_sys_meta = dict(image_foo='bar', blah='bug?',
                             image_cache_in_nova='dropped',
                             cache_in_nova='dropped',
                             user_id='meow')
        if with_base_ref:
            fake_sys_meta['image_base_image_ref'] = 'fake-base-ref'
        params = dict(system_metadata=fake_sys_meta, locked=True)
        instance = self._create_instance_obj(params=params)
        fake_sys_meta.update(instance.system_metadata)
        extra_props = dict(cow='moo', cat='meow')
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(self.compute_api.image_api,
                                 'create')
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'snapshot_instance')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'backup_instance')
        image_type = is_snapshot and 'snapshot' or 'backup'
        # Build the expected image properties: non-inheritable and
        # explicitly-set keys are dropped, 'image_' prefixes stripped.
        expected_sys_meta = dict(fake_sys_meta)
        expected_sys_meta.pop('cache_in_nova')
        expected_sys_meta.pop('image_cache_in_nova')
        expected_sys_meta.pop('user_id')
        expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo')
        if with_base_ref:
            expected_sys_meta['base_image_ref'] = expected_sys_meta.pop(
                'image_base_image_ref')
        expected_props = {'instance_uuid': instance.uuid,
                          'user_id': self.context.user_id,
                          'image_type': image_type}
        expected_props.update(extra_props)
        expected_props.update(expected_sys_meta)
        expected_meta = {'name': 'fake-name',
                         'is_public': False,
                         'properties': expected_props}
        if is_snapshot:
            if min_ram is not None:
                expected_meta['min_ram'] = min_ram
            if min_disk is not None:
                expected_meta['min_disk'] = min_disk
        else:
            expected_props['backup_type'] = 'fake-backup-type'
        compute_utils.get_image_metadata(
            self.context, self.compute_api.image_api,
            FAKE_IMAGE_REF, instance).AndReturn(expected_meta)
        fake_image = dict(id='fake-image-id')
        mock_method = self.compute_api.image_api.create(
                self.context, expected_meta)
        if create_fails:
            mock_method.AndRaise(test.TestingException())
        else:
            mock_method.AndReturn(fake_image)

        def check_state(expected_task_state=None):
            # Save must happen with the matching pending task state set.
            expected_state = (is_snapshot and
                              task_states.IMAGE_SNAPSHOT_PENDING or
                              task_states.IMAGE_BACKUP)
            self.assertEqual(expected_state, instance.task_state)

        if not create_fails:
            instance.save(expected_task_state=[None]).WithSideEffects(
                check_state)
            if is_snapshot:
                self.compute_api.compute_rpcapi.snapshot_instance(
                        self.context, instance, fake_image['id'])
            else:
                self.compute_api.compute_rpcapi.backup_instance(
                        self.context, instance, fake_image['id'],
                        'fake-backup-type', 'fake-rotation')
        self.mox.ReplayAll()
        got_exc = False
        try:
            if is_snapshot:
                res = self.compute_api.snapshot(self.context, instance,
                                                'fake-name',
                                                extra_properties=extra_props)
            else:
                res = self.compute_api.backup(self.context, instance,
                                              'fake-name',
                                              'fake-backup-type',
                                              'fake-rotation',
                                              extra_properties=extra_props)
            self.assertEqual(fake_image, res)
        except test.TestingException:
            got_exc = True
        self.assertEqual(create_fails, got_exc)
    def test_snapshot(self):
        # Happy-path snapshot.
        self._test_snapshot_and_backup()
    def test_snapshot_fails(self):
        # Image create failure propagates out of snapshot().
        self._test_snapshot_and_backup(create_fails=True)
    def test_snapshot_invalid_state(self):
        """snapshot() must refuse conflicting task/vm states."""
        instance = self._create_instance_obj()
        # Snapshot already in progress.
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_SNAPSHOT
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
        # Backup already in progress.
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.IMAGE_BACKUP
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
        # Instance still building.
        instance.vm_state = vm_states.BUILDING
        instance.task_state = None
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.snapshot,
                          self.context, instance, 'fake-name')
    def test_snapshot_with_base_image_ref(self):
        # image_base_image_ref must carry into image props as base_image_ref.
        self._test_snapshot_and_backup(with_base_ref=True)
    def test_snapshot_min_ram(self):
        # min_ram is preserved in the snapshot image metadata.
        self._test_snapshot_and_backup(min_ram=42)
    def test_snapshot_min_disk(self):
        # min_disk is preserved in the snapshot image metadata.
        self._test_snapshot_and_backup(min_disk=42)
    def test_backup(self):
        # Happy-path backup.
        self._test_snapshot_and_backup(is_snapshot=False)
    def test_backup_fails(self):
        # Image create failure propagates out of backup().
        self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
def test_backup_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name',
'fake', 'fake')
    def test_backup_with_base_image_ref(self):
        # image_base_image_ref must carry into image props as base_image_ref.
        self._test_snapshot_and_backup(is_snapshot=False,
                                       with_base_ref=True)
    def test_snapshot_volume_backed(self):
        """snapshot_volume_backed() builds correct image meta and BDMs.

        Runs three passes: no block devices, one volume BDM (snapshotted),
        and with image-property mappings filtered down to 'ami' entries.
        """
        params = dict(locked=True)
        instance = self._create_instance_obj(params=params)
        instance['root_device_name'] = 'vda'
        instance_bdms = []
        image_meta = {
            'id': 'fake-image-id',
            'properties': {'mappings': []},
            'status': 'fake-status',
            'location': 'far-away',
            'owner': 'fake-tenant',
        }
        expect_meta = {
            'name': 'test-snapshot',
            'properties': {'root_device_name': 'vda',
                           'mappings': 'DONTCARE'},
            'size': 0,
            'is_public': False
        }

        def fake_get_all_by_instance(context, instance, use_slave=False):
            # Deep-copy so the API cannot mutate the fixture between passes.
            return copy.deepcopy(instance_bdms)

        def fake_image_create(context, image_meta, data=None):
            self.assertThat(image_meta, matchers.DictMatches(expect_meta))

        def fake_volume_get(context, volume_id):
            return {'id': volume_id, 'display_description': ''}

        def fake_volume_create_snapshot(context, volume_id, name, description):
            return {'id': '%s-snapshot' % volume_id}

        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_get_all_by_instance)
        self.stubs.Set(self.compute_api.image_api, 'create',
                       fake_image_create)
        self.stubs.Set(self.compute_api.volume_api, 'get',
                       fake_volume_get)
        self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
                       fake_volume_create_snapshot)
        # No block devices defined
        self.compute_api.snapshot_volume_backed(
            self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
        bdm = fake_block_device.FakeDbBlockDeviceDict(
                {'no_device': False, 'volume_id': '1', 'boot_index': 0,
                 'connection_info': 'inf', 'device_name': '/dev/vda',
                 'source_type': 'volume', 'destination_type': 'volume'})
        instance_bdms.append(bdm)
        expect_meta['properties']['bdm_v2'] = True
        expect_meta['properties']['block_device_mapping'] = []
        expect_meta['properties']['block_device_mapping'].append(
            {'guest_format': None, 'boot_index': 0, 'no_device': None,
             'image_id': None, 'volume_id': None, 'disk_bus': None,
             'volume_size': None, 'source_type': 'snapshot',
             'device_type': None, 'snapshot_id': '1-snapshot',
             'destination_type': 'volume', 'delete_on_termination': None})
        # All the db_only fields and the volume ones are removed
        self.compute_api.snapshot_volume_backed(
            self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
        image_mappings = [{'virtual': 'ami', 'device': 'vda'},
                          {'device': 'vda', 'virtual': 'ephemeral0'},
                          {'device': 'vdb', 'virtual': 'swap'},
                          {'device': 'vdc', 'virtual': 'ephemeral1'}]
        image_meta['properties']['mappings'] = image_mappings
        expect_meta['properties']['mappings'] = [
            {'virtual': 'ami', 'device': 'vda'}]
        # Check that the mappings from the image properties are included
        self.compute_api.snapshot_volume_backed(
            self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
    def test_volume_snapshot_create(self):
        """volume_snapshot_create() looks up the BDM, casts, returns info."""
        volume_id = '1'
        create_info = {'id': 'eyedee'}
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
                    'id': 123,
                    'device_name': '/dev/sda2',
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'connection_info': "{'fake': 'connection_info'}",
                    'volume_id': 1,
                    'boot_index': -1})
        fake_bdm['instance'] = fake_instance.fake_db_instance()
        fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
        fake_bdm = objects.BlockDeviceMapping._from_db_object(
                self.context, objects.BlockDeviceMapping(),
                fake_bdm, expected_attrs=['instance'])
        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_id')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'volume_snapshot_create')
        objects.BlockDeviceMapping.get_by_volume_id(
                self.context, volume_id,
                expected_attrs=['instance']).AndReturn(fake_bdm)
        self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
                fake_bdm['instance'], volume_id, create_info)
        self.mox.ReplayAll()
        snapshot = self.compute_api.volume_snapshot_create(self.context,
                volume_id, create_info)
        # The API synthesizes the snapshot dict from its inputs.
        expected_snapshot = {
            'snapshot': {
                'id': create_info['id'],
                'volumeId': volume_id,
            },
        }
        self.assertEqual(snapshot, expected_snapshot)
    def test_volume_snapshot_delete(self):
        """volume_snapshot_delete() looks up the BDM and casts the delete."""
        volume_id = '1'
        snapshot_id = '2'
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
                    'id': 123,
                    'device_name': '/dev/sda2',
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'connection_info': "{'fake': 'connection_info'}",
                    'volume_id': 1,
                    'boot_index': -1})
        fake_bdm['instance'] = fake_instance.fake_db_instance()
        fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
        fake_bdm = objects.BlockDeviceMapping._from_db_object(
                self.context, objects.BlockDeviceMapping(),
                fake_bdm, expected_attrs=['instance'])
        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_id')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'volume_snapshot_delete')
        objects.BlockDeviceMapping.get_by_volume_id(
                self.context, volume_id,
                expected_attrs=['instance']).AndReturn(fake_bdm)
        self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
                fake_bdm['instance'], volume_id, snapshot_id, {})
        self.mox.ReplayAll()
        self.compute_api.volume_snapshot_delete(self.context, volume_id,
                snapshot_id, {})
def _test_boot_volume_bootable(self, is_bootable=False):
def get_vol_data(*args, **kwargs):
return {'bootable': is_bootable}
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
with mock.patch.object(self.compute_api.volume_api, 'get',
side_effect=get_vol_data):
if not is_bootable:
self.assertRaises(exception.InvalidBDMVolumeNotBootable,
self.compute_api._get_bdm_image_metadata,
self.context, block_device_mapping)
else:
meta = self.compute_api._get_bdm_image_metadata(self.context,
block_device_mapping)
self.assertEqual({}, meta)
    def test_boot_volume_non_bootable(self):
        # Non-bootable boot volume must be rejected.
        self._test_boot_volume_bootable(False)
    def test_boot_volume_bootable(self):
        # Bootable boot volume is accepted (empty metadata returned).
        self._test_boot_volume_bootable(True)
def _create_instance_with_disabled_disk_config(self, object=False):
sys_meta = {"image_auto_disk_config": "Disabled"}
params = {"system_metadata": sys_meta}
instance = self._create_instance_obj(params=params)
if object:
return instance
return obj_base.obj_to_primitive(instance)
def _setup_fake_image_with_disabled_disk_config(self):
self.fake_image = {
'id': 1,
'name': 'fake_name',
'status': 'active',
'properties': {"auto_disk_config": "Disabled"},
}
def fake_show(obj, context, image_id, **kwargs):
return self.fake_image
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
return self.fake_image['id']
    def test_resize_with_disabled_auto_disk_config_fails(self):
        # resize(auto_disk_config=True) must fail when the image forbids it.
        fake_inst = self._create_instance_with_disabled_disk_config()
        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
                          self.compute_api.resize,
                          self.context, fake_inst,
                          auto_disk_config=True)
    def test_create_with_disabled_auto_disk_config_fails(self):
        # create(auto_disk_config=True) must fail when the image forbids it.
        image_id = self._setup_fake_image_with_disabled_disk_config()
        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
                          self.compute_api.create, self.context,
                          "fake_flavor", image_id, auto_disk_config=True)
    def test_rebuild_with_disabled_auto_disk_config_fails(self):
        # rebuild(auto_disk_config=True) must fail when the image forbids it.
        fake_inst = self._create_instance_with_disabled_disk_config(
            object=True)
        image_id = self._setup_fake_image_with_disabled_disk_config()
        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
                          self.compute_api.rebuild,
                          self.context,
                          fake_inst,
                          image_id,
                          "new password",
                          auto_disk_config=True)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.Instance, 'get_flavor')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    @mock.patch.object(compute_api.API, '_record_action_start')
    def test_rebuild(self, _record_action_start,
            _checks_for_create_and_rebuild, _check_auto_disk_config,
            _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
        """Rebuild with the same image delegates correctly to conductor."""
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        get_flavor.return_value = test_flavor.fake_flavor
        flavor = instance.get_flavor()
        image_href = ''
        image = {"min_ram": 10, "min_disk": 1,
                 "properties": {'architecture': 'x86_64'}}
        admin_pass = ''
        files_to_inject = []
        bdms = []
        _get_image.return_value = (None, image)
        bdm_get_by_instance_uuid.return_value = bdms
        with mock.patch.object(self.compute_api.compute_task_api,
                'rebuild_instance') as rebuild_instance:
            self.compute_api.rebuild(self.context, instance, image_href,
                    admin_pass, files_to_inject)
            # image_ref == orig_image_ref: same-image rebuild.
            rebuild_instance.assert_called_once_with(self.context,
                    instance=instance, new_pass=admin_pass,
                    injected_files=files_to_inject, image_ref=image_href,
                    orig_image_ref=image_href,
                    orig_sys_metadata=orig_system_metadata, bdms=bdms,
                    preserve_ephemeral=False, host=instance.host, kwargs={})
        _check_auto_disk_config.assert_called_once_with(image=image)
        _checks_for_create_and_rebuild.assert_called_once_with(self.context,
                None, image, flavor, {}, [])
        # rebuild() refreshes system_metadata from the image.
        self.assertNotEqual(orig_system_metadata, instance.system_metadata)
    # NOTE: mock arguments are injected bottom-up from the decorator stack.
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.Instance, 'get_flavor')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    @mock.patch.object(compute_api.API, '_record_action_start')
    def test_rebuild_change_image(self, _record_action_start,
            _checks_for_create_and_rebuild, _check_auto_disk_config,
            _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
        """Rebuild onto a *different* image and verify image-driven updates.

        The new image carries vm_mode 'xen' while the instance was 'hvm';
        after rebuild the instance must reflect the new image's vm_mode and
        the conductor must be told both the new and the original image refs.
        """
        orig_system_metadata = {}
        get_flavor.return_value = test_flavor.fake_flavor
        orig_image_href = 'orig_image'
        orig_image = {"min_ram": 10, "min_disk": 1,
                      "properties": {'architecture': 'x86_64',
                                     'vm_mode': 'hvm'}}
        new_image_href = 'new_image'
        new_image = {"min_ram": 10, "min_disk": 1,
                     "properties": {'architecture': 'x86_64',
                                    'vm_mode': 'xen'}}
        admin_pass = ''
        files_to_inject = []
        bdms = []
        instance = fake_instance.fake_instance_obj(self.context,
            vm_state=vm_states.ACTIVE, cell_name='fake-cell',
            launched_at=timeutils.utcnow(),
            system_metadata=orig_system_metadata,
            expected_attrs=['system_metadata'],
            image_ref=orig_image_href,
            vm_mode=vm_mode.HVM)
        flavor = instance.get_flavor()
        def get_image(context, image_href):
            # Serve the matching fake image for whichever ref is requested.
            if image_href == new_image_href:
                return (None, new_image)
            if image_href == orig_image_href:
                return (None, orig_image)
        _get_image.side_effect = get_image
        bdm_get_by_instance_uuid.return_value = bdms
        with mock.patch.object(self.compute_api.compute_task_api,
                'rebuild_instance') as rebuild_instance:
            self.compute_api.rebuild(self.context, instance, new_image_href,
                admin_pass, files_to_inject)
            rebuild_instance.assert_called_once_with(self.context,
                instance=instance, new_pass=admin_pass,
                injected_files=files_to_inject, image_ref=new_image_href,
                orig_image_ref=orig_image_href,
                orig_sys_metadata=orig_system_metadata, bdms=bdms,
                preserve_ephemeral=False, host=instance.host, kwargs={})
        _check_auto_disk_config.assert_called_once_with(image=new_image)
        _checks_for_create_and_rebuild.assert_called_once_with(self.context,
            None, new_image, flavor, {}, [])
        # vm_mode must now come from the new image's properties.
        self.assertEqual(vm_mode.XEN, instance.vm_mode)
    @mock.patch('nova.objects.Quotas.commit')
    @mock.patch('nova.objects.Quotas.reserve')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstanceAction.action_start')
    def test_restore(self, action_start, instance_save, quota_reserve,
                     quota_commit):
        """Restoring a SOFT_DELETED instance moves it to RESTORING and
        commits the quota reservation exactly once."""
        instance = self._create_instance_obj()
        instance.vm_state = vm_states.SOFT_DELETED
        instance.task_state = None
        instance.save()
        with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
            self.compute_api.restore(self.context, instance)
            rpc.restore_instance.assert_called_once_with(self.context,
                                                         instance)
        # Task state flips synchronously; quota commit happens once.
        self.assertEqual(instance.task_state, task_states.RESTORING)
        self.assertEqual(1, quota_commit.call_count)
def test_external_instance_event(self):
instances = [
objects.Instance(uuid='uuid1', host='host1'),
objects.Instance(uuid='uuid2', host='host1'),
objects.Instance(uuid='uuid3', host='host2'),
]
events = [
objects.InstanceExternalEvent(instance_uuid='uuid1'),
objects.InstanceExternalEvent(instance_uuid='uuid2'),
objects.InstanceExternalEvent(instance_uuid='uuid3'),
]
self.compute_api.compute_rpcapi = mock.MagicMock()
self.compute_api.external_instance_event(self.context,
instances, events)
method = self.compute_api.compute_rpcapi.external_instance_event
method.assert_any_call(self.context, instances[0:2], events[0:2])
method.assert_any_call(self.context, instances[2:], events[2:])
self.assertEqual(2, method.call_count)
def test_volume_ops_invalid_task_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
instance.task_state = 'Any'
volume_id = uuidutils.generate_uuid()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_volume,
self.context, instance, volume_id)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume,
self.context, instance, volume_id)
new_volume_id = uuidutils.generate_uuid()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume,
self.context, instance,
volume_id, new_volume_id)
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
}))]
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._get_bdm_image_metadata,
self.context,
bdms, legacy_bdm=True)
@mock.patch.object(cinder.API, 'get_snapshot',
side_effect=exception.CinderConnectionFailed(reason='error'))
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
instance = self._create_instance_obj()
instance_type = self._create_flavor()
bdm = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'snapshot_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdm)
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdms)
    # NOTE: mock args are injected bottom-up from the decorator stack; also
    # note the (pre-existing) missing underscore in the test name
    # ("instancewith") — kept as-is since the name is the public interface.
    @mock.patch.object(objects.Instance, 'create')
    @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
    @mock.patch.object(compute_api.API, '_populate_instance_names')
    @mock.patch.object(compute_api.API, '_populate_instance_for_create')
    @mock.patch.object(cinder.API, 'get',
                side_effect=exception.CinderConnectionFailed(reason='error'))
    def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get,
                                                      mock_create,
                                                      mock_names,
                                                      mock_ensure,
                                                      mock_inst_create):
        """If Cinder is down while creating the DB entry, the error must
        propagate and the half-created instance must be destroyed."""
        instance = self._create_instance_obj()
        instance['display_name'] = 'FAKE_DISPLAY_NAME'
        instance['shutdown_terminate'] = False
        instance_type = self._create_flavor()
        fake_image = {
            'id': 'fake-image-id',
            'properties': {'mappings': []},
            'status': 'fake-status',
            'location': 'far-away'}
        fake_security_group = None
        fake_num_instances = 1
        fake_index = 1
        # Volume-backed boot device forces the Cinder lookup that fails.
        bdm = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                {
                 'id': 1,
                 'volume_id': 1,
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'device_name': 'vda',
                 'boot_index': 0,
                 }))]
        with mock.patch.object(instance, "destroy") as destroy:
            self.assertRaises(exception.CinderConnectionFailed,
                            self.compute_api.create_db_entry_for_new_instance,
                            self.context,
                            instance_type,
                            fake_image,
                            instance,
                            fake_security_group,
                            bdm,
                            fake_num_instances,
                            fake_index)
            # Cleanup: the partially created instance is rolled back.
            destroy.assert_called_once_with(self.context)
def _test_rescue(self, vm_state):
instance = self._create_instance_obj(params={'vm_state': vm_state})
bdms = []
with contextlib.nested(
mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=bdms),
mock.patch.object(self.compute_api, 'is_volume_backed_instance',
return_value=False),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'rescue_instance')
) as (
bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
record_action_start, rpcapi_rescue_instance
):
self.compute_api.rescue(self.context, instance)
# assert field values set on the instance object
self.assertEqual(task_states.RESCUING, instance.task_state)
# assert our mock calls
bdm_get_by_instance_uuid.assert_called_once_with(
self.context, instance.uuid)
volume_backed_inst.assert_called_once_with(
self.context, instance, bdms)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.RESCUE)
rpcapi_rescue_instance.assert_called_once_with(
self.context, instance=instance, rescue_password=None,
rescue_image_ref=None)
    # Rescue is permitted from the ACTIVE, STOPPED and ERROR vm_states.
    def test_rescue_active(self):
        self._test_rescue(vm_state=vm_states.ACTIVE)
    def test_rescue_stopped(self):
        self._test_rescue(vm_state=vm_states.STOPPED)
    def test_rescue_error(self):
        self._test_rescue(vm_state=vm_states.ERROR)
def test_unrescue(self):
instance = self._create_instance_obj(
params={'vm_state': vm_states.RESCUED})
with contextlib.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'unrescue_instance')
) as (
instance_save, record_action_start, rpcapi_unrescue_instance
):
self.compute_api.unrescue(self.context, instance)
# assert field values set on the instance object
self.assertEqual(task_states.UNRESCUING, instance.task_state)
# assert our mock calls
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.UNRESCUE)
rpcapi_unrescue_instance.assert_called_once_with(
self.context, instance=instance)
def test_set_admin_password_invalid_state(self):
# Tests that InstanceInvalidState is raised when not ACTIVE.
instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.set_admin_password,
self.context, instance)
    def test_set_admin_password(self):
        """Happy path: set_admin_password saves the task state, records
        the CHANGE_PASSWORD action and casts to the compute RPC API."""
        instance = self._create_instance_obj()
        # Decorators on the inner function so the patches are scoped to the
        # call; mocks are again injected bottom-up.
        @mock.patch.object(objects.Instance, 'save')
        @mock.patch.object(self.compute_api, '_record_action_start')
        @mock.patch.object(self.compute_api.compute_rpcapi,
                           'set_admin_password')
        def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
            # call the API
            self.compute_api.set_admin_password(self.context, instance)
            # make our assertions
            instance_save_mock.assert_called_once_with(
                expected_task_state=[None])
            record_mock.assert_called_once_with(
                self.context, instance, instance_actions.CHANGE_PASSWORD)
            compute_rpcapi_mock.assert_called_once_with(
                self.context, instance=instance, new_pass=None)
        do_test()
def _test_attach_interface_invalid_state(self, state):
instance = self._create_instance_obj(
params={'vm_state': state})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_interface,
self.context, instance, '', '', '', [])
def test_attach_interface_invalid_state(self):
for state in [vm_states.BUILDING, vm_states.DELETED,
vm_states.ERROR, vm_states.RESCUED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.SUSPENDED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED]:
self._test_attach_interface_invalid_state(state)
def _test_detach_interface_invalid_state(self, state):
instance = self._create_instance_obj(
params={'vm_state': state})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_interface,
self.context, instance, '', '', '', [])
def test_detach_interface_invalid_state(self):
for state in [vm_states.BUILDING, vm_states.DELETED,
vm_states.ERROR, vm_states.RESCUED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.SUSPENDED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED]:
self._test_detach_interface_invalid_state(state)
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
    """Runs the shared compute-API tests against a plain (non-cells) API."""

    def setUp(self):
        super(ComputeAPIUnitTestCase, self).setUp()
        self.cell_type = None
        self.compute_api = compute_api.API()

    def test_resize_same_flavor_fails(self):
        # Outside a compute cell, resizing to the current flavor is
        # rejected up front.
        self.assertRaises(exception.CannotResizeToSameFlavor,
                          self._test_resize, same_flavor=True)
class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
                                    test.NoDBTestCase):
    """Runs the shared compute-API tests against the API-cell variant."""

    def setUp(self):
        super(ComputeAPIAPICellUnitTestCase, self).setUp()
        self.flags(cell_type='api', enable=True, group='cells')
        self.cell_type = 'api'
        self.compute_api = compute_cells_api.ComputeCellsAPI()

    def test_resize_same_flavor_fails(self):
        # The API cell also rejects same-flavor resizes up front.
        self.assertRaises(exception.CannotResizeToSameFlavor,
                          self._test_resize, same_flavor=True)
class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
                                        test.NoDBTestCase):
    """Runs the shared compute-API tests inside a compute cell."""

    def setUp(self):
        super(ComputeAPIComputeCellUnitTestCase, self).setUp()
        self.flags(cell_type='compute', enable=True, group='cells')
        self.cell_type = 'compute'
        self.compute_api = compute_api.API()

    def test_resize_same_flavor_passes(self):
        # Inside a compute cell the same-flavor check is deferred, so the
        # resize goes through.
        self._test_resize(same_flavor=True)
class DiffDictTestCase(test.NoDBTestCase):
    """Unit tests for _diff_dict().

    The diff format is: added/changed keys map to ['+', new_value] and
    removed keys map to ['-'].
    """

    def _diff(self, old, new):
        return compute_api._diff_dict(old, new)

    def test_no_change(self):
        base = {'a': 1, 'b': 2, 'c': 3}
        self.assertEqual({}, self._diff(base, {'a': 1, 'b': 2, 'c': 3}))

    def test_new_key(self):
        base = {'a': 1, 'b': 2, 'c': 3}
        diff = self._diff(base, {'a': 1, 'b': 2, 'c': 3, 'd': 4})
        self.assertEqual({'d': ['+', 4]}, diff)

    def test_changed_key(self):
        base = {'a': 1, 'b': 2, 'c': 3}
        diff = self._diff(base, {'a': 1, 'b': 4, 'c': 3})
        self.assertEqual({'b': ['+', 4]}, diff)

    def test_removed_key(self):
        base = {'a': 1, 'b': 2, 'c': 3}
        diff = self._diff(base, {'a': 1, 'c': 3})
        self.assertEqual({'b': ['-']}, diff)
class SecurityGroupAPITest(test.NoDBTestCase):
    """Tests for the compute-side SecurityGroupAPI wrapper."""

    def setUp(self):
        super(SecurityGroupAPITest, self).setUp()
        self.secgroup_api = compute_api.SecurityGroupAPI()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

    @mock.patch('nova.objects.security_group.SecurityGroupList.'
                'get_by_instance')
    def test_get_instance_security_groups(self, mock_get):
        group_list = objects.SecurityGroupList()
        group_list.objects = [objects.SecurityGroup(name=group_name)
                              for group_name in ('foo', 'bar')]
        mock_get.return_value = group_list
        names = self.secgroup_api.get_instance_security_groups(self.context,
                                                               'fake-uuid')
        # Result order is not guaranteed, so compare after sorting.
        self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
        self.assertEqual(1, mock_get.call_count)
        # The instance passed to get_by_instance carries the requested uuid.
        self.assertEqual('fake-uuid', mock_get.call_args_list[0][0][1].uuid)
|
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.cinder import volumes
from tests.unit import test
# Import path of the scenario class under test; used as a mock-target prefix.
CINDER_VOLUMES = ("rally.plugins.openstack.scenarios.cinder.volumes"
                  ".CinderVolumes")
class fake_type(object):
    # Minimal stand-in for a cinderclient volume type: only ``name`` is read.
    name = "fake"
class CinderServersTestCase(test.ScenarioTestCase):
def _get_context(self):
context = test.get_test_context()
context.update({
"user": {"tenant_id": "fake",
"endpoint": mock.MagicMock()},
"tenant": {"id": "fake", "name": "fake",
"volumes": [{"id": "uuid"}],
"servers": [1]}})
return context
def test_create_and_list_volume(self):
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock()
scenario._list_volumes = mock.MagicMock()
scenario.create_and_list_volume(1, True, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
scenario._list_volumes.assert_called_once_with(True)
def test_list_volumes(self):
scenario = volumes.CinderVolumes(self.context)
scenario._list_volumes = mock.MagicMock()
scenario.list_volumes(True)
scenario._list_volumes.assert_called_once_with(True)
def test_create_and_update_volume(self):
volume_update_args = {"dispaly_name": "_updated"}
scenario = volumes.CinderVolumes()
fake_volume = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._update_volume = mock.MagicMock()
scenario.create_and_update_volume(
1, update_volume_kwargs=volume_update_args)
scenario._create_volume.assert_called_once_with(1)
scenario._update_volume.assert_called_once_with(fake_volume,
**volume_update_args)
def test_create_and_delete_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.sleep_between = mock.MagicMock()
scenario._delete_volume = mock.MagicMock()
scenario.create_and_delete_volume(size=1, min_sleep=10, max_sleep=20,
fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.create_volume(1, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
def test_create_volume_and_modify_metadata(self):
scenario = volumes.CinderVolumes(self._get_context())
scenario._set_metadata = mock.Mock()
scenario._delete_metadata = mock.Mock()
scenario.modify_volume_metadata(sets=5, set_size=4,
deletes=3, delete_size=2)
scenario._set_metadata.assert_called_once_with("uuid", 5, 4)
scenario._delete_metadata.assert_called_once_with(
"uuid",
scenario._set_metadata.return_value, 3, 2)
def test_create_and_extend_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._extend_volume = mock.MagicMock(return_value=fake_volume)
scenario.sleep_between = mock.MagicMock()
scenario._delete_volume = mock.MagicMock()
scenario.create_and_extend_volume(1, 2, 10, 20, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
self.assertTrue(scenario._extend_volume.called)
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_from_image_and_delete_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario.create_and_delete_volume(1, image="fake_image")
scenario._create_volume.assert_called_once_with(1,
imageRef="fake_image")
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_volume_from_image(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.create_volume(1, image="fake_image")
scenario._create_volume.assert_called_once_with(1,
imageRef="fake_image")
def test_create_volume_from_image_and_list(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._list_volumes = mock.MagicMock()
scenario.create_and_list_volume(1, True, "fake_image")
scenario._create_volume.assert_called_once_with(1,
imageRef="fake_image")
scenario._list_volumes.assert_called_once_with(True)
def test_create_from_volume_and_delete_volume(self):
fake_volume = mock.MagicMock()
vol_size = 1
scenario = volumes.CinderVolumes(self._get_context())
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario.create_from_volume_and_delete_volume(vol_size)
scenario._create_volume.assert_called_once_with(1, source_volid="uuid")
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_and_delete_snapshot(self):
fake_snapshot = mock.MagicMock()
scenario = volumes.CinderVolumes(self._get_context())
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario.sleep_between = mock.MagicMock()
scenario._delete_snapshot = mock.MagicMock()
scenario.create_and_delete_snapshot(False, 10, 20, fakearg="f")
scenario._create_snapshot.assert_called_once_with("uuid", force=False,
fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
def test_create_and_list_snapshots(self):
fake_snapshot = mock.MagicMock()
scenario = volumes.CinderVolumes(self._get_context())
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._list_snapshots = mock.MagicMock()
scenario.create_and_list_snapshots(False, True, fakearg="f")
scenario._create_snapshot.assert_called_once_with("uuid", force=False,
fakearg="f")
scenario._list_snapshots.assert_called_once_with(True)
def test_create_and_attach_volume(self):
fake_volume = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = volumes.CinderVolumes(self.context)
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario.create_and_attach_volume(10, "img", "0")
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_server.assert_called_once_with(fake_server)
def test_create_and_upload_volume_to_image(self):
fake_volume = mock.Mock()
fake_image = mock.Mock()
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._upload_volume_to_image = mock.MagicMock(
return_value=fake_image)
scenario._delete_volume = mock.MagicMock()
scenario._delete_image = mock.MagicMock()
scenario.create_and_upload_volume_to_image(2,
container_format="fake",
disk_format="disk",
do_delete=False)
scenario._create_volume.assert_called_once_with(2)
scenario._upload_volume_to_image.assert_called_once_with(fake_volume,
False,
"fake",
"disk")
scenario._create_volume.reset_mock()
scenario._upload_volume_to_image.reset_mock()
scenario.create_and_upload_volume_to_image(1, do_delete=True)
scenario._create_volume.assert_called_once_with(1)
scenario._upload_volume_to_image.assert_called_once_with(fake_volume,
False,
"bare",
"raw")
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_image.assert_called_once_with(fake_image)
def test_create_snapshot_and_attach_volume(self):
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = volumes.CinderVolumes(self._get_context())
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
self.clients("nova").servers.get = mock.MagicMock(
return_value=fake_server)
scenario.create_snapshot_and_attach_volume()
self.assertTrue(scenario._create_volume.called)
scenario._create_snapshot.assert_called_once_with(fake_volume.id,
False)
scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_snapshot_and_attach_volume_use_volume_type(self):
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = volumes.CinderVolumes(self._get_context())
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
fake = fake_type()
self.clients("cinder").volume_types.list = mock.MagicMock(
return_value=[fake])
self.clients("nova").servers.get = mock.MagicMock(
return_value=fake_server)
scenario.create_snapshot_and_attach_volume(volume_type=True)
# Make sure create volume's second arg was the correct volume type.
# fake or none (randomly selected)
self.assertTrue(scenario._create_volume.called)
vol_type = scenario._create_volume.call_args_list[0][1]["volume_type"]
self.assertTrue(vol_type is fake.name or vol_type is None)
scenario._create_snapshot.assert_called_once_with(fake_volume.id,
False)
scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_nested_snapshots_and_attach_volume(self):
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
scenario = volumes.CinderVolumes(context=self._get_context())
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
scenario.create_nested_snapshots_and_attach_volume()
volume_count = scenario._create_volume.call_count
snapshots_count = scenario._create_snapshot.call_count
attached_count = scenario._attach_volume.call_count
self.assertEqual(scenario._delete_volume.call_count, volume_count)
self.assertEqual(scenario._delete_snapshot.call_count, snapshots_count)
self.assertEqual(scenario._detach_volume.call_count, attached_count)
def test_create_nested_snapshots_calls_order(self):
fake_volume1 = mock.MagicMock()
fake_volume2 = mock.MagicMock()
fake_snapshot1 = mock.MagicMock()
fake_snapshot2 = mock.MagicMock()
scenario = volumes.CinderVolumes(self._get_context())
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(
side_effect=[fake_volume1, fake_volume2])
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(
side_effect=[fake_snapshot1, fake_snapshot2])
scenario._delete_snapshot = mock.MagicMock()
scenario.create_nested_snapshots_and_attach_volume(
nested_level={"min": 2, "max": 2})
vol_delete_calls = [mock.call(fake_volume2), mock.call(fake_volume1)]
snap_delete_calls = [mock.call(fake_snapshot2),
mock.call(fake_snapshot1)]
scenario._delete_volume.assert_has_calls(vol_delete_calls)
scenario._delete_snapshot.assert_has_calls(snap_delete_calls)
@mock.patch("rally.plugins.openstack.scenarios.cinder.volumes.random")
def test_create_nested_snapshots_check_resources_size(self, mock_random):
mock_random.randint.return_value = 3
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = volumes.CinderVolumes(self._get_context())
scenario.get_random_server = mock.MagicMock(return_value=fake_server)
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
scenario.create_nested_snapshots_and_attach_volume()
# NOTE: Two calls for random size and nested level
random_call_count = mock_random.randint.call_count
self.assertEqual(2, random_call_count)
calls = scenario._create_volume.mock_calls
expected_calls = [mock.call(3),
mock.call(3, snapshot_id=fake_snapshot.id),
mock.call(3, snapshot_id=fake_snapshot.id)]
self.assertEqual(expected_calls, calls)
def test_create_volume_backup(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
scenario = self._get_scenario(fake_volume, fake_backup)
volume_kwargs = {"some_var": "zaq"}
scenario.create_volume_backup(
1, do_delete=True, create_volume_kwargs=volume_kwargs)
scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
scenario._create_backup.assert_called_once_with(fake_volume.id)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_backup.assert_called_once_with(fake_backup)
def test_create_volume_backup_no_delete(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
scenario = self._get_scenario(fake_volume, fake_backup)
volume_kwargs = {"some_var": "zaq"}
scenario.create_volume_backup(
1, do_delete=False, create_volume_kwargs=volume_kwargs)
scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
scenario._create_backup.assert_called_once_with(fake_volume.id)
self.assertFalse(scenario._delete_volume.called)
self.assertFalse(scenario._delete_backup.called)
def _get_scenario(self, fake_volume, fake_backup, fake_restore=None):
scenario = volumes.CinderVolumes(self.context)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._create_backup = mock.MagicMock(return_value=fake_backup)
scenario._restore_backup = mock.MagicMock(return_value=fake_restore)
scenario._list_backups = mock.MagicMock()
scenario._delete_volume = mock.MagicMock()
scenario._delete_backup = mock.MagicMock()
return scenario
def test_create_and_restore_volume_backup(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
fake_restore = mock.MagicMock()
scenario = self._get_scenario(fake_volume, fake_backup, fake_restore)
volume_kwargs = {"some_var": "zaq"}
scenario.create_and_restore_volume_backup(
1, do_delete=True, create_volume_kwargs=volume_kwargs)
scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
scenario._create_backup.assert_called_once_with(fake_volume.id)
scenario._restore_backup.assert_called_once_with(fake_backup.id)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_backup.assert_called_once_with(fake_backup)
def test_create_and_restore_volume_backup_no_delete(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
fake_restore = mock.MagicMock()
scenario = self._get_scenario(fake_volume, fake_backup, fake_restore)
volume_kwargs = {"some_var": "zaq"}
scenario.create_and_restore_volume_backup(
1, do_delete=False, create_volume_kwargs=volume_kwargs)
scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
scenario._create_backup.assert_called_once_with(fake_volume.id)
scenario._restore_backup.assert_called_once_with(fake_backup.id)
self.assertFalse(scenario._delete_volume.called)
self.assertFalse(scenario._delete_backup.called)
def test_create_and_list_volume_backups(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
scenario = self._get_scenario(fake_volume, fake_backup)
volume_kwargs = {"some_var": "zaq"}
scenario.create_and_list_volume_backups(
1, detailed=True, do_delete=True,
create_volume_kwargs=volume_kwargs)
scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
scenario._create_backup.assert_called_once_with(fake_volume.id)
scenario._list_backups.assert_called_once_with(True)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_backup.assert_called_once_with(fake_backup)
def test_create_and_list_volume_backups_no_delete(self):
    """List flow with do_delete=False must leave volume and backup."""
    volume = mock.MagicMock()
    backup = mock.MagicMock()
    scenario = self._get_scenario(volume, backup)
    create_kwargs = {"some_var": "zaq"}

    scenario.create_and_list_volume_backups(
        1, detailed=True, do_delete=False,
        create_volume_kwargs=create_kwargs)

    # create and list ran, but nothing was deleted
    scenario._create_volume.assert_called_once_with(1, **create_kwargs)
    scenario._create_backup.assert_called_once_with(volume.id)
    scenario._list_backups.assert_called_once_with(True)
    self.assertFalse(scenario._delete_volume.called)
    self.assertFalse(scenario._delete_backup.called)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.model
import frappe.utils
import json, os
from six import iteritems, string_types, integer_types
from frappe.utils.file_manager import save_file
'''
Handle RESTful requests that are mapped to the `/api/resource` route.
Requests via FrappeClient are also handled here.
'''
@frappe.whitelist()
def get_list(doctype, fields=None, filters=None, order_by=None,
        limit_start=None, limit_page_length=20, parent=None):
    """Return a filtered, ordered and paginated list of records.

    :param doctype: DocType of the data to be queried
    :param fields: fields to be returned. Default is `name`
    :param filters: filter list by this dict
    :param order_by: order by this fieldname
    :param limit_start: start at this index
    :param limit_page_length: number of records to be returned (default 20)"""
    # Child-table rows may only be read when the user can read the parent.
    if frappe.is_table(doctype):
        check_parent_permission(parent)
    return frappe.get_list(
        doctype,
        fields=fields,
        filters=filters,
        order_by=order_by,
        limit_start=limit_start,
        limit_page_length=limit_page_length,
        ignore_permissions=False,
    )
@frappe.whitelist()
def get_count(doctype, filters=None, debug=False, cache=False):
    """Return the number of records of `doctype` matching `filters`."""
    return frappe.db.count(doctype, filters, debug, cache)
@frappe.whitelist()
def get(doctype, name=None, filters=None, parent=None):
    """Return a document by name or filters.

    :param doctype: DocType of the document to be returned
    :param name: return document of this `name`
    :param filters: if name is not set, filter by these values and return the first match
    :raises frappe.PermissionError: if the user may not read the document"""
    if frappe.is_table(doctype):
        check_parent_permission(parent)

    if filters and not name:
        name = frappe.db.get_value(doctype, json.loads(filters))
        if not name:
            frappe.throw(_("No document found for given filters"))

    doc = frappe.get_doc(doctype, name)
    if not doc.has_permission("read"):
        raise frappe.PermissionError

    # reuse the document already loaded instead of fetching it a second time
    return doc.as_dict()
@frappe.whitelist()
def get_value(doctype, fieldname, filters=None, as_dict=True, debug=False, parent=None):
    """Return a value from a document.

    :param doctype: DocType to be queried
    :param fieldname: field to be returned (default `name`)
    :param filters: dict or string for identifying the record"""
    if frappe.is_table(doctype):
        check_parent_permission(parent)

    if not frappe.has_permission(doctype):
        # translate the template first, then interpolate, so the message can
        # be found in the translation catalog (same pattern as get_single_value)
        frappe.throw(_("No permission for {0}").format(doctype), frappe.PermissionError)

    try:
        filters = json.loads(filters)
        if isinstance(filters, (integer_types, float)):
            filters = frappe.as_unicode(filters)
    except (TypeError, ValueError):
        # filters were not passed, or are not JSON
        pass

    try:
        fieldname = json.loads(fieldname)
    except (TypeError, ValueError):
        # a plain fieldname was passed, not JSON
        pass

    # check whether the used filters were really parseable and usable
    # and did not just result in an empty string or dict
    if not filters:
        filters = None

    return frappe.db.get_value(doctype, filters, fieldname, as_dict=as_dict, debug=debug)
@frappe.whitelist()
def get_single_value(doctype, field):
    """Return one field value from a Single doctype, after a permission check."""
    if not frappe.has_permission(doctype):
        frappe.throw(_("No permission for {0}").format(doctype), frappe.PermissionError)
    return frappe.db.get_single_value(doctype, field)
@frappe.whitelist()
def set_value(doctype, name, fieldname, value=None):
    """Set one value (or a dict of values) on a document via get_doc.

    :param doctype: DocType of the document
    :param name: name of the document
    :param fieldname: fieldname string or JSON / dict with key value pair
    :param value: value if fieldname is JSON / dict"""
    # standard fields (except idx) may not be edited through this endpoint
    if fieldname != "idx" and fieldname in frappe.model.default_fields:
        frappe.throw(_("Cannot edit standard fields"))

    if value:
        values = {fieldname: value}
    else:
        # no explicit value: fieldname itself carries the key/value mapping
        values = fieldname
        if isinstance(fieldname, string_types):
            try:
                values = json.loads(fieldname)
            except ValueError:
                values = {fieldname: ''}

    parent_info = frappe.db.get_value(doctype, name, ["parenttype", "parent"], as_dict=True)
    if parent_info and parent_info.parent and parent_info.parenttype:
        # child-table row: update it through its parent document
        doc = frappe.get_doc(parent_info.parenttype, parent_info.parent)
        child = doc.getone({"doctype": doctype, "name": name})
        child.update(values)
    else:
        doc = frappe.get_doc(doctype, name)
        doc.update(values)
    doc.save()

    return doc.as_dict()
@frappe.whitelist()
def insert(doc=None):
    """Insert a document.

    :param doc: JSON or dict object to be inserted"""
    if isinstance(doc, string_types):
        doc = json.loads(doc)

    if doc.get("parent") and doc.get("parenttype"):
        # child row: append it to the parent document and save the parent
        parent = frappe.get_doc(doc.get("parenttype"), doc.get("parent"))
        parent.append(doc.get("parentfield"), doc)
        parent.save()
        return parent.as_dict()

    return frappe.get_doc(doc).insert().as_dict()
@frappe.whitelist()
def insert_many(docs=None):
    """Insert multiple documents and return their names.

    :param docs: JSON or list of dict objects to be inserted in one request"""
    if isinstance(docs, string_types):
        docs = json.loads(docs)

    # hard cap to keep a single request bounded
    if len(docs) > 200:
        frappe.throw(_('Only 200 inserts allowed in one request'))

    names = []
    for doc in docs:
        if doc.get("parent") and doc.get("parenttype"):
            # child row: append it to the parent document and save the parent
            parent = frappe.get_doc(doc.get("parenttype"), doc.get("parent"))
            parent.append(doc.get("parentfield"), doc)
            parent.save()
            names.append(parent.name)
        else:
            names.append(frappe.get_doc(doc).insert().name)
    return names
@frappe.whitelist()
def save(doc):
    """Update (save) an existing document.

    :param doc: JSON or dict object with the properties of the document to be updated"""
    if isinstance(doc, string_types):
        doc = json.loads(doc)
    wrapper = frappe.get_doc(doc)
    wrapper.save()
    return wrapper.as_dict()
@frappe.whitelist()
def rename_doc(doctype, old_name, new_name, merge=False):
    """Rename a document and return its new `name`.

    :param doctype: DocType of the document to be renamed
    :param old_name: current `name` of the document to be renamed
    :param new_name: new `name` to be set"""
    return frappe.rename_doc(doctype, old_name, new_name, merge=merge)
@frappe.whitelist()
def submit(doc):
    """Submit a document.

    :param doc: JSON or dict object to be submitted remotely"""
    if isinstance(doc, string_types):
        doc = json.loads(doc)
    wrapper = frappe.get_doc(doc)
    wrapper.submit()
    return wrapper.as_dict()
@frappe.whitelist()
def cancel(doctype, name):
    """Cancel a submitted document.

    :param doctype: DocType of the document to be cancelled
    :param name: name of the document to be cancelled"""
    doc = frappe.get_doc(doctype, name)
    doc.cancel()
    return doc.as_dict()
@frappe.whitelist()
def delete(doctype, name):
    """Delete a remote document.

    :param doctype: DocType of the document to be deleted
    :param name: name of the document to be deleted"""
    # raise if the document does not exist rather than failing silently
    frappe.delete_doc(doctype, name, ignore_missing=False)
@frappe.whitelist()
def set_default(key, value, parent=None):
    """Set a user default value, then flush the user's cache so it takes effect."""
    frappe.db.set_default(key, value, parent or frappe.session.user)
    frappe.clear_cache(user=frappe.session.user)
@frappe.whitelist()
def make_width_property_setter(doc):
    """Create a Property Setter that sets a column `width`.

    :param doc: Property Setter document with `width` property"""
    if isinstance(doc, string_types):
        doc = json.loads(doc)
    # only width Property Setters may be created through this endpoint
    if doc["doctype"] == "Property Setter" and doc["property"] == "width":
        frappe.get_doc(doc).insert(ignore_permissions=True)
@frappe.whitelist()
def bulk_update(docs):
    """Bulk update documents, collecting per-document failures.

    :param docs: JSON list of documents to be updated remotely. Each document
        must have `doctype` and `docname` properties.
    :returns: dict with `failed_docs`, a list of {'doc': ..., 'exc': traceback}
        for every document that could not be updated."""
    docs = json.loads(docs)
    failed_docs = []
    for doc in docs:
        try:
            ddoc = {key: val for key, val in iteritems(doc)
                    if key not in ('doctype', 'docname')}
            # keep `doc` bound to the incoming payload so the failure report
            # below records what the caller sent, not a half-updated Document
            existing = frappe.get_doc(doc['doctype'], doc['docname'])
            existing.update(ddoc)
            existing.save()
        except Exception:
            # a bare `except:` would also trap SystemExit/KeyboardInterrupt;
            # record the failure and continue with the remaining documents
            failed_docs.append({
                'doc': doc,
                'exc': frappe.utils.get_traceback()
            })
    return {'failed_docs': failed_docs}
@frappe.whitelist()
def has_permission(doctype, docname, perm_type="read"):
    """Return a JSON-able dict saying whether the user has the permission.

    :param doctype: DocType of the document to be checked
    :param docname: `name` of the document to be checked
    :param perm_type: one of `read`, `write`, `create`, `submit`, `cancel`,
        `report`. Default is `read`."""
    allowed = frappe.has_permission(doctype, perm_type.lower(), docname)
    return {"has_permission": allowed}
@frappe.whitelist()
def get_password(doctype, name, fieldname):
    """Return a password-type field value. Only System Managers may call this.

    :param doctype: DocType of the document that holds the password
    :param name: `name` of the document that holds the password
    :param fieldname: `fieldname` of the password property"""
    # hard gate: anyone else gets a permission error from only_for
    frappe.only_for("System Manager")
    return frappe.get_doc(doctype, name).get_password(fieldname)
@frappe.whitelist()
def get_js(items):
    """Load JS code files, appending translations to `frappe._messages`.

    :param items: JSON list of paths of the js files to be loaded."""
    out = []
    for src in json.loads(items):
        path_parts = src.strip("/").split("/")
        # only files under /assets may be served; reject path traversal
        if ".." in path_parts or path_parts[0] != "assets":
            frappe.throw(_("Invalid file path: {0}").format("/".join(path_parts)))

        contentpath = os.path.join(frappe.local.sites_path, *path_parts)
        with open(contentpath, "r") as srcfile:
            code = frappe.utils.cstr(srcfile.read())

        if frappe.local.lang != "en":
            # ship this file's translations alongside its code
            messages = json.dumps(frappe.get_lang_dict("jsfile", contentpath))
            code += "\n\n$.extend(frappe._messages, {})".format(messages)

        out.append(code)
    return out
@frappe.whitelist(allow_guest=True)
def get_time_zone():
    """Return the site's default time zone (guest-accessible)."""
    return {"time_zone": frappe.defaults.get_defaults().get("time_zone")}
@frappe.whitelist()
def attach_file(filename=None, filedata=None, doctype=None, docname=None, folder=None, decode_base64=False, is_private=None, docfield=None):
    """Attach a file to a Document (POST only).

    :param filename: filename e.g. test-file.txt
    :param filedata: base64 encode filedata which must be urlencoded
    :param doctype: Reference DocType to attach file to
    :param docname: Reference DocName to attach file to
    :param folder: Folder to add File into
    :param decode_base64: decode filedata from base64 encode, default is False
    :param is_private: Attach file as private file (1 or 0)
    :param docfield: file to attach to (optional)"""
    # this endpoint mutates state, so anything but POST is rejected
    if frappe.local.request.environ.get("REQUEST_METHOD").upper() != "POST":
        frappe.throw(_("Invalid Request"))

    doc = frappe.get_doc(doctype, docname)
    if not doc.has_permission():
        frappe.throw(_("Not permitted"), frappe.PermissionError)

    saved_file = save_file(filename, filedata, doctype, docname, folder, decode_base64, is_private, docfield)

    if docfield and doctype:
        # also point the given field at the freshly saved file
        doc.set(docfield, saved_file.file_url)
        doc.save()

    return saved_file.as_dict()
def check_parent_permission(parent):
    """Raise PermissionError unless the user can read the given parent doctype."""
    if parent and frappe.permissions.has_permission(parent):
        return
    # Either parent not passed or the user doesn't have permission on parent
    # doctype of child table!
    raise frappe.PermissionError
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
import numpy as np
from landlab import ModelParameterDictionary, CLOSED_BOUNDARY, Component
from landlab.core.model_parameter_dictionary import MissingKeyError, \
ParameterValueError
from landlab.field.scalar_data_fields import FieldError
from landlab.grid.base import BAD_INDEX_VALUE
from landlab.utils.decorators import use_file_name_or_kwds
try:
from .cfuncs import (erode_with_link_alpha_varthresh,
erode_with_link_alpha_fixthresh)
except ImportError:
warnings.warn('Unable to import stream_power extension module.')
from copy import deepcopy as copy
UNDEFINED_INDEX = np.iinfo(np.int32).max
class StreamPowerEroder(Component):
"""Erode where channels are.
Implemented as:
.. math::
E = K A^m S^n - sp_{crit},
and if :math:`E < 0`, :math:`E = 0`.
If ``use_W`` is declared and ``True``, the module instead implements:
.. math::
E = K A^m S^n / W - sp_{crit}
DEJH Sept 2013, major modifications Sept 14 and May 16. This component
now wraps Fastscape-style functionality under the hood.
NB: If you want spatially or temporally variable runoff, pass the
runoff values at each pixel to the flow router using the input argument
*use_Q*.
Construction::
StreamPowerEroder(grid, K_sp=None, threshold_sp=0., sp_type='set_mn',
m_sp=0.5, n_sp=1., a_sp=None, b_sp=None, c_sp=None,
use_W=None, use_Q=None)
Parameters
----------
grid : ModelGrid
A grid.
K_sp : float, array, or field name
K in the stream power equation (units vary with other parameters).
threshold_sp : positive float, optional
The threshold stream power, below which no erosion occurs. This
threshold is assumed to be in "stream power" units, i.e., if
sp_type is 'Shear_stress', the value should be tau**a.
sp_type : {'set_mn', 'Total', 'Unit', 'Shear_stress'}
Controls how the law is implemented. If 'set_mn', use the supplied
values of m_sp and n_sp. Else, component will derive values of m and n
from supplied values of a_sp, b_sp, and c_sp, following Whipple and
Tucker:
* If ``'Total'``, ``m = a * c``, ``n = a``.
* If ``'Unit'``, ``m = a * c *(1 - b)``, ``n = a``.
* If ``'Shear_stress'``, ``m = 2 * a * c * (1 - b) / 3``,
``n = 2 * a / 3``.
m_sp : float, optional
m in the stream power equation (power on drainage area). Overridden if
a_sp, b_sp, and c_sp are supplied.
n_sp : float, optional, ~ 0.5<n_sp<4.
n in the stream power equation (power on slope). Overridden if
a_sp, b_sp, and c_sp are supplied.
a_sp : float, optional
The power on the SP/shear term to get the erosion rate; the "erosional
process" term. Only used if sp_type is not 'set_mn'.
b_sp : float, optional
The power on discharge to get width; the "hydraulic geometry" term.
Only used if sp_type in ('Unit', 'Shear_stress').
c_sp : float, optional
The power on area to get discharge; the "basin hydology" term. Only
used if sp_type is not 'set_mn'.
use_W : None, array, or field name, optional
If not None, component will look for node-centered data describing
channel width in grid.at_node[use_W] or if an array, will take the
array as the channel widths. It will use the widths to implement
incision ~ stream power per unit width. If sp_type is 'set_mn',
follows the equation given above. If sp_type in ('Unit',
'Shear_stress'), the width value will be implemented directly. W has no
effect if sp_type is 'Total'.
use_Q : None, array, or field name, optional
If not None, the equation becomes E=K*Q**m*S**n. Effectively sets c=1
in Wh&T's 1999 derivation, if you are setting m and n through a, b,
and c.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab import CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
>>> from landlab.components import FlowRouter
>>> from landlab.components import StreamPowerEroder
>>> mg = RasterModelGrid((5, 5), 10.)
>>> z = np.array([7., 7., 7., 7., 7.,
... 7., 5., 3.2, 6., 7.,
... 7., 2., 3., 5., 7.,
... 7., 1., 1.9, 4., 7.,
... 7., 0., 7., 7., 7.])
>>> z = mg.add_field('node', 'topographic__elevation', z)
>>> fr = FlowRouter(mg)
>>> sp = StreamPowerEroder(mg, K_sp=1.)
>>> fr.run_one_step()
>>> sp.run_one_step(dt=1.)
>>> z # doctest: +NORMALIZE_WHITESPACE
array([ 7. , 7. , 7. , 7. , 7. ,
7. , 2.92996598, 2.02996598, 4.01498299, 7. ,
7. , 0.85993197, 1.87743897, 3.28268321, 7. ,
7. , 0.28989795, 0.85403051, 2.42701526, 7. ,
7. , 0. , 7. , 7. , 7. ])
>>> mg2 = RasterModelGrid((3, 7), 1.)
>>> z = np.array(mg2.node_x**2.)
>>> z = mg2.add_field('node', 'topographic__elevation', z)
>>> mg2.status_at_node[mg2.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> fr2 = FlowRouter(mg2)
>>> sp2 = StreamPowerEroder(mg2, K_sp=0.1, m_sp=0., n_sp=2.,
... threshold_sp=2.)
>>> fr2.run_one_step()
>>> sp2.run_one_step(dt=10.)
>>> z.reshape((3, 7))[1, :] # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 4. , 8.52493781,
13.29039716, 18.44367965, 36. ])
>>> mg3 = RasterModelGrid((5, 5), 2.)
>>> z = mg.node_x/100.
>>> z = mg3.add_field('node', 'topographic__elevation', z)
>>> mg3.status_at_node[mg3.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> mg3.at_node['water__unit_flux_in'] = mg3.node_y
>>> fr3 = FlowRouter(mg3)
>>> Q = mg3.at_node['surface_water__discharge']
>>> sp3 = StreamPowerEroder(mg3, K_sp=1., sp_type='Unit', a_sp=1.,
... b_sp=0.5, c_sp=1., use_Q=Q)
>>> fr3.run_one_step()
>>> sp3.run_one_step(1.)
>>> z
array([ 0. , 0.1 , 0.2 , 0.3 , 0.4 ,
0. , 0.02898979, 0.0859932 , 0.17463772, 0.4 ,
0. , 0.02240092, 0.06879049, 0.14586033, 0.4 ,
0. , 0.01907436, 0.05960337, 0.12929386, 0.4 ,
0. , 0.1 , 0.2 , 0.3 , 0.4 ])
"""
_name = 'StreamPowerEroder'
_input_var_names = (
'topographic__elevation',
'flow__link_to_receiver_node',
'drainage_area',
'flow__receiver_node',
'flow__upstream_node_order',
'topographic__steepest_slope'
)
_output_var_names = (
'topographic__elevation',
)
_var_units = {
'topographic__elevation': 'm',
'drainage_area': 'm**2',
'flow__link_to_receiver_node': '-',
'flow__receiver_node': '-',
'flow__upstream_node_order': '-',
'topographic__steepest_slope': '-'
}
_var_mapping = {
'topographic__elevation': 'node',
'drainage_area': 'node',
'flow__link_to_receiver_node': 'node',
'flow__receiver_node': 'node',
'flow__upstream_node_order': 'node',
'topographic__steepest_slope': 'node'
}
_var_doc = {
'topographic__elevation': 'Land surface topographic elevation',
'drainage_area':
"Upstream accumulated surface area contributing to the node's "
"discharge",
'flow__link_to_receiver_node':
'ID of link downstream of each node, which carries the discharge',
'flow__receiver_node':
'Node array of receivers (node that receives flow from current '
'node)',
'flow__upstream_node_order':
'Node array containing downstream-to-upstream ordered list of '
'node IDs',
'topographic__steepest_slope':
'Node array of steepest *downhill* slopes'
}
@use_file_name_or_kwds
def __init__(self, grid, K_sp=None, threshold_sp=0., sp_type='set_mn',
             m_sp=0.5, n_sp=1., a_sp=None, b_sp=None, c_sp=None,
             use_W=None, use_Q=None, **kwds):
    """Initialize the eroder.

    See the class docstring for the meaning of every parameter. Fixes
    relative to the legacy version: string comparison now uses ``==``
    instead of ``is`` (identity only works by CPython interning accident),
    and exact-type checks use ``isinstance``.
    """
    # 'water__discharge' was renamed upstream; accept the old field name.
    if use_Q == 'water__discharge':
        use_Q = 'surface_water__discharge'
    self._grid = grid
    self.fraction_gradient_change = 1.
    # needs to be filled with values in execution; the trailing blank slot
    # lets undefined-receiver links index safely.
    self.link_S_with_trailing_blank = np.zeros(grid.number_of_links + 1)
    self.count_active_links = np.zeros_like(
        self.link_S_with_trailing_blank, dtype=int)
    self.count_active_links[:-1] = 1
    self._K_unit_time = self.grid.zeros('node', dtype=float)
    self.use_K = False  # grandfathered in; only True if K_sp == 'array'
    if isinstance(K_sp, np.ndarray):
        self._K_unit_time[:] = K_sp
    else:
        try:
            self._K_unit_time.fill(K_sp)
        except ValueError:  # could not cast => was a str
            if K_sp == 'array':
                self.use_K = True
            else:
                # treat the string as a node-field name
                self._K_unit_time = grid.at_node[K_sp]
    assert np.all(threshold_sp >= 0.)
    # for now, enforce threshold as a float
    assert type(threshold_sp) in (float, int)
    try:
        self.sp_crit = float(threshold_sp)
    except TypeError:
        try:
            self.sp_crit = self.grid.at_node[threshold_sp]
        except TypeError:  # was an array
            self.sp_crit = threshold_sp
            assert self.sp_crit.size == self.grid.number_of_nodes
    # flag for sed_flux_dep_incision to see if the threshold was
    # manually set.
    self.set_threshold = bool(np.any(threshold_sp != 0.))
    # retained for back compatibility; undocumented functionality
    self.tstep = kwds.get('dt')
    # --- channel width handling ---
    if isinstance(use_W, bool):  # again for back-compatibility
        self.use_W = use_W
        self._W = None
    elif use_W is None:
        self.use_W = False
        self._W = None
    else:
        self.use_W = True
        try:
            self._W = self.grid.at_node[use_W]
        except (FieldError, TypeError):
            assert use_W.size == self._grid.number_of_nodes
            self._W = use_W
    # --- discharge handling ---
    if isinstance(use_Q, bool):
        self.use_Q = use_Q
        self._Q = None
    elif use_Q is None:
        self.use_Q = False
        self._Q = None
    else:
        self.use_Q = True
        try:
            self._Q = self.grid.at_node[use_Q]
        except (FieldError, TypeError):
            assert use_Q.size == self._grid.number_of_nodes
            self._Q = use_Q
    # --- derive m and n exponents ---
    self._type = sp_type
    if sp_type == 'set_mn':
        assert (float(m_sp) >= 0.) and (float(n_sp) >= 0.), \
            "m and n must be positive"
        self._m = float(m_sp)
        self._n = float(n_sp)
        assert ((a_sp is None) and (b_sp is None) and (c_sp is None)), (
            "If sp_type is 'set_mn', do not pass values for a, b, or c!")
    else:
        assert sp_type in ('Total', 'Unit', 'Shear_stress'), (
            "sp_type not recognised. It must be 'set_mn', 'Total', " +
            "'Unit', or 'Shear_stress'.")
        assert (m_sp == 0.5 and n_sp == 1.), \
            "Do not set m and n if sp_type is not 'set_mn'!"
        assert float(a_sp) >= 0., "a must be positive"
        self._a = float(a_sp)
        if b_sp is not None:
            assert float(b_sp) >= 0., "b must be positive"
            self._b = float(b_sp)
        else:
            assert self.use_W, "b was not set"
            self._b = 0.
        if c_sp is not None:
            assert float(c_sp) >= 0., "c must be positive"
            self._c = float(c_sp)
        else:
            assert self.use_Q, "c was not set"
            self._c = 1.
        # Whipple & Tucker style exponent combinations:
        if self._type == 'Total':
            self._n = self._a
            self._m = self._a * self._c  # ==_a if use_Q
        elif self._type == 'Unit':
            self._n = self._a
            self._m = self._a * self._c * (1. - self._b)
            # ^ ==_a iff use_Q&use_W etc
        elif self._type == 'Shear_stress':
            self._m = 2. * self._a * self._c * (1. - self._b) / 3.
            self._n = 2. * self._a / 3.
        else:
            raise MissingKeyError('Not enough information was provided ' +
                                  'on the exponents to use!')
    # m and n will always be set, but care needs to be taken to include Q
    # and W directly if appropriate
    self.stream_power_erosion = grid.zeros(centering='node')
    self.alpha = self.grid.zeros('node')
    self.alpha_divided = self.grid.zeros('node')
def erode(self, grid, dt, node_elevs='topographic__elevation',
          node_drainage_areas='drainage_area',
          flow_receiver='flow__receiver_node',
          node_order_upstream='flow__upstream_node_order',
          slopes_at_nodes='topographic__steepest_slope',
          link_node_mapping='flow__link_to_receiver_node',
          link_slopes=None, slopes_from_elevs=None,
          W_if_used=None, Q_if_used=None, K_if_used=None,
          flooded_nodes=None):
    """Erode node elevations in place for one step of length *dt*.

    .. note:: deprecated
        This run method is now DEPRECATED. Use the fully standardized
        method :func:`run_one_step` instead.

    A simple, explicit implementation of a stream power algorithm.

    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    dt : float
        Component time step.
    node_elevs : str or ndarray, optional
        Elevations on the grid, either a field string or nnodes-long array.
    node_drainage_areas : str or ndarray, optional
        Tells the component where to look for the drainage area values.
        Change to another string to override which grid field the
        component looks at, or pass a nnodes-long array of drainage
        areas values directly instead.
    flow_receiver, node_order_upstream : str or ndarray, optional
        The downstream node to which each node flows and the ordering of
        the nodes in the network starting at the outlet, respectively,
        are both necessary as inputs to allow stability testing.

    If you already have slopes defined at nodes on the grid, pass them
    to the component with *slopes_at_nodes*. The same syntax is
    expected: string gives a name in the grid fields, an array gives
    values direct.

    Alternatively, set *link_slopes* (and *link_node_mapping*) if this
    data is only available at links. 'topographic__derivative_of_elevation'
    is the default field name for link slopes. Override this name by
    setting the variable as the appropriate string, or override use of
    grid fields altogether by passing an array. *link_node_mapping*
    controls how the component maps these link values onto the arrays.
    We assume there is always a 1:1 mapping (pass the values already
    projected onto the nodes using slopes_at_nodes if not). Other
    components, e.g., flow_routing.route_flow_dn, may provide the
    necessary outputs to make the mapping easier: e.g., just pass
    'flow__link_to_receiver_node' from that module (the default name).
    If the component cannot find an existing mapping through this
    parameter, it will derive one on the fly, at considerable cost of
    speed (see on-screen reports).

    slopes_from_elevs : str, optional
        Allows the module to create gradients internally from elevations
        rather than have them provided. Set to True to force the component
        to look for the data in the location specified by node_elevs.
        Using this option is considerably slower than any of the
        alternatives, as it also has to calculate the link_node_mapping
        from scratch each time.

        In both these cases, at present the mapping is to use the maximum
        slope of *any* link attached to the node as the representative
        node slope. This is primarily for speed, but may be a good idea
        to modify later.
    W_if_used, Q_if_used : str or ndarray, optional
        Must be provided if you set *use_W* and *use_Q* respectively in
        the component initialization. They can be either field names or
        nnodes arrays as in the other cases.

    If you are routing across flooded depressions in your flow routing
    scheme, be sure to set *flooded_nodes* with a boolean array or
    array of IDs to ensure erosion cannot occur in the lake. Erosion
    is always zero if the gradient is adverse, but can still proceed as
    usual on the entry into the depression unless *flooded_nodes* is
    set.

    NB: If you want spatially or temporally variable runoff, pass the
    runoff values at each pixel to the flow router, then pass
    discharges at each node using *Q_if_used* to this component.

    Returns
    -------
    tuple
        Tuple of (*grid*, *modified_elevs*, *stream_power_erosion*);
        modifies grid elevation fields to reflect updates. Note the value
        stream_power_erosion is not an excess stream power; any specified
        erosion threshold is not incorporated into it.
    """
    # Flow-router outputs: upstream->downstream node ordering and, per node,
    # the link that carries its discharge to the receiver.
    upstream_order_IDs = self._grid['node']['flow__upstream_node_order']
    defined_flow_receivers = np.not_equal(self._grid['node'][
        'flow__link_to_receiver_node'], UNDEFINED_INDEX)
    flow_link_lengths = self._grid._length_of_link_with_diagonals[
        self._grid['node']['flow__link_to_receiver_node'][
            defined_flow_receivers]]
    active_nodes = np.where(grid.status_at_node != CLOSED_BOUNDARY)[0]
    flow_receivers = self.grid['node']['flow__receiver_node']

    # Guard against conflicting W/Q/K sources (init-time vs call-time).
    if W_if_used is not None:
        assert self.use_W, ("Widths were provided, but you didn't set " +
                            "the use_W flag in your input file! " +
                            "Aborting...")
        assert self._W is None, ("Do not pass W to the run method " +
                                 "if you also set them at initialization!")
    if Q_if_used is not None:
        assert self.use_Q, ("Discharges were provided, but you didn't " +
                            "set the use_Q flag in your input file! " +
                            "Aborting...")
        assert self._Q is None, ("Do not pass Q to the run method " +
                                 "if you also set them at initialization!")
    if K_if_used is not None:
        assert self.use_K, ("An array of erodabilities was provided, " +
                            "but you didn't set K_sp to 'array' in your " +
                            "input file! Aborting...")
        try:
            _K_unit_time = grid.at_node[K_if_used]  # [active_nodes]
        except TypeError:
            _K_unit_time = K_if_used  # [active_nodes]
    else:
        # little move to save a bit of memory management time...
        # (copy only when we are about to zero out flooded entries below)
        if flooded_nodes is not None:
            _K_unit_time = self._K_unit_time.copy()
        else:
            _K_unit_time = self._K_unit_time

    # Resolve field-name-or-array arguments to arrays.
    if type(node_elevs) is str:
        node_z = grid.at_node[node_elevs]
    else:
        node_z = node_elevs
    if type(node_drainage_areas) is str:
        node_A = grid.at_node[node_drainage_areas]
    else:
        node_A = node_drainage_areas
    if type(node_order_upstream) is str:
        node_order_upstream = grid.at_node[node_order_upstream]

    # Disable incision in flooded nodes, as appropriate
    if flooded_nodes is not None:
        if flooded_nodes.dtype != bool:
            flooded_nodes = flooded_nodes.astype(bool)
        flooded_nodes = flooded_nodes
        _K_unit_time[flooded_nodes] = 0.

    # Operate the main function. Each branch builds alpha = K*dt*driver**m /
    # link_length, zeroes it where erosion is disallowed, then hands off to
    # the compiled (cython) erode kernel.
    if self.use_W is False and self.use_Q is False:  # normal case
        self.alpha[defined_flow_receivers] = _K_unit_time[
            defined_flow_receivers]*dt*node_A[
            defined_flow_receivers]**self._m / flow_link_lengths
        # Handle flooded nodes, if any (no erosion there)
        if flooded_nodes is not None:
            self.alpha[flooded_nodes] = 0.
        reversed_flow = node_z < node_z[flow_receivers]
        # this check necessary if flow has been routed across
        # depressions
        self.alpha[reversed_flow] = 0.
        self.alpha_divided[defined_flow_receivers] = (
            self.alpha[defined_flow_receivers] /
            flow_link_lengths**(self._n - 1.))
        threshdt = self.sp_crit * dt
        # scalar threshold uses the fixed-threshold kernel; array threshold
        # the variable one
        if type(threshdt) is float:
            erode_with_link_alpha_fixthresh(upstream_order_IDs,
                                            flow_receivers,
                                            threshdt, self.alpha_divided,
                                            self._n, node_z)
        else:
            erode_with_link_alpha_varthresh(upstream_order_IDs,
                                            flow_receivers,
                                            threshdt, self.alpha_divided,
                                            self._n, node_z)
    elif self.use_W:
        # resolve W from init-time store or call-time argument
        if self._W is None:
            try:
                W = grid.at_node[W_if_used]
            except TypeError:
                W = W_if_used
        else:
            W = self._W
        if self.use_Q:  # use both Q and W direct
            if self._Q is None:
                try:
                    Q_direct = grid.at_node[Q_if_used]
                except TypeError:
                    Q_direct = Q_if_used
            else:
                Q_direct = self._Q
            self.alpha[defined_flow_receivers] = (
                _K_unit_time[defined_flow_receivers]*dt *
                Q_direct[defined_flow_receivers]**self._m /
                W[defined_flow_receivers] / flow_link_lengths)
            # Handle flooded nodes, if any (no erosion there)
            if flooded_nodes is not None:
                self.alpha[flooded_nodes] = 0.
            reversed_flow = node_z < node_z[flow_receivers]
            # this check necessary if flow has been routed across
            # depressions
            self.alpha[reversed_flow] = 0.
            self.alpha_divided[defined_flow_receivers] = (
                self.alpha[defined_flow_receivers] /
                flow_link_lengths**(self._n - 1.))
            threshdt = self.sp_crit * dt
            if type(threshdt) is float:
                erode_with_link_alpha_fixthresh(
                    upstream_order_IDs, flow_receivers, threshdt,
                    self.alpha_divided, self._n, node_z)
            else:
                erode_with_link_alpha_varthresh(
                    upstream_order_IDs, flow_receivers, threshdt,
                    self.alpha_divided, self._n, node_z)
        else:  # just W to be used
            self.alpha[defined_flow_receivers] = (
                _K_unit_time[defined_flow_receivers]*dt *
                node_A[defined_flow_receivers]**self._m /
                W[defined_flow_receivers] / flow_link_lengths)
            # Handle flooded nodes, if any (no erosion there)
            if flooded_nodes is not None:
                self.alpha[flooded_nodes] = 0.
            reversed_flow = node_z < node_z[flow_receivers]
            # this check necessary if flow has been routed across
            # depressions
            self.alpha[reversed_flow] = 0.
            self.alpha_divided[defined_flow_receivers] = (
                self.alpha[defined_flow_receivers] /
                flow_link_lengths**(self._n - 1.))
            threshdt = self.sp_crit * dt
            if type(threshdt) is float:
                erode_with_link_alpha_fixthresh(
                    upstream_order_IDs, flow_receivers, threshdt,
                    self.alpha_divided, self._n, node_z)
            else:
                erode_with_link_alpha_varthresh(
                    upstream_order_IDs, flow_receivers, threshdt,
                    self.alpha_divided, self._n, node_z)
    else:  # just use_Q
        if self._Q is None:
            try:
                Q_direct = grid.at_node[Q_if_used]
            except TypeError:
                assert type(Q_if_used) in (np.ndarray, list)
                Q_direct = Q_if_used
        else:
            Q_direct = self._Q
        self.alpha[defined_flow_receivers] = (
            _K_unit_time[defined_flow_receivers]*dt *
            Q_direct[defined_flow_receivers]**self._m /
            flow_link_lengths)
        # Handle flooded nodes, if any (no erosion there)
        if flooded_nodes is not None:
            self.alpha[flooded_nodes] = 0.
        reversed_flow = node_z < node_z[flow_receivers]
        # this check necessary if flow has been routed across
        # depressions
        self.alpha[reversed_flow] = 0.
        self.alpha_divided[defined_flow_receivers] = (
            self.alpha[defined_flow_receivers] /
            flow_link_lengths**(self._n - 1.))
        threshdt = self.sp_crit * dt
        if type(threshdt) is float:
            erode_with_link_alpha_fixthresh(
                upstream_order_IDs, flow_receivers, threshdt,
                self.alpha_divided, self._n, node_z)
        else:
            erode_with_link_alpha_varthresh(
                upstream_order_IDs, flow_receivers, threshdt,
                self.alpha_divided, self._n, node_z)

    # NOTE(review): the legacy pure-python implementation below is retained
    # commented-out for reference only.
    # # Note that we save "stream_power_erosion" incorporating both K and a.
    # # Most definitions would need this value /K then **(1/a) to give actual
    # # stream power (unit, total, whatever), and it does not yet include the
    # # threshold
    # self.stream_power_erosion[active_nodes] = stream_power_active_nodes
    # grid.at_node['stream_power_erosion'][:] = self.stream_power_erosion
    # erosion_increment = (self.stream_power_erosion - self.sp_crit).clip(0.)
    #
    # # this prevents any node from incising below any node downstream of it
    # # we have to go in upstream order in case our rate is so big we impinge
    # # on baselevels > 1 node away
    #
    # elev_dstr = node_z[flow_receiver]
    # # ^we subtract erosion_increment[flow_receiver] in the loop, as it
    # # can update
    #
    # method = 'cython'
    # if method == 'cython':
    #     from .cfuncs import erode_avoiding_pits
    #
    #     erode_avoiding_pits(node_order_upstream, flow_receiver, node_z,
    #                         erosion_increment)
    # else:
    #     for i in node_order_upstream:
    #         elev_this_node_before = node_z[i]
    #         elev_this_node_after = (elev_this_node_before -
    #                                 erosion_increment[i])
    #         elev_dstr_node_after = (elev_dstr[i] -
    #                                 erosion_increment[flow_receiver[i]])
    #         if elev_this_node_after < elev_dstr_node_after:
    #             erosion_increment[i] = (elev_this_node_before -
    #                                     elev_dstr_node_after)*0.999999
    #         # ^we add a tiny elevation excess to prevent the module from
    #         # ever totally severing its own flow paths
    # # clip the erosion increments one more time to remove negatives
    # # introduced by any pit filling algorithms or the above procedure:
    # node_z -= erosion_increment.clip(0.)
    #
    # self._grid = grid
    return grid, node_z, self.stream_power_erosion
    def run_one_step(self, dt, flooded_nodes=None, **kwds):
        """
        A simple, explicit implementation of a stream power algorithm.

        This component now looks exclusively for the field
        'topographic__steepest_slope' at each node to determine the local
        slope (previously it was possible to map values from links explicitly
        within the component, but this functionality is now deprecated).

        If you are routing across flooded depressions in your flow routing
        scheme, be sure to set *flooded_nodes* with a boolean array or array
        of IDs to ensure erosion cannot occur in the lake. Erosion
        is always zero if the gradient is adverse, but can still proceed as
        usual on the entry into the depression unless *flooded_nodes* is set.

        Parameters
        ----------
        dt : float
            Time-step size
        flooded_nodes : ndarray of int (optional)
            IDs of nodes that are flooded and should have no erosion. If not
            provided but flow has still been routed across depressions, erosion
            may still occur beneath the apparent water level (though will
            always still be positive).

        Notes
        -----
        Any extra keyword arguments are accepted for interface compatibility
        but ignored; the work is delegated to ``self.erode`` on the grid the
        component was bound to (``self._grid``).
        """
        self.erode(grid=self._grid, dt=dt, flooded_nodes=flooded_nodes)
|
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
    """Escape *text* so it renders literally inside the LaTeX Verbatim
    environment, using ``\\<commandprefix>Zxx{}`` macros for special chars.

    Backslash and braces are first moved to placeholder control characters
    so the macro text inserted for later characters (which itself contains
    backslashes and braces) is not re-escaped.
    """
    # Stage 1: stash the characters that appear inside the replacement
    # macros themselves.
    text = (text.replace('\\', '\x00')
                .replace('{', '\x01')
                .replace('}', '\x02'))
    # Stage 2: expand placeholders and the remaining specials, in the same
    # order as the original chained replaces.
    substitutions = (
        ('\x00', 'Zbs'), ('\x01', 'Zob'), ('\x02', 'Zcb'),
        ('^', 'Zca'), ('_', 'Zus'), ('&', 'Zam'),
        ('<', 'Zlt'), ('>', 'Zgt'), ('#', 'Zsh'),
        ('%', 'Zpc'), ('$', 'Zdl'), ('-', 'Zhy'),
        ("'", 'Zsq'), ('"', 'Zdq'), ('~', 'Zti'),
    )
    for char, macro in substitutions:
        text = text.replace(char, '\\%s%s{}' % (commandprefix, macro))
    return text
# Skeleton for a complete LaTeX document, emitted when the ``full`` option
# is set; filled in by %-interpolation in ``format_unencoded``.
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
    """Return the short style name for a token type.

    If *ttype* has no entry in ``STANDARD_TYPES``, walk up its parents
    until one does, appending each skipped subtype name so the result
    stays unique (e.g. ``parentname + 'Subtype'``).
    """
    name = STANDARD_TYPES.get(ttype)
    if name:
        return name
    pieces = []
    while name is None:
        pieces.append(ttype[-1])
        ttype = ttype.parent
        name = STANDARD_TYPES.get(ttype)
    return name + ''.join(reversed(pieces))
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
        \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
            \PY{k}{pass}
        \end{Verbatim}

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        # abs() guards against negative option values.
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self.escapeinside = options.get('escapeinside', '')
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            # Anything but exactly two delimiter characters disables the
            # feature.
            self.escapeinside = ''
        self.envname = options.get('envname', u'Verbatim')
        self._create_stylesheet()

    def _create_stylesheet(self):
        """Build the token-type -> short-name and short-name -> macro-body
        lookup tables used by ``get_style_defs`` and ``format_unencoded``."""
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Hex "rrggbb" -> "r,g,b" with components in [0, 1]; white when
            # no color is given.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            # '$$' is a placeholder for the command prefix, substituted below.
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                # BUG FIX: 'mono' must select the monospace family
                # (\texttt); it previously aliased \textsf, which is the
                # sans-serif face used for 'sans'.
                cmndef += r'\let\$$@ff=\texttt'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in iteritems(self.cmd2def):
            styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
                          (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        """Write the token stream to *outfile* as a LaTeX ``Verbatim``
        environment (or a complete document when ``full`` is set)."""
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            # Buffer the highlighted code; it is interpolated into
            # DOC_TEMPLATE at the end.
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments or self.escapeinside:
            # Give $, ^ and _ their normal TeX catcodes inside the verbatim
            # so escaped/math content typesets.
            outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]
                    value = value[len(start):]
                    start = escape_tex(start, self.commandprefix)
                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, self.commandprefix)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Pass text between the escape delimiters through
                    # unescaped; escape everything else.
                    text = value
                    value = ''
                    while len(text) > 0:
                        a, sep1, text = text.partition(self.left)
                        if len(sep1) > 0:
                            b, sep2, text = text.partition(self.right)
                            if len(sep2) > 0:
                                value += escape_tex(a, self.commandprefix) + b
                            else:
                                # Unterminated escape: escape it all.
                                value += escape_tex(a + sep1 + b, self.commandprefix)
                        else:
                            value = value + escape_tex(a, self.commandprefix)
                else:
                    value = escape_tex(value, self.commandprefix)
            elif ttype not in Token.Escape:
                value = escape_tex(value, self.commandprefix)
            # Collect style short-names from most to least specific.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Emit one \PY{...}{...} per line so line breaks stay
                # outside the macro argument.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{' + self.envname + u'}\n')

        if self.full:
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass  = self.docclass,
                     preamble  = self.preamble,
                     title     = self.title,
                     encoding  = self.encoding or 'utf8',
                     styledefs = self.get_style_defs(),
                     code      = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
    r"""
    Wrap a language lexer so that text between the *left* and *right*
    delimiters is emitted as ``Token.Escape`` (raw LaTeX).

    The wrapped lexer runs first; its comment and string tokens pass
    through untouched, while all other consecutive tokens are buffered,
    scanned for escape delimiters, and the unescaped remainder is re-lexed
    with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        self.left = left
        self.right = right
        self.lang = lang
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        pending = ''
        start = 0
        for pos, ttype, val in self.lang.get_tokens_unprocessed(text):
            if ttype in Token.Comment or ttype in Token.String:
                # Flush buffered non-string/comment text first, then pass
                # the token through unchanged (no escaping inside these).
                if pending:
                    for item in self.get_tokens_aux(start, pending):
                        yield item
                    pending = ''
                yield pos, ttype, val
            else:
                if not pending:
                    start = pos
                pending += val
        if pending:
            for item in self.get_tokens_aux(start, pending):
                yield item

    def get_tokens_aux(self, index, text):
        # Split *text* on the escape delimiters; re-lex unescaped pieces,
        # emit escaped pieces as Token.Escape.
        while text:
            before, lmatch, text = text.partition(self.left)
            if before:
                for pos, ttype, val in self.lang.get_tokens_unprocessed(before):
                    yield index + pos, ttype, val
                index += len(before)
            if lmatch:
                escaped, rmatch, text = text.partition(self.right)
                if rmatch:
                    yield index + len(lmatch), Token.Escape, escaped
                    index += len(lmatch) + len(escaped) + len(rmatch)
                else:
                    # Unmatched left delimiter: flag it and keep lexing the
                    # remainder normally.
                    yield index, Token.Error, lmatch
                    index += len(lmatch)
                    text = escaped
|
|
"""
Form Widget classes specific to the Django admin site.
"""
import copy
from django import forms
from django.db.models.deletion import CASCADE
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.encoding import force_text
from django.utils.html import smart_urlquote
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import gettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple rendered with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page.
    """
    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super().__init__(attrs, choices)

    @property
    def media(self):
        scripts = ("core.js", "SelectBox.js", "SelectFilter2.js")
        return forms.Media(js=["admin/js/%s" % name for name in scripts])

    def get_context(self, name, value, attrs=None):
        context = super().get_context(name, value, attrs)
        widget_attrs = context['widget']['attrs']
        # SelectFilter2.js keys off these attributes; the stacked variant
        # deliberately uses the unseparated class name 'selectfilterstacked'.
        css_class = 'selectfilter'
        if self.is_stacked:
            css_class += 'stacked'
        widget_attrs['class'] = css_class
        widget_attrs['data-field-name'] = self.verbose_name
        widget_attrs['data-is-stacked'] = int(self.is_stacked)
        return context
class AdminDateWidget(forms.DateInput):
    """DateInput styled for the admin, with calendar shortcut JavaScript."""
    def __init__(self, attrs=None, format=None):
        merged = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged, format=format)

    @property
    def media(self):
        scripts = ("calendar.js", "admin/DateTimeShortcuts.js")
        return forms.Media(js=["admin/js/%s" % name for name in scripts])
class AdminTimeWidget(forms.TimeInput):
    """TimeInput styled for the admin, with clock shortcut JavaScript."""
    def __init__(self, attrs=None, format=None):
        merged = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged, format=format)

    @property
    def media(self):
        scripts = ("calendar.js", "admin/DateTimeShortcuts.js")
        return forms.Media(js=["admin/js/%s" % name for name in scripts])
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    template_name = 'admin/widgets/split_datetime.html'

    def __init__(self, attrs=None):
        # Call MultiWidget.__init__ directly (not SplitDateTimeWidget's)
        # because we want to supply our own sub-widgets.
        forms.MultiWidget.__init__(self, [AdminDateWidget, AdminTimeWidget], attrs)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context.update(date_label=_('Date:'), time_label=_('Time:'))
        return context
class AdminRadioSelect(forms.RadioSelect):
    # RadioSelect rendered with the admin's radio template.
    template_name = 'admin/widgets/radio.html'
class AdminFileWidget(forms.ClearableFileInput):
    # ClearableFileInput rendered with the admin's file-input template.
    template_name = 'admin/widgets/clearable_file_input.html'
def url_params_from_lookup_dict(lookups):
    """
    Convert the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters.

    Callables are evaluated, sequences are joined with commas, booleans
    become '0'/'1', and everything else is stringified.  Returns an empty
    dict for falsy or non-mapping input.
    """
    if not (lookups and hasattr(lookups, 'items')):
        return {}
    params = {}
    for key, raw in lookups.items():
        if callable(raw):
            raw = raw()
        if isinstance(raw, (tuple, list)):
            value = ','.join(str(item) for item in raw)
        elif isinstance(raw, bool):
            value = ('0', '1')[raw]
        else:
            value = str(raw)
        params[key] = value
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    template_name = 'admin/widgets/foreign_key_raw_id.html'

    def __init__(self, rel, admin_site, attrs=None, using=None):
        self.rel = rel
        self.admin_site = admin_site
        self.db = using  # database alias for related-object lookups
        super().__init__(attrs)

    def get_context(self, name, value, attrs=None):
        context = super().get_context(name, value, attrs)
        rel_to = self.rel.model
        if rel_to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            related_url = reverse(
                'admin:%s_%s_changelist' % (
                    rel_to._meta.app_label,
                    rel_to._meta.model_name,
                ),
                current_app=self.admin_site.name,
            )
            params = self.url_parameters()
            if params:
                # Bug fix: a trailing comma after a bare generator expression
                # used as the sole call argument is a SyntaxError, so the
                # generator is kept without it.
                related_url += '?' + '&'.join(
                    '%s=%s' % (k, v) for k, v in params.items()
                )
            context['related_url'] = mark_safe(related_url)
            context['link_title'] = _('Lookup')
            # The JavaScript code looks for this class.
            context['widget']['attrs'].setdefault('class', 'vForeignKeyRawIdAdminField')
        if context['widget']['value']:
            context['link_label'], context['link_url'] = self.label_and_url_for_value(value)
        return context

    def base_url_parameters(self):
        """Query parameters derived from the relation's limit_choices_to."""
        limit_choices_to = self.rel.limit_choices_to
        if callable(limit_choices_to):
            limit_choices_to = limit_choices_to()
        return url_params_from_lookup_dict(limit_choices_to)

    def url_parameters(self):
        """base_url_parameters plus the to_field indicator for the popup."""
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params

    def label_and_url_for_value(self, value):
        """Return (label, change-url) for *value*, or ('', '') when the
        object cannot be found or its admin is not registered."""
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
        except (ValueError, self.rel.model.DoesNotExist):
            return '', ''
        try:
            url = reverse(
                '%s:%s_%s_change' % (
                    self.admin_site.name,
                    obj._meta.app_label,
                    obj._meta.object_name.lower(),
                ),
                args=(obj.pk,)
            )
        except NoReverseMatch:
            url = ''  # Admin not registered for target model.
        return Truncator(obj).words(14, truncate='...'), url
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    template_name = 'admin/widgets/many_to_many_raw_id.html'

    def get_context(self, name, value, attrs=None):
        context = super().get_context(name, value, attrs)
        if self.rel.model in self.admin_site._registry:
            # The related object is registered with the same AdminSite;
            # the JavaScript hooks on this class name.
            context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'
        return context

    def url_parameters(self):
        # Unlike the FK widget, no to_field parameter is appended.
        return self.base_url_parameters()

    def label_and_url_for_value(self, value):
        return '', ''

    def value_from_datadict(self, data, files, name):
        raw = data.get(name)
        if raw:
            return raw.split(',')

    def format_value(self, value):
        if not value:
            return ''
        return ','.join(force_text(v) for v in value)
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.

    It mirrors the wrapped widget's public attributes (attrs, choices,
    needs_multipart_form) and delegates rendering and data extraction to it,
    adding add/change/delete related-object URLs to the template context.
    """
    template_name = 'admin/widgets/related_widget_wrapper.html'
    def __init__(self, widget, rel, admin_site, can_add_related=None,
                 can_change_related=False, can_delete_related=False):
        # Mirror the wrapped widget so this wrapper can stand in for it.
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.model in admin_site._registry
        self.can_add_related = can_add_related
        # XXX: The UX does not support multiple selected values.
        multiple = getattr(widget, 'allow_multiple_selected', False)
        self.can_change_related = not multiple and can_change_related
        # XXX: The deletion UX can be confusing when dealing with cascading deletion.
        cascade = getattr(rel, 'on_delete', None) is CASCADE
        self.can_delete_related = not multiple and not cascade and can_delete_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site
    def __deepcopy__(self, memo):
        # Shallow-copy the wrapper, deep-copy the wrapped widget, and
        # re-point attrs at the copy so both stay in sync.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj
    @property
    def is_hidden(self):
        # Delegate visibility to the wrapped widget.
        return self.widget.is_hidden
    @property
    def media(self):
        return self.widget.media
    def get_related_url(self, info, action, *args):
        # info is (app_label, model_name); builds e.g. "admin:app_model_add".
        return reverse("admin:%s_%s_%s" % (info + (action,)),
                       current_app=self.admin_site.name, args=args)
    def get_context(self, name, value, attrs=None):
        # NOTE(review): override_choices appears to be a context manager on
        # the wrapped widget that temporarily installs our choices for
        # rendering -- confirm against the widget implementation in use.
        with self.widget.override_choices(self.choices):
            context = self.widget.get_context(name, value, attrs)
        from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
        rel_opts = self.rel.model._meta
        info = (rel_opts.app_label, rel_opts.model_name)
        url_params = '&'.join("%s=%s" % param for param in [
            (TO_FIELD_VAR, self.rel.get_related_field().name),
            (IS_POPUP_VAR, 1),
        ])
        context['url_params'] = url_params
        context['model'] = rel_opts.verbose_name
        if self.can_change_related:
            change_related_template_url = self.get_related_url(info, 'change', '__fk__')
            context.update(
                can_change_related=True,
                change_related_template_url=change_related_template_url,
            )
        if self.can_add_related:
            add_related_url = self.get_related_url(info, 'add')
            context.update(
                can_add_related=True,
                add_related_url=add_related_url,
            )
        if self.can_delete_related:
            delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
            context.update(
                can_delete_related=True,
                delete_related_template_url=delete_related_template_url,
            )
        return context
    def value_from_datadict(self, data, files, name):
        # Delegate to the wrapped widget.
        return self.widget.value_from_datadict(data, files, name)
    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the admin's large-text CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vLargeTextField'}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged)
class AdminTextInputWidget(forms.TextInput):
    """TextInput carrying the admin's text-field CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vTextField'}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged)
class AdminEmailInputWidget(forms.EmailInput):
    """EmailInput carrying the admin's text-field CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vTextField'}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged)
class AdminURLFieldWidget(forms.URLInput):
    """URLInput that also renders the current value as a clickable link."""
    template_name = 'admin/widgets/url.html'

    def __init__(self, attrs=None):
        merged = {'class': 'vURLField'}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['current_label'] = _('Currently:')
        context['change_label'] = _('Change:')
        # Quote the stored value so the template can use it as an href.
        context['widget']['href'] = smart_urlquote(context['widget']['value'])
        return context

    def format_value(self, value):
        return force_text(super().format_value(value))
class AdminIntegerFieldWidget(forms.NumberInput):
    """NumberInput with the admin integer-field class; subclasses override
    ``class_name`` for other numeric widths."""
    class_name = 'vIntegerField'

    def __init__(self, attrs=None):
        merged = {'class': self.class_name}
        if attrs is not None:
            merged.update(attrs)
        super().__init__(attrs=merged)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
    # Same widget, wider CSS class for 64-bit integer fields.
    class_name = 'vBigIntegerField'
|
|
import ssl
import tempfile
import pytest
from astropy.utils.data import get_pkg_data_filename
from astropy.samp.hub import SAMPHubServer
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.errors import SAMPProxyError
# By default, tests should not use the internet.
from astropy.samp import conf
from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY
def setup_module(module):
    """Pytest module-level hook: disable internet use for all tests here."""
    conf.use_internet = False
class TestStandardProfile:
    """
    End-to-end test of the SAMP standard profile: a local hub is started and
    two integrated clients exchange metadata, notifications, calls and
    call-and-wait requests through it.
    """

    # The three *_kwargs properties return no extra options here; they look
    # like hooks for variants with different hub/client configuration
    # (presumably overridden by subclasses not visible in this file).
    @property
    def hub_init_kwargs(self):
        return {}

    @property
    def client_init_kwargs(self):
        return {}

    @property
    def client_connect_kwargs(self):
        return {}

    def setup_method(self, method):
        # Fresh temp dir, hub and two connected clients for every test.
        self.tmpdir = tempfile.mkdtemp()
        self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1,
                                 **self.hub_init_kwargs)
        self.hub.start()
        self.client1 = SAMPIntegratedClient(**self.client_init_kwargs)
        self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
        self.client2 = SAMPIntegratedClient(**self.client_init_kwargs)
        self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)

    def teardown_method(self, method):
        # Disconnect clients (if still connected) before stopping the hub.
        if self.client1.is_connected:
            self.client1.disconnect()
        if self.client2.is_connected:
            self.client2.disconnect()
        self.hub.stop()

    def test_main(self):
        """Exercise the full notify/call/reply cycle between two clients."""
        self.client1_id = self.client1.get_public_id()
        self.client2_id = self.client2.get_public_id()
        self.metadata1 = {"samp.name": "Client 1",
                          "samp.description.text": "Client 1 Description",
                          "client.version": "1.1"}
        self.metadata2 = {"samp.name": "Client 2",
                          "samp.description.text": "Client 2 Description",
                          "client.version": "1.2"}
        # Check that the clients are connected
        assert self.client1.is_connected
        assert self.client2.is_connected
        # Check that ping works
        self.client1.ping()
        self.client2.ping()
        # Check that get_registered_clients works as expected.
        # (a client is not listed in its own registered-clients view)
        assert self.client1_id not in self.client1.get_registered_clients()
        assert self.client2_id in self.client1.get_registered_clients()
        assert self.client1_id in self.client2.get_registered_clients()
        assert self.client2_id not in self.client2.get_registered_clients()
        # Check that get_metadata works as expected
        assert self.client1.get_metadata(self.client1_id) == {}
        assert self.client1.get_metadata(self.client2_id) == {}
        assert self.client2.get_metadata(self.client1_id) == {}
        assert self.client2.get_metadata(self.client2_id) == {}
        self.client1.declare_metadata(self.metadata1)
        assert self.client1.get_metadata(self.client1_id) == self.metadata1
        assert self.client2.get_metadata(self.client1_id) == self.metadata1
        assert self.client1.get_metadata(self.client2_id) == {}
        assert self.client2.get_metadata(self.client2_id) == {}
        self.client2.declare_metadata(self.metadata2)
        assert self.client1.get_metadata(self.client1_id) == self.metadata1
        assert self.client2.get_metadata(self.client1_id) == self.metadata1
        assert self.client1.get_metadata(self.client2_id) == self.metadata2
        assert self.client2.get_metadata(self.client2_id) == self.metadata2
        # Check that, without subscriptions, sending a notification from one
        # client to another raises an error.
        message = {}
        message['samp.mtype'] = "table.load.votable"
        message['samp.params'] = {}
        with pytest.raises(SAMPProxyError):
            self.client1.notify(self.client2_id, message)
        # Check that there are no currently active subscriptions
        assert self.client1.get_subscribed_clients('table.load.votable') == {}
        assert self.client2.get_subscribed_clients('table.load.votable') == {}
        # We now test notifications and calls
        rec1 = Receiver(self.client1)
        rec2 = Receiver(self.client2)
        self.client2.bind_receive_notification('table.load.votable',
                                               rec2.receive_notification)
        self.client2.bind_receive_call('table.load.votable',
                                       rec2.receive_call)
        self.client1.bind_receive_response('test-tag', rec1.receive_response)
        # Check resulting subscriptions
        # (get_subscribed_clients excludes the calling client itself)
        assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}}
        assert self.client2.get_subscribed_clients('table.load.votable') == {}
        assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id)
        assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id)
        # Once we have finished with the calls and notifications, we will
        # check the data got across correctly.
        # Test notify
        params = random_params(self.tmpdir)
        self.client1.notify(self.client2.get_public_id(),
                            {'samp.mtype': 'table.load.votable',
                             'samp.params': params})
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        params = random_params(self.tmpdir)
        self.client1.enotify(self.client2.get_public_id(),
                             "table.load.votable", **params)
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        # Test notify_all
        params = random_params(self.tmpdir)
        self.client1.notify_all({'samp.mtype': 'table.load.votable',
                                 'samp.params': params})
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        params = random_params(self.tmpdir)
        self.client1.enotify_all("table.load.votable", **params)
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        # Test call
        params = random_params(self.tmpdir)
        self.client1.call(self.client2.get_public_id(), 'test-tag',
                          {'samp.mtype': 'table.load.votable',
                           'samp.params': params})
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        params = random_params(self.tmpdir)
        self.client1.ecall(self.client2.get_public_id(), 'test-tag',
                           "table.load.votable", **params)
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        # Test call_all
        params = random_params(self.tmpdir)
        self.client1.call_all('tag1',
                              {'samp.mtype': 'table.load.votable',
                               'samp.params': params})
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        params = random_params(self.tmpdir)
        self.client1.ecall_all('tag2',
                               "table.load.votable", **params)
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        # Test call_and_wait
        params = random_params(self.tmpdir)
        result = self.client1.call_and_wait(self.client2.get_public_id(),
                                            {'samp.mtype': 'table.load.votable',
                                             'samp.params': params}, timeout=5)
        assert result == TEST_REPLY
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        params = random_params(self.tmpdir)
        result = self.client1.ecall_and_wait(self.client2.get_public_id(),
                                             "table.load.votable", timeout=5, **params)
        assert result == TEST_REPLY
        assert_output('table.load.votable', self.client2.get_private_key(),
                      self.client1_id, params, timeout=60)
        # TODO: check that receive_response received the right data
|
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# ==============================================================================
# Frank Matranga's Third-party Regis High School Moodle Scraper
# ==============================================================================
import getopt
import json
import os
import sys
import traceback
from time import sleep
import requests
from lxml import html
from pymongo import MongoClient
# Dev switch: when True, skip credential loading, the Moodle/Intranet
# logins and the MongoDB connection (offline testing of the CLI plumbing).
SKIP_LOGINS_AND_CONNECTION = False
# Defaults; each may be overridden from the command line below.
PATH = "./secrets.json"  # path to the credentials JSON file (-p)
DB_URL = "localhost:27017"  # MongoDB host:port (-u)
DB_NAME = "regis"  # MongoDB database name (-n)
SCRAPE_TYPE = None  # 'course' or 'person'; required via -t
START_AT = None  # first Moodle ID to scrape, inclusive (-s)
END_AT = None  # last Moodle ID to scrape, inclusive (-e)
def usage():
    """
    Prints the usage for the command line.

    Uses a single-argument print() call, which behaves identically as a
    Python 2 print statement of a parenthesised expression and as the
    Python 3 print function; this also matches the print('...') style
    already used in the argument-count check below.
    """
    print("usage: trms [--help] [-p <json_path>] [-u <db_url>] [-n <db_name>] [-t <scrape_type>] [-s <start_mID>] [-e <end_mID>]")
# ----------- CLI ARGUMENTS -----------
# Parse the command line into the module-level settings above.
# Exits with status 2 on any usage error.
if len(sys.argv) > 14:
    print('Too many arguments.')
    usage()
    sys.exit(2)
try:
    opts, args = getopt.getopt(sys.argv[1:], 'p:u:n:t:s:e:h',
                               ['path=', 'dburl=', 'dbname=', 'scrapetype=', 'startmid=', 'endmid=', 'help'])
except getopt.GetoptError:
    usage()
    sys.exit(2)
for opt, arg in opts:
    if opt in ('-h', '--help'):
        usage()
        sys.exit(2)
    elif opt in ('-p', '--path'):
        PATH = arg
    elif opt in ('-u', '--dburl'):
        DB_URL = arg
    elif opt in ('-n', '--dbname'):
        DB_NAME = arg
    elif opt in ('-t', '--scrapetype'):
        if arg in ['course', 'person']:
            SCRAPE_TYPE = arg
        else:
            print("Invalid scrape type. Try 'person' or 'course'.")
            sys.exit()
    elif opt in ('-s', '--startmid'):
        try:
            START_AT = int(arg)
        except ValueError:
            print("Please use integers for -s and -e.")
            sys.exit(2)
    elif opt in ('-e', '--endmid'):
        try:
            END_AT = int(arg)
        except ValueError:
            print("Please use integers for -s and -e.")
            sys.exit(2)
    else:
        usage()
        sys.exit(2)
# Fill in defaults for any range bounds the user did not supply.
if END_AT is None and START_AT is None:
    START_AT = 1
    if SCRAPE_TYPE == "course":
        END_AT = 600  # presumably an empirical upper bound on course IDs
    else:
        END_AT = 2500  # presumably an empirical upper bound on user IDs
if START_AT is None:
    START_AT = 1
if END_AT is None:
    END_AT = START_AT  # a lone -s means "scrape just that one ID"
if START_AT > END_AT:
    print("Starting Moodle ID (-s) must be less than ending Moodle ID (-e).")
    sys.exit(2)
if SCRAPE_TYPE is None:
    print("No scrape type (-t) specified.")
    sys.exit(2)
# ----------------------------------------
class TRMS:
    """Third-party Regis Moodle Scraper.

    Logs in to Moodle and the Regis Intranet with credentials read from
    a secrets JSON file, then walks a range of Moodle IDs scraping either
    course pages or user profile pages into MongoDB collections
    (courses, advisements, students, teachers).
    """

    def __init__(self, path, db_url, db_name, scrape_type, start_mid, end_mid):
        # Scrape configuration (populated from the CLI section above).
        self.path = path
        self.db_url = db_url
        self.db_name = db_name
        self.scrape_type = scrape_type
        self.start_mid = start_mid
        self.end_mid = end_mid
        # MongoDB
        self.client = None
        self.db = None
        self.secrets = None  # Intranet username/password from JSON file
        self.session = None  # Requests session for persistent login
        self.running = True
        # print self.path, self.db_url, self.db_name
        print " --- Initializing TRMS Alpha 1 --- "
        if not SKIP_LOGINS_AND_CONNECTION:
            self.get_credentials()
            self.login()
            self.connect()
        print ""
        self.run()

    def get_credentials(self):
        """
        Validates passed path to JSON file and then tries to parse it for username/password
        """
        if os.path.isdir(self.path):  # Is it a directory?
            if self.path[-1] != "/":  # If a dir, add a ending / if it doesn't already.
                self.path += "/"
            self.path += "secrets.json"
        if not os.path.exists(self.path):  # Does the file not exist?
            print "'" + self.path + "' does not exist."
            self.quit()
        # Try to open the file and parse it for JSON
        try:
            self.secrets = json.loads(open(self.path).read())
        except (ValueError, IOError):
            print "'" + self.path + "' is not a valid JSON file."
            self.quit()
        # Make sure it contains the two needed keys
        try:
            self.secrets['regis_username']
            self.secrets['regis_password']
        except KeyError:
            print "Missing required credentials in JSON file."
            self.quit()
        print "Using found credentials for " + self.secrets['regis_username'] + "."

    def login(self):
        """
        Attempts to login to Moodle and then the Intranet with the passed credentials
        and keep a persistent session for later.
        """
        creds = {'username': self.secrets['regis_username'], 'password': self.secrets['regis_password']}
        url = "https://moodle.regis.org/login/index.php"
        session = requests.Session()
        r = session.post(url, data=creds)
        parsed_body = html.fromstring(r.text)
        title = parsed_body.xpath('//title/text()')[0]
        # Check whether login was successful or not
        if not "Dashboard" in title:
            print "Failed to login to Moodle, check your credentials in '" + self.path + "'."
            self.quit()
        print "Successfully logged into Moodle."
        url = "https://intranet.regis.org/login/submit.cfm"
        values = creds
        r = session.post(url, data=values)
        parsed_body = html.fromstring(r.text)
        # When logged in to the Intranet the page title is 'Regis Intranet' so we can use this to
        # check for a successful login.
        try:
            title = parsed_body.xpath('//title/text()')[0]
            if not "Intranet" in title:
                print "Failed to login to the Intranet, check your credentials in '" + self.path + "'."
                self.quit()
        except Exception:
            # Page with no <title> at all also counts as a failed login.
            print "Failed to login to the Intranet, check your credentials in '" + self.path + "'."
            self.quit()
        print "Successfully logged in to the Intranet."
        self.session = session  # Store this in a persistent session so the logins are saved

    def connect(self):
        """
        Attempts to connect to MongoDB using the URI (or URL?) passed, and attempts to authenticate if possible.
        """
        uri = "mongodb://" + self.db_url
        try:
            self.client = MongoClient(uri)
            self.db = self.client[self.db_name]
            try:
                self.db.authenticate('ontrac', 'ontrac')  # TODO: add support for this in JSON file
            except Exception:
                # Auth is best-effort: unauthenticated local DBs still work.
                pass
            # Cheap round-trip to force the lazy connection to actually open.
            self.db.students.count()
        except Exception as e:
            print "Failed to connect to '" + uri + "'"
            self.quit()
        sleep(1)  # nasty hack to make it seems like something actually happens since the connection is so fast
        print "Successfully connected to Database."

    def run(self):
        """Scrape every Moodle ID in the configured range, then exit."""
        try:
            print "[ scrape", self.scrape_type, "with Moodle ID's", self.start_mid, "to", self.end_mid, "]"
            for mid in range(self.start_mid, self.end_mid + 1):
                self.extract(mid)
            self.quit()
        except Exception as e:
            print e
            traceback.print_exc()
            self.quit()

    def extract(self, mid):
        """Fetch the Moodle page for *mid* and dispatch it to the proper
        extract_* handler, or prune the stored record for bad pages."""
        base_url = "http://moodle.regis.org/user/profile.php?id="
        if self.scrape_type == "course":
            base_url = "http://moodle.regis.org/course/view.php?id="
            if mid == 1:  # Moodle Course 1 is your homepage
                return
        # Get the page
        r = self.session.get(base_url + str(mid)+"&showallcourses=1")  # The url is created by appending the current ID to the base url
        # Parse the html returned so we can find the title
        parsed_body = html.fromstring(r.text)
        # Get the page title
        title = parsed_body.xpath('//title/text()')
        # Check if page is useful
        # --- POSSIBLE TITLES ---
        # Matranga, Frank: Public profile
        # Course: Computer Technology I: Croce
        # Course: Advisement 2B-1: Bonagura
        # Notice
        # Error
        # (Empty)
        if len(title) == 0:  # Check if page title exists
            self.remove(mid, [])
            print "Bad title"
            return
        title = parsed_body.xpath('//title/text()')[0]
        parts = title.split(": ")
        # Make sure its a valid page on some item
        if "Test" in title:
            self.remove(mid, parts)
            print "Skipped test entry"
            return
        if ("Error" in title.strip()) or ("Notice" in title.strip()):
            self.remove(mid, parts)
            print "Error or Notice skipped"
            return
        try:
            name = parts[1]
        except:
            name = parts[0]
            # traceback.print_exc()
        # print mid, title
        if self.scrape_type == "course":
            if "Advisement " in name:
                self.extract_advisement(parsed_body, parts, mid)
            else:
                self.extract_course(parsed_body, parts, mid)
        else:
            self.extract_person(parsed_body, parts, mid)

    def extract_person(self, body, parts, mid):
        """Scrape one Moodle profile page plus its Intranet counterpart and
        upsert a student or teacher document, cross-linking courses."""
        out = {}
        # Title looks like "Last, First: Public profile"; reversed to
        # [last, first] ordering for matching against Intranet results.
        name_parts = body.xpath('//title/text()')[0].split(":")[0].split(" ")[::-1] if \
            len(body.xpath('//title/text()')) > 0 else ['Unknown']
        #department = body.xpath('//dl/dd[1]/text()')
        # Advisement (for students) or Department (for staff)
        department = body.xpath('//dd[../dt = "Department"]/text()')
        if len(department) == 0:
            return
        else:
            department = department[0]
        # Class list on profile with links to each
        class_as = body.xpath('//dd[../dt = "Courses"]//a')
        classes = []
        for a in class_as:
            #print a.get("href")
            classes.append(int(a.get("href").split("?id=")[1].replace("&showallcourses=1", "")))
        #print "CLASSES: ", classes
        # Test department to get user type
        f = department[0]
        try:
            int(f)  # This would work if it was a student since department would be like '1C-2' so f would be '1'
            userType = "student"
        except ValueError:
            userType = "teacher"
        if department.startswith("REACH"):
            userType = "other"
        # Place holder profile image
        picsrc = "/images/person-placeholder.jpg"
        # Get Intranet profile picture
        for img in body.xpath('//img[@class="userpicture"]'):
            picsrc = img.get("src")
        collect = self.db.courses
        courses = []  # This will store the _id's of a user's courses
        intranet = self.session.get(
            "http://intranet.regis.org/infocenter/default.cfm?FuseAction=basicsearch&searchtype=namewildcard&criteria=" +
            name_parts[0] + "%2C+" + name_parts[1] + "&[whatever]=Search")
        intrabody = html.fromstring(intranet.text)
        schedule = "<h2>Not Found</h2>"
        # Student and staff search results use slightly different inline
        # styles, which is how the result divs are located below.
        if userType == "student":
            style = "float:left;width:200px;"
        else:
            style = "float:left; width:200px;"
        # Get student Advisement
        if userType == "student":
            if classes:
                adv = self.db.advisements.find_one({"mID": classes[0]})
                if adv:
                    department = adv['title']
        # Get the list of search results (hopefully just one)
        search_results = intrabody.xpath('//div[@style="' + style + '"]/span[1]/text()')
        # print search_results
        if len(search_results) == 0:
            print str(mid) + ": Found on Moodle but not the Intranet. Disregarding."
            return  # should people found only on Moodle be included?
        # print "Intranet found "+str(len(search_results))+" search results for", name_parts
        # Go through Intrant search results for user until the correct one is found
        for result in search_results:
            index = search_results.index(result)
            name_p = result.encode('utf-8').split(", ")
            intranet_dep = name_p[1].split(" ")[1].replace("(", "").replace(")", "")
            first_name = name_p[1].split(" ")[0]
            last_name = name_p[0]
            # print "Attempting to match "+ str([name_parts[0], name_parts[1], department])+" with " + str([last_name, first_name, intranet_dep])
            if [last_name, first_name] == name_parts:
                if userType == "student":
                    if intranet_dep == department:
                        # print "Yup!"
                        break
                else:
                    # print "Yup!"
                    break
            # print "Nope"
        # Get user email from correct search result
        email = intrabody.xpath('//div[@style="' + style + '"]/span[2]/a/text()')[index]
        # print email
        # Get username from email
        username = str(email).replace("@regis.org", "").lower()
        pic_elm = intrabody.xpath('//div[@style="' + style + '"]/a')[index]
        # Get user Student ID# from profile picture
        code = pic_elm.get("href").split("/")[-1].replace(".jpg", "")
        # Student schedule HTML table and teacher HTML table uses very slightly differing style attributes
        if userType == "student":
            style = "float:left;width:200px;"
            scheduleurl = "http://intranet.regis.org/infocenter?StudentCode=" + code
        else:
            style = "float:left; width:200px;"
            scheduleurl = "http://intranet.regis.org/infocenter/default.cfm?StaffCode=" + code
        # THIS CAN BREAK AT ANY TIME ^^^
        # Request url for user's profile to get the schedule
        htmlschedulereq = self.session.get(scheduleurl)
        htmls = html.fromstring(htmlschedulereq.text)
        if len(htmls.xpath('//div[@id="main"]/table[4]')) > 0:
            schedule = html.tostring(htmls.xpath('//div[@id="main"]/table[4]')[0])
        if userType == "student":
            out = {
                "mID": mid,
                "firstName": name_parts[1],
                "lastName": name_parts[0],
                "username": username,
                "code": code,
                "mpicture": picsrc,
                "ipicture": pic_elm.get("href"),
                "schedule": schedule,
                "email": username + "@regis.org",
                "advisement": department,
                "sclasses": classes,
            }
            # Check if this student already exists
            existing = self.db.students.find_one({'mID': mid})
            newID = None
            if existing is not None:
                if self.db.students.update_one({'mID': mid}, {'$set': out}).modified_count > 0:
                    print "Updated student."
                newID = existing['_id']
            else:
                newID = self.db.students.insert_one(out).inserted_id
                print "Added new student."
            print str(
                mid) + ": Student " + username + " in Advisement " + department + " with Student ID " + code + " in " + str(
                len(classes)) + " courses"
            if classes:
                total = len(classes)  # Total number of courses
                matched = []  # List of mID's of courses Successfully matched
                # Add student's _id to his advisement
                # NOTE(review): classes[0] is assumed to be the advisement
                # course -- confirm against the Moodle course ordering.
                if newID not in self.db.advisements.find_one({"mID": classes[0]})['students']:
                    self.db.advisements.update_one({"mID": classes[0]}, {"$push": {"students": newID}})
                matched.append(classes[0])  # Advisement
                for c in classes:  # C IS A MOODLE ID FOR A COURSE
                    course = collect.find_one({"mID": c})
                    if course:
                        #print "FOUND "+course['title']
                        cID = course['_id']
                        courses.append(cID)
                        matched.append(c)
                        # Add student to advisement (IF HE IS NOT ALREADY)
                        if newID not in self.db.courses.find_one({"mID": c})['students']:
                            collect.update_one({"mID": c}, {"$push": {"students": newID}})
                for mID in classes:
                    if mID not in matched:
                        print "FAILED TO MATCH COURSE " +str(mID)
                # print courses
                self.db.students.update_one({"_id": newID}, {"$set": {"courses": courses}})
        else:
            print str(
                mid) + ": Staff Member " + username + " of the " + department + " Department with Staff ID " + code + " in " + str(
                len(classes)) + " courses"
            out = {
                "mID": mid,
                "userType": userType,
                "image": picsrc,
                "code": code,
                "ipicture": pic_elm.get("href"),
                "department": department,
                "firstName": name_parts[1],
                "lastName": name_parts[0],
                "schedule": schedule,
                "username": username,
                "email": email,
                "sclasses": classes,
                "courses": courses
            }
            existing = self.db.teachers.find_one({"mID": mid})
            newID = None
            if existing is not None:
                newID = existing['_id']
                self.db.teachers.update_one({"mID": mid}, {"$set": out})
            else:
                out['courses'] = []
                newID = self.db.teachers.insert_one(out).inserted_id
            # print "Teacher " + str(mid) + ": " + str(newID)
            for c in classes:
                print c
                course = collect.find_one({"mID": c})
                if course:
                    print "FOUND"
                    # Match teacher by last name appearing in the course's
                    # full title before claiming ownership of the course.
                    if name_parts[0] in course["full"]:
                        collect.update_one({'mID': c}, {'$set': {'teacher': newID}})
                        if course['_id'] not in self.db.teachers.find_one({"_id": newID})['courses']:
                            self.db.teachers.update_one({"_id": newID}, {"$push": {"courses": course['_id']}})
                adv = self.db.advisements.find_one({"mID": c})
                if adv:
                    self.db.advisements.update_one({
                        'mID': c
                    }, {
                        '$set': {
                            'teacher': newID
                        }
                    })
        # print out

    def extract_advisement(self, body, parts, mid):
        """Upsert an advisement document from a course page whose name
        contains 'Advisement '."""
        name = parts[1]
        out = {
            "mID": mid,
            "title": name.replace("Advisement ", "")
        }
        existing = self.db.advisements.find_one({"mID": mid})
        newID = None
        if existing:
            newID = existing['_id']
            self.db.advisements.update_one({"mID": mid}, {"$set": out})
        else:
            out['students'] = []
            newID = self.db.advisements.insert_one(out).inserted_id
        print str(mid) + ": Advisement " + out['title'] + " " + str(newID)

    def extract_course(self, body, parts, mid):
        """Upsert a course document, inferring grade level and course type
        (class/club/reach) from the course title."""
        # print "REACHED HERE"
        try:
            name = parts[1]
        except IndexError:
            name = parts[0]
        ps = name.split(" ")
        teacher = parts[2] if len(parts) > 2 else "no"
        grade = 13  # sentinel: grade unknown / not applicable
        for pa in ps:
            # Roman numerals I-IV map to grades 9-12; a bare integer wins.
            for index, g in enumerate(["I", "II", "III", "IV"]):
                if g == pa:
                    grade = 9 + index
            try:
                grade = int(pa)
            except ValueError:
                pass
        courseType = "class"
        if "Club" in name or "Society" in name:
            courseType = "club"
        if "REACH" in name or "Reach" in name:
            courseType = "reach"
        out = {
            "mID": mid,
            "full": ": ".join(parts),
            "courseType": courseType,
            "title": name,
            "grade": grade
        }
        existing = self.db.courses.find_one({"mID": mid})
        newID = None
        if existing:
            newID = existing['_id']
            self.db.courses.update_one({"mID": mid}, {"$set": out})
        else:
            out['students'] = []
            newID = self.db.courses.insert_one(out).inserted_id
        print str(mid) + ": Course " + str(newID)

    def remove(self, mid, parts):
        """Best-effort delete of any stored documents for a Moodle ID whose
        page turned out to be invalid."""
        try:
            if self.scrape_type == "course":
                self.db.courses.delete_one({'mID': mid})
                self.db.advisements.delete_one({'mID': mid})
            else:
                self.db.students.delete_one({'mID': mid})
                self.db.teachers.delete_one({'mID': mid})
        except:
            pass

    def quit(self):
        """Close the MongoDB connection (if open) and exit the process."""
        if self.client is not None:
            self.client.close()
        sys.exit(0)
def main():
    """Script entry point: run a scrape using the module-level CLI settings."""
    TRMS(PATH, DB_URL, DB_NAME, SCRAPE_TYPE, START_AT, END_AT)
if __name__ == "__main__":
    main()
|
|
# Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_provision
from cinder.volume.drivers.emc import emc_vmax_utils
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# Masking group type codes used by the SMI-S provider
# -- presumably from the SMI-S masking profile; TODO confirm against spec.
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
INITIATORGROUPTYPE = 2
# Transport protocols this masking helper distinguishes.
ISCSI = 'iscsi'
FC = 'fc'
# Root CIM namespace on the ECOM server.
EMC_ROOT = 'root/emc'
class EMCVMAXMasking(object):
"""Masking class for SMI-S based EMC volume drivers.
Masking code to dynamically create a masking view
This masking class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
    """Record the transport protocol and build the helper objects.

    :param prtcl: the protocol in use (ISCSI or FC)
    """
    self.protocol = prtcl
    self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
    self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
    self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict):
    """Get or Create a masking view.

    Given a masking view tuple either get or create a masking view and add
    the volume to the associated storage group.

    For FAST volumes the volume is first removed from the policy's default
    storage group; if any later step fails, the rollback logic in the
    exception handler returns the volume to that default group so it is
    never stranded outside FAST control.

    :param conn: the connection to ecom
    :param maskingViewDict: the masking view tuple
    :returns: dict rollbackDict
    :raises VolumeBackendAPIException: if any step of building or
        verifying the masking view fails
    """
    rollbackDict = {}
    controllerConfigService = maskingViewDict['controllerConfigService']
    sgGroupName = maskingViewDict['sgGroupName']
    volumeInstance = maskingViewDict['volumeInstance']
    igGroupName = maskingViewDict['igGroupName']
    connector = maskingViewDict['connector']
    storageSystemName = maskingViewDict['storageSystemName']
    maskingViewName = maskingViewDict['maskingViewName']
    volumeName = maskingViewDict['volumeName']
    pgGroupName = maskingViewDict['pgGroupName']
    fastPolicyName = maskingViewDict['fastPolicy']
    defaultStorageGroupInstanceName = None
    # We need a rollback scenario for FAST.
    # We must make sure that the volume is returned to the default
    # storage group if anything goes wrong.
    if fastPolicyName is not None:
        defaultStorageGroupInstanceName = (
            self.fast.get_and_verify_default_storage_group(
                conn, controllerConfigService, volumeInstance.path,
                volumeName, fastPolicyName))
        if defaultStorageGroupInstanceName is None:
            exceptionMessage = (_(
                "Cannot get the default storage group for FAST policy: "
                "%(fastPolicyName)s. ")
                % {'fastPolicyName': fastPolicyName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
        retStorageGroupInstanceName = (
            self.remove_device_from_default_storage_group(
                conn, controllerConfigService, volumeInstance.path,
                volumeName, fastPolicyName))
        if retStorageGroupInstanceName is None:
            exceptionMessage = (_(
                "Failed to remove volume %(volumeName)s from default SG: "
                "%(volumeName)s. ")
                % {'volumeName': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
    try:
        maskingViewInstanceName = self._find_masking_view(
            conn, maskingViewName, storageSystemName)
        if maskingViewInstanceName is None:
            # No masking view yet: assemble storage group, port group
            # and initiator group, then create the view from them.
            # NOTE: these used to be bare `raise` statements with no
            # active exception; raise the backend exception explicitly
            # so the logged message matches the raised error.
            storageGroupInstanceName = (
                self._get_storage_group_instance_name(
                    conn, controllerConfigService, volumeInstance,
                    volumeName, sgGroupName, fastPolicyName,
                    storageSystemName, defaultStorageGroupInstanceName))
            if storageGroupInstanceName is None:
                exceptionMessage = (_(
                    "Cannot get or create a storage group: %(sgGroupName)s"
                    " for volume %(volumeName)s ")
                    % {'sgGroupName': sgGroupName,
                       'volumeName': volumeName})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
            portGroupInstanceName = self._get_port_group_instance_name(
                conn, controllerConfigService, pgGroupName)
            if portGroupInstanceName is None:
                exceptionMessage = (_(
                    "Cannot get port group: %(pgGroupName)s. ")
                    % {'pgGroupName': pgGroupName})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
            initiatorGroupInstanceName = (
                self._get_initiator_group_instance_name(
                    conn, controllerConfigService, igGroupName, connector,
                    storageSystemName))
            if initiatorGroupInstanceName is None:
                exceptionMessage = (_(
                    "Cannot get or create initiator group: "
                    "%(igGroupName)s. ")
                    % {'igGroupName': igGroupName})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
            maskingViewInstanceName = (
                self._get_masking_view_instance_name(
                    conn, controllerConfigService, maskingViewName,
                    storageGroupInstanceName, portGroupInstanceName,
                    initiatorGroupInstanceName))
            if maskingViewInstanceName is None:
                exceptionMessage = (_(
                    "Cannot create masking view: %(maskingViewName)s. ")
                    % {'maskingViewName': maskingViewName})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        else:
            # first verify that the initiator group matches the initiators
            if not self._verify_initiator_group_from_masking_view(
                    conn, controllerConfigService, maskingViewName,
                    connector, storageSystemName, igGroupName):
                exceptionMessage = (_(
                    "Unable to verify initiator group: %(igGroupName)s"
                    "in masking view %(maskingViewName)s ")
                    % {'igGroupName': igGroupName,
                       'maskingViewName': maskingViewName})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
            # get the storage group from the masking view and add the
            # volume to it (unless it is already a member).
            storageGroupInstanceName = (
                self._get_storage_group_from_masking_view(
                    conn, maskingViewName, storageSystemName))
            if storageGroupInstanceName is None:
                exceptionMessage = (_(
                    "Cannot get storage group from masking view: "
                    "%(maskingViewName)s. ")
                    % {'maskingViewName': maskingViewName})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
            if self._is_volume_in_storage_group(
                    conn, storageGroupInstanceName,
                    volumeInstance):
                LOG.warn(_(
                    "Volume: %(volumeName)s is already part "
                    "of storage group %(sgGroupName)s ")
                    % {'volumeName': volumeName,
                       'sgGroupName': sgGroupName})
            else:
                self.add_volume_to_storage_group(
                    conn, controllerConfigService,
                    storageGroupInstanceName, volumeInstance, volumeName,
                    sgGroupName, fastPolicyName, storageSystemName)
    except Exception as e:
        # rollback code: if we cannot complete any of the steps above
        # successfully then we must roll back by adding the volume back to
        # the default storage group for that fast policy.
        if (fastPolicyName is not None and
                defaultStorageGroupInstanceName is not None):
            # if the exception happened before the volume was removed from
            # the default storage group no action is taken
            self._check_if_rollback_action_for_masking_required(
                conn, controllerConfigService, volumeInstance, volumeName,
                fastPolicyName, defaultStorageGroupInstanceName)
        LOG.error(_("Exception: %s") % six.text_type(e))
        errorMessage = (_(
            "Failed to get or create masking view %(maskingViewName)s ")
            % {'maskingViewName': maskingViewName})
        LOG.error(errorMessage)
        # BUGFIX: the exception was previously instantiated but never
        # raised, silently swallowing masking failures and returning a
        # rollbackDict as if the operation had succeeded.
        raise exception.VolumeBackendAPIException(data=errorMessage)
    rollbackDict['controllerConfigService'] = controllerConfigService
    rollbackDict['defaultStorageGroupInstanceName'] = (
        defaultStorageGroupInstanceName)
    rollbackDict['volumeInstance'] = volumeInstance
    rollbackDict['volumeName'] = volumeName
    rollbackDict['fastPolicyName'] = fastPolicyName
    return rollbackDict
def _is_volume_in_storage_group(
        self, conn, storageGroupInstanceName, volumeInstance):
    """Return True when the volume already belongs to the storage group.

    The storage group the volume currently sits in (if any) is compared
    by ElementName against the target storage group; a match means the
    volume does not need to be re-added.

    :param conn: the connection to ecom
    :param storageGroupInstanceName: the storage group instance name
    :param volumeInstance: the volume instance
    :returns: boolean True/False
    """
    currentGroupInstanceName = (
        self.utils.get_storage_group_from_volume(
            conn, volumeInstance.path))
    targetGroup = conn.GetInstance(
        storageGroupInstanceName, LocalOnly=False)
    LOG.debug(
        "The existing storage group instance element name is: "
        "%(existingElement)s. "
        % {'existingElement': targetGroup['ElementName']})
    if currentGroupInstanceName is None:
        return False
    currentGroup = conn.GetInstance(
        currentGroupInstanceName, LocalOnly=False)
    LOG.debug(
        "The found storage group instance element name is: "
        "%(foundElement)s. "
        % {'foundElement': currentGroup['ElementName']})
    if currentGroup['ElementName'] == targetGroup['ElementName']:
        LOG.warn(_(
            "The volume is already part of storage group: "
            "%(storageGroupInstanceName)s. ")
            % {'storageGroupInstanceName': storageGroupInstanceName})
        return True
    return False
def _find_masking_view(self, conn, maskingViewName, storageSystemName):
"""Given the masking view name get the masking view instance.
:param conn: connection to the ecom server
:param maskingViewName: the masking view name
:param storageSystemName: the storage system name(String)
:returns: foundMaskingViewInstanceName masking view instance name
"""
foundMaskingViewInstanceName = None
maskingViewInstanceNames = conn.EnumerateInstanceNames(
'EMC_LunMaskingSCSIProtocolController')
for maskingViewInstanceName in maskingViewInstanceNames:
if storageSystemName == maskingViewInstanceName['SystemName']:
instance = conn.GetInstance(
maskingViewInstanceName, LocalOnly=False)
if maskingViewName == instance['ElementName']:
foundMaskingViewInstanceName = maskingViewInstanceName
break
if foundMaskingViewInstanceName is not None:
infoMessage = (_(
"Found existing masking view: %(maskingViewName)s ")
% {'maskingViewName': maskingViewName})
LOG.info(infoMessage)
return foundMaskingViewInstanceName
def _create_storage_group(
        self, conn, controllerConfigService, storageGroupName,
        volumeInstance, fastPolicyName, volumeName, storageSystemName,
        defaultStorageGroupInstanceName):
    """Create a new storage group that doesn't already exist.

    The group is created with the volume already in it. When a FAST
    policy applies (both fastPolicyName and the default storage group
    are supplied) the new group is additionally associated with the
    tier policy. Failure of either step yields None; exceptions are
    deliberately left to propagate up the stack.

    :param conn: connection the ecom server
    :param controllerConfigService: the controller configuration service
    :param storageGroupName: the proposed group name (String)
    :param volumeInstance: useful information on the volume
    :param fastPolicyName: the fast policy name (String) can be None
    :param volumeName: the volume name (String)
    :param storageSystemName: the storage system name (String)
    :param defaultStorageGroupInstanceName: the default storage group
        instance name (Can be None)
    :returns: the instance name of the new storage group, or None
    """
    newGroupInstanceName = (
        self.provision.create_and_get_storage_group(
            conn, controllerConfigService, storageGroupName,
            volumeInstance.path))
    if newGroupInstanceName is None:
        LOG.error(_(
            "Cannot get storage Group from job : %(storageGroupName)s. ")
            % {'storageGroupName': storageGroupName})
        return None
    LOG.info(_(
        "Created new storage group: %(storageGroupName)s ")
        % {'storageGroupName': storageGroupName})
    fastApplies = (fastPolicyName is not None and
                   defaultStorageGroupInstanceName is not None)
    if fastApplies:
        tierPolicyInstanceName = (
            self.fast.add_storage_group_and_verify_tier_policy_assoc(
                conn, controllerConfigService,
                newGroupInstanceName,
                storageGroupName, fastPolicyName))
        if tierPolicyInstanceName is None:
            LOG.error(_(
                "Cannot add and verify tier policy association for storage"
                " group : %(storageGroupName)s to FAST policy : "
                "%(fastPolicyName)s. ")
                % {'storageGroupName': storageGroupName,
                   'fastPolicyName': fastPolicyName})
            return None
    return newGroupInstanceName
def _find_port_group(self, conn, controllerConfigService, portGroupName):
"""Given the port Group name get the port group instance name.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param portGroupName: the name of the port group you are getting
:returns: foundPortGroup storage group instance name
"""
foundPortGroupInstanceName = None
portMaskingGroupInstanceNames = conn.AssociatorNames(
controllerConfigService, resultClass='CIM_TargetMaskingGroup')
for portMaskingGroupInstanceName in portMaskingGroupInstanceNames:
instance = conn.GetInstance(
portMaskingGroupInstanceName, LocalOnly=False)
if portGroupName == instance['ElementName']:
foundPortGroupInstanceName = portMaskingGroupInstanceName
break
if foundPortGroupInstanceName is None:
LOG.error(_(
"Could not find port group : %(portGroupName)s. Check that the"
" EMC configuration file has the correct port group name. ")
% {'portGroupName': portGroupName})
return foundPortGroupInstanceName
def _create_or_get_initiator_group(
        self, conn, controllerConfigService, igGroupName,
        connector, storageSystemName):
    """Return an initiator group for the connector, creating one if needed.

    NOTE: An initiator/wwn can only belong to one initiatorGroup, so an
    existing group already containing one of the connector's initiators
    is reused. Otherwise the initiators are matched against the hardware
    ids known to the array and a new group is created from them; None is
    returned when none of the initiators exist on the array.

    :param conn: connection to the ecom server
    :param controllerConfigService: the controller config Servicer
    :param igGroupName: the proposed name of the initiator group
    :param connector: the connector information to the host
    :param storageSystemName: the storage system name (String)
    :returns: the initiator group instance name, or None
    """
    initiatorNames = self._find_initiator_names(conn, connector)
    LOG.debug("The initiator name(s) are: %(initiatorNames)s "
              % {'initiatorNames': initiatorNames})
    igInstanceName = self._find_initiator_masking_group(
        conn, controllerConfigService, initiatorNames)
    if igInstanceName is not None:
        LOG.info("Using existing initiator group name: %(igGroupName)s "
                 % {'igGroupName': igGroupName})
        return igInstanceName
    # No matching group found: check that our connector information
    # matches the hardwareId(s) on the symm before creating a new group.
    storageHardwareIDInstanceNames = (
        self._get_storage_hardware_id_instance_names(
            conn, initiatorNames, storageSystemName))
    if not storageHardwareIDInstanceNames:
        LOG.error(_(
            "Initiator Name(s) %(initiatorNames)s are not on array "
            "%(storageSystemName)s ")
            % {'initiatorNames': initiatorNames,
               'storageSystemName': storageSystemName})
        return None
    igInstanceName = self._create_initiator_Group(
        conn, controllerConfigService, igGroupName,
        storageHardwareIDInstanceNames)
    LOG.info("Created new initiator group name: %(igGroupName)s "
             % {'igGroupName': igGroupName})
    return igInstanceName
def _find_initiator_names(self, conn, connector):
    """check the connector object for initiators(ISCSI) or wwpns(FC).

    :param conn: the connection to the ecom
    :param connector: the connector object
    :returns: list of string initiator names
    :raises VolumeBackendAPIException: when no initiators are present
    """
    names = []
    label = 'initiator name'
    protocol = self.protocol.lower()
    if protocol == ISCSI and connector['initiator']:
        names = [connector['initiator']]
    elif protocol == FC and connector['wwpns']:
        names = list(connector['wwpns'])
        label = 'world wide port names'
    if not names:
        msg = (_('Error finding %s.') % label)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug("Found %(name)s: %(initiator)s."
              % {'name': label,
                 'initiator': names})
    return names
def _find_initiator_masking_group(
self, conn, controllerConfigService, initiatorNames):
"""Check to see if an initiatorGroup already exists.
NOTE: An initiator/wwn can only belong to one initiatorGroup.
If we were to attempt to create one with an initiator/wwn that is
already belong to another initiatorGroup, it would fail
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration service
:param initiatorName: the list of initiator names
:returns: foundInitiatorMaskingGroup
"""
foundInitiatorMaskingGroupName = None
initiatorMaskingGroupNames = (
conn.AssociatorNames(controllerConfigService,
ResultClass='CIM_InitiatorMaskingGroup'))
for initiatorMaskingGroupName in initiatorMaskingGroupNames:
initiatorMaskingGroup = conn.GetInstance(
initiatorMaskingGroupName, LocalOnly=False)
associators = (
conn.Associators(initiatorMaskingGroup.path,
ResultClass='EMC_StorageHardwareID'))
for assoc in associators:
# if EMC_StorageHardwareID matches the initiator,
# we found the existing EMC_LunMaskingSCSIProtocolController
# (Storage Group for VNX)
# we can use for masking a new LUN
hardwareid = assoc['StorageID']
for initiator in initiatorNames:
if six.text_type(hardwareid).lower() == \
six.text_type(initiator).lower():
foundInitiatorMaskingGroupName = (
initiatorMaskingGroupName)
break
if foundInitiatorMaskingGroupName is not None:
break
if foundInitiatorMaskingGroupName is not None:
break
return foundInitiatorMaskingGroupName
def _get_storage_hardware_id_instance_names(
self, conn, initiatorNames, storageSystemName):
"""Given a list of initiator names find CIM_StorageHardwareID instance.
:param conn: the connection to the ecom server
:param initiatorName: the list of initiator names
:param storageSystemName: the storage system name
:returns: foundHardwardIDsInstanceNames
"""
foundHardwardIDsInstanceNames = []
hardwareIdManagementService = (
self.utils.find_storage_hardwareid_service(
conn, storageSystemName))
hardwareIdInstanceNames = (
self.utils.get_hardware_id_instance_names_from_array(
conn, hardwareIdManagementService))
for hardwareIdInstanceName in hardwareIdInstanceNames:
hardwareIdInstance = conn.GetInstance(hardwareIdInstanceName)
storageId = hardwareIdInstance['StorageID']
for initiatorName in initiatorNames:
LOG.debug("The storage Id is : %(storageId)s "
% {'storageId': storageId.lower()})
LOG.debug("The initiatorName is : %(initiatorName)s "
% {'initiatorName': initiatorName.lower()})
if storageId.lower() == initiatorName.lower():
foundHardwardIDsInstanceNames.append(
hardwareIdInstanceName)
break
LOG.debug(
"The found hardware IDs are : %(foundHardwardIDsInstanceNames)s "
% {'foundHardwardIDsInstanceNames': foundHardwardIDsInstanceNames})
return foundHardwardIDsInstanceNames
def _get_initiator_group_from_job(self, conn, job):
"""After creating an new initiator group find it and return it
:param conn: the connection to the ecom server
:param job: the create initiator group job
:returns: dict initiatorDict
"""
associators = conn.Associators(
job['Job'],
ResultClass='CIM_InitiatorMaskingGroup')
volpath = associators[0].path
initiatorDict = {}
initiatorDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
initiatorDict['keybindings'] = keys
return initiatorDict
def _create_masking_view(
self, conn, configService, maskingViewName, deviceMaskingGroup,
targetMaskingGroup, initiatorMaskingGroup):
"""After creating an new initiator group find it and return it.
:param conn: the connection to the ecom server
:param configService: the create initiator group job
:param maskingViewName: the masking view name string
:param deviceMaskingGroup: device(storage) masking group (instanceName)
:param targetMaskingGroup: target(port) masking group (instanceName)
:param initiatorMaskingGroup: initiator masking group (instanceName)
:returns: int rc return code
:returns: dict job
"""
rc, job = conn.InvokeMethod(
'CreateMaskingView', configService, ElementName=maskingViewName,
InitiatorMaskingGroup=initiatorMaskingGroup,
DeviceMaskingGroup=deviceMaskingGroup,
TargetMaskingGroup=targetMaskingGroup)
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Masking View: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'groupName': maskingViewName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.info(_("Created new masking view : %(maskingViewName)s ")
% {'maskingViewName': maskingViewName})
return rc, job
def find_new_masking_view(self, conn, jobDict):
"""Find the newly created volume
:param conn: the connection to the ecom server
:param jobDict: the job tuple
:returns: instance maskingViewInstance
"""
associators = conn.Associators(
jobDict['Job'],
ResultClass='Symm_LunMaskingView')
mvpath = associators[0].path
maskingViewInstance = {}
maskingViewInstance['classname'] = mvpath.classname
keys = {}
keys['CreationClassName'] = mvpath['CreationClassName']
keys['SystemName'] = mvpath['SystemName']
keys['DeviceID'] = mvpath['DeviceID']
keys['SystemCreationClassName'] = mvpath['SystemCreationClassName']
maskingViewInstance['keybindings'] = keys
return maskingViewInstance
def _get_storage_group_from_masking_view(
self, conn, maskingViewName, storageSystemName):
"""Gets the Device Masking Group from masking view.
:param conn: the connection to the ecom server
:param maskingViewName: the masking view name (String)
:param storageSystemName: storage system name (String)
:returns: instance name foundStorageGroupInstanceName
"""
foundStorageGroupInstanceName = None
maskingviews = conn.EnumerateInstanceNames(
'EMC_LunMaskingSCSIProtocolController')
for view in maskingviews:
if storageSystemName == view['SystemName']:
instance = conn.GetInstance(view, LocalOnly=False)
if maskingViewName == instance['ElementName']:
foundView = view
break
groups = conn.AssociatorNames(
foundView,
ResultClass='CIM_DeviceMaskingGroup')
if groups[0] > 0:
foundStorageGroupInstanceName = groups[0]
LOG.debug("Masking view: %(view)s DeviceMaskingGroup: %(masking)s."
% {'view': maskingViewName,
'masking': foundStorageGroupInstanceName})
return foundStorageGroupInstanceName
def _get_storage_group_instance_name(
self, conn, controllerConfigService, volumeInstance, volumeName,
sgGroupName, fastPolicyName, storageSystemName,
defaultStorageGroupInstanceName):
"""Gets the storage group instance name.
If fastPolicy name is None
then NON FAST is assumed. If it is a valid fastPolicy name
then associate the new storage group with the fast policy.
If we are using an existing storage group then we must check that
it is associated with the correct fast policy
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
:param volumeInstance: the volume instance
:param volumeName: the volume name (String)
:param sgGroupName: the storage group name (String)
:param fastPolicyName: the fast policy name (String): can be None
:param storageSystemName: the storage system name (String)
:param defaultStorageGroupInstanceName: default storage group instance
name (can be None for Non FAST)
:returns: instance name storageGroupInstanceName
"""
storageGroupInstanceName = self.utils.find_storage_masking_group(
conn, controllerConfigService, sgGroupName)
if storageGroupInstanceName is None:
storageGroupInstanceName = self._create_storage_group(
conn, controllerConfigService, sgGroupName, volumeInstance,
fastPolicyName, volumeName, storageSystemName,
defaultStorageGroupInstanceName)
if storageGroupInstanceName is None:
errorMessage = (_(
"Cannot create or find an storage group with name "
"%(sgGroupName)s")
% {'sgGroupName': sgGroupName})
LOG.error(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
else:
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName, volumeInstance):
LOG.warn(_("Volume: %(volumeName)s is already "
"part of storage group %(sgGroupName)s ")
% {'volumeName': volumeName,
'sgGroupName': sgGroupName})
else:
self.add_volume_to_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, sgGroupName, fastPolicyName,
storageSystemName)
return storageGroupInstanceName
def _get_port_group_instance_name(
self, conn, controllerConfigService, pgGroupName):
"""Gets the port group instance name.
The portGroup name has been defined in the EMC Config file if it
does not exist the operation should fail
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
:param pgGroupName: the port group name
:returns: instance name foundPortGroupInstanceName
"""
foundPortGroupInstanceName = self._find_port_group(
conn, controllerConfigService, pgGroupName)
if foundPortGroupInstanceName is None:
errorMessage = (_(
"Cannot find a portGroup with name %(pgGroupName)s. "
"The port group for a masking view must be pre-defined")
% {'pgGroupName': pgGroupName})
LOG.error(errorMessage)
return foundPortGroupInstanceName
LOG.info(_(
"Port group instance name is %(foundPortGroupInstanceName)s")
% {'foundPortGroupInstanceName': foundPortGroupInstanceName})
return foundPortGroupInstanceName
def _get_initiator_group_instance_name(
self, conn, controllerConfigService, igGroupName, connector,
storageSystemName):
"""Gets the initiator group instance name.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
:param igGroupName: the port group name
:param connector: the connector object
:param storageSystemName = the storage system name
:returns: instance name foundInitiatorGroupInstanceName
"""
foundInitiatorGroupInstanceName = (self._create_or_get_initiator_group(
conn, controllerConfigService, igGroupName, connector,
storageSystemName))
if foundInitiatorGroupInstanceName is None:
errorMessage = (_(
"Cannot create or find an initiator group with "
"name %(igGroupName)s")
% {'igGroupName': igGroupName})
LOG.error(errorMessage)
return foundInitiatorGroupInstanceName
def _get_masking_view_instance_name(
self, conn, controllerConfigService, maskingViewName,
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName):
"""Gets the masking view instance name
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
:param maskingViewName: the masking view name (String)
:param storageGroupInstanceName: the storage group instance name
:param portGroupInstanceName: the port group instance name
:param initiatorGroupInstanceName: the initiator group instance name
:returns: instance name foundMaskingViewInstanceName
"""
rc, job = self._create_masking_view(
conn, controllerConfigService, maskingViewName,
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName)
foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
if foundMaskingViewInstanceName is None:
errorMessage = (_(
"Cannot find the new masking view just created with name "
"%(maskingViewName)s")
% {'maskingViewName': maskingViewName})
LOG.error(errorMessage)
return foundMaskingViewInstanceName
    def _check_if_rollback_action_for_masking_required(
            self, conn, controllerConfigService, volumeInstance,
            volumeName, fastPolicyName, defaultStorageGroupInstanceName):
        """This is a rollback action for FAST.

        We need to be able to return the volume to the default storage
        group if anything has gone wrong.  The volume can also
        potentially belong to a storage group that is not the default,
        depending on where the exception occurred.

        :param conn: the connection to the ecom server
        :param controllerConfigService: the controller config service
        :param volumeInstance: the volume instance
        :param volumeName: the volume name (String)
        :param fastPolicyName: the fast policy name (String)
        :param defaultStorageGroupInstanceName: the default storage group
            instance name
        :raises: VolumeBackendAPIException when the rollback itself fails
        """
        try:
            foundStorageGroupInstanceName = (
                self.utils.get_storage_group_from_volume(
                    conn, volumeInstance.path))
            # volume is not associated with any storage group so add it back
            # to the default
            if len(foundStorageGroupInstanceName) == 0:
                infoMessage = (_(
                    "Performing rollback on Volume: %(volumeName)s "
                    "To return it to the default storage group for FAST policy"
                    " %(fastPolicyName)s. ")
                    % {'volumeName': volumeName,
                       'fastPolicyName': fastPolicyName})
                LOG.warn("No storage group found. " + infoMessage)
                assocDefaultStorageGroupName = (
                    self.fast
                    .add_volume_to_default_storage_group_for_fast_policy(
                        conn, controllerConfigService, volumeInstance,
                        volumeName, fastPolicyName))
                # Best effort: log (do not raise) so the admin can re-add
                # the volume manually if the rollback add fails.
                if assocDefaultStorageGroupName is None:
                    errorMsg = (_(
                        "Failed to Roll back to re-add volume %(volumeName)s "
                        "to default storage group for fast policy "
                        "%(fastPolicyName)s: Please contact your sys admin to "
                        "get the volume re-added manually ")
                        % {'volumeName': volumeName,
                           'fastPolicyName': fastPolicyName})
                    LOG.error(errorMsg)
            if len(foundStorageGroupInstanceName) > 0:
                errorMsg = (_(
                    "The storage group found is "
                    "%(foundStorageGroupInstanceName)s: ")
                    % {'foundStorageGroupInstanceName':
                        foundStorageGroupInstanceName})
                LOG.info(errorMsg)
                # check the name see is it the default storage group or another
                if (foundStorageGroupInstanceName !=
                        defaultStorageGroupInstanceName):
                    # remove it from its current masking view and return it
                    # to its default masking view if fast is enabled
                    self.remove_and_reset_members(
                        conn, controllerConfigService, volumeInstance,
                        fastPolicyName, volumeName)
        except Exception as e:
            # Any failure during rollback is escalated -- the volume may
            # be stranded outside its default storage group.
            LOG.error(_("Exception: %s") % six.text_type(e))
            errorMessage = (_(
                "Rollback for Volume: %(volumeName)s has failed. "
                "Please contact your system administrator to manually return "
                "your volume to the default storage group for fast policy "
                "%(fastPolicyName)s failed ")
                % {'volumeName': volumeName,
                   'fastPolicyName': fastPolicyName})
            LOG.error(errorMessage)
            raise exception.VolumeBackendAPIException(data=errorMessage)
def _find_new_initiator_group(self, conn, maskingGroupDict):
"""After creating an new initiator group find it and return it.
:param conn: connection the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: instance name foundInitiatorGroupInstanceName
"""
foundInitiatorGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundInitiatorGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundInitiatorGroupInstanceName
def _get_initiator_group_from_masking_view(
self, conn, maskingViewName, storageSystemName):
"""Given the masking view name get the initiator group from it.
:param conn: connection the the ecom server
:param maskingViewName: the name of the masking view
:param storageSystemName: the storage system name
:returns: instance name foundInitiatorMaskingGroupInstanceName
"""
foundInitiatorMaskingGroupInstanceName = None
maskingviews = conn.EnumerateInstanceNames(
'EMC_LunMaskingSCSIProtocolController')
for view in maskingviews:
if storageSystemName == view['SystemName']:
instance = conn.GetInstance(view, LocalOnly=False)
if maskingViewName == instance['ElementName']:
foundView = view
break
groups = conn.AssociatorNames(
foundView,
ResultClass='CIM_InitiatorMaskingGroup')
if len(groups):
foundInitiatorMaskingGroupInstanceName = groups[0]
LOG.debug(
"Masking view: %(view)s InitiatorMaskingGroup: %(masking)s."
% {'view': maskingViewName,
'masking': foundInitiatorMaskingGroupInstanceName})
return foundInitiatorMaskingGroupInstanceName
    def _verify_initiator_group_from_masking_view(
            self, conn, controllerConfigService, maskingViewName, connector,
            storageSystemName, igGroupName):
        """Check that the initiator group contains the correct initiators.

        If using an existing masking view check that the initiator group
        contains the correct initiators.  If it does not contain the
        correct initiators then we delete the initiator group from the
        masking view, re-create it with the correct initiators and add it
        to the masking view.

        NOTE: EMC does not support ModifyMaskingView so we must first
        delete the masking view and recreate it.

        :param conn: connection to the ecom server
        :param controllerConfigService: the controller configuration service
        :param maskingViewName: maskingview name (String)
        :param connector: the connector dict
        :param storageSystemName: the storage System Name (string)
        :param igGroupName: the initiator group name (String)
        :returns: True on success / nothing to do, False on failure
        """
        initiatorNames = self._find_initiator_names(conn, connector)
        foundInitiatorGroupFromConnector = self._find_initiator_masking_group(
            conn, controllerConfigService, initiatorNames)
        foundInitiatorGroupFromMaskingView = (
            self._get_initiator_group_from_masking_view(
                conn, maskingViewName, storageSystemName))
        # Mismatch between the group the connector's initiators belong to
        # and the group wired into the masking view means the view must be
        # rebuilt.
        if (foundInitiatorGroupFromConnector !=
                foundInitiatorGroupFromMaskingView):
            # NOTE(review): 'maskingViewInstanceName' is only assigned in
            # this branch; if the view-side group were ever None here the
            # later _delete_masking_view call would hit an unbound local.
            # Confirm against callers.
            if foundInitiatorGroupFromMaskingView is not None:
                maskingViewInstanceName = self._find_masking_view(
                    conn, maskingViewName, storageSystemName)
                # The connector's initiators belong to no group yet, so a
                # new initiator group has to be created first.
                if foundInitiatorGroupFromConnector is None:
                    storageHardwareIDInstanceNames = (
                        self._get_storage_hardware_id_instance_names(
                            conn, initiatorNames, storageSystemName))
                    if not storageHardwareIDInstanceNames:
                        LOG.error(_(
                            "Initiator Name(s) %(initiatorNames)s are not on "
                            "array %(storageSystemName)s ")
                            % {'initiatorNames': initiatorNames,
                               'storageSystemName': storageSystemName})
                        return False
                    foundInitiatorGroupFromConnector = (
                        self._create_initiator_Group(
                            conn, controllerConfigService, igGroupName,
                            storageHardwareIDInstanceNames))
                storageGroupInstanceName = (
                    self._get_storage_group_from_masking_view(
                        conn, maskingViewName, storageSystemName))
                portGroupInstanceName = self._get_port_group_from_masking_view(
                    conn, maskingViewName, storageSystemName)
                # Rebuild the view only when every component is in hand:
                # delete the old view, then recreate it with the correct
                # initiator group (ModifyMaskingView is unsupported).
                if (foundInitiatorGroupFromConnector is not None and
                        storageGroupInstanceName is not None and
                        portGroupInstanceName is not None):
                    self._delete_masking_view(
                        conn, controllerConfigService, maskingViewName,
                        maskingViewInstanceName)
                    newMaskingViewInstanceName = (
                        self._get_masking_view_instance_name(
                            conn, controllerConfigService, maskingViewName,
                            storageGroupInstanceName, portGroupInstanceName,
                            foundInitiatorGroupFromConnector))
                    if newMaskingViewInstanceName is not None:
                        LOG.debug(
                            "The old masking view has been replaced: "
                            "%(maskingViewName)s. "
                            % {'maskingViewName': maskingViewName})
                else:
                    LOG.error(_(
                        "One of the components of the original masking view "
                        "%(maskingViewName)s cannot be retrieved so "
                        "please contact your system administrator to check "
                        "that the correct initiator(s) are part of masking ")
                        % {'maskingViewName': maskingViewName})
                    return False
        return True
def _create_initiator_Group(
self, conn, controllerConfigService, igGroupName,
hardwareIdinstanceNames):
"""Create a new initiator group
Given a list of hardwareId Instance name create a new
initiator group
:param conn: connection the ecom server
:param controllerConfigService: the controller configuration service
:param igGroupName: the initiator group name (String)
:param hardwareIdinstanceNames: one or more hardware id instance names
"""
rc, job = conn.InvokeMethod(
'CreateGroup', controllerConfigService, GroupName=igGroupName,
Type=self.utils.get_num(INITIATORGROUPTYPE, '16'),
Members=[hardwareIdinstanceNames[0]])
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'groupName': igGroupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
foundInitiatorGroupInstanceName = self._find_new_initiator_group(
conn, job)
numHardwareIDInstanceNames = len(hardwareIdinstanceNames)
if numHardwareIDInstanceNames > 1:
for j in range(1, numHardwareIDInstanceNames):
rc, job = conn.InvokeMethod(
'AddMembers', controllerConfigService,
MaskingGroup=foundInitiatorGroupInstanceName,
Members=[hardwareIdinstanceNames[j]])
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error adding initiator to group : %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'groupName': igGroupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
j = j + 1
return foundInitiatorGroupInstanceName
def _get_port_group_from_masking_view(
self, conn, maskingViewName, storageSystemName):
"""Given the masking view name get the port group from it
:param conn: connection the the ecom server
:param maskingViewName: the name of the masking view
:param storageSystemName: the storage system name
:returns: instance name foundPortMaskingGroupInstanceName
"""
foundPortMaskingGroupInstanceName = None
maskingviews = conn.EnumerateInstanceNames(
'EMC_LunMaskingSCSIProtocolController')
for view in maskingviews:
if storageSystemName == view['SystemName']:
instance = conn.GetInstance(view, LocalOnly=False)
if maskingViewName == instance['ElementName']:
foundView = view
break
groups = conn.AssociatorNames(
foundView,
ResultClass='CIM_TargetMaskingGroup')
if len(groups) > 0:
foundPortMaskingGroupInstanceName = groups[0]
LOG.debug(
"Masking view: %(view)s InitiatorMaskingGroup: %(masking)s."
% {'view': maskingViewName,
'masking': foundPortMaskingGroupInstanceName})
return foundPortMaskingGroupInstanceName
def _delete_masking_view(
self, conn, controllerConfigService, maskingViewName,
maskingViewInstanceName):
"""Delete a masking view
:param conn: connection the ecom server
:param controllerConfigService: the controller configuration service
:param maskingViewName: maskingview name (String)
:param maskingViewInstanceName: the masking view instance name
"""
rc, job = conn.InvokeMethod('DeleteMaskingView',
controllerConfigService,
ProtocolController=maskingViewInstanceName)
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Modifying masking view : %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'groupName': maskingViewName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
def get_masking_view_from_storage_group(
self, conn, storageGroupInstanceName):
"""Get the associated maskingview instance name
Given storage group instance name, get the associated masking
view instance name
:param conn: connection the ecom server
:param storageGroupInstanceName: the storage group instance name
:returns: instance name foundMaskingViewInstanceName
"""
foundMaskingViewInstanceName = None
maskingViews = conn.AssociatorNames(
storageGroupInstanceName,
ResultClass='Symm_LunMaskingView')
if len(maskingViews) > 0:
foundMaskingViewInstanceName = maskingViews[0]
return foundMaskingViewInstanceName
def add_volume_to_storage_group(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, sgGroupName, fastPolicyName,
storageSystemName=None):
"""Add a volume to an existing storage group
:param conn: connection to ecom server
:param controllerConfigService: the controller configuration service
:param storageGroup: storage group instance
:param volumeInstance: the volume instance
:param volumeName: the name of the volume (String)
:param sgGroupName: the name of the storage group (String)
:param fastPolicyName: the fast policy name (String) can be None
:param storageSystemName: the storage system name (Optional Parameter),
if None plain operation assumed
:returns: int rc the return code of the job
:returns: dict the job dict
"""
self.provision.add_members_to_masking_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance.path, volumeName)
infoMessage = (_(
"Added volume: %(volumeName)s to existing storage group "
"%(sgGroupName)s. ")
% {'volumeName': volumeName,
'sgGroupName': sgGroupName})
LOG.info(infoMessage)
    def remove_device_from_default_storage_group(
            self, conn, controllerConfigService, volumeInstanceName,
            volumeName, fastPolicyName):
        """Remove the volume from the default storage group.

        Remove the volume from the default storage group for the FAST
        policy and return the default storage group instance name.

        :param conn: the connection to the ecom server
        :param controllerConfigService: the controller config service
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param fastPolicyName: the fast policy name (String)
        :returns: instance name defaultStorageGroupInstanceName, or None
            when the volume was not in (or could not be removed from) the
            default storage group
        """
        failedRet = None
        defaultStorageGroupInstanceName = (
            self.fast.get_and_verify_default_storage_group(
                conn, controllerConfigService, volumeInstanceName,
                volumeName, fastPolicyName))
        # Not being in the default group is unexpected but non-fatal:
        # warn and signal failure to the caller.
        if defaultStorageGroupInstanceName is None:
            errorMessage = (_(
                "Volume %(volumeName)s was not first part of the default "
                "storage group for the FAST Policy")
                % {'volumeName': volumeName})
            LOG.warn(errorMessage)
            return failedRet
        assocVolumeInstanceNames = self.get_devices_from_storage_group(
            conn, defaultStorageGroupInstanceName)
        LOG.debug(
            "There are %(length)lu associated with the default storage group "
            "for fast before removing volume %(volumeName)s"
            % {'length': len(assocVolumeInstanceNames),
               'volumeName': volumeName})
        self.provision.remove_device_from_storage_group(
            conn, controllerConfigService, defaultStorageGroupInstanceName,
            volumeInstanceName, volumeName)
        # Re-query after the removal so the before/after counts can be
        # compared in the debug log.
        assocVolumeInstanceNames = self.get_devices_from_storage_group(
            conn, defaultStorageGroupInstanceName)
        LOG.debug(
            "There are %(length)lu associated with the default storage group "
            "for fast after removing volume %(volumeName)s"
            % {'length': len(assocVolumeInstanceNames),
               'volumeName': volumeName})
        # required for unit tests
        emptyStorageGroupInstanceName = (
            self._wrap_get_storage_group_from_volume(conn, volumeInstanceName))
        # Validate the removal actually took: the volume should no longer
        # resolve to any storage group.
        if emptyStorageGroupInstanceName is not None:
            errorMessage = (_(
                "Failed to remove %(volumeName)s from the default storage "
                "group for the FAST Policy")
                % {'volumeName': volumeName})
            LOG.error(errorMessage)
            return failedRet
        return defaultStorageGroupInstanceName
def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName):
"""Wrapper for get_storage_group_from_volume.
Needed for override in tests
:param conn: the connection to the ecom server
:param volumeInstanceName: the volume instance name
:returns: emptyStorageGroupInstanceName
"""
return self.utils.get_storage_group_from_volume(
conn, volumeInstanceName)
def get_devices_from_storage_group(
self, conn, storageGroupInstanceName):
"""Get the associated volume Instance names
Given the storage group instance name get the associated volume
Instance names
:param conn: connection the the ecom server
:param storageGroupInstanceName: the storage group instance name
:returns: list volumeInstanceNames list of volume instance names
"""
volumeInstanceNames = conn.AssociatorNames(
storageGroupInstanceName,
ResultClass='EMC_StorageVolume')
return volumeInstanceNames
def get_associated_masking_group_from_device(
self, conn, volumeInstanceName):
maskingGroupInstanceNames = conn.AssociatorNames(
volumeInstanceName,
ResultClass='CIM_DeviceMaskingGroup',
AssocClass='CIM_OrderedMemberOfCollection')
if len(maskingGroupInstanceNames) > 0:
return maskingGroupInstanceNames[0]
else:
return None
    def remove_and_reset_members(
            self, conn, controllerConfigService, volumeInstance,
            fastPolicyName, volumeName):
        """Part of unmap device or rollback.

        Removes the volume from the Device Masking Group that belongs to
        a Masking View.  Check if a fast policy is in the extra specs; if
        it isn't we do not need to do anything for FAST.  Assume that
        isTieringPolicySupported is False unless the FAST policy is in
        the extra specs and tiering is enabled on the array.

        :param conn: connection to the ecom server
        :param controllerConfigService: the controller configuration service
        :param volumeInstance: the volume Instance
        :param fastPolicyName: the fast policy name (if it exists)
        :param volumeName: the volume name
        :returns: int rc from the remove-device job (-1 if never set)
        """
        rc = -1
        # NOTE(review): if the volume is in no masking group this returns
        # None and the member query below receives None -- presumably the
        # callers guarantee membership; confirm.
        maskingGroupInstanceName = (
            self.get_associated_masking_group_from_device(
                conn, volumeInstance.path))
        volumeInstanceNames = self.get_devices_from_storage_group(
            conn, maskingGroupInstanceName)
        storageSystemInstanceName = self.utils.find_storage_system(
            conn, controllerConfigService)
        isTieringPolicySupported = False
        if fastPolicyName is not None:
            # Tiering support is only queried when a FAST policy was
            # supplied; the service name is reused further down.
            tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
                conn, storageSystemInstanceName)
            isTieringPolicySupported = self.fast.is_tiering_policy_enabled(
                conn, tierPolicyServiceInstanceName)
            LOG.debug(
                "FAST policy enabled on %(storageSystem)s: %(isSupported)s"
                % {'storageSystem': storageSystemInstanceName,
                   'isSupported': isTieringPolicySupported})
        numVolInMaskingView = len(volumeInstanceNames)
        LOG.debug(
            "There are %(numVol)d volumes in the masking view %(maskingGroup)s"
            % {'numVol': numVolInMaskingView,
               'maskingGroup': maskingGroupInstanceName})
        if numVolInMaskingView == 1:  # last volume in the storage group
            # delete masking view
            mvInstanceName = self.get_masking_view_from_storage_group(
                conn, maskingGroupInstanceName)
            LOG.debug(
                "Last volume in the storage group, deleting masking view "
                "%(mvInstanceName)s"
                % {'mvInstanceName': mvInstanceName})
            conn.DeleteInstance(mvInstanceName)
            # disassociate storage group from FAST policy
            if fastPolicyName is not None and isTieringPolicySupported is True:
                tierPolicyInstanceName = self.fast.get_tier_policy_by_name(
                    conn, storageSystemInstanceName['Name'], fastPolicyName)
                LOG.info(_(
                    "policy:%(policy)s, policy service:%(service)s, "
                    "masking group=%(maskingGroup)s")
                    % {'policy': tierPolicyInstanceName,
                       'service': tierPolicyServiceInstanceName,
                       'maskingGroup': maskingGroupInstanceName})
                self.fast.delete_storage_group_from_tier_policy_rule(
                    conn, tierPolicyServiceInstanceName,
                    maskingGroupInstanceName, tierPolicyInstanceName)
            rc = self.provision.remove_device_from_storage_group(
                conn, controllerConfigService, maskingGroupInstanceName,
                volumeInstance.path, volumeName)
            LOG.debug(
                "Remove the last volume %(volumeName)s completed successfully."
                % {'volumeName': volumeName})
            # Delete storage group
            conn.DeleteInstance(maskingGroupInstanceName)
            if isTieringPolicySupported:
                self._cleanup_tiering(
                    conn, controllerConfigService, fastPolicyName,
                    volumeInstance, volumeName)
        else:
            # not the last volume
            LOG.debug("start: number of volumes in masking storage group: "
                      "%(numVol)d" % {'numVol': len(volumeInstanceNames)})
            rc = self.provision.remove_device_from_storage_group(
                conn, controllerConfigService, maskingGroupInstanceName,
                volumeInstance.path, volumeName)
            LOG.debug(
                "RemoveMembers for volume %(volumeName)s completed "
                "successfully." % {'volumeName': volumeName})
            # if FAST POLICY enabled, move the volume to the default SG
            if fastPolicyName is not None and isTieringPolicySupported:
                self._cleanup_tiering(
                    conn, controllerConfigService, fastPolicyName,
                    volumeInstance, volumeName)
            # validation
            volumeInstanceNames = self.get_devices_from_storage_group(
                conn, maskingGroupInstanceName)
            LOG.debug(
                "end: number of volumes in masking storage group: %(numVol)d"
                % {'numVol': len(volumeInstanceNames)})
        return rc
def _cleanup_tiering(
self, conn, controllerConfigService, fastPolicyName,
volumeInstance, volumeName):
"""Cleanup tiering
:param conn: the ecom connection
:param controllerConfigService: the controller configuration service
:param fastPolicyName: the fast policy name
:param volumeInstance: volume instance
:param volumeName: the volume name
"""
defaultStorageGroupInstanceName = (
self.fast.get_policy_default_storage_group(
conn, controllerConfigService, fastPolicyName))
volumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
LOG.debug(
"start: number of volumes in default storage group: %(numVol)d"
% {'numVol': len(volumeInstanceNames)})
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigService, volumeInstance, volumeName,
fastPolicyName))
# check default storage group number of volumes
volumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
LOG.debug(
"end: number of volumes in default storage group: %(numVol)d"
% {'numVol': len(volumeInstanceNames)})
|
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import str
from builtins import range
from past.utils import old_div
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
from pylab import *
import pmagpy.pmagplotlib as pmagplotlib
# contributed by Ron Shaar 6/26/08
#
def smooth(x, window_len, window='bartlett'):
    """Smooth a 1-D signal by convolving it with a sliding window.

    The signal is padded at both ends with the average of its first
    (respectively last) ten values so the smoothed curve does not jump
    at the boundaries.

    Parameters
    ----------
    x : 1-D numpy array of equally spaced samples
    window_len : int
        Width of the smoothing window; if < 3 the input is returned
        unchanged.
    window : str
        One of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
        'flat' gives a moving average; 'bartlett' (the default, a
        triangular window ending in zeros) is used for Curie
        temperature calculation.

    Returns
    -------
    numpy array of the smoothed signal, same length as x.

    Raises
    ------
    ValueError
        If x is not 1-D, is shorter than window_len, or window is not a
        recognized name.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Window too narrow to smooth anything.
        return x
    # numpy available windows
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad both ends with the mean of the first/last ten samples to avoid
    # edge effects.
    start = [numpy.average(x[0:10])] * window_len
    end = [numpy.average(x[-10:])] * window_len
    s = start + list(x) + end
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of eval().
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # Strip the padding so the output lines up with the input.
    return numpy.array(y[window_len:-window_len])
def deriv1(x, y, i, n):
    """Estimate the slope dy/dx at position ``i`` of a noisy signal.

    The slope is obtained from a least-squares straight-line fit through
    the ``n`` consecutive points x[i:i+n], y[i:i+n].

    Parameters
    ----------
    x, y : sequences of x- and y-axis values
    i : int, starting index of the fit window
    n : int, number of points in the fit window (smoothing factor)

    Returns
    -------
    float : the fitted slope.
    """
    # Float accumulators so the final division is true division.
    sx = sy = sxy = sx2 = 0.
    for ix in range(i, i + n):
        sx += x[ix]
        sy += y[ix]
        sxy += x[ix] * y[ix]
        sx2 += x[ix] ** 2
    # Standard least-squares slope: (n*Sxy - Sx*Sy) / (n*Sx2 - Sx**2).
    return (n * sxy - sx * sy) / (n * sx2 - sx ** 2)
def main():
    """
    NAME
        curie.py

    DESCTIPTION
        plots and interprets curie temperature data.
        the 1st derivative is calculated from smoothed M-T curve
        (convolution with trianfular window with width= <-w> degrees)
        the 2nd derivative is calculated from smoothed 1st derivative curve
        ( using the same sliding window width)
        the estinated curie temp. is the maximum of the 2nd derivative

        - the temperature steps should be in multiples of 1.0 degrees

    INPUT
        T,M

    SYNTAX
        curie.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f FILE, sets M,T input file (required)
        -w size of sliding window in degrees (default - 3 degrees)
        -t <min> <max> temperature range (optional)
        -sav save figures and quit
        -fmt [svg,jpg,eps,png,pdf] set format for figure output [default: svg]

    example:
        curie.py -f ex2.1 -w 30 -t 300 700
    """
    # plot=1 means "save and quit" (-sav); fmt is the output image format.
    plot, fmt = 0, 'svg'
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        meas_file = sys.argv[ind + 1]
    else:
        print("missing -f\n")
        sys.exit()
    if '-w' in sys.argv:
        ind = sys.argv.index('-w')
        window_len = int(sys.argv[ind + 1])
    else:
        window_len = 3
    if '-t' in sys.argv:
        ind = sys.argv.index('-t')
        t_begin = int(sys.argv[ind + 1])
        t_end = int(sys.argv[ind + 2])
    else:
        # Empty string doubles as "no range given" (falsy) below.
        t_begin = ''
        t_end = ''
    if '-sav' in sys.argv: plot = 1
    if '-fmt' in sys.argv:
        ind = sys.argv.index('-fmt')
        fmt = sys.argv[ind + 1]
    # read data from file
    # NOTE(review): numpy.float was removed in NumPy >= 1.24; use float —
    # confirm the NumPy version this is pinned to.
    Data = numpy.loadtxt(meas_file, dtype=numpy.float)
    T = Data.transpose()[0]
    M = Data.transpose()[1]
    T = list(T)
    M = list(M)
    # cut the data if -t is one of the flags
    if t_begin:
        while T[0] < t_begin:
            M.pop(0); T.pop(0)
        while T[-1] > t_end:
            M.pop(-1); T.pop(-1)
    # prepare the signal:
    # from M(T) array with unequal deltaT
    # to M(T) array with deltaT=(1 degree).
    # if delataT is larger, then points are added using linear fit between
    # consecutive data points.
    # exit if deltaT is not integer
    i = 0
    while i < (len(T) - 1):
        if (T[i + 1] - T[i]) % 1 > 0.001:
            print("delta T should be integer, this program will not work!")
            print("temperature range:", T[i], T[i + 1])
            sys.exit()
        if (T[i + 1] - T[i]) == 0.:
            # Duplicate temperature: merge the two measurements.
            M[i] = average([M[i], M[i + 1]])
            M.pop(i + 1); T.pop(i + 1)
        elif (T[i + 1] - T[i]) < 0.:
            # Temperature went backwards: drop the offending point.
            M.pop(i + 1); T.pop(i + 1)
            print("check data in T=%.0f ,M[T] is ignored" % (T[i]))
        elif (T[i + 1] - T[i]) > 1.:
            # Gap larger than one degree: fill it by linear interpolation,
            # one degree at a time.
            slope, b = polyfit([T[i], T[i + 1]], [M[i], M[i + 1]], 1)
            for j in range(int(T[i + 1]) - int(T[i]) - 1):
                M.insert(i + 1, slope * (T[i] + 1.) + b)
                T.insert(i + 1, (T[i] + 1.))
                i = i + 1
        i = i + 1
    # calculate the smoothed signal
    M = array(M, 'f')
    T = array(T, 'f')
    M_smooth = []
    M_smooth = smooth(M, window_len)
    # plot the original data and the smooth data
    PLT = {'M_T': 1, 'der1': 2, 'der2': 3, 'Curie': 4}
    pmagplotlib.plot_init(PLT['M_T'], 5, 5)
    string = 'M-T (sliding window=%i)' % int(window_len)
    pmagplotlib.plotXY(PLT['M_T'], T, M_smooth, sym='-')
    pmagplotlib.plotXY(PLT['M_T'], T, M, sym='--', xlab='Temperature C', ylab='Magnetization', title=string)
    # calculate first derivative
    # NOTE(review): at i=0 the central difference below reads
    # M_smooth[-1] (wrap-around); confirm this edge value is acceptable.
    d1, T_d1 = [], []
    for i in range(len(M_smooth) - 1):
        Dy = M_smooth[i - 1] - M_smooth[i + 1]
        Dx = T[i - 1] - T[i + 1]
        d1.append(old_div(Dy, Dx))
    # NOTE(review): len(T-1) subtracts elementwise on an array, so this is
    # T[1:len(T)]; the lengths happen to match d1 — probably meant len(T)-1.
    T_d1 = T[1:len(T - 1)]
    d1 = array(d1, 'f')
    d1_smooth = smooth(d1, window_len)
    # plot the first derivative
    pmagplotlib.plot_init(PLT['der1'], 5, 5)
    string = '1st derivative (sliding window=%i)' % int(window_len)
    pmagplotlib.plotXY(PLT['der1'], T_d1, d1_smooth, sym='-', xlab='Temperature C', title=string)
    pmagplotlib.plotXY(PLT['der1'], T_d1, d1, sym='b--')
    # calculate second derivative
    d2, T_d2 = [], []
    for i in range(len(d1_smooth) - 1):
        Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
        Dx = T[i - 1] - T[i + 1]
        # print Dy/Dx
        d2.append(old_div(Dy, Dx))
    T_d2 = T[2:len(T - 2)]
    d2 = array(d2, 'f')
    d2_smooth = smooth(d2, window_len)
    # plot the second derivative
    pmagplotlib.plot_init(PLT['der2'], 5, 5)
    string = '2nd derivative (sliding window=%i)' % int(window_len)
    pmagplotlib.plotXY(PLT['der2'], T_d2, d2, sym='-', xlab='Temperature C', title=string)
    d2 = list(d2)
    # The Curie temperature estimate: maximum of the 2nd derivative.
    print('second derivative maximum is at T=%i' % int(T_d2[d2.index(max(d2))]))
    # calculate Curie temperature for different width of sliding windows
    curie, curie_1 = [], []
    wn = list(range(5, 50, 1))
    for win in wn:
        # calculate the smoothed signal
        M_smooth = []
        M_smooth = smooth(M, win)
        # calculate first derivative
        d1, T_d1 = [], []
        for i in range(len(M_smooth) - 1):
            Dy = M_smooth[i - 1] - M_smooth[i + 1]
            Dx = T[i - 1] - T[i + 1]
            d1.append(old_div(Dy, Dx))
        T_d1 = T[1:len(T - 1)]
        d1 = array(d1, 'f')
        d1_smooth = smooth(d1, win)
        # calculate second derivative
        d2, T_d2 = [], []
        for i in range(len(d1_smooth) - 1):
            Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
            Dx = T[i - 1] - T[i + 1]
            d2.append(old_div(Dy, Dx))
        T_d2 = T[2:len(T - 2)]
        d2 = array(d2, 'f')
        d2_smooth = smooth(d2, win)
        d2 = list(d2)
        d2_smooth = list(d2_smooth)
        # Curie estimate from the raw and from the smoothed 2nd derivative.
        curie.append(T_d2[d2.index(max(d2))])
        curie_1.append(T_d2[d2_smooth.index(max(d2_smooth))])
    # plot Curie temp for different sliding window length
    pmagplotlib.plot_init(PLT['Curie'], 5, 5)
    pmagplotlib.plotXY(PLT['Curie'], wn, curie, sym='.', xlab='Sliding Window Width (degrees)', ylab='Curie Temp', title='Curie Statistics')
    files = {}
    for key in list(PLT.keys()): files[key] = str(key) + "." + fmt
    if plot == 0:
        # Interactive mode: show the figures and ask what to do.
        pmagplotlib.drawFIGS(PLT)
        ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
        if ans == "q": sys.exit()
        if ans == "a": pmagplotlib.saveP(PLT, files)
    else: pmagplotlib.saveP(PLT, files)
    sys.exit()
# Only run the CLI when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
|
#
"""Sliver manager API.
This module exposes an XMLRPC interface that allows PlanetLab users to
create/destroy slivers with delegated instantiation, start and stop
slivers, make resource loans, and examine resource allocations. The
XMLRPC is provided on a localhost-only TCP port as well as via a Unix
domain socket that is accessible by ssh-ing into a delegate account
with the forward_api_calls shell.
"""
import xmlrpc.server
import socketserver
import errno
import os
import pwd
import socket
import struct
import threading
import xmlrpc.client
import slivermanager
try:
from PLC.Parameter import Parameter, Mixed
except:
def Parameter(a = None, b = None): pass
def Mixed(a = None, b = None, c = None): pass
import account
import logger
# TODO: These try/excepts are a hack to allow doc/DocBookLocal.py to
# import this file in order to extract the documentation from each
# exported function.
# A better approach will involve more extensive code splitting, I think.
try: import database
except: import logger as database
import ticket as ticket_module
import tools
deliver_ticket = None # set in slivermanager.start()
api_method_dict = {}
nargs_dict = {}
def export_to_api(nargs):
    """Decorator factory: register the decorated function as an API call
    that takes *nargs* arguments."""
    def export(method):
        api_name = method.__name__
        nargs_dict[api_name] = nargs
        api_method_dict[api_name] = method
        return method
    return export
def export_to_docbook(**kwargs):
    """Decorator factory: attach DocBook-extraction metadata to a function.

    The decorated function gains attributes (group, status, name, args,
    roles, accepts, returns) plus any extra keyword arguments passed in;
    kwargs override the defaults.  ``args`` is a callable returning
    (min_args, max_args, defaults) derived from the function signature.
    """
    keywords = {
        "group" : "NMAPI",
        "status" : "current",
        "name": None,
        "args": None,
        "roles": [],
        "accepts": [],
        "returns": [],
    }

    def export(method):
        def args():
            # Inspect method.  Remove self from the argument list.
            max_args = method.__code__.co_varnames[0:method.__code__.co_argcount]
            defaults = method.__defaults__ or ()
            min_args = max_args[0:len(max_args) - len(defaults)]
            padded = tuple(None for _ in min_args) + defaults
            return (min_args, max_args, padded)

        keywords['name'] = method.__name__
        keywords['args'] = args
        # Defaults first, then caller overrides from kwargs.
        for key, value in list(keywords.items()) + list(kwargs.items()):
            setattr(method, key, value)
        return method
    return export
# status
# roles,
# accepts,
# returns
@export_to_docbook(roles=['self'],
                   accepts=[],
                   returns=Parameter([], 'A list of supported functions'))
@export_to_api(0)
def Help():
    """Get a list of functions currently supported by the Node Manager API"""
    chunks = []
    for name in sorted(api_method_dict):
        method = api_method_dict[name]
        chunks.append('**** ' + method.__name__ + '\n' + method.__doc__ + '\n')
    return ''.join(chunks)
@export_to_docbook(roles=['self'],
                   accepts=[Parameter(str, 'A ticket returned from GetSliceTicket()')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Ticket(ticket):
    """The Node Manager periodically polls the PLC API for a list of all
    slices that are allowed to exist on the given node.  Before
    actions are performed on a delegated slice (such as creation),
    a controller slice must deliver a valid slice ticket to NM.

    This ticket is the value returned by PLC's GetSliceTicket() API call."""
    try:
        data = ticket_module.verify(ticket)
        # NOTE(review): data is subscripted here before the None check below;
        # if verify() can return None this raises first and is reported as a
        # generic Fault — confirm intended.
        name = data['slivers'][0]['name']
        if data != None:
            deliver_ticket(data)
            logger.log('api_calls: Ticket delivered for %s' % name)
            Create(database.db.get(name))
    except Exception as err:
        # Any failure is surfaced to the XMLRPC caller as Fault 102.
        raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
                   accepts=[Parameter(str, 'A ticket returned from GetSlivers()')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def AdminTicket(ticket):
    """Admin interface to create slivers based on ticket returned by GetSlivers()."""
    try:
        # The ticket is an XMLRPC-serialized payload; no signature check here
        # (admin interface), unlike Ticket() which calls ticket_module.verify.
        data, = xmlrpc.client.loads(ticket)[0]
        # NOTE(review): data is subscripted before the None check below —
        # same ordering issue as Ticket(); confirm intended.
        name = data['slivers'][0]['name']
        if data != None:
            deliver_ticket(data)
            logger.log('api_calls: Admin Ticket delivered for %s' % name)
            Create(database.db.get(name))
    except Exception as err:
        # Any failure is surfaced to the XMLRPC caller as Fault 102.
        raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
                   accepts=[],
                   returns={'sliver_name' : Parameter(int, 'the associated xid')})
@export_to_api(0)
def GetXIDs():
    """Return a dictionary mapping slice names to XIDs.

    Slice accounts are recognized by their login shell."""
    xids = {}
    for pwent in pwd.getpwall():
        if pwent[6] == slivermanager.sliver_password_shell:
            xids[pwent[0]] = pwent[2]
    return xids
@export_to_docbook(roles=['self'],
                   accepts=[],
                   returns={ 'sliver_name' : Parameter(str, 'the associated SSHKey')})
@export_to_api(0)
def GetSSHKeys():
    """Return a dictionary mapping slice names to their SSH keys."""
    return {rec['name']: rec['keys']
            for rec in database.db.values() if 'keys' in rec}
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Create(sliver_name):
    """Create a non-PLC-instantiated sliver"""
    rec = sliver_name
    # Only delegated slivers may be created through this call.
    if rec['instantiation'] != 'delegated':
        raise Exception("Only PLC can create non delegated slivers.")
    account.get(rec['name']).ensure_created(rec)
    logger.log("api_calls: Create %s" % rec['name'])
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Destroy(sliver_name):
    """Destroy a non-PLC-instantiated sliver"""
    rec = sliver_name
    # Only delegated slivers may be destroyed through this call.
    if rec['instantiation'] != 'delegated':
        raise Exception("Only PLC can destroy non delegated slivers.")
    account.get(rec['name']).ensure_destroyed()
    logger.log("api_calls: Destroy %s" % rec['name'])
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Start(sliver_name):
    """Configure and start sliver."""
    rec = sliver_name
    slice_name = rec['name']
    account.get(slice_name).start(rec)
    logger.log("api_calls: Start %s" % slice_name)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Stop(sliver_name):
    """Kill all processes belonging to the specified sliver"""
    slice_name = sliver_name['name']
    account.get(slice_name).stop()
    logger.log("api_calls: Stop %s" % slice_name)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def ReCreate(sliver_name):
    """Stop, Destroy, Create, Start sliver in order to reinstall it."""
    rec = sliver_name
    slice_name = rec['name']
    account.get(slice_name).stop()
    account.get(slice_name).ensure_created(rec)
    account.get(slice_name).start(rec)
    logger.log("api_calls: ReCreate %s" % slice_name)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(dict, "A resource specification"))
@export_to_api(1)
def GetEffectiveRSpec(sliver_name):
    """Return the RSpec allocated to the specified sliver, including loans"""
    effective = sliver_name.get('_rspec', {})
    return effective.copy()
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns={"resource name" : Parameter(int, "amount")})
@export_to_api(1)
def GetRSpec(sliver_name):
    """Return the RSpec allocated to the specified sliver, excluding loans"""
    allocated = sliver_name.get('rspec', {})
    return allocated.copy()
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=[Mixed(Parameter(str, 'recipient slice name'),
                                  Parameter(str, 'resource name'),
                                  Parameter(int, 'resource amount'))])
@export_to_api(1)
def GetLoans(sliver_name):
    """Return the list of loans made by the specified sliver"""
    # Return a shallow copy so callers cannot mutate the record in place.
    return list(sliver_name.get('_loans', []))
def validate_loans(loans):
    """Check that *loans* is a list of valid loan specifications.

    A loan is a (recipient slice name, resource name, amount) triple: a
    list or tuple of length 3 whose first two entries are strings, whose
    resource name is one of database.LOANABLE_RESOURCES, and whose
    amount is a non-negative int.  An empty list is valid.
    """
    def validate_loan(loan):
        # type()== (rather than isinstance) deliberately rejects
        # subclasses, e.g. a bool amount fails type(loan[2]) == int.
        return (type(loan) == list or type(loan) == tuple) and len(loan) == 3 \
            and type(loan[0]) == str and type(loan[1]) == str \
            and loan[1] in database.LOANABLE_RESOURCES \
            and type(loan[2]) == int and loan[2] >= 0
    # Bug fix: the original comprehension iterated `loan` but called
    # validate_loan(load) — an undefined name — so any non-empty list
    # raised NameError instead of being validated.
    return type(loans) == list and all(validate_loan(loan) for loan in loans)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[ Parameter(str, 'A sliver/slice name.'),
                             [Mixed(Parameter(str, 'recipient slice name'),
                                    Parameter(str, 'resource name'),
                                    Parameter(int, 'resource amount'))], ],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(2)
def SetLoans(sliver_name, loans):
    """Overwrite the list of loans made by the specified sliver.

    Also, note that SetLoans will not throw an error if more capacity than the
    RSpec is handed out, but it will silently discard those loans that would
    put it over capacity.  This behavior may be replaced with error semantics
    in the future.  As well, there is currently no asynchronous notification
    of loss of resources."""
    rec = sliver_name
    if not validate_loans(loans):
        raise xmlrpc.client.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification')
    rec['_loans'] = loans
    # Persist the updated record immediately.
    database.db.sync()
@export_to_docbook(roles=['nm-controller', 'self'],
returns=Parameter(dict, 'Record dictionary'))
@export_to_api(0)
def GetRecord(sliver_name):
"""Return sliver record"""
rec = sliver_name
return rec
|
|
"""Base class for sparse matrices"""
__all__ = ['spmatrix', 'isspmatrix', 'issparse',
'SparseWarning','SparseEfficiencyWarning']
from warnings import warn
import numpy as np
from sputils import isdense, isscalarlike, isintlike
# Warning hierarchy for the sparse-matrix package; the two specific
# warnings derive from SparseWarning so callers can silence all of them
# with a single warnings filter.
class SparseWarning(Warning): pass
class SparseFormatWarning(SparseWarning): pass
class SparseEfficiencyWarning(SparseWarning): pass
# The formats that we might potentially understand.
_formats = {'csc':[0, "Compressed Sparse Column"],
'csr':[1, "Compressed Sparse Row"],
'dok':[2, "Dictionary Of Keys"],
'lil':[3, "LInked List"],
'dod':[4, "Dictionary of Dictionaries"],
'sss':[5, "Symmetric Sparse Skyline"],
'coo':[6, "COOrdinate"],
'lba':[7, "Linpack BAnded"],
'egd':[8, "Ellpack-itpack Generalized Diagonal"],
'dia':[9, "DIAgonal"],
'bsr':[10, "Block Sparse Row"],
'msr':[11, "Modified compressed Sparse Row"],
'bsc':[12, "Block Sparse Column"],
'msc':[13, "Modified compressed Sparse Column"],
'ssk':[14, "Symmetric SKyline"],
'nsk':[15, "Nonsymmetric SKyline"],
'jad':[16, "JAgged Diagonal"],
'uss':[17, "Unsymmetric Sparse Skyline"],
'vbr':[18, "Variable Block Row"],
'und':[19, "Undefined"]
}
MAXPRINT = 50
class spmatrix(object):
    """ This class provides a base class for all sparse matrices.  It
    cannot be instantiated.  Most of the work is provided by subclasses.
    """

    # Give sparse operands priority over ndarrays in mixed binary ops so
    # NumPy defers to the methods defined here.
    __array_priority__ = 10.1
    # Sparse matrices are always two-dimensional.
    ndim = 2

    def __init__(self, maxprint=MAXPRINT):
        # Derive the three-letter format code ('csr', 'coo', ...) from the
        # subclass name; 'spm' means spmatrix itself was instantiated.
        self.format = self.__class__.__name__[:3]
        self._shape = None
        if self.format == 'spm':
            raise ValueError("This class is not intended"
                             " to be instantiated directly.")
        self.maxprint = maxprint

    def set_shape(self, shape):
        # Setter backing the `shape` property; reshapes in place if the
        # matrix already has a different shape.
        shape = tuple(shape)

        if len(shape) != 2:
            raise ValueError("Only two-dimensional sparse arrays "
                             "are supported.")
        try:
            shape = int(shape[0]), int(shape[1])  # floats, other weirdness
        except:
            raise TypeError('invalid shape')

        if not (shape[0] >= 1 and shape[1] >= 1):
            raise ValueError('invalid shape')

        if (self._shape != shape) and (self._shape is not None):
            try:
                self = self.reshape(shape)
            except NotImplementedError:
                raise NotImplementedError("Reshaping not implemented for %s." %
                                          self.__class__.__name__)
        self._shape = shape

    def get_shape(self):
        return self._shape

    shape = property(fget=get_shape, fset=set_shape)

    def reshape(self, shape):
        # Subclasses that support reshaping override this.
        raise NotImplementedError

    def astype(self, t):
        # Convert via CSR, then back to the original format.
        return self.tocsr().astype(t).asformat(self.format)

    def asfptype(self):
        """Upcast matrix to a floating point format (if necessary)"""
        fp_types = ['f', 'd', 'F', 'D']

        if self.dtype.char in fp_types:
            return self
        else:
            # Pick the smallest float type that can hold the current dtype.
            for fp_type in fp_types:
                if self.dtype <= np.dtype(fp_type):
                    return self.astype(fp_type)

            raise TypeError('cannot upcast [%s] to a floating '
                            'point format' % self.dtype.name)

    def __iter__(self):
        # Yield one (1 x n) row at a time.
        # NOTE(review): xrange is Python-2-only, consistent with the rest
        # of this file.
        for r in xrange(self.shape[0]):
            yield self[r, :]

    def getmaxprint(self):
        # Fall back to the module default if the instance has no maxprint.
        try:
            maxprint = self.maxprint
        except AttributeError:
            maxprint = MAXPRINT
        return maxprint

    #def typecode(self):
    #    try:
    #        typ = self.dtype.char
    #    except AttributeError:
    #        typ = None
    #    return typ

    def getnnz(self):
        # Number of stored entries; subclasses provide the nnz attribute.
        try:
            return self.nnz
        except AttributeError:
            raise AttributeError("nnz not defined")

    def getformat(self):
        # 'und' (undefined) when no format attribute is present.
        try:
            format = self.format
        except AttributeError:
            format = 'und'
        return format

    def __repr__(self):
        nnz = self.getnnz()
        format = self.getformat()
        return "<%dx%d sparse matrix of type '%s'\n" \
               "\twith %d stored elements in %s format>" % \
               (self.shape + (self.dtype.type, nnz, _formats[format][1]))

    def __str__(self):
        maxprint = self.getmaxprint()

        A = self.tocoo()
        nnz = self.getnnz()

        # helper function, outputs "(i,j)  v"
        def tostr(row, col, data):
            triples = zip(zip(row, col), data)
            return '\n'.join([(' %s\t%s' % t) for t in triples])

        if nnz > maxprint:
            # Print the first and last half entries with an ellipsis row.
            half = maxprint // 2
            out = tostr(A.row[:half], A.col[:half], A.data[:half])
            out += "\n :\t:\n"
            half = maxprint - maxprint//2
            out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
        else:
            out = tostr(A.row, A.col, A.data)

        return out

    def __nonzero__(self):  # Simple -- other ideas?
        return self.getnnz() > 0

    # What should len(sparse) return? For consistency with dense matrices,
    # perhaps it should be the number of rows?  But for some uses the number of
    # non-zeros is more important.  For now, raise an exception!
    def __len__(self):
        # return self.getnnz()
        raise TypeError("sparse matrix length is ambiguous; use getnnz()"
                        " or shape[0]")

    def asformat(self, format):
        """Return this matrix in a given sparse format

        Parameters
        ----------
        format : {string, None}
            desired sparse matrix format
                - None for no format conversion
                - "csr" for csr_matrix format
                - "csc" for csc_matrix format
                - "lil" for lil_matrix format
                - "dok" for dok_matrix format and so on
        """
        if format is None or format == self.format:
            return self
        else:
            # Dispatch to the matching to<format>() conversion method.
            return getattr(self, 'to' + format)()

    ###################################################################
    #  NOTE: All arithmetic operations use csr_matrix by default.
    # Therefore a new sparse matrix format just needs to define a
    # .tocsr() method to provide arithmetic support.  Any of these
    # methods can be overridden for efficiency.
    ####################################################################

    def multiply(self, other):
        """Point-wise multiplication by another matrix
        """
        return self.tocsr().multiply(other)

    def dot(self, other):
        return self * other

    def __abs__(self):
        return abs(self.tocsr())

    def __add__(self, other):   # self + other
        return self.tocsr().__add__(other)

    def __radd__(self, other):  # other + self
        return self.tocsr().__radd__(other)

    def __sub__(self, other):   # self - other
        # note: this can't be replaced by self + (-other) for unsigned types
        return self.tocsr().__sub__(other)

    def __rsub__(self, other):  # other - self
        return self.tocsr().__rsub__(other)

    def __mul__(self, other):
        """interpret other and call one of the following

        self._mul_scalar()
        self._mul_vector()
        self._mul_multivector()
        self._mul_sparse_matrix()
        """

        M, N = self.shape

        if isscalarlike(other):
            # scalar value
            return self._mul_scalar(other)

        if issparse(other):
            if self.shape[1] != other.shape[0]:
                raise ValueError('dimension mismatch')
            return self._mul_sparse_matrix(other)

        try:
            other.shape
        except AttributeError:
            # If it's a list or whatever, treat it like a matrix
            other = np.asanyarray(other)

        other = np.asanyarray(other)

        if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
            # dense row or column vector
            if other.shape != (N,) and other.shape != (N, 1):
                raise ValueError('dimension mismatch')

            result = self._mul_vector(np.ravel(other))

            if isinstance(other, np.matrix):
                result = np.asmatrix(result)

            if other.ndim == 2 and other.shape[1] == 1:
                # If 'other' was an (nx1) column vector, reshape the result
                result = result.reshape(-1, 1)

            return result

        elif other.ndim == 2:
            ##
            # dense 2D array or matrix ("multivector")

            if other.shape[0] != self.shape[1]:
                raise ValueError('dimension mismatch')

            result = self._mul_multivector(np.asarray(other))

            if isinstance(other, np.matrix):
                result = np.asmatrix(result)

            return result
        else:
            raise ValueError('could not interpret dimensions')

    # by default, use CSR for __mul__ handlers
    def _mul_scalar(self, other):
        return self.tocsr()._mul_scalar(other)

    def _mul_vector(self, other):
        return self.tocsr()._mul_vector(other)

    def _mul_multivector(self, other):
        return self.tocsr()._mul_multivector(other)

    def _mul_sparse_matrix(self, other):
        return self.tocsr()._mul_sparse_matrix(other)

    def __rmul__(self, other):  # other * self
        if isscalarlike(other):
            return self.__mul__(other)
        else:
            # Don't use asarray unless we have to
            try:
                tr = other.transpose()
            except AttributeError:
                tr = np.asarray(other).transpose()
            # (A^T B^T)^T == B A, computed with the left-multiply machinery.
            return (self.transpose() * tr).transpose()

    ####################
    # Other Arithmetic #
    ####################

    def __truediv__(self, other):
        if isscalarlike(other):
            return self * (1./other)
        else:
            return self.tocsr().__truediv__(other)

    def __div__(self, other):
        # Always do true division
        return self.__truediv__(other)

    def __neg__(self):
        return -self.tocsr()

    def __iadd__(self, other):
        raise NotImplementedError

    def __isub__(self, other):
        raise NotImplementedError

    def __imul__(self, other):
        raise NotImplementedError

    def __idiv__(self, other):
        return self.__itruediv__(other)

    def __itruediv__(self, other):
        raise NotImplementedError

    def __pow__(self, other):
        if self.shape[0] != self.shape[1]:
            raise TypeError('matrix is not square')

        if isintlike(other):
            other = int(other)
            if other < 0:
                raise ValueError('exponent must be >= 0')

            if other == 0:
                from construct import identity
                return identity( self.shape[0], dtype=self.dtype )
            elif other == 1:
                return self.copy()
            else:
                # Repeated multiplication: O(other) matrix products.
                result = self
                for i in range(1, other):
                    result = result*self
                return result
        elif isscalarlike(other):
            raise ValueError('exponent must be an integer')
        else:
            raise NotImplementedError

    def __getattr__(self, attr):
        # Emulate the dense np.matrix convenience attributes (A, T, H, ...).
        if attr == 'A':
            return self.toarray()
        elif attr == 'T':
            return self.transpose()
        elif attr == 'H':
            return self.getH()
        elif attr == 'real':
            return self._real()
        elif attr == 'imag':
            return self._imag()
        elif attr == 'size':
            return self.getnnz()
        else:
            raise AttributeError(attr + " not found")

    def transpose(self):
        return self.tocsr().transpose()

    def conj(self):
        return self.tocsr().conj()

    def conjugate(self):
        return self.conj()

    # Renamed conjtranspose() -> getH() for compatibility with dense matrices
    def getH(self):
        return self.transpose().conj()

    def _real(self):
        return self.tocsr()._real()

    def _imag(self):
        return self.tocsr()._imag()

    def nonzero(self):
        """nonzero indices

        Returns a tuple of arrays (row,col) containing the indices
        of the non-zero elements of the matrix.

        Examples
        --------
        >>> from scipy.sparse import csr_matrix
        >>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
        >>> A.nonzero()
        (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
        """
        # convert to COOrdinate format
        A = self.tocoo()
        # Stored entries may include explicit zeros; mask them out.
        nz_mask = A.data != 0
        return (A.row[nz_mask], A.col[nz_mask])

    def getcol(self, j):
        """Returns a copy of column j of the matrix, as an (m x 1) sparse
        matrix (column vector).
        """
        # Spmatrix subclasses should override this method for efficiency.
        # Post-multiply by a (n x 1) column vector 'a' containing all zeros
        # except for a_j = 1
        from csc import csc_matrix
        n = self.shape[1]
        if j < 0:
            j += n
        if j < 0 or j >= n:
            raise IndexError("index out of bounds")
        col_selector = csc_matrix(([1], [[j], [0]]), shape=(n, 1), dtype=self.dtype)
        return self * col_selector

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n) sparse
        matrix (row vector).
        """
        # Spmatrix subclasses should override this method for efficiency.
        # Pre-multiply by a (1 x m) row vector 'a' containing all zeros
        # except for a_i = 1
        from csr import csr_matrix
        m = self.shape[0]
        if i < 0:
            i += m
        if i < 0 or i >= m:
            raise IndexError("index out of bounds")
        row_selector = csr_matrix(([1], [[0], [i]]), shape=(1, m), dtype=self.dtype)
        return row_selector * self

    #def __array__(self):
    #    return self.toarray()

    def todense(self):
        return np.asmatrix(self.toarray())

    def toarray(self):
        return self.tocoo().toarray()

    def todok(self):
        return self.tocoo().todok()

    def tocoo(self):
        return self.tocsr().tocoo()

    def tolil(self):
        return self.tocsr().tolil()

    def todia(self):
        return self.tocoo().todia()

    def tobsr(self, blocksize=None):
        return self.tocsr().tobsr(blocksize=blocksize)

    def copy(self):
        return self.__class__(self, copy=True)

    def sum(self, axis=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # We use multiplication by an array of ones to achieve this.
        # For some sparse matrix formats more efficient methods are
        # possible -- these should override this function.
        m, n = self.shape
        if axis == 0:
            # sum over columns
            return np.asmatrix(np.ones((1, m), dtype=self.dtype)) * self
        elif axis == 1:
            # sum over rows
            return self * np.asmatrix(np.ones((n, 1), dtype=self.dtype))
        elif axis is None:
            # sum over rows and columns
            return ( self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) ).sum()
        else:
            raise ValueError("axis out of bounds")

    def mean(self, axis=None):
        """Average the matrix over the given axis.  If the axis is None,
        average over both rows and columns, returning a scalar.
        """
        if axis == 0:
            mean = self.sum(0)
            mean *= 1.0 / self.shape[0]
            return mean
        elif axis == 1:
            mean = self.sum(1)
            mean *= 1.0 / self.shape[1]
            return mean
        elif axis is None:
            return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
        else:
            raise ValueError("axis out of bounds")

    def diagonal(self):
        """Returns the main diagonal of the matrix
        """
        # TODO support k != 0
        return self.tocsr().diagonal()

    def setdiag(self, values, k=0):
        """Fills the diagonal elements {a_ii} with the values from the
        given sequence.  If k != 0, fills the off-diagonal elements
        {a_{i,i+k}} instead.

        values may have any length.  If the diagonal is longer than values,
        then the remaining diagonal entries will not be set.  If values is
        longer than the diagonal, then the remaining values are ignored.
        """
        M, N = self.shape
        if (k > 0 and k >= N) or (k < 0 and -k >= M):
            raise ValueError("k exceedes matrix dimensions")
        if k < 0:
            max_index = min(M+k, N, len(values))
            for i, v in enumerate(values[:max_index]):
                self[i - k, i] = v
        else:
            max_index = min(M, N-k, len(values))
            for i, v in enumerate(values[:max_index]):
                self[i, i + k] = v
from sputils import _isinstance
def isspmatrix(x):
    """Return True if x is an instance of a sparse matrix (spmatrix subclass)."""
    return _isinstance(x, spmatrix)

# Public alias; both names are exported via __all__.
issparse = isspmatrix
|
|
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# from datasets.imdb import imdb
# import datasets.ds_utils as ds_utils
# from fast_rcnn.config import cfg
import os.path as osp
import sys
import os
import numpy as np
import scipy.sparse
import scipy.io as sio
import cPickle
import json
import uuid
# COCO API
# TODO: add this part into this project
from ..pycocotools.coco import COCO
from ..pycocotools.cocoeval import COCOeval
from ..pycocotools import mask as COCOmask
from .imdb import imdb
import ds_utils
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
def _filter_crowd_proposals(roidb, crowd_thresh):
"""
Finds proposals that are inside crowd regions and marks them with
overlap = -1 (for all gt rois), which means they will be excluded from
training.
"""
for ix, entry in enumerate(roidb):
overlaps = entry['gt_overlaps'].toarray()
crowd_inds = np.where(overlaps.max(axis=1) == -1)[0]
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
continue
iscrowd = [int(True) for _ in xrange(len(crowd_inds))]
crowd_boxes = ds_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :])
non_gt_boxes = ds_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd)
bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
overlaps[non_gt_inds[bad_inds], :] = -1
roidb[ix]['gt_overlaps'] = scipy.sparse.csr_matrix(overlaps)
return roidb
class coco(imdb):
def __init__(self, image_set, year):
    # Dataset name follows the "coco_<year>_<split>" convention.
    imdb.__init__(self, 'coco_' + year + '_' + image_set)
    # COCO specific config options
    self.config = {'top_k' : 2000,
                   'use_salt' : True,
                   'cleanup' : True,
                   'crowd_thresh' : 0.7,
                   'min_size' : 2}
    # name, paths
    self._year = year
    self._image_set = image_set
    self._data_path = osp.join(cfg.DATA_DIR, 'coco')
    # load COCO API, classes, class <-> id mappings
    self._COCO = COCO(self._get_ann_file())
    cats = self._COCO.loadCats(self._COCO.getCatIds())
    # Class 0 is reserved for the background.
    self._classes = tuple(['__background__'] + [c['name'] for c in cats])
    self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
    self._class_to_coco_cat_id = dict(zip([c['name'] for c in cats],
                                          self._COCO.getCatIds()))
    self._image_index = self._load_image_set_index()
    # Default to roidb handler
    self.set_proposal_method('selective_search')
    self.competition_mode(False)
    # Some image sets are "views" (i.e. subsets) into others.
    # For example, minival2014 is a random 5000 image subset of val2014.
    # This mapping tells us where the view's images and proposals come from.
    self._view_map = {
        'minival2014' : 'val2014',          # 5k val2014 subset
        'valminusminival2014' : 'val2014',  # val2014 \setminus minival2014
    }
    coco_name = image_set + year  # e.g., "val2014"
    # NOTE(review): dict.has_key and xrange are Python-2-only; this file
    # appears to target Python 2 -- confirm before porting.
    self._data_name = (self._view_map[coco_name]
                       if self._view_map.has_key(coco_name)
                       else coco_name)
    # Dataset splits that have ground-truth annotations (test splits
    # do not have gt annotations)
    self._gt_splits = ('train', 'val', 'minival')
def _get_ann_file(self):
prefix = 'instances' if self._image_set.find('test') == -1 \
else 'image_info'
return osp.join(self._data_path, 'annotations',
prefix + '_' + self._image_set + self._year + '.json')
def _load_image_set_index(self):
"""
Load image ids.
"""
image_ids = self._COCO.getImgIds()
return image_ids
def _get_widths(self):
anns = self._COCO.loadImgs(self._image_index)
widths = [ann['width'] for ann in anns]
return widths
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
file_name = ('COCO_' + self._data_name + '_' +
str(index).zfill(12) + '.jpg')
image_path = osp.join(self._data_path, 'images',
self._data_name, file_name)
assert osp.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def selective_search_roidb(self):
return self._roidb_from_proposals('selective_search')
def edge_boxes_roidb(self):
return self._roidb_from_proposals('edge_boxes_AR')
def mcg_roidb(self):
return self._roidb_from_proposals('MCG')
def _roidb_from_proposals(self, method):
"""
Creates a roidb from pre-computed proposals of a particular methods.
"""
top_k = self.config['top_k']
cache_file = osp.join(self.cache_path, self.name +
'_{:s}_top{:d}'.format(method, top_k) +
'_roidb.pkl')
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
cache_file)
return roidb
if self._image_set in self._gt_splits:
gt_roidb = self.gt_roidb()
method_roidb = self._load_proposals(method, gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, method_roidb)
# Make sure we don't use proposals that are contained in crowds
roidb = _filter_crowd_proposals(roidb, self.config['crowd_thresh'])
else:
roidb = self._load_proposals(method, None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote {:s} roidb to {:s}'.format(method, cache_file)
return roidb
def _load_proposals(self, method, gt_roidb):
"""
Load pre-computed proposals in the format provided by Jan Hosang:
http://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal-
computing/research/object-recognition-and-scene-understanding/how-
good-are-detection-proposals-really/
For MCG, use boxes from http://www.eecs.berkeley.edu/Research/Projects/
CS/vision/grouping/mcg/ and convert the file layout using
lib/datasets/tools/mcg_munge.py.
"""
box_list = []
top_k = self.config['top_k']
valid_methods = [
'MCG',
'selective_search',
'edge_boxes_AR',
'edge_boxes_70']
assert method in valid_methods
print 'Loading {} boxes'.format(method)
for i, index in enumerate(self._image_index):
if i % 1000 == 0:
print '{:d} / {:d}'.format(i + 1, len(self._image_index))
box_file = osp.join(
cfg.DATA_DIR, 'coco_proposals', method, 'mat',
self._get_box_file(index))
raw_data = sio.loadmat(box_file)['boxes']
boxes = np.maximum(raw_data - 1, 0).astype(np.uint16)
if method == 'MCG':
# Boxes from the MCG website are in (y1, x1, y2, x2) order
boxes = boxes[:, (1, 0, 3, 2)]
# Remove duplicate boxes and very small boxes and then take top k
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
boxes = boxes[:top_k, :]
box_list.append(boxes)
# Sanity check
im_ann = self._COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
ds_utils.validate_boxes(boxes, width=width, height=height)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_coco_annotation(index)
for index in self._image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_coco_annotation(self, index):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
im_ann = self._COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
objs = self._COCO.loadAnns(annIds)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
overlaps[ix, :] = -1.0
else:
overlaps[ix, cls] = 1.0
ds_utils.validate_boxes(boxes, width=width, height=height)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False,
'seg_areas' : seg_areas}
def _get_box_file(self, index):
# first 14 chars / first 22 chars / all chars + .mat
# COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat
file_name = ('COCO_' + self._data_name +
'_' + str(index).zfill(12) + '.mat')
return osp.join(file_name[:14], file_name[:22], file_name)
def _print_detection_eval_metrics(self, coco_eval):
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = \
coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
print ('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
'~~~~').format(IoU_lo_thresh, IoU_hi_thresh)
print '{:.1f}'.format(100 * ap_default)
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
print '{:.1f}'.format(100 * ap)
print '~~~~ Summary metrics ~~~~'
coco_eval.summarize()
def _do_detection_eval(self, res_file, output_dir):
ann_type = 'bbox'
coco_dt = self._COCO.loadRes(res_file)
coco_eval = COCOeval(self._COCO, coco_dt)
coco_eval.params.useSegm = (ann_type == 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
self._print_detection_eval_metrics(coco_eval)
eval_file = osp.join(output_dir, 'detection_results.pkl')
with open(eval_file, 'wb') as fid:
cPickle.dump(coco_eval, fid, cPickle.HIGHEST_PROTOCOL)
print 'Wrote COCO eval results to: {}'.format(eval_file)
def _coco_results_one_category(self, boxes, cat_id):
results = []
for im_ind, index in enumerate(self.image_index):
dets = boxes[im_ind].astype(np.float)
if dets == []:
continue
scores = dets[:, -1]
xs = dets[:, 0]
ys = dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
results.extend(
[{'image_id' : index,
'category_id' : cat_id,
'bbox' : [xs[k], ys[k], ws[k], hs[k]],
'score' : scores[k]} for k in xrange(dets.shape[0])])
return results
def _write_coco_results_file(self, all_boxes, res_file):
# [{"image_id": 42,
# "category_id": 18,
# "bbox": [258.15,41.29,348.26,243.78],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
self.num_classes - 1)
coco_cat_id = self._class_to_coco_cat_id[cls]
results.extend(self._coco_results_one_category(all_boxes[cls_ind],
coco_cat_id))
print 'Writing results json to {}'.format(res_file)
with open(res_file, 'w') as fid:
json.dump(results, fid)
def evaluate_detections(self, all_boxes, output_dir):
res_file = osp.join(output_dir, ('detections_' +
self._image_set +
self._year +
'_results'))
if self.config['use_salt']:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
self._write_coco_results_file(all_boxes, res_file)
# Only do evaluation on non-test sets
if self._image_set.find('test') == -1:
self._do_detection_eval(res_file, output_dir)
# Optionally cleanup results json file
if self.config['cleanup']:
os.remove(res_file)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for chromium.ycm_extra_conf.
These tests should be getting picked up by the PRESUBMIT.py in /tools/vim.
Currently the tests only run on Linux and require 'ninja' to be available on
PATH. Due to these requirements, the tests should only be run on upload.
"""
import imp
import os
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import unittest
def CreateFile(path,
               copy_from = None,
               format_with = None,
               make_executable = False):
  """Creates (or overwrites) the file at |path|.

  Args:
    path: (String) Absolute path for file to be created.
    copy_from: (String or None) Absolute path to source file. If valid, the
               contents of this file will be written to |path|.
    format_with: (Dictionary or None) Only valid if |copy_from| is also valid.
                 The contents of the file at |copy_from| will be passed through
                 string.Formatter.vformat() with this parameter as the
                 dictionary.
    make_executable: (Boolean) If true, |file| will be made executable.
  """
  if not os.path.isabs(path):
    raise Exception(
        'Argument |path| needs to be an absolute path. Got: "{}"'.format(path))
  with open(path, 'w') as destination:
    if copy_from:
      with open(copy_from, 'r') as source:
        text = source.read()
      if format_with:
        # Substitute {placeholders} from the source template.
        text = string.Formatter().vformat(text, None, format_with)
      destination.write(text)
  if make_executable:
    mode = os.stat(path).st_mode
    os.chmod(path, mode | stat.S_IXUSR)
@unittest.skipIf(sys.platform.startswith('linux'),
                 'Tests are only valid on Linux.')
class Chromium_ycmExtraConfTest_NotOnLinux(unittest.TestCase):
  """Canary suite that deliberately fails on non-Linux platforms.

  The real ycm_extra_conf tests only run on Linux, so an upload from any
  other platform would silently skip them; this failure makes that visible.
  """

  def testAlwaysFailsIfNotRunningOnLinux(self):
    self.fail('Changes to chromium.ycm_extra_conf.py currently need to be '
              'uploaded from Linux since the tests only run on Linux.')
@unittest.skipUnless(sys.platform.startswith('linux'),
                     'Tests are only valid on Linux.')
class Chromium_ycmExtraConfTest(unittest.TestCase):
  """Exercises chromium.ycm_extra_conf against a fake Chromium tree.

  Requires 'ninja' on PATH; the fake build.ninja tags each 'cxx' rule with
  the source file it came from so tests can verify which build edge was
  consulted for a given file.
  """

  def SetUpFakeChromeTreeBelowPath(self):
    """Create fake Chromium source tree under self.test_root.

    The fake source tree has the following contents:
    <self.test_root>
      |  .gclient
      |
      +-- src
      |    |  DEPS
      |    |  three.cc
      |    |
      |    +-- .git
      |
      +-- out
           |
           +-- Debug
                 build.ninja
    """
    self.chrome_root = os.path.abspath(os.path.normpath(
        os.path.join(self.test_root, 'src')))
    self.out_dir = os.path.join(self.chrome_root, 'out', 'Debug')
    os.makedirs(self.chrome_root)
    os.makedirs(os.path.join(self.chrome_root, '.git'))
    os.makedirs(self.out_dir)
    CreateFile(os.path.join(self.test_root, '.gclient'))
    CreateFile(os.path.join(self.chrome_root, 'DEPS'))
    CreateFile(os.path.join(self.chrome_root, 'three.cc'))
    # Fake ninja build file. Applications of 'cxx' rule are tagged by which
    # source file was used as input so that the test can verify that the correct
    # build dependency was used.
    CreateFile(os.path.join(self.out_dir, 'build.ninja'),
               copy_from=os.path.join(self.test_data_path,
                                      'fake_build_ninja.txt'))

  def NormalizeString(self, string):
    """Replace the temp out/src dirs with [OUT]/[SRC] placeholders.

    NOTE(review): parameter |string| shadows the stdlib 'string' module
    imported at the top of this file; harmless here since only str methods
    are used.
    """
    return string.replace(self.out_dir, '[OUT]').\
        replace(self.chrome_root, '[SRC]')

  def NormalizeStringsInList(self, list_of_strings):
    """Apply NormalizeString to every element of |list_of_strings|."""
    return [self.NormalizeString(s) for s in list_of_strings]

  def setUp(self):
    # Locate the real Chromium checkout this test file lives in so that the
    # production chromium.ycm_extra_conf.py and the test data can be loaded.
    self.actual_chrome_root = os.path.normpath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../..'))
    sys.path.append(os.path.join(self.actual_chrome_root, 'tools', 'vim'))
    self.test_data_path = os.path.join(self.actual_chrome_root, 'tools', 'vim',
                                       'tests', 'data')
    # Load the module under test by path (it has a dotted filename, so a
    # regular import statement cannot be used).
    self.ycm_extra_conf = imp.load_source('ycm_extra_conf',
                                          'chromium.ycm_extra_conf.py')
    self.test_root = tempfile.mkdtemp()
    self.SetUpFakeChromeTreeBelowPath()

  def tearDown(self):
    # Remove the fake tree created in setUp.
    if self.test_root:
      shutil.rmtree(self.test_root)

  def testNinjaIsAvailable(self):
    """Sanity check: ninja must be on PATH for the remaining tests."""
    p = subprocess.Popen(['ninja', '--version'], stdout=subprocess.PIPE)
    _, _ = p.communicate()
    self.assertFalse(p.returncode)

  def testFindChromeSrc(self):
    """FindChromeSrcFromFilename resolves both nested and top-level files."""
    chrome_source = self.ycm_extra_conf.FindChromeSrcFromFilename(
        os.path.join(self.chrome_root, 'chrome', 'one.cpp'))
    self.assertEquals(chrome_source, self.chrome_root)
    chrome_source = self.ycm_extra_conf.FindChromeSrcFromFilename(
        os.path.join(self.chrome_root, 'one.cpp'))
    self.assertEquals(chrome_source, self.chrome_root)

  def testCommandLineForKnownCppFile(self):
    """A file present in build.ninja yields its exact compile command."""
    command_line = self.ycm_extra_conf.GetClangCommandLineFromNinjaForSource(
        self.out_dir, os.path.join(self.chrome_root, 'one.cpp'))
    self.assertEquals(
        command_line, ('../../fake-clang++ -Ia -isysroot /mac.sdk -Itag-one '
                       '../../one.cpp -o obj/one.o'))

  def testCommandLineForUnknownCppFile(self):
    """A file absent from build.ninja yields no compile command."""
    command_line = self.ycm_extra_conf.GetClangCommandLineFromNinjaForSource(
        self.out_dir, os.path.join(self.chrome_root, 'unknown.cpp'))
    self.assertEquals(command_line, None)

  def testGetClangOptionsForKnownCppFile(self):
    """Clang options are extracted and made absolute for a known file."""
    clang_options = \
        self.ycm_extra_conf.GetClangOptionsFromNinjaForFilename(
            self.chrome_root, os.path.join(self.chrome_root, 'one.cpp'))
    self.assertEquals(self.NormalizeStringsInList(clang_options), [
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-one'
    ])

  def testGetFlagsForFileForKnownCppFile(self):
    """FlagsForFile returns cached, fully-populated flags for a known file."""
    result = self.ycm_extra_conf.FlagsForFile(
        os.path.join(self.chrome_root, 'one.cpp'))
    self.assertTrue(result)
    self.assertTrue('do_cache' in result)
    self.assertTrue(result['do_cache'])
    self.assertTrue('flags' in result)
    self.assertEquals(self.NormalizeStringsInList(result['flags']), [
        '-DUSE_CLANG_COMPLETER',
        '-std=c++11',
        '-x', 'c++',
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-one'
    ])

  def testGetFlagsForFileForUnknownCppFile(self):
    """Unknown .cpp files fall back to the default build target's flags."""
    result = self.ycm_extra_conf.FlagsForFile(
        os.path.join(self.chrome_root, 'nonexistent.cpp'))
    self.assertTrue(result)
    self.assertTrue('do_cache' in result)
    self.assertTrue(result['do_cache'])
    self.assertTrue('flags' in result)
    self.assertEquals(self.NormalizeStringsInList(result['flags']), [
        '-DUSE_CLANG_COMPLETER',
        '-std=c++11',
        '-x', 'c++',
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-default'
    ])

  def testGetFlagsForFileForUnknownHeaderFile(self):
    """Unknown headers also fall back to the default target's flags."""
    result = self.ycm_extra_conf.FlagsForFile(
        os.path.join(self.chrome_root, 'nonexistent.h'))
    self.assertTrue(result)
    self.assertTrue('do_cache' in result)
    self.assertTrue(result['do_cache'])
    self.assertTrue('flags' in result)
    self.assertEquals(self.NormalizeStringsInList(result['flags']), [
        '-DUSE_CLANG_COMPLETER',
        '-std=c++11',
        '-x', 'c++',
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-default'
    ])

  def testGetFlagsForFileForKnownHeaderFileWithAssociatedCppFile(self):
    """three.h picks up the flags of its sibling three.cc (tag-three)."""
    result = self.ycm_extra_conf.FlagsForFile(
        os.path.join(self.chrome_root, 'three.h'))
    self.assertTrue(result)
    self.assertTrue('do_cache' in result)
    self.assertTrue(result['do_cache'])
    self.assertTrue('flags' in result)
    self.assertEquals(self.NormalizeStringsInList(result['flags']), [
        '-DUSE_CLANG_COMPLETER',
        '-std=c++11',
        '-x', 'c++',
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-three'
    ])

  def testSourceFileWithNonClangOutputs(self):
    """Compiler flags are found even when a non-compiler output is listed
    first among a source file's outputs."""
    # Verify assumption that four.cc has non-compiler-output listed as the first
    # output.
    p = subprocess.Popen(['ninja', '-C', self.out_dir, '-t',
                          'query', '../../four.cc'],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, _ = p.communicate()
    self.assertFalse(p.returncode)
    self.assertEquals(stdout,
                      '../../four.cc:\n'
                      '  outputs:\n'
                      '    obj/linker-output.o\n'
                      '    obj/four.o\n')
    result = self.ycm_extra_conf.FlagsForFile(
        os.path.join(self.chrome_root, 'four.cc'))
    self.assertTrue(result)
    self.assertTrue('do_cache' in result)
    self.assertTrue(result['do_cache'])
    self.assertTrue('flags' in result)
    self.assertEquals(self.NormalizeStringsInList(result['flags']), [
        '-DUSE_CLANG_COMPLETER',
        '-std=c++11',
        '-x', 'c++',
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-four'
    ])

  def testSourceFileWithOnlyNonClangOutputs(self):
    """A source whose outputs are all non-compiler gets the default flags."""
    result = self.ycm_extra_conf.FlagsForFile(
        os.path.join(self.chrome_root, 'five.cc'))
    self.assertTrue(result)
    self.assertTrue('do_cache' in result)
    self.assertTrue(result['do_cache'])
    self.assertTrue('flags' in result)
    self.assertEquals(self.NormalizeStringsInList(result['flags']), [
        '-DUSE_CLANG_COMPLETER',
        '-std=c++11',
        '-x', 'c++',
        '-I[SRC]',
        '-Wno-unknown-warning-option',
        '-I[OUT]/a',
        '-isysroot',
        '/mac.sdk',
        '-I[OUT]/tag-default'
    ])
# Allow running this test file directly (PRESUBMIT also picks it up).
if __name__ == '__main__':
  unittest.main()
|
|
import binascii
import hashlib
import re
import struct
from bitcoin import changebase, is_python2, privkey_to_pubkey, pubkey_to_address, ecdsa_raw_sign, encode, \
SIGHASH_ALL, SIGHASH_ANYONECANPAY, SIGHASH_SINGLE, SIGHASH_NONE, \
deserialize, txhash, serialize, sign, mk_pubkey_script, der_encode_sig, serialize_script, \
deserialize_script, decode, ecdsa_raw_verify, der_decode_sig
def get_hashcode_strategy(hashcode):
    """Return the preimage-building strategy registered for *hashcode*.

    Raises KeyError for unsupported SIGHASH combinations.
    """
    strategy = _STRATEGIES[hashcode]
    return strategy
class HashcodeStrategy():
    """Abstract interface selecting which transaction parts a SIGHASH mode
    commits to when building the BIP143 signature preimage."""

    def get_inputs_for_sequences(self, tx, i):
        """Inputs whose outpoints/sequences are committed to."""
        raise NotImplementedError()

    def get_outputs(self, tx, i):
        """Outputs committed to when signing input *i*."""
        raise NotImplementedError()

    def get_sequences(self, tx, i):
        """Packed nSequence fields folded into hashSequence."""
        raise NotImplementedError()
class SIGHASH_ALL_Strategy(HashcodeStrategy):
    """SIGHASH_ALL: the signature commits to every input and every output."""

    def get_inputs_for_sequences(self, tx, i):
        # All inputs participate in the outpoint/sequence commitments.
        return tx['ins']

    def get_outputs(self, tx, i):
        # All outputs are signed.
        return tx['outs']

    def get_sequences(self, tx, i):
        sequences = []
        for txin in self.get_inputs_for_sequences(tx, i):
            # nSequence serialized as a 4-byte little-endian integer
            sequences.append(struct.pack('<I', (txin['sequence'])))
        return sequences
class SIGHASH_SINGLE_Strategy(HashcodeStrategy):
    """SIGHASH_SINGLE: all inputs are committed, but only the output paired
    with the signed input index."""

    def get_inputs_for_sequences(self, tx, i):
        return tx['ins']

    def get_outputs(self, tx, i):
        # Only the output at the same index as the signed input, if present.
        outs = tx['outs']
        return [outs[i]] if len(outs) > i else []

    def get_sequences(self, tx, i):
        # hashSequence is zeroed for SIGHASH_SINGLE.
        return []
class SIGHASH_NONE_Strategy(HashcodeStrategy):
    """SIGHASH_NONE: inputs are committed but no outputs are signed."""

    def get_inputs_for_sequences(self, tx, i):
        return tx['ins']

    def get_outputs(self, tx, i):
        # No outputs are committed to.
        return []

    def get_sequences(self, tx, i):
        # hashSequence is zeroed for SIGHASH_NONE.
        return []
class ANYONECANPAY_STRATEGY(HashcodeStrategy):
    """ANYONECANPAY modifier: only the signed input itself is committed to.

    Output selection is delegated to the wrapped base-hashcode strategy
    (ALL/SINGLE/NONE)."""

    def __init__(self, default_strategy):
        # Base strategy that decides which outputs are signed.
        self.default = default_strategy

    def get_inputs_for_sequences(self, tx, i):
        # Other inputs are not signed, so hashPrevouts/hashSequence zero out.
        return []

    def get_outputs(self, tx, i):
        return self.default.get_outputs(tx, i)

    def get_sequences(self, tx, i):
        return []
# Registry mapping each supported SIGHASH byte (plus the ANYONECANPAY
# variants) to the strategy object that selects which inputs, outputs and
# sequence fields the BIP143 preimage commits to.
_STRATEGIES = {
    SIGHASH_ALL: SIGHASH_ALL_Strategy(),
    SIGHASH_SINGLE: SIGHASH_SINGLE_Strategy(),
    SIGHASH_NONE: SIGHASH_NONE_Strategy(),
    SIGHASH_ALL|SIGHASH_ANYONECANPAY: ANYONECANPAY_STRATEGY(SIGHASH_ALL_Strategy()),
    SIGHASH_SINGLE|SIGHASH_ANYONECANPAY: ANYONECANPAY_STRATEGY(SIGHASH_SINGLE_Strategy()),
    SIGHASH_NONE|SIGHASH_ANYONECANPAY: ANYONECANPAY_STRATEGY(SIGHASH_NONE_Strategy())
}
def is_segwit(tx, hashcode=None):
    """Return True if *tx* (hex string or raw bytes) carries the segwit
    marker and flag bytes.

    *hashcode* is accepted for call-site symmetry but is unused.
    """
    raw = tx
    if isinstance(raw, str) and re.match('^[0-9a-fA-F]*$', raw):
        raw = changebase(raw, 16, 256)
    # Marker (0x00) and flag (0x01) directly follow the 4-byte version.
    return raw[4:6] == b'\x00\x01'
def segwit_signature_form_transaction_dict(tx, i, script, amount, hashcode=SIGHASH_ALL):
    """Build the BIP143 signature preimage for input *i* of a tx dict.

    :param tx: deserialized transaction dict
    :param i: index of the input being signed
    :param script: scriptCode as a hex string
    :param amount: value in satoshis of the output being spent
    :param hashcode: SIGHASH type; selects which inputs/outputs/sequences
        are committed to (via get_hashcode_strategy)
    :returns: hex string preimage; double-SHA256 it to get the signing digest
    """
    def parse_vout(o):
        # value (8-byte LE) || script length (single byte) || raw script
        return b''.join([struct.pack('<Q', o['value']),
                         struct.pack('B', len(o['script']) // 2),
                         binascii.unhexlify(o['script'])])
    def parse_vin(inp):
        # txid (byte-reversed) || output index (4-byte LE)
        return b''.join([binascii.unhexlify(inp['outpoint']['hash'])[::-1],
                         struct.pack('<I', (inp['outpoint']['index']))])
    # Outpoint of the input actually being signed (always committed to)
    vin_outpoint = [binascii.unhexlify(tx['ins'][i]['outpoint']['hash'])[::-1],
                    struct.pack('<I', (tx['ins'][i]['outpoint']['index']))]
    hashcode_strategy = get_hashcode_strategy(hashcode)
    outpoints = [parse_vin(inp) for inp in hashcode_strategy.get_inputs_for_sequences(tx, i)]
    sequences = hashcode_strategy.get_sequences(tx, i)
    outputs_to_sign = hashcode_strategy.get_outputs(tx, i)
    outputs = [parse_vout(out) for out in outputs_to_sign]
    # Per BIP143, commitments not applicable to the hash type are 32 zero bytes
    hash_outputs = hashlib.sha256(hashlib.sha256(b''.join(outputs)).digest()).digest() if outputs_to_sign else b'\x00'*32
    hash_sequences = hashlib.sha256(hashlib.sha256(b''.join(sequences)).digest()).digest() if sequences else b'\x00'*32
    hash_outpoints = hashlib.sha256(hashlib.sha256(b''.join(outpoints)).digest()).digest() if outpoints else b'\x00'*32
    # Field order follows the BIP143 preimage layout:
    # nVersion || hashPrevouts || hashSequence || outpoint || scriptCode
    # || amount || nSequence || hashOutputs || nLocktime || sighash type
    preimage = [struct.pack('<I', tx['version']),
                hash_outpoints,
                hash_sequences,
                b''.join(vin_outpoint),
                struct.pack('B', len(script) // 2),
                binascii.unhexlify(script),
                struct.pack('<Q', amount),
                struct.pack('<I', tx['ins'][i]['sequence']),
                hash_outputs,
                struct.pack('<I', tx['locktime']),
                struct.pack('<I', hashcode)]
    return binascii.hexlify(b''.join(preimage)).decode('ascii')
def segwit_signature_form(tx, i, script, amount, hashcode=SIGHASH_ALL):
    """Deserialize *tx* and build the BIP143 preimage for input *i*."""
    txobj = deserialize(tx)
    return segwit_signature_form_transaction_dict(txobj, i, script, amount,
                                                  hashcode=hashcode)
def segwit_txhash(tx):
    """Return {'hash': hash of the full tx, 'txid': hash of the
    witness-stripped tx} for a hex string or raw-bytes transaction."""
    raw = tx
    if isinstance(raw, str) and re.match('^[0-9a-fA-F]*$', raw):
        raw = changebase(raw, 16, 256)
    full_hash = txhash(raw)
    stripped = strip_witness_data(raw)
    return {'hash': full_hash, 'txid': txhash(stripped)}
def strip_witness_data(tx):
    """Reserialize *tx* with its segwit marker and witness data removed."""
    stripped = strip_witness_deserialized_data(deserialize(tx))
    return serialize(stripped)
def strip_witness_deserialized_data(tx_dict):
    """Remove the segwit marker and every input's witness stack, in place.

    Returns the (mutated) *tx_dict* for convenience.
    """
    tx_dict.pop('segwit', '')
    for txin in tx_dict['ins']:
        txin.pop('txinwitness', '')
    return tx_dict
def segwit_sign(tx, i, priv, amount, hashcode=SIGHASH_ALL, script=None, separator_index=None):
    """Sign segwit input *i* of *tx* with private key *priv*.

    :param tx: serialized transaction (hex string or raw bytes) or tx dict
    :param i: index of the input being signed
    :param priv: private key (hex string, or raw binary <= 33 bytes)
    :param amount: value in satoshis of the output being spent (committed
        to by the BIP143 digest)
    :param hashcode: SIGHASH type appended to the signature
    :param script: explicit scriptCode; defaults to a pay-to-pubkey-hash
        script for the key's address
    :param separator_index: OP_CODESEPARATOR index used to trim the script
    :returns: serialized transaction with the witness for input *i* set
    """
    i = int(i)
    txobj = tx if isinstance(tx, dict) else deserialize(tx)
    # BUGFIX: the original tested isinstance(re, bytes) -- the regex *module*,
    # which is never bytes -- so raw-bytes transactions fell through to
    # re.match() and raised TypeError on Python 3. Test the tx itself.
    if not isinstance(tx, dict) and ((not is_python2 and isinstance(tx, bytes)) or not re.match('^[0-9a-fA-F]*$', tx)):
        # Raw-bytes input: hex-encode, sign, and return raw bytes again.
        return binascii.unhexlify(sign(binascii.hexlify(tx), i, priv))
    if len(priv) <= 33:
        # Binary private key -> hex
        priv = binascii.hexlify(priv)
    pub = privkey_to_pubkey(priv)
    address = pubkey_to_address(pub)
    # scriptCode defaults to the standard P2PKH script for this key
    wscript = mk_pubkey_script(address) if not script else script
    stripped_script = segwit_strip_script_separator(wscript, separator_index)
    signing_tx = segwit_signature_form(tx, i, stripped_script, amount, hashcode=hashcode)
    # Sign the double-SHA256 of the BIP143 preimage
    rawsig = ecdsa_raw_sign(hashlib.sha256(hashlib.sha256(binascii.unhexlify(signing_tx)).digest()).hexdigest(), priv)
    # DER-encode (v, r, s) and append the one-byte hashcode
    sig = der_encode_sig(*rawsig)+encode(hashcode, 16, 2)
    txobj['ins'][i]['txinwitness'] = [sig, pub if not script else script]
    return serialize(txobj)
def apply_multisignatures(*args):
    """Attach multiple signatures to a multisig input's scriptSig.

    Accepts either (tx, i, script, sigs_list) or
    (tx, i, script, sig1, sig2, ..., sigN).

    :returns: the serialized signed transaction (hex if *tx* was hex,
        raw bytes if *tx* was raw bytes)
    """
    # tx,i,script,sigs OR tx,i,script,sig1,sig2...,sig[n]
    tx, i, script = args[0], int(args[1]), args[2]
    sigs = args[3] if isinstance(args[3], list) else list(args[3:])
    if isinstance(script, str) and re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)
    # '30' is the DER sequence tag: hex-encoded sigs get unhexlified
    sigs = [binascii.unhexlify(x) if x[:2] == '30' else x for x in sigs]
    if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
        # Hex transaction: recurse on the raw bytes, then re-hexlify
        signed_tx = apply_multisignatures(binascii.unhexlify(tx), i, script, sigs)
        return binascii.hexlify(signed_tx)
    # Not pushing empty elements on the top of the stack if passing no
    # script (in case of bare multisig inputs there is no script)
    # (idiom fix: len(script) instead of calling script.__len__() directly)
    script_blob = [] if len(script) == 0 else [script]
    txobj = deserialize(tx)
    # Leading None is the dummy element consumed by OP_CHECKMULTISIG
    txobj["ins"][i]["script"] = serialize_script([None]+sigs+script_blob)
    return serialize(txobj)
def apply_segwit_multisignatures(tx, i, witness_program, signatures, dummy=True, nested=False):
    """Deserialize *tx*, attach a multisig witness stack to input *i*,
    and return the reserialized transaction."""
    signed = apply_segwit_multisignatures_deserialized_data(
        deserialize(tx), i, witness_program, signatures, dummy=dummy, nested=nested
    )
    return serialize(signed)
def apply_segwit_multisignatures_deserialized_data(
    deserialized_tx, i, witness_program, signatures, dummy=True, nested=True
):
    """Attach a multisig witness stack to input *i* of a tx dict, in place.

    :param witness_program: hex witness script pushed as the last stack item
    :param signatures: list of signatures placed on the witness stack
    :param dummy: if True, prepend an empty item (the extra element consumed
        by OP_CHECKMULTISIG)
    :param nested: if True, also set the input's scriptSig to push the P2WSH
        redeem script (P2SH-nested segwit).
        NOTE(review): defaults to True here while the serializing wrapper
        passes nested=False by default -- confirm which is intended.
    :returns: the mutated *deserialized_tx*
    """
    o = [""] + signatures + [witness_program] if dummy else signatures + [witness_program]
    deserialized_tx['ins'][i]['txinwitness'] = o
    if nested:
        # P2SH-P2WSH redeem script: 0x00 followed by sha256(witness program)
        redeem_script = hashlib.sha256(binascii.unhexlify(
            witness_program
        )).hexdigest()
        length = len(redeem_script) // 2
        # NOTE(review): 'length + 2' presumably accounts for the OP_0 (None)
        # and push-length bytes -- confirm against serialize_script's encoding.
        redeem_script = serialize_script(
            [length + 2, None, redeem_script])
        deserialized_tx["ins"][i]["script"] = redeem_script
    return deserialized_tx
def segwit_strip_script_separator(script, index=0):
    """Return *script* truncated at the *index*-th OP_CODESEPARATOR.

    BIP143 signing uses the scriptCode starting after the last executed
    OP_CODESEPARATOR. If *index* is None, the script is returned unchanged.
    """
    # Idiom fix: 'is None' (identity) instead of '== None'.
    if index is None:
        return script
    OP_CODESEPARATOR = 171
    def get_pos(script, index):
        # Walk past *index* separators; if the script runs out of
        # separators, return the position reached so far.
        # NOTE(review): when index == 0 this returns 1 (drops the first
        # element); preserved as existing behavior -- confirm with callers.
        i = 0
        for x in range(0, index):
            try:
                i = script.index(OP_CODESEPARATOR, i)
            except ValueError:
                return i
        return i + 1
    deserialized_script = deserialize_script(str(script))
    pos = get_pos(deserialized_script, index)
    return serialize_script(deserialized_script[pos:])
def segwit_multisign(tx, i, script, pk, amount, hashcode=SIGHASH_ALL, separator_index=None):
    """Produce one DER signature (hashcode byte appended) for multisig
    input *i* of *tx*, using *script* as the scriptCode."""
    wscript = segwit_strip_script_separator(script, index=separator_index)
    preimage = segwit_signature_form(tx, i, wscript, amount, hashcode=hashcode)
    # Double-SHA256 of the BIP143 preimage is the digest actually signed
    digest = hashlib.sha256(hashlib.sha256(
        binascii.unhexlify(preimage)).digest()).hexdigest()
    rawsig = ecdsa_raw_sign(digest, pk)
    return der_encode_sig(*rawsig) + encode(hashcode, 16, 2)
def segwit_verify_tx_input(tx, i, script, sig, pub, amount):
    """Verify *sig* for segwit input *i* of *tx* against pubkey *pub*."""
    # The SIGHASH type is carried in the signature's final byte
    hashcode = decode(sig[-2:], 16)
    preimage = segwit_signature_form(tx, int(i), script, amount, hashcode=hashcode)
    digest = hashlib.sha256(hashlib.sha256(
        binascii.unhexlify(preimage)).digest()).hexdigest()
    return ecdsa_raw_verify(digest, der_decode_sig(sig), pub)
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import socket
from six import string_types
from six.moves.urllib.parse import urlparse
from swift.common.utils import (
SWIFT_CONF_FILE, md5_hash_for_file, set_swift_dir)
from swift.common.ring import Ring
from swift.common.storage_policy import POLICIES, reload_storage_policies
import eventlet
import json
import optparse
import time
import sys
import six
import os
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
def seconds2timeunit(seconds):
    """Convert a duration in seconds to (value, unit), cascading up through
    minutes, hours and days while the value stays at or above each step."""
    value, unit = seconds, 'seconds'
    for threshold, divisor, name in ((60, 60.0, 'minutes'),
                                     (60, 60.0, 'hours'),
                                     (24, 24.0, 'days')):
        if value < threshold:
            break
        value = value / divisor
        unit = name
    return value, unit
def size_suffix(size):
    """Render *size* (bytes) with a metric suffix, integer-dividing by 1000
    per step; values past yottabytes stay expressed in YB."""
    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    for unit in units:
        if size < 1000:
            return "%s %s" % (size, unit)
        size = size // 1000
    return "%s %s" % (size, units[-1])
class Scout(object):
    """
    Obtain swift recon information
    """

    def __init__(self, recon_type, verbose=False, suppress_errors=False,
                 timeout=5):
        # recon_type: path suffix appended to each host's /recon/ base url
        self.recon_type = recon_type
        # verbose: echo every url and its response/error to stdout
        self.verbose = verbose
        # suppress_errors: when True, per-host failures are not printed
        self.suppress_errors = suppress_errors
        # timeout: per-request socket timeout in seconds
        self.timeout = timeout

    def scout_host(self, base_url, recon_type):
        """
        Perform the actual HTTP request to obtain swift recon telemetry.

        :param base_url: the base url of the host you wish to check. str of the
                         format 'http://127.0.0.1:6200/recon/'
        :param recon_type: the swift recon check to request.
        :returns: tuple of (recon url used, response body, and status)
        """
        url = base_url + recon_type
        try:
            body = urllib2.urlopen(url, timeout=self.timeout).read()
            content = json.loads(body)
            if self.verbose:
                print("-> %s: %s" % (url, content))
            status = 200
        except urllib2.HTTPError as err:
            # HTTP-level failure: report the server's status code
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = err.code
        except (urllib2.URLError, socket.timeout) as err:
            # Transport-level failure (unreachable host, DNS, timeout)
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = -1
        return url, content, status

    def scout(self, host):
        """
        Obtain telemetry from a host running the swift recon middleware.

        :param host: (ip, port) tuple of the host to check
        :returns: tuple of (recon url used, response body, status, time start
                  and time end)
        """
        base_url = "http://%s:%s/recon/" % (host[0], host[1])
        ts_start = time.time()
        url, content, status = self.scout_host(base_url, self.recon_type)
        ts_end = time.time()
        return url, content, status, ts_start, ts_end

    def scout_server_type(self, host):
        """
        Obtain Server header by calling OPTIONS.

        :param host: (ip, port) tuple of the host to check
        :returns: tuple of (url used, server type or error, status)
        """
        try:
            url = "http://%s:%s/" % (host[0], host[1])
            req = urllib2.Request(url)
            req.get_method = lambda: 'OPTIONS'
            conn = urllib2.urlopen(req)
            # NOTE(review): getheader() is the Python 2 mimetools/httplib
            # API; under Python 3 this would be conn.info().get('Server') --
            # confirm this path is only exercised on Python 2.
            header = conn.info().getheader('Server')
            # Server header is of the form 'name/version'; keep the name
            server_header = header.split('/')
            content = server_header[0]
            status = 200
        except urllib2.HTTPError as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = err.code
        except (urllib2.URLError, socket.timeout) as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = -1
        return url, content, status
class SwiftRecon(object):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
def __init__(self):
self.verbose = False
self.suppress_errors = False
self.timeout = 5
self.pool_size = 30
self.pool = eventlet.GreenPool(self.pool_size)
self.check_types = ['account', 'container', 'object']
self.server_type = 'object'
def _gen_stats(self, stats, name=None):
"""Compute various stats from a list of values."""
cstats = [x for x in stats if x is not None]
if len(cstats) > 0:
ret_dict = {'low': min(cstats), 'high': max(cstats),
'total': sum(cstats), 'reported': len(cstats),
'number_none': len(stats) - len(cstats), 'name': name}
ret_dict['average'] = \
ret_dict['total'] / float(len(cstats))
ret_dict['perc_none'] = \
ret_dict['number_none'] * 100.0 / len(stats)
else:
ret_dict = {'reported': 0}
return ret_dict
def _print_stats(self, stats):
"""
print out formatted stats to console
:param stats: dict of stats generated by _gen_stats
"""
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time in UTC
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names):
"""
Get a list of hosts in the rings.
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_names: Collection of ring names, such as
['object', 'object-2']
:returns: a set of tuples containing the ip and port of hosts
"""
rings = [Ring(swift_dir, ring_name=n) for n in ring_names]
devs = [d for r in rings for d in r.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
devs = [d for d in devs if d['zone'] == zone_filter]
return set((d['ip'], d['port']) for d in devs)
    def get_ringmd5(self, hosts, swift_dir):
        """
        Compare ring md5sum's with those on remote host
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        :param swift_dir: The local directory with the ring files.
        """
        matches = 0
        errors = 0
        ring_names = set()
        # Object tier may have per-policy rings (object-1.ring.gz, ...), so
        # pick up every object*.ring.gz; other tiers have exactly one ring.
        if self.server_type == 'object':
            for ring_name in os.listdir(swift_dir):
                if ring_name.startswith('object') and \
                        ring_name.endswith('.ring.gz'):
                    ring_names.add(ring_name)
        else:
            ring_name = '%s.ring.gz' % self.server_type
            ring_names.add(ring_name)
        # Local md5 of each ring file, used as the reference value.
        rings = {}
        for ring_name in ring_names:
            rings[ring_name] = md5_hash_for_file(
                os.path.join(swift_dir, ring_name))
        recon = Scout("ringmd5", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking ring md5sums" % self._ptime())
        if self.verbose:
            for ring_file, ring_sum in rings.items():
                print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status != 200:
                errors = errors + 1
                continue
            success = True
            for remote_ring_file, remote_ring_sum in response.items():
                remote_ring_name = os.path.basename(remote_ring_file)
                # Only compare rings belonging to the tier being checked.
                if not remote_ring_name.startswith(self.server_type):
                    continue
                ring_sum = rings.get(remote_ring_name, None)
                if remote_ring_sum != ring_sum:
                    success = False
                    print("!! %s (%s => %s) doesn't match on disk md5sum" % (
                        url, remote_ring_name, remote_ring_sum))
            # A host counts as a single error even if several rings differ.
            if not success:
                errors += 1
                continue
            matches += 1
            if self.verbose:
                print("-> %s matches." % url)
        print("%s/%s hosts matched, %s error[s] while checking hosts." % (
            matches, len(hosts), errors))
        print("=" * 79)
    def get_swiftconfmd5(self, hosts, printfn=print):
        """
        Compare swift.conf md5sum with that on remote hosts
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        :param printfn: function to print text; defaults to print()
        """
        matches = 0
        errors = 0
        # Local md5 of swift.conf computed once, compared against each host.
        conf_sum = md5_hash_for_file(SWIFT_CONF_FILE)
        recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
                      self.timeout)
        printfn("[%s] Checking swift.conf md5sum" % self._ptime())
        if self.verbose:
            printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                # Remote recon keys the checksum by the conf file path.
                if response[SWIFT_CONF_FILE] != conf_sum:
                    printfn("!! %s (%s) doesn't match on disk md5sum" %
                            (url, response[SWIFT_CONF_FILE]))
                else:
                    matches = matches + 1
                    if self.verbose:
                        printfn("-> %s matches." % url)
            else:
                errors = errors + 1
        printfn("%s/%s hosts matched, %s error[s] while checking hosts."
                % (matches, len(hosts), errors))
        printfn("=" * 79)
    def async_check(self, hosts):
        """
        Obtain and print async pending statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> async_pending count for every host that answered.
        scan = {}
        recon = Scout("async", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking async pendings" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                scan[url] = response['async_pending']
        stats = self._gen_stats(scan.values(), 'async_pending')
        if stats['reported'] > 0:
            self._print_stats(stats)
        else:
            print("[async_pending] - No hosts returned valid data.")
        print("=" * 79)
    def driveaudit_check(self, hosts):
        """
        Obtain and print drive audit error statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> drive_audit_errors count for every host that answered.
        scan = {}
        recon = Scout("driveaudit", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking drive-audit errors" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                scan[url] = response['drive_audit_errors']
        stats = self._gen_stats(scan.values(), 'drive_audit_errors')
        if stats['reported'] > 0:
            self._print_stats(stats)
        else:
            print("[drive_audit_errors] - No hosts returned valid data.")
        print("=" * 79)
    def umount_check(self, hosts):
        """
        Check for and print unmounted drives
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> [devices] for cleanly-unmounted drives vs. drives whose
        # 'mounted' field carries an error string instead of a bool.
        unmounted = {}
        errors = {}
        recon = Scout("unmounted", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Getting unmounted drives from %s hosts..." %
              (self._ptime(), len(hosts)))
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                unmounted[url] = []
                errors[url] = []
                for i in response:
                    # A non-bool 'mounted' value means the middleware hit a
                    # device error while probing the mount point.
                    if not isinstance(i['mounted'], bool):
                        errors[url].append(i['device'])
                    else:
                        unmounted[url].append(i['device'])
        for host in unmounted:
            node = urlparse(host).netloc
            for entry in unmounted[host]:
                print("Not mounted: %s on %s" % (entry, node))
        for host in errors:
            node = urlparse(host).netloc
            for entry in errors[host]:
                print("Device errors: %s on %s" % (entry, node))
        print("=" * 79)
    def server_type_check(self, hosts):
        """
        Check for server types on the ring
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> reported server type, for hosts that don't match.
        errors = {}
        recon = Scout("server_type_check", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Validating server type '%s' on %s hosts..." %
              (self._ptime(), self.server_type, len(hosts)))
        for url, response, status in self.pool.imap(
                recon.scout_server_type, hosts):
            if status == 200:
                # The Server header is expected to be e.g. 'object-server'.
                if response != self.server_type + '-server':
                    errors[url] = response
        print("%s/%s hosts ok, %s error[s] while checking hosts." % (
            len(hosts) - len(errors), len(hosts), len(errors)))
        for host in errors:
            print("Invalid: %s is %s" % (host, errors[host]))
        print("=" * 79)
    def expirer_check(self, hosts):
        """
        Obtain and print expirer statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        stats = {'object_expiration_pass': [], 'expired_last_pass': []}
        recon = Scout("expirer/%s" % self.server_type, self.verbose,
                      self.suppress_errors, self.timeout)
        print("[%s] Checking on expirers" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                # .get() so hosts missing a metric contribute None, which
                # _gen_stats counts as "no result" rather than skewing sums.
                stats['object_expiration_pass'].append(
                    response.get('object_expiration_pass'))
                stats['expired_last_pass'].append(
                    response.get('expired_last_pass'))
        for k in stats:
            if stats[k]:
                computed = self._gen_stats(stats[k], name=k)
                if computed['reported'] > 0:
                    self._print_stats(computed)
                else:
                    print("[%s] - No hosts returned valid data." % k)
            else:
                print("[%s] - No hosts returned valid data." % k)
        print("=" * 79)
    def replication_check(self, hosts):
        """
        Obtain and print replication statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        stats = {'replication_time': [], 'failure': [], 'success': [],
                 'attempted': []}
        recon = Scout("replication/%s" % self.server_type, self.verbose,
                      self.suppress_errors, self.timeout)
        print("[%s] Checking on replication" % self._ptime())
        # Track the hosts with the oldest and newest completed replication
        # pass while streaming through responses.
        least_recent_time = 9999999999
        least_recent_url = None
        most_recent_time = 0
        most_recent_url = None
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                # Older object servers report 'object_replication_time';
                # fall back to it for compatibility.
                stats['replication_time'].append(
                    response.get('replication_time',
                                 response.get('object_replication_time', 0)))
                repl_stats = response.get('replication_stats')
                if repl_stats:
                    for stat_key in ['attempted', 'failure', 'success']:
                        stats[stat_key].append(repl_stats.get(stat_key))
                last = response.get('replication_last',
                                    response.get('object_replication_last', 0))
                if last < least_recent_time:
                    least_recent_time = last
                    least_recent_url = url
                if last > most_recent_time:
                    most_recent_time = last
                    most_recent_url = url
        for k in stats:
            if stats[k]:
                if k != 'replication_time':
                    computed = self._gen_stats(stats[k],
                                               name='replication_%s' % k)
                else:
                    computed = self._gen_stats(stats[k], name=k)
                if computed['reported'] > 0:
                    self._print_stats(computed)
                else:
                    print("[%s] - No hosts returned valid data." % k)
            else:
                print("[%s] - No hosts returned valid data." % k)
        if least_recent_url is not None:
            host = urlparse(least_recent_url).netloc
            # A last-pass timestamp of 0 means the host never completed one.
            if not least_recent_time:
                print('Oldest completion was NEVER by %s.' % host)
            else:
                elapsed = time.time() - least_recent_time
                elapsed, elapsed_unit = seconds2timeunit(elapsed)
                print('Oldest completion was %s (%d %s ago) by %s.' % (
                    self._ptime(least_recent_time),
                    elapsed, elapsed_unit, host))
        if most_recent_url is not None:
            host = urlparse(most_recent_url).netloc
            elapsed = time.time() - most_recent_time
            elapsed, elapsed_unit = seconds2timeunit(elapsed)
            print('Most recent completion was %s (%d %s ago) by %s.' % (
                self._ptime(most_recent_time),
                elapsed, elapsed_unit, host))
        print("=" * 79)
    def updater_check(self, hosts):
        """
        Obtain and print updater statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # Collected '<type>_updater_sweep' durations, one per host.
        stats = []
        recon = Scout("updater/%s" % self.server_type, self.verbose,
                      self.suppress_errors, self.timeout)
        print("[%s] Checking updater times" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                # Skip hosts reporting a falsy sweep time (never ran).
                if response['%s_updater_sweep' % self.server_type]:
                    stats.append(response['%s_updater_sweep' %
                                          self.server_type])
        if len(stats) > 0:
            computed = self._gen_stats(stats, name='updater_last_sweep')
            if computed['reported'] > 0:
                self._print_stats(computed)
            else:
                print("[updater_last_sweep] - No hosts returned valid data.")
        else:
            print("[updater_last_sweep] - No hosts returned valid data.")
        print("=" * 79)
    def auditor_check(self, hosts):
        """
        Obtain and print obj auditor statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> full auditor response per host.
        scan = {}
        # Recon metric keys, parameterized by server type.
        adone = '%s_auditor_pass_completed' % self.server_type
        afail = '%s_audits_failed' % self.server_type
        apass = '%s_audits_passed' % self.server_type
        asince = '%s_audits_since' % self.server_type
        recon = Scout("auditor/%s" % self.server_type, self.verbose,
                      self.suppress_errors, self.timeout)
        print("[%s] Checking auditor stats" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                scan[url] = response
        if len(scan) < 1:
            print("Error: No hosts available")
            return
        stats = {}
        stats[adone] = [scan[i][adone] for i in scan
                        if scan[i][adone] is not None]
        stats[afail] = [scan[i][afail] for i in scan
                        if scan[i][afail] is not None]
        stats[apass] = [scan[i][apass] for i in scan
                        if scan[i][apass] is not None]
        stats[asince] = [scan[i][asince] for i in scan
                        if scan[i][asince] is not None]
        for k in stats:
            if len(stats[k]) < 1:
                print("[%s] - No hosts returned valid data." % k)
            else:
                # 'audits_since' is a timestamp, reported separately below.
                if k != asince:
                    computed = self._gen_stats(stats[k], k)
                    if computed['reported'] > 0:
                        self._print_stats(computed)
        if len(stats[asince]) >= 1:
            low = min(stats[asince])
            high = max(stats[asince])
            total = sum(stats[asince])
            # NOTE(review): '/' truncates on Python 2 for int timestamps;
            # presumably acceptable at this precision — confirm if porting.
            average = total / len(stats[asince])
            print('[last_pass] oldest: %s, newest: %s, avg: %s' %
                  (self._ptime(low), self._ptime(high), self._ptime(average)))
        print("=" * 79)
def nested_get_value(self, key, recon_entry):
"""
Generator that yields all values for given key in a recon cache entry.
This is for use with object auditor recon cache entries. If the
object auditor has run in parallel, the recon cache will have entries
of the form: {'object_auditor_stats_ALL': { 'disk1': {..},
'disk2': {..},
'disk3': {..},
...}}
If the object auditor hasn't run in parallel, the recon cache will have
entries of the form: {'object_auditor_stats_ALL': {...}}.
The ZBF auditor doesn't run in parallel. However, if a subset of
devices is selected for auditing, the recon cache will have an entry
of the form: {'object_auditor_stats_ZBF': { 'disk1disk2..diskN': {}}
We use this generator to find all instances of a particular key in
these multi-level dictionaries.
"""
for k, v in recon_entry.items():
if isinstance(v, dict):
for value in self.nested_get_value(key, v):
yield value
if k == key:
yield v
    def object_auditor_check(self, hosts):
        """
        Obtain and print obj auditor statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # Responses split by auditor mode: ALL (full audit) vs ZBF
        # (zero-byte-file quick pass).
        all_scan = {}
        zbf_scan = {}
        atime = 'audit_time'
        bprocessed = 'bytes_processed'
        passes = 'passes'
        errors = 'errors'
        quarantined = 'quarantined'
        recon = Scout("auditor/object", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking auditor stats " % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                if response['object_auditor_stats_ALL']:
                    all_scan[url] = response['object_auditor_stats_ALL']
                if response['object_auditor_stats_ZBF']:
                    zbf_scan[url] = response['object_auditor_stats_ZBF']
        if len(all_scan) > 0:
            stats = {}
            # nested_get_value flattens per-disk sub-dicts produced by
            # parallel auditor runs; sum() gives one value per host.
            stats[atime] = [sum(self.nested_get_value(atime, all_scan[i]))
                            for i in all_scan]
            stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
                                 all_scan[i])) for i in all_scan]
            stats[passes] = [sum(self.nested_get_value(passes, all_scan[i]))
                             for i in all_scan]
            stats[errors] = [sum(self.nested_get_value(errors, all_scan[i]))
                             for i in all_scan]
            stats[quarantined] = [sum(self.nested_get_value(quarantined,
                                  all_scan[i])) for i in all_scan]
            for k in stats:
                if None in stats[k]:
                    stats[k] = [x for x in stats[k] if x is not None]
                if len(stats[k]) < 1:
                    print("[Auditor %s] - No hosts returned valid data." % k)
                else:
                    computed = self._gen_stats(stats[k],
                                               name='ALL_%s_last_path' % k)
                    if computed['reported'] > 0:
                        self._print_stats(computed)
                    else:
                        print("[ALL_auditor] - No hosts returned valid data.")
        else:
            print("[ALL_auditor] - No hosts returned valid data.")
        if len(zbf_scan) > 0:
            stats = {}
            # ZBF auditors report no 'passes' metric.
            stats[atime] = [sum(self.nested_get_value(atime, zbf_scan[i]))
                            for i in zbf_scan]
            stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
                                 zbf_scan[i])) for i in zbf_scan]
            stats[errors] = [sum(self.nested_get_value(errors, zbf_scan[i]))
                             for i in zbf_scan]
            stats[quarantined] = [sum(self.nested_get_value(quarantined,
                                  zbf_scan[i])) for i in zbf_scan]
            for k in stats:
                if None in stats[k]:
                    stats[k] = [x for x in stats[k] if x is not None]
                if len(stats[k]) < 1:
                    print("[Auditor %s] - No hosts returned valid data." % k)
                else:
                    computed = self._gen_stats(stats[k],
                                               name='ZBF_%s_last_path' % k)
                    if computed['reported'] > 0:
                        self._print_stats(computed)
                    else:
                        print("[ZBF_auditor] - No hosts returned valid data.")
        else:
            print("[ZBF_auditor] - No hosts returned valid data.")
        print("=" * 79)
    def load_check(self, hosts):
        """
        Obtain and print load average statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> load average, one dict per averaging window.
        load1 = {}
        load5 = {}
        load15 = {}
        recon = Scout("load", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking load averages" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                load1[url] = response['1m']
                load5[url] = response['5m']
                load15[url] = response['15m']
        stats = {"1m": load1, "5m": load5, "15m": load15}
        for item in stats:
            if len(stats[item]) > 0:
                computed = self._gen_stats(stats[item].values(),
                                           name='%s_load_avg' % item)
                self._print_stats(computed)
            else:
                print("[%s_load_avg] - No hosts returned valid data." % item)
        print("=" * 79)
    def quarantine_check(self, hosts):
        """
        Obtain and print quarantine statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> quarantine count, one dict per resource kind.
        objq = {}
        conq = {}
        acctq = {}
        stats = {}
        recon = Scout("quarantined", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking quarantine" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                objq[url] = response['objects']
                conq[url] = response['containers']
                acctq[url] = response['accounts']
                # Newer hosts also break objects down per storage policy.
                for key in response.get('policies', {}):
                    pkey = "objects_%s" % key
                    stats.setdefault(pkey, {})
                    stats[pkey][url] = response['policies'][key]['objects']
        stats.update({"objects": objq, "containers": conq, "accounts": acctq})
        for item in stats:
            if len(stats[item]) > 0:
                computed = self._gen_stats(stats[item].values(),
                                           name='quarantined_%s' % item)
                self._print_stats(computed)
            else:
                print("No hosts returned valid data.")
        print("=" * 79)
    def socket_usage(self, hosts):
        """
        Obtain and print /proc/net/sockstat statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        # url -> value, one dict per sockstat metric.
        inuse4 = {}
        mem = {}
        inuse6 = {}
        timewait = {}
        orphan = {}
        recon = Scout("sockstat", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking socket usage" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                inuse4[url] = response['tcp_in_use']
                mem[url] = response['tcp_mem_allocated_bytes']
                # tcp6 may be absent when IPv6 is disabled on the host.
                inuse6[url] = response.get('tcp6_in_use', 0)
                timewait[url] = response['time_wait']
                orphan[url] = response['orphan']
        stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem,
                 "tcp6_in_use": inuse6, "time_wait": timewait,
                 "orphan": orphan}
        for item in stats:
            if len(stats[item]) > 0:
                computed = self._gen_stats(stats[item].values(), item)
                self._print_stats(computed)
            else:
                print("No hosts returned valid data.")
        print("=" * 79)
    def disk_usage(self, hosts, top=0, lowest=0, human_readable=False):
        """
        Obtain and print disk usage statistics
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        :param top: also rank the TOP n fullest drives
        :param lowest: also rank the LOWEST n emptiest drives
        :param human_readable: print byte counts with size suffixes
        """
        stats = {}
        highs = []
        lows = []
        raw_total_used = []
        raw_total_avail = []
        # histogram: integer used-% -> number of drives at that percentage
        percents = {}
        # Fixed-size leaderboards seeded with sentinels; maintained by
        # insert-sort-pop below so they always hold exactly top/lowest rows.
        top_percents = [(None, 0)] * top
        low_percents = [(None, 100)] * lowest
        recon = Scout("diskusage", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking disk usage now" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                hostusage = []
                for entry in response:
                    # Non-bool 'mounted' carries an error string.
                    if not isinstance(entry['mounted'], bool):
                        print("-> %s/%s: Error: %s" % (url, entry['device'],
                                                       entry['mounted']))
                    elif entry['mounted']:
                        used = float(entry['used']) / float(entry['size']) \
                            * 100.0
                        raw_total_used.append(entry['used'])
                        raw_total_avail.append(entry['avail'])
                        hostusage.append(round(used, 2))
                        for ident, oused in top_percents:
                            if oused < used:
                                top_percents.append(
                                    (url + ' ' + entry['device'], used))
                                top_percents.sort(key=lambda x: -x[1])
                                top_percents.pop()
                                break
                        for ident, oused in low_percents:
                            if oused > used:
                                low_percents.append(
                                    (url + ' ' + entry['device'], used))
                                low_percents.sort(key=lambda x: x[1])
                                low_percents.pop()
                                break
                stats[url] = hostusage
        for url in stats:
            if len(stats[url]) > 0:
                # get per host hi/los for another day
                low = min(stats[url])
                high = max(stats[url])
                highs.append(high)
                lows.append(low)
                for percent in stats[url]:
                    percents[int(percent)] = percents.get(int(percent), 0) + 1
            else:
                print("-> %s: Error. No drive info available." % url)
        if len(lows) > 0:
            low = min(lows)
            high = max(highs)
            # dist graph shamelessly stolen from https://github.com/gholt/tcod
            print("Distribution Graph:")
            # Scale star bars so the tallest bucket spans 69 columns.
            mul = 69.0 / max(percents.values())
            for percent in sorted(percents):
                print('% 3d%%%5d %s' % (percent, percents[percent],
                                        '*' * int(percents[percent] * mul)))
            raw_used = sum(raw_total_used)
            raw_avail = sum(raw_total_avail)
            raw_total = raw_used + raw_avail
            avg_used = 100.0 * raw_used / raw_total
            if human_readable:
                raw_used = size_suffix(raw_used)
                raw_avail = size_suffix(raw_avail)
                raw_total = size_suffix(raw_total)
            print("Disk usage: space used: %s of %s" % (raw_used, raw_total))
            print("Disk usage: space free: %s of %s" % (raw_avail, raw_total))
            print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" %
                  (low, high, avg_used))
        else:
            print("No hosts returned valid data.")
        print("=" * 79)
        if top_percents:
            print('TOP %s' % top)
            for ident, used in top_percents:
                # Sentinel rows have ident None and are skipped.
                if ident:
                    url, device = ident.split()
                    host = urlparse(url).netloc.split(':')[0]
                    print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
        if low_percents:
            print('LOWEST %s' % lowest)
            for ident, used in low_percents:
                if ident:
                    url, device = ident.split()
                    host = urlparse(url).netloc.split(':')[0]
                    print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
    def time_check(self, hosts):
        """
        Check a time synchronization of hosts with current time
        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        matches = 0
        errors = 0
        recon = Scout("time", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking time-sync" % self._ptime())
        for url, ts_remote, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status != 200:
                errors = errors + 1
                continue
            # The remote clock is considered in sync when its timestamp
            # falls inside the local request's start/end window.
            if (ts_remote < ts_start or ts_remote > ts_end):
                diff = abs(ts_end - ts_remote)
                ts_end_f = self._ptime(ts_end)
                ts_remote_f = self._ptime(ts_remote)
                print("!! %s current time is %s, but remote is %s, "
                      "differs by %.2f sec" % (
                          url,
                          ts_end_f,
                          ts_remote_f,
                          diff))
                continue
            matches += 1
            if self.verbose:
                print("-> %s matches." % url)
        print("%s/%s hosts matched, %s error[s] while checking hosts." % (
            matches, len(hosts), errors))
        print("=" * 79)
    def _get_ring_names(self, policy=None):
        """
        Retrieve name of ring files.
        If no policy is passed and the server type is object,
        the ring names of all storage-policies are retrieved.
        :param policy: name or index of storage policy, only applicable
         with server_type==object.
        :returns: list of ring names.
        """
        if self.server_type == 'object':
            # A policy matches by exact name, by numeric index, or by one
            # of its aliases; a falsy policy selects every policy's ring.
            # ('not policy' short-circuits before policy.isdigit() so a
            # None policy never raises.)
            ring_names = [p.ring_name for p in POLICIES if (
                p.name == policy or not policy or (
                    policy.isdigit() and int(policy) == int(p) or
                    (isinstance(policy, string_types)
                     and policy in p.aliases)))]
        else:
            ring_names = [self.server_type]
        return ring_names
def main(self):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
print("=" * 79)
usage = '''
usage: %prog <server_type> [<server_type> [<server_type>]]
[-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
[--human-readable]
<server_type>\taccount|container|object
Defaults to object server.
ex: %prog container -l --auditor
'''
args = optparse.OptionParser(usage)
args.add_option('--verbose', '-v', action="store_true",
help="Print verbose info")
args.add_option('--suppress', action="store_true",
help="Suppress most connection related errors")
args.add_option('--async', '-a', action="store_true",
help="Get async stats")
args.add_option('--replication', '-r', action="store_true",
help="Get replication stats")
args.add_option('--auditor', action="store_true",
help="Get auditor stats")
args.add_option('--updater', action="store_true",
help="Get updater stats")
args.add_option('--expirer', action="store_true",
help="Get expirer stats")
args.add_option('--unmounted', '-u', action="store_true",
help="Check cluster for unmounted devices")
args.add_option('--diskusage', '-d', action="store_true",
help="Get disk usage stats")
args.add_option('--human-readable', action="store_true",
help="Use human readable suffix for disk usage stats")
args.add_option('--loadstats', '-l', action="store_true",
help="Get cluster load average stats")
args.add_option('--quarantined', '-q', action="store_true",
help="Get cluster quarantine stats")
args.add_option('--validate-servers', action="store_true",
help="Validate servers on the ring")
args.add_option('--md5', action="store_true",
help="Get md5sum of servers ring and compare to "
"local copy")
args.add_option('--sockstat', action="store_true",
help="Get cluster socket usage stats")
args.add_option('--driveaudit', action="store_true",
help="Get drive audit error stats")
args.add_option('--time', '-T', action="store_true",
help="Check time synchronization")
args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.')
args.add_option('--lowest', type='int', metavar='COUNT', default=0,
help='Also show the lowest COUNT entries in rank \
order.')
args.add_option('--all', action="store_true",
help="Perform all checks. Equal to \t\t\t-arudlqT "
"--md5 --sockstat --auditor --updater --expirer "
"--driveaudit --validate-servers")
args.add_option('--region', type="int",
help="Only query servers in specified region")
args.add_option('--zone', '-z', type="int",
help="Only query servers in specified zone")
args.add_option('--timeout', '-t', type="int", metavar="SECONDS",
help="Time to wait for a response from a server",
default=5)
args.add_option('--swiftdir', default="/etc/swift",
help="Default = /etc/swift")
args.add_option('--policy', '-p',
help='Only query object servers in specified '
'storage policy (specified as name or index).')
options, arguments = args.parse_args()
if len(sys.argv) <= 1 or len(arguments) > len(self.check_types):
args.print_help()
sys.exit(0)
if arguments:
arguments = set(arguments)
if arguments.issubset(self.check_types):
server_types = arguments
else:
print("Invalid Server Type")
args.print_help()
sys.exit(1)
else: # default
server_types = ['object']
swift_dir = options.swiftdir
if set_swift_dir(swift_dir):
reload_storage_policies()
self.verbose = options.verbose
self.suppress_errors = options.suppress
self.timeout = options.timeout
for server_type in server_types:
self.server_type = server_type
ring_names = self._get_ring_names(options.policy)
if not ring_names:
print('Invalid Storage Policy: %s' % options.policy)
args.print_help()
sys.exit(0)
hosts = self.get_hosts(options.region, options.zone,
swift_dir, ring_names)
print("--> Starting reconnaissance on %s hosts (%s)" %
(len(hosts), self.server_type))
print("=" * 79)
if options.all:
if self.server_type == 'object':
self.async_check(hosts)
self.object_auditor_check(hosts)
self.updater_check(hosts)
self.expirer_check(hosts)
elif self.server_type == 'container':
self.auditor_check(hosts)
self.updater_check(hosts)
elif self.server_type == 'account':
self.auditor_check(hosts)
self.replication_check(hosts)
self.umount_check(hosts)
self.load_check(hosts)
self.disk_usage(hosts, options.top, options.lowest,
options.human_readable)
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
self.quarantine_check(hosts)
self.socket_usage(hosts)
self.server_type_check(hosts)
self.driveaudit_check(hosts)
self.time_check(hosts)
else:
if options.async:
if self.server_type == 'object':
self.async_check(hosts)
else:
print("Error: Can't check asyncs on non object "
"servers.")
print("=" * 79)
if options.unmounted:
self.umount_check(hosts)
if options.replication:
self.replication_check(hosts)
if options.auditor:
if self.server_type == 'object':
self.object_auditor_check(hosts)
else:
self.auditor_check(hosts)
if options.updater:
if self.server_type == 'account':
print("Error: Can't check updaters on account "
"servers.")
print("=" * 79)
else:
self.updater_check(hosts)
if options.expirer:
if self.server_type == 'object':
self.expirer_check(hosts)
else:
print("Error: Can't check expired on non object "
"servers.")
print("=" * 79)
if options.validate_servers:
self.server_type_check(hosts)
if options.loadstats:
self.load_check(hosts)
if options.diskusage:
self.disk_usage(hosts, options.top, options.lowest,
options.human_readable)
if options.md5:
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
if options.quarantined:
self.quarantine_check(hosts)
if options.sockstat:
self.socket_usage(hosts)
if options.driveaudit:
self.driveaudit_check(hosts)
if options.time:
self.time_check(hosts)
def main():
    """Console entry point: run swift-recon, exiting quietly on Ctrl-C."""
    try:
        SwiftRecon().main()
    except KeyboardInterrupt:
        print('\n')
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import mock
from neutron.common import constants as n_consts
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import base as test_extensions_base
from webob import exc
from gbpservice.neutron.extensions import group_policy as gp
from gbpservice.neutron.tests.unit import common as cm
# Shortcuts for frequently used test helpers.
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
# Dotted path of the Group Policy plugin base class; used to mock the plugin.
GP_PLUGIN_BASE_NAME = (
    gp.GroupPolicyPluginBase.__module__ + '.' +
    gp.GroupPolicyPluginBase.__name__)
# REST URI fragments for each Group-Based Policy resource collection.
GROUPPOLICY_URI = 'grouppolicy'
POLICY_TARGETS_URI = GROUPPOLICY_URI + '/' + 'policy_targets'
POLICY_TARGET_GROUPS_URI = GROUPPOLICY_URI + '/' + 'policy_target_groups'
L2_POLICIES_URI = GROUPPOLICY_URI + '/' + 'l2_policies'
L3_POLICIES_URI = GROUPPOLICY_URI + '/' + 'l3_policies'
POLICY_RULES_URI = GROUPPOLICY_URI + '/' + 'policy_rules'
POLICY_CLASSIFIERS_URI = GROUPPOLICY_URI + '/' + 'policy_classifiers'
POLICY_ACTIONS_URI = GROUPPOLICY_URI + '/' + 'policy_actions'
POLICY_RULE_SETS_URI = GROUPPOLICY_URI + '/' + 'policy_rule_sets'
NET_SVC_POLICIES_URI = GROUPPOLICY_URI + '/' + 'network_service_policies'
EP_POLICIES_URI = GROUPPOLICY_URI + '/' + 'external_policies'
ES_POLICIES_URI = GROUPPOLICY_URI + '/' + 'external_segments'
NP_POLICIES_URI = GROUPPOLICY_URI + '/' + 'nat_pools'
# Resource-name -> collection-URI mapping used by external/NAT tests.
RES_TO_URI = {'external_policy': EP_POLICIES_URI,
              'external_segment': ES_POLICIES_URI,
              'nat_pool': NP_POLICIES_URI}
class GroupPolicyExtensionTestCase(test_extensions_base.ExtensionTestCase):
fmt = 'json'
    def setUp(self):
        """Register the group_policy API extension against a mocked plugin."""
        super(GroupPolicyExtensionTestCase, self).setUp()
        # Plural overrides for resources with irregular plurals
        # ("policies", not "policys").
        plural_mappings = {
            'l2_policy': 'l2_policies', 'l3_policy': 'l3_policies',
            'network_service_policy': 'network_service_policies',
            'external_policy': 'external_policies'}
        self._setUpExtension(
            GP_PLUGIN_BASE_NAME, constants.GROUP_POLICY,
            gp.RESOURCE_ATTRIBUTE_MAP, gp.Group_policy, GROUPPOLICY_URI,
            plural_mappings=plural_mappings)
        # Mocked plugin instance; tests program its return values directly.
        self.instance = self.plugin.return_value
    def __getattr__(self, item):
        """Delegate attribute-factory helpers to the shared ``cm`` module.

        Only names of the form get_(create|update)_<resource>[_default]_attrs
        for a real GBP extension resource are resolved; anything else raises
        AttributeError as usual.
        """
        # Verify the plural corresponds to a proper GBP resource
        def _is_gbp_resource(plural):
            return plural in gp.RESOURCE_ATTRIBUTE_MAP
        # Match helper names such as get_create_policy_target_default_attrs
        if re.match("^get_(create|update).+(default|)_attrs$", item):
            resource = re.sub("^get_(create|update)_", "", item)
            resource = re.sub("(_default|)_attrs$", "", resource)
            if _is_gbp_resource(cm.get_resource_plural(resource)):
                return getattr(cm, item)
        raise AttributeError
    def _test_create_policy_target(self, data, expected_value,
                                   default_data=None):
        """POST a policy target and verify the plugin call and response.

        :param data: request body sent to the API.
        :param expected_value: resource dict the mocked plugin returns.
        :param default_data: body the plugin is expected to receive after
                             the API fills in defaults; defaults to *data*.
        """
        if not default_data:
            default_data = data
        self.instance.create_policy_target.return_value = expected_value
        res = self.api.post(_get_path(POLICY_TARGETS_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_policy_target.assert_called_once_with(
            mock.ANY, policy_target=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_target', res)
        self.assertEqual(expected_value, res['policy_target'])
def test_create_policy_target_with_defaults(self):
policy_target_id = _uuid()
data = {'policy_target': {'policy_target_group_id': _uuid(),
'tenant_id': _uuid()}}
default_attrs = self.get_create_policy_target_default_attrs()
default_data = copy.copy(data)
default_data['policy_target'].update(default_attrs)
expected_value = dict(default_data['policy_target'])
expected_value['id'] = policy_target_id
self._test_create_policy_target(data, expected_value, default_data)
def test_create_policy_target(self):
policy_target_id = _uuid()
data = {'policy_target': self.get_create_policy_target_attrs()}
expected_value = dict(data['policy_target'])
expected_value['id'] = policy_target_id
self._test_create_policy_target(data, expected_value)
def test_list_policy_targets(self):
policy_target_id = _uuid()
expected_value = [{'tenant_id': _uuid(), 'id': policy_target_id}]
self.instance.get_policy_targets.return_value = expected_value
res = self.api.get(_get_path(POLICY_TARGETS_URI, fmt=self.fmt))
self.instance.get_policy_targets.assert_called_once_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('policy_targets', res)
self.assertEqual(expected_value, res['policy_targets'])
def test_get_policy_target(self):
policy_target_id = _uuid()
expected_value = {'tenant_id': _uuid(), 'id': policy_target_id}
self.instance.get_policy_target.return_value = expected_value
res = self.api.get(_get_path(POLICY_TARGETS_URI, id=policy_target_id,
fmt=self.fmt))
self.instance.get_policy_target.assert_called_once_with(
mock.ANY, policy_target_id, fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('policy_target', res)
self.assertEqual(expected_value, res['policy_target'])
def test_update_policy_target(self):
policy_target_id = _uuid()
update_data = {'policy_target': self.get_update_policy_target_attrs()}
expected_value = {'tenant_id': _uuid(), 'id': policy_target_id}
self.instance.update_policy_target.return_value = expected_value
res = self.api.put(_get_path(POLICY_TARGETS_URI, id=policy_target_id,
fmt=self.fmt),
self.serialize(update_data))
self.instance.update_policy_target.assert_called_once_with(
mock.ANY, policy_target_id, policy_target=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('policy_target', res)
self.assertEqual(expected_value, res['policy_target'])
def test_delete_policy_target(self):
self._test_entity_delete('policy_target')
    def _test_create_policy_target_group(self, data, expected_value,
                                         default_data=None):
        # Helper: POST a policy_target_group and verify plugin call, status
        # and response body.
        if not default_data:
            default_data = data
        self.instance.create_policy_target_group.return_value = expected_value
        res = self.api.post(_get_path(POLICY_TARGET_GROUPS_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_policy_target_group.assert_called_once_with(
            mock.ANY, policy_target_group=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_target_group', res)
        self.assertEqual(expected_value, res['policy_target_group'])

    def test_create_policy_target_group_with_defaults(self):
        policy_target_group_id = _uuid()
        data = {'policy_target_group': {'tenant_id': _uuid()}}
        default_attrs = self.get_create_policy_target_group_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['policy_target_group'], so the POSTed body already includes
        # the defaults.
        default_data = copy.copy(data)
        default_data['policy_target_group'].update(default_attrs)
        expected_value = copy.deepcopy(default_data['policy_target_group'])
        expected_value['id'] = policy_target_group_id
        self._test_create_policy_target_group(data, expected_value,
                                              default_data)

    def test_create_policy_target_group(self):
        policy_target_group_id = _uuid()
        data = {'policy_target_group':
                self.get_create_policy_target_group_attrs()}
        expected_value = copy.deepcopy(data['policy_target_group'])
        expected_value['id'] = policy_target_group_id
        self._test_create_policy_target_group(data, expected_value)

    def test_list_policy_target_groups(self):
        policy_target_group_id = _uuid()
        expected_value = [{'tenant_id': _uuid(), 'id': policy_target_group_id}]
        self.instance.get_policy_target_groups.return_value = expected_value
        res = self.api.get(_get_path(POLICY_TARGET_GROUPS_URI, fmt=self.fmt))
        self.instance.get_policy_target_groups.assert_called_once_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_target_groups', res)
        self.assertEqual(expected_value, res['policy_target_groups'])

    def test_get_policy_target_group(self):
        policy_target_group_id = _uuid()
        expected_value = {'tenant_id': _uuid(), 'id': policy_target_group_id}
        self.instance.get_policy_target_group.return_value = expected_value
        res = self.api.get(_get_path(POLICY_TARGET_GROUPS_URI,
                                     id=policy_target_group_id,
                                     fmt=self.fmt))
        self.instance.get_policy_target_group.assert_called_once_with(
            mock.ANY, policy_target_group_id, fields=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_target_group', res)
        self.assertEqual(expected_value, res['policy_target_group'])

    def test_update_policy_target_group(self):
        policy_target_group_id = _uuid()
        update_data = {'policy_target_group':
                       self.get_update_policy_target_group_attrs()}
        expected_value = {'tenant_id': _uuid(), 'id': policy_target_group_id}
        self.instance.update_policy_target_group.return_value = expected_value
        res = self.api.put(_get_path(POLICY_TARGET_GROUPS_URI,
                                     id=policy_target_group_id, fmt=self.fmt),
                           self.serialize(update_data))
        self.instance.update_policy_target_group.assert_called_once_with(
            mock.ANY, policy_target_group_id, policy_target_group=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_target_group', res)
        self.assertEqual(expected_value, res['policy_target_group'])

    def test_delete_policy_target_group(self):
        self._test_entity_delete('policy_target_group')
    def _test_create_l2_policy(self, data, expected_value, default_data=None):
        # Helper: POST an l2_policy and verify plugin call, status and body.
        if not default_data:
            default_data = data
        self.instance.create_l2_policy.return_value = expected_value
        res = self.api.post(_get_path(L2_POLICIES_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_l2_policy.assert_called_once_with(
            mock.ANY, l2_policy=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l2_policy', res)
        self.assertEqual(expected_value, res['l2_policy'])

    def test_create_l2_policy_with_defaults(self):
        l2_policy_id = _uuid()
        data = {'l2_policy': {'tenant_id': _uuid(), 'l3_policy_id': _uuid()}}
        default_attrs = self.get_create_l2_policy_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['l2_policy'], so the POSTed body already includes defaults.
        default_data = copy.copy(data)
        default_data['l2_policy'].update(default_attrs)
        expected_value = dict(default_data['l2_policy'])
        expected_value['id'] = l2_policy_id
        self._test_create_l2_policy(data, expected_value, default_data)

    def test_create_l2_policy(self):
        l2_policy_id = _uuid()
        data = {'l2_policy': self.get_create_l2_policy_attrs()}
        expected_value = dict(data['l2_policy'])
        expected_value['id'] = l2_policy_id
        self._test_create_l2_policy(data, expected_value)

    def test_list_l2_policies(self):
        l2_policy_id = _uuid()
        expected_value = [{'tenant_id': _uuid(), 'id': l2_policy_id}]
        self.instance.get_l2_policies.return_value = expected_value
        res = self.api.get(_get_path(L2_POLICIES_URI, fmt=self.fmt))
        self.instance.get_l2_policies.assert_called_once_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l2_policies', res)
        self.assertEqual(expected_value, res['l2_policies'])

    def test_get_l2_policy(self):
        l2_policy_id = _uuid()
        expected_value = {'tenant_id': _uuid(), 'id': l2_policy_id}
        self.instance.get_l2_policy.return_value = expected_value
        res = self.api.get(_get_path(L2_POLICIES_URI, id=l2_policy_id,
                                     fmt=self.fmt))
        self.instance.get_l2_policy.assert_called_once_with(
            mock.ANY, l2_policy_id, fields=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l2_policy', res)
        self.assertEqual(expected_value, res['l2_policy'])

    def test_update_l2_policy(self):
        l2_policy_id = _uuid()
        update_data = {'l2_policy': self.get_update_l2_policy_attrs()}
        expected_value = {'tenant_id': _uuid(), 'id': l2_policy_id}
        self.instance.update_l2_policy.return_value = expected_value
        res = self.api.put(_get_path(L2_POLICIES_URI, id=l2_policy_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        self.instance.update_l2_policy.assert_called_once_with(
            mock.ANY, l2_policy_id, l2_policy=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l2_policy', res)
        self.assertEqual(expected_value, res['l2_policy'])

    def test_delete_l2_policy(self):
        self._test_entity_delete('l2_policy')
    def _test_create_l3_policy(self, data, expected_value, default_data=None):
        # Helper: POST an l3_policy and verify plugin call, status and body.
        if not default_data:
            default_data = data
        self.instance.create_l3_policy.return_value = expected_value
        res = self.api.post(_get_path(L3_POLICIES_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_l3_policy.assert_called_once_with(
            mock.ANY, l3_policy=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l3_policy', res)
        # NOTE(review): argument order is (actual, expected) here, reversed
        # relative to the other helpers in this class; harmless but
        # inconsistent.
        self.assertEqual(res['l3_policy'], expected_value)

    def test_create_l3_policy_with_defaults(self):
        l3_policy_id = _uuid()
        data = {'l3_policy': {'tenant_id': _uuid()}}
        default_attrs = self.get_create_l3_policy_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['l3_policy'], so the POSTed body already includes defaults.
        default_data = copy.copy(data)
        default_data['l3_policy'].update(default_attrs)
        expected_value = dict(default_data['l3_policy'])
        expected_value['id'] = l3_policy_id
        self._test_create_l3_policy(data, expected_value, default_data)

    def test_create_l3_policy(self):
        l3_policy_id = _uuid()
        data = {'l3_policy': self.get_create_l3_policy_attrs()}
        expected_value = dict(data['l3_policy'])
        expected_value.update({'id': l3_policy_id})
        self._test_create_l3_policy(data, expected_value)

    def test_list_l3_policies(self):
        l3_policy_id = _uuid()
        expected_value = [{'tenant_id': _uuid(), 'id': l3_policy_id}]
        self.instance.get_l3_policies.return_value = expected_value
        res = self.api.get(_get_path(L3_POLICIES_URI, fmt=self.fmt))
        self.instance.get_l3_policies.assert_called_once_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l3_policies', res)
        self.assertEqual(expected_value, res['l3_policies'])

    def test_get_l3_policy(self):
        l3_policy_id = _uuid()
        expected_value = {'tenant_id': _uuid(), 'id': l3_policy_id}
        self.instance.get_l3_policy.return_value = expected_value
        res = self.api.get(_get_path(L3_POLICIES_URI, id=l3_policy_id,
                                     fmt=self.fmt))
        self.instance.get_l3_policy.assert_called_once_with(
            mock.ANY, l3_policy_id, fields=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l3_policy', res)
        self.assertEqual(expected_value, res['l3_policy'])

    def test_update_l3_policy(self):
        l3_policy_id = _uuid()
        update_data = {'l3_policy': self.get_update_l3_policy_attrs()}
        expected_value = {'tenant_id': _uuid(), 'id': l3_policy_id}
        self.instance.update_l3_policy.return_value = expected_value
        res = self.api.put(_get_path(L3_POLICIES_URI, id=l3_policy_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        self.instance.update_l3_policy.assert_called_once_with(
            mock.ANY, l3_policy_id, l3_policy=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('l3_policy', res)
        self.assertEqual(expected_value, res['l3_policy'])

    def test_delete_l3_policy(self):
        self._test_entity_delete('l3_policy')
    def _test_create_policy_action(self, data, expected_value,
                                   default_data=None):
        # Helper: POST a policy_action and verify plugin call, status, body.
        if not default_data:
            default_data = data
        self.instance.create_policy_action.return_value = expected_value
        res = self.api.post(_get_path(POLICY_ACTIONS_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_policy_action.assert_called_once_with(
            mock.ANY, policy_action=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_action', res)
        self.assertEqual(expected_value, res['policy_action'])

    def test_create_policy_action_with_defaults(self):
        policy_action_id = _uuid()
        data = {'policy_action': {'tenant_id': _uuid()}}
        default_attrs = self.get_create_policy_action_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['policy_action'], so the POSTed body already includes defaults.
        default_data = copy.copy(data)
        default_data['policy_action'].update(default_attrs)
        expected_value = dict(default_data['policy_action'])
        expected_value['id'] = policy_action_id
        self._test_create_policy_action(data, expected_value, default_data)

    def test_create_policy_action(self):
        policy_action_id = _uuid()
        data = {'policy_action': self.get_create_policy_action_attrs()}
        expected_value = dict(data['policy_action'])
        expected_value['id'] = policy_action_id
        self._test_create_policy_action(data, expected_value)

    def test_list_policy_actions(self):
        policy_action_id = _uuid()
        expected_value = [{'tenant_id': _uuid(),
                           'id': policy_action_id}]
        # NOTE(review): uses a local `instance` alias instead of
        # self.instance, and does not deserialize/verify the response body,
        # unlike the other list tests in this class.
        instance = self.plugin.return_value
        instance.get_policy_actions.return_value = expected_value
        res = self.api.get(_get_path(POLICY_ACTIONS_URI, fmt=self.fmt))
        instance.get_policy_actions.assert_called_once_with(mock.ANY,
                                                            fields=mock.ANY,
                                                            filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_get_policy_action(self):
        policy_action_id = _uuid()
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_action_id}
        instance = self.plugin.return_value
        instance.get_policy_action.return_value = expected_value
        res = self.api.get(_get_path(POLICY_ACTIONS_URI,
                                     id=policy_action_id, fmt=self.fmt))
        instance.get_policy_action.assert_called_once_with(mock.ANY,
                                                           policy_action_id,
                                                           fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_action', res)
        self.assertEqual(expected_value, res['policy_action'])

    def test_update_policy_action(self):
        policy_action_id = _uuid()
        update_data = {'policy_action':
                       self.get_update_policy_action_attrs()}
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_action_id}
        instance = self.plugin.return_value
        instance.update_policy_action.return_value = expected_value
        res = self.api.put(_get_path(POLICY_ACTIONS_URI,
                                     id=policy_action_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_policy_action.assert_called_once_with(
            mock.ANY, policy_action_id, policy_action=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_action', res)
        self.assertEqual(expected_value, res['policy_action'])

    def test_delete_policy_action(self):
        self._test_entity_delete('policy_action')
    def _test_create_policy_classifier(self, data, expected_value,
                                       default_data=None):
        # Helper: POST a policy_classifier and verify plugin call, status,
        # body.
        if not default_data:
            default_data = data
        self.instance.create_policy_classifier.return_value = expected_value
        res = self.api.post(_get_path(POLICY_CLASSIFIERS_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_policy_classifier.assert_called_once_with(
            mock.ANY, policy_classifier=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_classifier', res)
        self.assertEqual(expected_value, res['policy_classifier'])

    def test_create_policy_classifier_with_defaults(self):
        policy_classifier_id = _uuid()
        data = {'policy_classifier': {'tenant_id': _uuid()}}
        default_attrs = self.get_create_policy_classifier_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['policy_classifier'], so the POSTed body already includes
        # defaults.
        default_data = copy.copy(data)
        default_data['policy_classifier'].update(default_attrs)
        expected_value = dict(default_data['policy_classifier'])
        expected_value['id'] = policy_classifier_id
        self._test_create_policy_classifier(data, expected_value, default_data)

    def test_create_policy_classifier(self):
        policy_classifier_id = _uuid()
        data = {'policy_classifier':
                self.get_create_policy_classifier_attrs()}
        expected_value = dict(data['policy_classifier'])
        expected_value['id'] = policy_classifier_id
        self._test_create_policy_classifier(data, expected_value)

    def test_list_policy_classifiers(self):
        policy_classifier_id = _uuid()
        expected_value = [{'tenant_id': _uuid(),
                           'id': policy_classifier_id}]
        # NOTE(review): does not deserialize/verify the response body, unlike
        # most other list tests in this class.
        instance = self.plugin.return_value
        instance.get_policy_classifiers.return_value = expected_value
        res = self.api.get(_get_path(POLICY_CLASSIFIERS_URI, fmt=self.fmt))
        instance.get_policy_classifiers.assert_called_once_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_get_policy_classifier(self):
        policy_classifier_id = _uuid()
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_classifier_id}
        instance = self.plugin.return_value
        instance.get_policy_classifier.return_value = expected_value
        res = self.api.get(_get_path(POLICY_CLASSIFIERS_URI,
                                     id=policy_classifier_id, fmt=self.fmt))
        instance.get_policy_classifier.assert_called_once_with(
            mock.ANY, policy_classifier_id, fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_classifier', res)
        self.assertEqual(expected_value, res['policy_classifier'])

    def test_update_policy_classifier(self):
        policy_classifier_id = _uuid()
        update_data = {'policy_classifier':
                       self.get_update_policy_classifier_attrs()}
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_classifier_id}
        instance = self.plugin.return_value
        instance.update_policy_classifier.return_value = expected_value
        res = self.api.put(_get_path(POLICY_CLASSIFIERS_URI,
                                     id=policy_classifier_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_policy_classifier.assert_called_once_with(
            mock.ANY, policy_classifier_id, policy_classifier=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_classifier', res)
        self.assertEqual(expected_value, res['policy_classifier'])
def test_delete_policy_classifier(self):
self._test_entity_delete('policy_action')
    def _test_create_policy_rule(self, data, expected_value,
                                 default_data=None):
        # Helper: POST a policy_rule and verify plugin call, status, body.
        if not default_data:
            default_data = data
        self.instance.create_policy_rule.return_value = expected_value
        res = self.api.post(_get_path(POLICY_RULES_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_policy_rule.assert_called_once_with(
            mock.ANY, policy_rule=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('policy_rule', res)
        self.assertEqual(expected_value, res['policy_rule'])

    def test_create_policy_rule_with_defaults(self):
        policy_rule_id = _uuid()
        data = {'policy_rule': {'tenant_id': _uuid(), 'policy_classifier_id':
                                _uuid()}}
        default_attrs = self.get_create_policy_rule_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['policy_rule'], so the POSTed body already includes defaults.
        default_data = copy.copy(data)
        default_data['policy_rule'].update(default_attrs)
        expected_value = dict(default_data['policy_rule'])
        expected_value['id'] = policy_rule_id
        self._test_create_policy_rule(data, expected_value, default_data)

    def test_create_policy_rule(self):
        policy_rule_id = _uuid()
        data = {'policy_rule':
                self.get_create_policy_rule_attrs()}
        expected_value = dict(data['policy_rule'])
        expected_value['id'] = policy_rule_id
        self._test_create_policy_rule(data, expected_value)

    def test_list_policy_rules(self):
        policy_rule_id = _uuid()
        expected_value = [{'tenant_id': _uuid(),
                           'id': policy_rule_id}]
        # NOTE(review): does not deserialize/verify the response body, unlike
        # most other list tests in this class.
        instance = self.plugin.return_value
        instance.get_policy_rules.return_value = expected_value
        res = self.api.get(_get_path(POLICY_RULES_URI, fmt=self.fmt))
        instance.get_policy_rules.assert_called_once_with(mock.ANY,
                                                          fields=mock.ANY,
                                                          filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_get_policy_rule(self):
        policy_rule_id = _uuid()
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_rule_id}
        instance = self.plugin.return_value
        instance.get_policy_rule.return_value = expected_value
        res = self.api.get(_get_path(POLICY_RULES_URI,
                                     id=policy_rule_id, fmt=self.fmt))
        instance.get_policy_rule.assert_called_once_with(
            mock.ANY, policy_rule_id, fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_rule', res)
        self.assertEqual(expected_value, res['policy_rule'])

    def test_update_policy_rule(self):
        policy_rule_id = _uuid()
        update_data = {'policy_rule':
                       self.get_update_policy_rule_attrs()}
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_rule_id}
        instance = self.plugin.return_value
        instance.update_policy_rule.return_value = expected_value
        res = self.api.put(_get_path(POLICY_RULES_URI,
                                     id=policy_rule_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_policy_rule.assert_called_once_with(
            mock.ANY, policy_rule_id, policy_rule=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_rule', res)
        self.assertEqual(expected_value, res['policy_rule'])
def test_delete_policy_rule(self):
self._test_entity_delete('policy_action')
    def _test_create_policy_rule_set(self, data, expected_value,
                                     default_data=None):
        # Helper: POST a policy_rule_set and verify plugin call, status, body.
        if not default_data:
            default_data = data
        self.instance.create_policy_rule_set.return_value = expected_value
        res = self.api.post(_get_path(POLICY_RULE_SETS_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        self.instance.create_policy_rule_set.assert_called_once_with(
            mock.ANY, policy_rule_set=default_data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('policy_rule_set', res)
        self.assertEqual(expected_value, res['policy_rule_set'])

    def test_create_policy_rule_set_with_defaults(self):
        policy_rule_set_id = _uuid()
        data = {'policy_rule_set': {'tenant_id': _uuid()}}
        default_attrs = self.get_create_policy_rule_set_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['policy_rule_set'], so the POSTed body already includes
        # defaults.
        default_data = copy.copy(data)
        default_data['policy_rule_set'].update(default_attrs)
        expected_value = dict(default_data['policy_rule_set'])
        expected_value['id'] = policy_rule_set_id
        self._test_create_policy_rule_set(data, expected_value, default_data)

    def test_create_policy_rule_set(self):
        policy_rule_set_id = _uuid()
        data = {'policy_rule_set':
                self.get_create_policy_rule_set_attrs()}
        expected_value = dict(data['policy_rule_set'])
        expected_value['id'] = policy_rule_set_id
        self._test_create_policy_rule_set(data, expected_value)

    def test_list_policy_rule_sets(self):
        policy_rule_set_id = _uuid()
        expected_value = [{'tenant_id': _uuid(),
                           'id': policy_rule_set_id}]
        # NOTE(review): does not deserialize/verify the response body, unlike
        # most other list tests in this class.
        instance = self.plugin.return_value
        instance.get_policy_rule_sets.return_value = expected_value
        res = self.api.get(_get_path(POLICY_RULE_SETS_URI, fmt=self.fmt))
        instance.get_policy_rule_sets.assert_called_once_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_get_policy_rule_set(self):
        policy_rule_set_id = _uuid()
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_rule_set_id}
        instance = self.plugin.return_value
        instance.get_policy_rule_set.return_value = expected_value
        res = self.api.get(_get_path(POLICY_RULE_SETS_URI,
                                     id=policy_rule_set_id, fmt=self.fmt))
        instance.get_policy_rule_set.assert_called_once_with(
            mock.ANY, policy_rule_set_id, fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_rule_set', res)
        self.assertEqual(expected_value, res['policy_rule_set'])

    def test_update_policy_rule_set(self):
        policy_rule_set_id = _uuid()
        update_data = {'policy_rule_set':
                       self.get_update_policy_rule_set_attrs()}
        expected_value = {'tenant_id': _uuid(),
                          'id': policy_rule_set_id}
        instance = self.plugin.return_value
        instance.update_policy_rule_set.return_value = expected_value
        res = self.api.put(_get_path(POLICY_RULE_SETS_URI,
                                     id=policy_rule_set_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_policy_rule_set.assert_called_once_with(
            mock.ANY, policy_rule_set_id, policy_rule_set=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('policy_rule_set', res)
        self.assertEqual(expected_value, res['policy_rule_set'])

    def test_delete_policy_rule_set(self):
        self._test_entity_delete('policy_rule_set')
    def _test_create_network_service_policy(
            self, data, expected_value, default_data=None):
        # Helper: POST a network_service_policy and verify plugin call,
        # status, body.
        if not default_data:
            default_data = data
        create_svc_policy = self.instance.create_network_service_policy
        create_svc_policy.return_value = expected_value
        res = self.api.post(_get_path(NET_SVC_POLICIES_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        create_svc_policy.assert_called_once_with(
            mock.ANY, network_service_policy=default_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('network_service_policy', res)
        self.assertEqual(expected_value, res['network_service_policy'])

    def test_create_network_service_policy_with_defaults(self):
        network_service_policy_id = _uuid()
        data = {'network_service_policy': {'tenant_id': _uuid()}}
        default_attrs = self.get_create_network_service_policy_default_attrs()
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data['network_service_policy'], so the POSTed body already includes
        # defaults.
        default_data = copy.copy(data)
        default_data['network_service_policy'].update(default_attrs)
        expected_value = dict(default_data['network_service_policy'])
        expected_value['id'] = network_service_policy_id
        self._test_create_network_service_policy(
            data, expected_value, default_data)

    def test_create_network_service_policy(self):
        network_service_policy_id = _uuid()
        data = {'network_service_policy':
                self.get_create_network_service_policy_attrs()}
        expected_value = copy.deepcopy(data['network_service_policy'])
        expected_value['id'] = network_service_policy_id
        self._test_create_network_service_policy(data, expected_value)

    def test_list_network_service_policies(self):
        network_service_policy_id = _uuid()
        expected_value = [{'tenant_id': _uuid(),
                           'id': network_service_policy_id}]
        get_svc_policies = self.instance.get_network_service_policies
        get_svc_policies.return_value = expected_value
        res = self.api.get(_get_path(NET_SVC_POLICIES_URI, fmt=self.fmt))
        get_svc_policies.assert_called_once_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('network_service_policies', res)
        self.assertEqual(expected_value, res['network_service_policies'])

    def test_get_network_service_policy(self):
        network_service_policy_id = _uuid()
        expected_value = {'tenant_id': _uuid(),
                          'id': network_service_policy_id}
        self.instance.get_network_service_policy.return_value = expected_value
        res = self.api.get(_get_path(NET_SVC_POLICIES_URI,
                                     id=network_service_policy_id,
                                     fmt=self.fmt))
        self.instance.get_network_service_policy.assert_called_once_with(
            mock.ANY, network_service_policy_id, fields=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('network_service_policy', res)
        self.assertEqual(expected_value, res['network_service_policy'])

    def test_update_network_service_policy(self):
        network_service_policy_id = _uuid()
        update_data = {'network_service_policy':
                       self.get_update_network_service_policy_attrs()}
        expected_value = {'tenant_id': _uuid(),
                          'id': network_service_policy_id}
        update_svc_policy = self.instance.update_network_service_policy
        update_svc_policy.return_value = expected_value
        res = self.api.put(_get_path(NET_SVC_POLICIES_URI,
                                     id=network_service_policy_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        update_svc_policy.assert_called_once_with(
            mock.ANY, network_service_policy_id,
            network_service_policy=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('network_service_policy', res)
        self.assertEqual(expected_value, res['network_service_policy'])

    def test_delete_network_service_policy(self):
        self._test_entity_delete('network_service_policy')
    def _test_entity_create(self, entity, data, expected_value,
                            default_data=None, non_specified=None):
        """Generic create test for entities listed in RES_TO_URI."""
        default_data = default_data or data
        create_method = getattr(self.instance, 'create_%s' % entity)
        create_method.return_value = expected_value
        res = self.api.post(_get_path(RES_TO_URI[entity], fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        # Fold in attributes that the caller did not POST but expects the
        # framework to add to the plugin call.
        default_data[entity].update(non_specified or {})
        kwargs = {entity: default_data}
        create_method.assert_called_once_with(
            mock.ANY, **kwargs)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn(entity, res)
        self.assertEqual(expected_value, res[entity])

    def _test_create_entity_with_defaults(self, entity, default_attrs,
                                          non_specified=None):
        """Create with only tenant_id set; expect defaults to be applied."""
        entity_id = _uuid()
        data = {entity: {'tenant_id': _uuid()}}
        # NOTE(review): copy.copy is shallow; the update below also mutates
        # data[entity], so the POSTed body already includes the defaults.
        default_data = copy.copy(data)
        default_data[entity].update(default_attrs)
        expected_value = copy.deepcopy(default_data[entity])
        expected_value['id'] = entity_id
        self._test_entity_create(entity, data, expected_value, default_data,
                                 non_specified)

    def _test_create_entity_with_attrs(self, entity, attrs):
        """Create with a fully-specified attribute set."""
        entity_id = _uuid()
        data = {entity: attrs}
        expected_value = copy.deepcopy(data[entity])
        expected_value['id'] = entity_id
        self._test_entity_create(entity, data, expected_value)

    def _test_get_entity(self, entity, list=False):
        """Generic GET (single or list) test for RES_TO_URI entities.

        NOTE(review): the ``list`` parameter shadows the builtin; it is part
        of the keyword interface used by callers, so it is left unchanged.
        """
        entity_id = _uuid()
        value = {'tenant_id': _uuid(), 'id': entity_id}
        expected_value = value if not list else [value]
        resource = entity if not list else self._plural_mappings.get(
            entity, entity + 's')
        list_method = getattr(self.instance, 'get_%s' % resource)
        list_method.return_value = expected_value
        kwargs = {'fmt': self.fmt}
        if not list:
            kwargs['id'] = entity_id
        res = self.api.get(_get_path(RES_TO_URI[entity], **kwargs))
        if list:
            list_method.assert_called_once_with(mock.ANY, fields=mock.ANY,
                                                filters=mock.ANY)
        else:
            list_method.assert_called_once_with(mock.ANY, entity_id,
                                                fields=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn(resource, res)
        self.assertEqual(expected_value, res[resource])

    def _test_update_entity(self, entity, attrs):
        """Generic PUT test for RES_TO_URI entities."""
        entity_id = _uuid()
        update_data = {entity: attrs}
        expected_value = {'tenant_id': _uuid(), 'id': entity_id}
        update_method = getattr(self.instance, 'update_%s' % entity)
        update_method.return_value = expected_value
        res = self.api.put(_get_path(RES_TO_URI[entity], id=entity_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        kwargs = {entity: update_data}
        update_method.assert_called_once_with(mock.ANY, entity_id, **kwargs)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn(entity, res)
        self.assertEqual(expected_value, res[entity])
def test_create_external_policy_with_defaults(self):
default_attrs = self.get_create_external_policy_default_attrs()
self._test_create_entity_with_defaults('external_policy',
default_attrs)
def test_create_external_policy(self):
attrs = self.get_create_external_policy_attrs()
self._test_create_entity_with_attrs('external_policy', attrs)
def test_list_external_policies(self):
self._test_get_entity('external_policy', list=True)
def test_get_external_policy(self):
self._test_get_entity('external_policy')
def test_update_external_policy(self):
update_data = self.get_update_external_policy_attrs()
self._test_update_entity('external_policy', update_data)
def test_delete_external_policy_(self):
self._test_entity_delete('external_policy')
def test_create_external_segment_with_defaults(self):
default_attrs = (
self.get_create_external_segment_default_attrs())
self._test_create_entity_with_defaults('external_segment',
default_attrs)
def test_create_external_segment(self):
attrs = self.get_create_external_segment_attrs()
self._test_create_entity_with_attrs('external_segment', attrs)
def test_list_external_segments(self):
self._test_get_entity('external_segment', list=True)
def test_get_external_segment(self):
self._test_get_entity('external_segment')
def test_update_external_segment(self):
update_data = self.get_update_external_segment_attrs()
self._test_update_entity('external_segment', update_data)
    def test_delete_external_segment_(self):
        """Delete request is dispatched to the plugin's delete method."""
        self._test_entity_delete('external_segment')
def test_create_nat_pool_with_defaults(self):
default_attrs = (
self.get_create_nat_pool_default_attrs())
self._test_create_entity_with_defaults('nat_pool',
default_attrs)
def test_create_nat_pool(self):
attrs = self.get_create_nat_pool_attrs()
self._test_create_entity_with_attrs('nat_pool', attrs)
    def test_list_nat_pools(self):
        """List request is dispatched to the plugin's list method."""
        self._test_get_entity('nat_pool', list=True)
    def test_get_nat_pool(self):
        """Show request is dispatched to the plugin's get method."""
        self._test_get_entity('nat_pool')
def test_update_nat_pool(self):
update_data = self.get_update_nat_pool_attrs()
self._test_update_entity('nat_pool', update_data)
    def test_delete_nat_pool_(self):
        """Delete request is dispatched to the plugin's delete method."""
        self._test_entity_delete('nat_pool')
class TestGroupPolicyAttributeConverters(base.BaseTestCase):
    """Unit tests for the group-policy attribute converter helpers."""

    def test_convert_action_to_case_insensitive(self):
        """Action strings are lower-cased; empty input passes through."""
        for raw, expected in (('ALLOW', 'allow'), ('In', 'in'),
                              ('bi', 'bi'), ('', '')):
            self.assertEqual(
                gp.convert_action_to_case_insensitive(raw), expected)

    def test_convert_port_to_string(self):
        """Numeric ports become strings; strings pass through unchanged."""
        for raw, expected in ((100, '100'), ('200', '200'), ('', '')):
            self.assertEqual(gp.convert_port_to_string(raw), expected)

    def test_convert_protocol_check_valid_protocols(self):
        """Known protocol names convert case-insensitively to the constants."""
        for raw, expected in (('tcp', n_consts.PROTO_NAME_TCP),
                              ('TCP', n_consts.PROTO_NAME_TCP),
                              ('udp', n_consts.PROTO_NAME_UDP),
                              ('UDP', n_consts.PROTO_NAME_UDP),
                              ('icmp', n_consts.PROTO_NAME_ICMP),
                              ('ICMP', n_consts.PROTO_NAME_ICMP)):
            self.assertEqual(gp.convert_protocol(raw), expected)

    def test_convert_protocol_check_invalid_protocols(self):
        """Unknown protocol names raise GroupPolicyInvalidProtocol."""
        self.assertRaises(gp.GroupPolicyInvalidProtocol,
                          gp.convert_protocol, 'garbage')

    def test_convert_numeric_protocol(self):
        """A numeric protocol string is returned as a string."""
        self.assertIsInstance(gp.convert_protocol('2'), str)

    def test_convert_bad_protocol(self):
        """Junk names and out-of-range numbers raise."""
        for val in ['bad', '256', '-1']:
            self.assertRaises(
                gp.GroupPolicyInvalidProtocol, gp.convert_protocol, val)
class TestGroupPolicyAttributeValidators(base.BaseTestCase):
    """Unit tests for the group-policy attribute validator helpers.

    The validators return None on success and an error-message string on
    failure (the usual Neutron attribute-validator convention).
    """
    def test_validate_port_range(self):
        """Single ports and 'low:high' ranges; valid range 0 < port < 65536."""
        self.assertIsNone(gp._validate_gbp_port_range(None))
        self.assertIsNone(gp._validate_gbp_port_range('10'))
        self.assertIsNone(gp._validate_gbp_port_range(10))
        self.assertEqual(gp._validate_gbp_port_range(-1),
                         "Invalid port '-1', valid range 0 < port < 65536")
        self.assertEqual(gp._validate_gbp_port_range('66000'),
                         "Invalid port '66000', valid range 0 < port < 65536")
        self.assertIsNone(gp._validate_gbp_port_range('10:20'))
        self.assertIsNone(gp._validate_gbp_port_range('1:65535'))
        self.assertEqual(gp._validate_gbp_port_range('0:65535'),
                         "Invalid port '0', valid range 0 < port < 65536")
        self.assertEqual(gp._validate_gbp_port_range('1:65536'),
                         "Invalid port '65536', valid range 0 < port < 65536")
        msg = gp._validate_gbp_port_range('abc:efg')
        self.assertEqual(msg, "Port value 'abc' is not a valid number")
        msg = gp._validate_gbp_port_range('1:efg')
        self.assertEqual(msg, "Port value 'efg' is not a valid number")
        msg = gp._validate_gbp_port_range('-1:10')
        self.assertEqual(msg,
                         "Invalid port '-1', valid range 0 < port < 65536")
        msg = gp._validate_gbp_port_range('66000:10')
        self.assertEqual(msg,
                         "Invalid port '66000', valid range 0 < port < 65536")
        msg = gp._validate_gbp_port_range('10:66000')
        self.assertEqual(msg,
                         "Invalid port '66000', valid range 0 < port < 65536")
        msg = gp._validate_gbp_port_range('1:-10')
        self.assertEqual(msg,
                         "Invalid port '-10', valid range 0 < port < 65536")
        # More than one ':' -- everything after the first is one "port".
        msg = gp._validate_gbp_port_range('1:2:3')
        self.assertEqual(msg, "Port value '2:3' is not a valid number")
        # Ranges must be strictly increasing.
        msg = gp._validate_gbp_port_range('3:2')
        self.assertEqual(
            msg, "Invalid port range: 3:2, valid range 0 < port1 < port2")
        msg = gp._validate_gbp_port_range('2:2')
        self.assertEqual(
            msg, "Invalid port range: 2:2, valid range 0 < port1 < port2")
    def test_validate_network_service_params(self):
        """Well-formed network service parameter lists validate to None."""
        test_params = [{'type': 'ip_single', 'name': 'vip_internal',
                        'value': 'self_subnet'}]
        self.assertIsNone(gp._validate_network_svc_params(test_params))
        test_params = [{'type': 'ip_pool', 'name': 'vip_internal',
                        'value': 'nat_pool'},
                       {'type': 'string', 'name': 'abc', 'value': 'xyz'}]
        self.assertIsNone(gp._validate_network_svc_params(test_params))
        test_params = [{'type': 'ip_single', 'name': 'vip_external',
                        'value': 'nat_pool'},
                       {'type': 'string', 'name': 'abc', 'value': 'xyz'}]
        self.assertIsNone(gp._validate_network_svc_params(test_params))
    def test_validate_network_service_params_not_a_listt(self):
        """Non-list input is rejected."""
        test_params = 'ip'
        msg = gp._validate_network_svc_params(test_params)
        self.assertEqual(msg, "'ip' is not a list")
    def test_validate_network_service_params_element_not_a_dict(self):
        """Each list element must be a dictionary."""
        test_params = ['ip']
        msg = gp._validate_network_svc_params(test_params)
        self.assertEqual(msg, "'ip' is not a dictionary")
    def test_validate_network_service_params_bad_type(self):
        """Unknown 'type' values are rejected."""
        test_params = [{'type': 'ip_', 'name': 'vip', 'value': 'self_subnet'}]
        msg = gp._validate_network_svc_params(test_params)
        self.assertEqual(
            msg, "Network service param type(s) 'ip_' not supported")
    def test_validate_network_service_params_bad_key(self):
        """Keys other than type/name/value are rejected."""
        test_params = [{'type': 'ip_pool', 'n': 'vip', 'value': 'self_subnet'}]
        msg = gp._validate_network_svc_params(test_params)
        self.assertEqual(
            msg, "Unknown key(s) 'n' in network service params")
    def test_validate_network_service_params_bad_value(self):
        """Unsupported 'value' values are rejected."""
        test_params = [{'type': 'ip_pool', 'name': 'vip', 'value': 'subnet'}]
        msg = gp._validate_network_svc_params(test_params)
        self.assertEqual(
            msg, "Network service param value 'subnet' is not supported")
    def test_validate_external_dict(self):
        """Accepts None or a dict of UUID string -> list; rejects the rest."""
        self.assertIsNone(gp._validate_external_dict(None))
        uuid = uuidutils.generate_uuid()
        uuid_2 = uuidutils.generate_uuid()
        correct = [{uuid: []}, {}, {uuid: ['192.168.1.1']},
                   {uuid_2: ['192.168.0.1'], uuid: []}]
        for x in correct:
            self.assertIsNone(gp._validate_external_dict(x))
        incorrect = 'not_a_dict'
        self.assertEqual(gp._validate_external_dict(incorrect),
                         "'%s' is not a dictionary" % incorrect)
        not_a_uuid = 'not_a_uuid'
        incorrect = {'not_a_uuid': []}
        self.assertEqual(gp._validate_external_dict(incorrect),
                         "'%s' is not a valid UUID" % not_a_uuid)
        not_a_list = 'not_a_list'
        incorrect = {uuid: not_a_list}
        self.assertEqual(gp._validate_external_dict(incorrect),
                         "'%s' is not a list" % not_a_list)
|
|
#!/usr/bin/env python2
import argparse
import collections
import os
import pyximport;
import sys
pyximport.install()
from model import BPGraph, CType
import file_ops
import numpy as np
from operator import mul
import itertools
from decimal import Decimal
import random
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
def expected_dcj_distance(genome1, genome2, n=0):
    """Analytic estimate of the expected DCJ distance between two genomes.

    NOTE(review): the *n* parameter is ignored -- it is overwritten with
    the gene count below (a sqrt(g) variant is commented out); confirm
    whether the parameter should be removed.
    """
    a = len(genome1.adjacency_set())
    a2 = len(genome2.adjacency_set())  # NOTE(review): computed but unused
    # Breakpoints: adjacencies of genome1 not shared with genome2.
    BP = a - len(genome1.common_adjacencies(genome2))
    g = len(genome1.gene_set())
    # n = np.math.sqrt(g)
    n = g
    # Clamp BP below n; presumably avoids a degenerate log argument -- confirm.
    if BP == n:
        BP = n-1
    # import ipdb;ipdb.set_trace()
    return np.math.log(1.0 - (BP * (2.0*n - 1)) / (a * (2.0*n - 2))) / np.math.log(1 - (1.0 / (n - 1)) - 1.0 / n)
def probability(n, cycle_dist, st):
    """Ratio of counted scenarios to all (n*(n-1))**st step sequences."""
    scenarios = n_scenarios(cycle_dist, st)
    total_sequences = (n * (n - 1)) ** st
    return Decimal(scenarios) / total_sequences
def cycle_splits(size):
    """Yield (multiplicity, (size_1, size_2)) for every split of a cycle.

    A cycle of length s has s(s-1)/2 splitting operations:
      s of (1, s-1), s of (2, s-2), s of (3, s-3), ...
      and, when s is even, only s/2 of the half/half split (s/2, s/2).
    """
    # Explicit floor division: identical to the original Python 2 integer
    # '/' semantics, and correct when run under Python 3 as well.
    for i in range(1, (size - 1) // 2 + 1):
        yield size, (i, size - i)
    if size % 2 == 0:
        yield size // 2, (size // 2, size // 2)
def memoize(f):
    """Return a wrapper caching f's results keyed by positional arguments."""
    cache = {}
    def wrapper(*args):
        if args not in cache:
            cache[args] = f(*args)
        return cache[args]
    return wrapper
@memoize
def n_scenarios(cycle_dist, steps):
    """Count DCJ sorting scenarios of length *steps* for *cycle_dist*.

    *cycle_dist* is a tuple of cycle sizes of the adjacency/breakpoint
    graph. Python 2 code: relies on the builtin ``reduce``, integer ``/``
    and ``dict.iteritems``.
    """
    n = sum(cycle_dist)
    c = len(cycle_dist)
    dist = n - c  # the DCJ distance for this cycle distribution
    # Fewer steps than the distance: impossible.
    if steps < dist:
        return 0
    # d+1 I know:
    elif steps == dist + 1:
        l = [(cycle - 1) for cycle in cycle_dist if cycle > 1]
        m = reduce(mul, [(l_i + 1) ** (l_i - 1) for l_i in l], 1)
        s = 0
        for l_p in l:
            f = np.math.factorial(l_p)
            s1 = sum([f*((l_p+1)**i)/np.math.factorial(i) for i in range(l_p)])
            s1 *= m
            s1 /= (l_p+1)**(l_p-1)
            s += s1
        p1 = np.math.factorial(dist + 1)*s/2
        p1 /= reduce(mul, [np.math.factorial(l_i) for l_i in l], 1)
        return p1
    # d is simple:
    elif steps == dist:
        l = [(cycle - 1) for cycle in cycle_dist if cycle > 1]
        p1 = np.math.factorial(dist) / reduce(mul, [np.math.factorial(l_i) for l_i in l], 1)
        p2 = reduce(mul, [(l_i + 1) ** (l_i - 1) for l_i in l], 1)
        return p1 * p2
    else: # more steps than distance; recursive:
        # generate all possible cycle distributions from the current one:
        cycle_dist_count = collections.defaultdict(lambda: 0)
        cycle_dist_l = list(cycle_dist)
        # find all cycle splits:
        for idx, size_i in enumerate(cycle_dist_l):
            for qty, (size_1, size_2) in cycle_splits(size_i):
                new_dist = tuple(sorted(cycle_dist_l[:idx] + [size_1, size_2] + cycle_dist_l[(idx + 1):]))
                cycle_dist_count[new_dist] += qty
        # cycle freezes:
        # freezes: C(s_i,2) for each cycle;
        n_freezes = sum([l_i * (l_i - 1) / 2 for l_i in cycle_dist])
        cycle_dist_count[cycle_dist] += n_freezes
        # cycle merges:
        # s_i x s_j of (s_i+s_j) for each pair
        for i, j in itertools.combinations(range(len(cycle_dist)), 2):
            l_i, l_j = cycle_dist[i], cycle_dist[j]
            new_dist = tuple(sorted(cycle_dist_l[:i] + cycle_dist_l[(i + 1):j] + [l_i + l_j] + cycle_dist_l[(j + 1):]))
            cycle_dist_count[new_dist] += 2 * l_i * l_j
        # print cycle_dist_count
        # Recurse: weighted count over all reachable distributions.
        return sum(
            [count_i * n_scenarios(cycle_dist_i, steps - 1) for cycle_dist_i, count_i in cycle_dist_count.iteritems()])
def random_walk(g1, g2, steps, n_walks=100000):
    """Monte Carlo check: fraction of random DCJ walks from g1 that hit g2.

    Python 2 code (print statement). Each walk applies *steps* random
    endpoint swaps to g1's adjacency list and compares the normalized
    result against g2's adjacency set.
    """
    adj_2 = sorted(g2.adjacency_set())
    hit = 0
    for i in range(n_walks):
        adj_1 = [[a, b] for a, b in g1.adjacency_set()]
        for j in range(steps):
            # Pick two distinct adjacencies and exchange one endpoint.
            p, q = random.sample(range(len(adj_1)), 2)
            if p < q:
                adj_1[p][0], adj_1[q][0] = adj_1[q][0], adj_1[p][0]
            else:
                adj_1[p][0], adj_1[q][1] = adj_1[q][1], adj_1[p][0]
        # Normalize (sort endpoints and pairs) to compare as sets.
        adj_1 = sorted([tuple(sorted((a, b))) for a, b in adj_1])
        if adj_1 == adj_2:
            hit += 1
    print "hits: %e" % (float(hit) / n_walks)
# def sort_cycle_with_one_freeze(n):
# return sum([reduce(mul, [x for x in range(k+1, n+1)]) * (n ** k) for k in range(n - 1)])/2
def sort_cycle_with_one_freeze(n):
    """Factorial form of the commented-out count above (same values).

    Floor division (//) reproduces the original Python 2 integer '/'
    semantics while staying correct under Python 3. n! is divisible by k!
    for k < n, so the per-term division is exact.
    """
    import math  # np.math was an alias of stdlib math; removed in modern NumPy
    f = math.factorial(n)
    return sum([f * n ** k // math.factorial(k) for k in range(n - 1)]) // 2
if __name__ == '__main__':
    # Python 2 script: for each genome file, find the step count k that
    # maximizes probability(n, cycle_distribution, k) -- the ML estimate.
    parser = argparse.ArgumentParser(description="Finds the ML estimate for the DCJ distance between 2 genomes.")
    parser.add_argument("-g", type=str, nargs='+', help="Genomes file(s)")
    parser.add_argument("-i", type=int, nargs=2, default=[0, 1], help="Idx of the genomes")
    param = parser.parse_args()
    # n = 100
    # print n_scenarios((n,), n)
    # print sort_cycle_with_one_freeze(n)
    # print sort_cycle_with_one_freeze2(n)
    # print ",".join(map(str, [(i,n_scenarios((i,), i)) for i in range(30, 31)]))
    # print ",".join(map(str, [ (n ,sort_cycle_with_one_freeze(n)) for n in range(30, 31)]))
    # sys.exit()
    n1, n2 = param.i
    for filename in param.g:
        genomes = file_ops.open_genome_file(filename, as_list=True)
        g1 = genomes[int(n1)]
        g2 = genomes[int(n2)]
        # Breakpoint graph between the two selected genomes.
        bp = BPGraph(g1, g2)
        n = len(bp.common_AB)
        c = len(bp.type_dict[CType.CYCLE])
        # Multiset of cycle sizes (cycle length in edges / 2).
        cycle_distribution = tuple(sorted([len(x) / 2 for x in bp.type_dict[CType.CYCLE]]))
        # cycle_distribution = tuple([len(x) / 2 for x in bp.type_dict[CType.CYCLE]])
        d = n - c
        x = []
        y = []
        last_step = 0
        down = 0
        max_p = 0
        max_k = 0
        # DCJ estimate:
        est_DCJ = expected_dcj_distance(g1,g2)
        print "Distance:%d" % d,
        print " Estimate: %.1f" % est_DCJ
        # if there is no common adjacency, estimate goes to infinity, also in the DCJ estimate;
        if all([c > 1 for c in cycle_distribution]):
            # cheat and build a new one, by randomly picking an element and then removing a cycle from it;
            cycle_distribution = list(cycle_distribution)
            random.shuffle(cycle_distribution)
            cycle = cycle_distribution.pop()
            cycle_distribution = tuple(sorted([1, cycle - 1] + cycle_distribution))
        # Scan k = d, d+1, ... and stop after the probability has decreased
        # twice in a row (past the maximum).
        for i in range(3*n):
            prob = probability(n, cycle_distribution, d + i)
            print >> sys.stderr, "Steps:%d P:%e" % (d + i, prob)
            x.append(d + i)
            y.append(prob)
            if prob < last_step:
                down += 1
                if down == 2:
                    break
            else:
                down = 0
            if max_p < prob:
                max_p = prob
                max_k = i + d
            last_step = prob
        # Plot the probability curve next to the input file.
        plt.plot(x, y, 'o-')
        plt.savefig(os.path.join(os.path.dirname(filename), 'ml.pdf'), bbox_inches='tight')
        print "Max:", max_k
        # save results:
        with open(filename+".ml", "w") as f:
            print >> f, "DCJ\tML\tEDCJ"
            print >> f, "%d\t%d\t%.1f" % (d, max_k, est_DCJ)
|
|
import sublime
from collections import defaultdict
from collections import namedtuple
import json
# Option descriptor: scope, legal values, default, value parser, setter
# callback, and whether the option can be negated with a 'no' prefix.
vi_user_setting = namedtuple('vi_editor_setting', 'scope values default parser action negatable')
# State keys stored on the window settings object rather than the view's.
WINDOW_SETTINGS = [
    'last_buffer_search',
    '_cmdline_cd',
]
# Setting scopes: plain Sublime window/view settings vs. Vintageous-owned
# ('vintageous_'-prefixed) view/window settings.
SCOPE_WINDOW = 1
SCOPE_VIEW = 2
SCOPE_VI_VIEW = 3
SCOPE_VI_WINDOW = 4
def volatile(f):
    """Decorator: register *f*'s name as a volatile (non-persisted) setting."""
    setting_name = f.__name__
    VintageSettings._volatile_settings.append(setting_name)
    return f
def destroy(view):
    """Drop any volatile settings cached for *view* (no-op when absent)."""
    VintageSettings._volatile.pop(view.id(), None)
def set_generic_view_setting(view, name, value, opt, globally=False):
    """Store a vi option on the view or in the global Sublime preferences.

    :param view: view whose settings (or whose global prefs) are written
    :param name: option name; vi-scoped options get a 'vintageous_' prefix
    :param value: raw value, run through ``opt.parser`` when one is set
    :param opt: the ``vi_user_setting`` describing the option
    :param globally: when True and the option is vi-scoped, write to
        Preferences.sublime-settings instead of the view

    Refactored: the original duplicated the whole branch for parsed vs.
    unparsed values and ended with an unreachable ``raise ValueError``.
    """
    if opt.scope == SCOPE_VI_VIEW:
        name = 'vintageous_' + name
    # Parse the raw value only when the option defines a parser.
    parsed = value if not opt.parser else opt.parser(value)
    if not globally or (opt.scope not in (SCOPE_VI_VIEW, SCOPE_VI_WINDOW)):
        view.settings().set(name, parsed)
    else:
        prefs = sublime.load_settings('Preferences.sublime-settings')
        prefs.set(name, parsed)
        sublime.save_settings('Preferences.sublime-settings')
def set_minimap(view, name, value, opt, globally=False):
    """Toggle the minimap (*value* is ignored; this only toggles)."""
    # TODO: Ensure the minimap gets hidden when so desired.
    view.window().run_command('toggle_minimap')
def set_sidebar(view, name, value, opt, globally=False):
    """Toggle the side bar (*value* is ignored; this only toggles)."""
    # TODO: Ensure the side bar gets hidden when so desired.
    view.window().run_command('toggle_side_bar')
def opt_bool_parser(value):
    """Parse a vi-style boolean string; returns None for unrecognized input."""
    lowered = value.lower()
    if lowered in ('false', 'true', '0', '1', 'yes', 'no'):
        return lowered in ('true', '1', 'yes')
def opt_rulers_parser(value):
    """Parse a JSON list of ruler columns; raise ValueError otherwise."""
    try:
        converted = json.loads(value)
    except TypeError:
        # Non-string input (e.g. None) is reported as a bad value.
        raise ValueError
    # Malformed JSON text already raises ValueError from json.loads itself.
    if isinstance(converted, list):
        return converted
    raise ValueError
# Registry of supported ':set' options. Keys are option names; each value
# records the scope, accepted raw values, default, parser and the action
# callback that applies the setting.
VI_OPTIONS = {
    'autoindent': vi_user_setting(scope=SCOPE_VI_VIEW, values=(True, False, '0', '1'), default=True, parser=None, action=set_generic_view_setting, negatable=False),
    'hlsearch': vi_user_setting(scope=SCOPE_VI_VIEW, values=(True, False, '0', '1'), default=True, parser=opt_bool_parser, action=set_generic_view_setting, negatable=True),
    'ignorecase': vi_user_setting(scope=SCOPE_VI_VIEW, values=(True, False, '0', '1'), default=False, parser=opt_bool_parser, action=set_generic_view_setting, negatable=True),
    'incsearch': vi_user_setting(scope=SCOPE_VI_VIEW, values=(True, False, '0', '1'), default=True, parser=opt_bool_parser, action=set_generic_view_setting, negatable=True),
    'magic': vi_user_setting(scope=SCOPE_VI_VIEW, values=(True, False, '0', '1'), default=True, parser=opt_bool_parser, action=set_generic_view_setting, negatable=True),
    'visualbell': vi_user_setting(scope=SCOPE_VI_WINDOW, values=(True, False, '0', '1'), default=True, parser=opt_bool_parser, action=set_generic_view_setting, negatable=True),
    'rulers': vi_user_setting(scope=SCOPE_VIEW, values=None, default=[], parser=opt_rulers_parser, action=set_generic_view_setting, negatable=False),
    'showminimap': vi_user_setting(scope=SCOPE_WINDOW, values=(True, False, '0', '1'), default=True, parser=None, action=set_minimap, negatable=True),
    'showsidebar': vi_user_setting(scope=SCOPE_WINDOW, values=(True, False, '0', '1'), default=True, parser=None, action=set_sidebar, negatable=True),
}
# Vim-style abbreviations accepted by ':set'.
VI_OPTION_SHORTCUTS = {
    'ic': 'ignorecase',
    'noic': 'noignorecase'
}
# For completions.
def iter_settings(prefix=''):
    """Yield option names matching *prefix* (completion helper).

    A 'no' prefix yields the negated form of every negatable option.
    """
    if prefix.startswith('no'):
        for name, data in VI_OPTIONS.items():
            if not data.negatable:
                continue
            negated = 'no' + name
            if negated.startswith(prefix):
                yield negated
    else:
        for name in sorted(VI_OPTIONS.keys()):
            if not prefix or name.startswith(prefix):
                yield name
def set_local(view, name, value):
    """Set a vi option for the current view.

    A falsy *value* on a negatable option enables it ('1'); a 'no'-prefixed
    name disables the underlying option ('0'). Unknown names re-raise
    KeyError. (Cleanup: dropped the unused ``as e`` exception binding.)
    """
    try:
        opt = VI_OPTIONS[name]
        if not value and opt.negatable:
            opt.action(view, name, '1', opt)
            return
        opt.action(view, name, value, opt)
    except KeyError:
        if name.startswith('no'):
            try:
                opt = VI_OPTIONS[name[2:]]
                if opt.negatable:
                    opt.action(view, name[2:], '0', opt)
                    return
            except KeyError:
                pass
        raise
def set_global(view, name, value):
    """Set a vi option globally (persisted via Preferences.sublime-settings).

    Same semantics as ``set_local`` but passes ``globally=True`` to the
    option's action. (Cleanup: dropped the unused ``as e`` binding.)
    """
    try:
        opt = VI_OPTIONS[name]
        if not value and opt.negatable:
            opt.action(view, name, '1', opt, globally=True)
            return
        opt.action(view, name, value, opt, globally=True)
    except KeyError:
        if name.startswith('no'):
            try:
                opt = VI_OPTIONS[name[2:]]
                if opt.negatable:
                    opt.action(view, name[2:], '0', opt, globally=True)
                    return
            except KeyError:
                pass
        raise
def get_option(view, name):
    """Return the effective value of vi option *name* for *view*.

    Falls back to the option's default when the stored value is not one
    of the option's legal values.
    """
    # TODO: Should probably return global, local values.
    try:
        option_data = VI_OPTIONS[name]
    except KeyError:
        raise KeyError('not a vi editor option')
    settings = (view.window().settings()
                if option_data.scope == SCOPE_WINDOW
                else view.settings())
    value = settings.get('vintageous_' + name)
    return value if (value in option_data.values) else option_data.default
class SublimeSettings(object):
    """Dict-like access to a view's Sublime settings; usable as a descriptor."""

    def __init__(self, view=None):
        self.view = view

    def __get__(self, instance, owner):
        # When accessed through an owner instance, rebind to its view.
        if instance is None:
            return SublimeSettings()
        return SublimeSettings(instance.v)

    def __getitem__(self, key):
        return self.view.settings().get(key)

    def __setitem__(self, key, value):
        self.view.settings().set(key, value)
class VintageSettings(object):
    """
    Helper class for accessing settings related to Vintage.
    Vintage settings data can be stored in:
    a) the view.Settings object
    b) the window.Settings object
    c) VintageSettings._volatile
    This class knows where to store the settings' data it's passed.
    It is meant to be used as a descriptor.
    """
    # Names registered via the @volatile decorator (stored in _volatile).
    _volatile_settings = []
    # Stores volatile settings indexed by view.id().
    _volatile = defaultdict(dict)
    def __init__(self, view=None):
        self.view = view
        # Ensure the 'vintage' dicts exist on both the view and (when
        # available) the window settings objects.
        if view is not None and not isinstance(self.view.settings().get('vintage'), dict):
            self.view.settings().set('vintage', dict())
        if view is not None and view.window() is not None and not isinstance(self.view.window().settings().get('vintage'), dict):
            self.view.window().settings().set('vintage', dict())
    def __get__(self, instance, owner):
        # This method is called when this class is accessed as a data member.
        if instance is not None:
            return VintageSettings(instance.v)
        return VintageSettings()
    def __getitem__(self, key):
        # Deal with editor options first.
        try:
            return get_option(self.view, key)
        except KeyError:
            pass
        # Deal with state settings.
        # Lookup order: volatile cache, then the view's 'vintage' dict,
        # then (for WINDOW_SETTINGS keys) the window's 'vintage' dict.
        try:
            if key not in WINDOW_SETTINGS:
                try:
                    return self._get_volatile(key)
                except KeyError:
                    value = self._get_vintageous_view_setting(key)
            else:
                value = self._get_vintageous_window_setting(key)
        except (KeyError, AttributeError):
            # Missing key or detached view/window: treat as unset.
            value = None
        return value
    def __setitem__(self, key, value):
        if key not in WINDOW_SETTINGS:
            if key in VintageSettings._volatile_settings:
                self._set_volatile(key, value)
                return
            setts, target = self.view.settings().get('vintage'), self.view
        else:
            setts, target = self.view.window().settings().get('vintage'), self.view.window()
        # Re-store the whole dict so Sublime persists the change.
        setts[key] = value
        target.settings().set('vintage', setts)
    def _get_vintageous_view_setting(self, key):
        """Read *key* from the view's 'vintage' dict."""
        return self.view.settings().get('vintage').get(key)
    def _get_vintageous_window_setting(self, key):
        """Read *key* from the window's 'vintage' dict."""
        return self.view.window().settings().get('vintage').get(key)
    def _get_volatile(self, key):
        """Read *key* from the per-view volatile cache; KeyError when unset."""
        try:
            return VintageSettings._volatile[self.view.id()][key]
        except KeyError:
            raise KeyError('error accessing volatile key: %s' % key)
    def _set_volatile(self, key, value):
        """Write *key* into the per-view volatile cache."""
        try:
            VintageSettings._volatile[self.view.id()][key] = value
        except KeyError:
            raise KeyError('error while setting key "%s" to value "%s"' % (key, value))
class SublimeWindowSettings(object):
    """Dict-like access to a view's *window* settings.

    NOTE(review): __get__ returns a SublimeSettings bound to the window --
    presumably intentional, since SublimeSettings only needs an object
    with a .settings() method; confirm.
    """

    def __init__(self, view=None):
        self.view = view

    def __get__(self, instance, owner):
        if instance is None:
            return SublimeSettings()
        return SublimeSettings(instance.v.window())

    def __getitem__(self, key):
        return self.view.window().settings().get(key)

    def __setitem__(self, key, value):
        self.view.window().settings().set(key, value)
# TODO: Make this a descriptor; avoid instantiation.
class SettingsManager(object):
    """Facade bundling view, Vintage and window settings for one view."""
    view = SublimeSettings()  # plain view settings
    vi = VintageSettings()  # Vintageous options and state
    window = SublimeWindowSettings()  # window-level settings
    def __init__(self, view):
        # The descriptors above read the wrapped view from attribute 'v'.
        self.v = view
|
|
import glob
import importlib
import os
import sys
import typing
import cauldron
from cauldron import environ
from cauldron.environ import Response
from cauldron.runner import source
from cauldron.session.projects import Project
from cauldron.session.projects import ProjectStep
def add_library_path(path: str) -> bool:
    """
    Adds the path to the Python system path if not already added and the path
    exists.

    :param path:
        The path to add to the system paths
    :return:
        Whether or not the path was added. Only returns False if the path was
        not added because it doesn't exist
    """
    usable = bool(path) and os.path.exists(path)
    if not usable:
        return False
    if path not in sys.path:
        sys.path.append(path)
    return True
def remove_library_path(path: str) -> bool:
    """
    Removes the path from the Python system path if it is found in the system
    paths.

    :param path:
        The path to remove from the system paths
    :return:
        Whether or not the path was removed.
    """
    try:
        sys.path.remove(path)
    except ValueError:
        # Path was not present in sys.path.
        return False
    return True
def initialize(project: typing.Union[str, Project]):
"""
:param project:
:return:
"""
if isinstance(project, str):
project = Project(source_directory=project)
# When opening a project, if there are any steps in the project, the
# first step should be selected by default.
has_selected_step = any([s.is_selected for s in project.steps])
if not has_selected_step and project.steps:
project.steps[0].is_selected = True
cauldron.project.load(project)
return project
def close():
    """Unload the open project and restore the starting working directory.

    :return:
        True when a project was open and has been unloaded; False when
        there was nothing to close.
    """
    os.chdir(environ.configs.fetch('directory', os.path.expanduser('~')))
    project = cauldron.project.internal_project
    if not project:
        return False
    # Plain loop instead of a list comprehension: these calls are invoked
    # purely for their side effects.
    for path in project.library_directories:
        remove_library_path(path)
    remove_library_path(project.source_directory)
    cauldron.project.unload()
    return True
def _reload_module(path: str, library_directory: str):
    """
    Reloads the module at the specified path within the package rooted at
    the given library_directory.

    Returns the reloaded module, or None when the module was never imported.
    """
    # Packages are identified by their directory, not the __init__.py file.
    if path.endswith('__init__.py'):
        path = os.path.dirname(path)
    # Strip the library root (plus separator) and a trailing '.py'.
    relative = path[len(library_directory) + 1:]
    if relative.endswith('.py'):
        relative = relative[:-3]
    module = sys.modules.get(relative.replace(os.sep, '.'))
    return importlib.reload(module) if module is not None else None
def _reload_library(directory: str) -> list:
    """
    Carries out a reload action on the specified root library directory that is
    assumed to contain a python local package with potential module changes.
    :param directory:
        Root directory of the library package to reload.
    :return:
        The reloaded module objects (may contain None entries for files
        whose modules were never imported).
    """
    if not add_library_path(directory):
        # If the library wasn't added because it doesn't exist, remove it
        # in case the directory has recently been deleted and then return
        # an empty result
        remove_library_path(directory)
        return []
    glob_path = os.path.join(os.path.realpath(directory), '**', '*.py')
    # Force file paths to be sorted by hierarchy from deepest to shallowest,
    # which ensures that changes are reloaded by children before any dependencies
    # are encountered in parents.
    found_file_paths = sorted(
        glob.glob(glob_path, recursive=True),
        key=lambda p: "{}--{}".format(str(p.count(os.sep)).zfill(4), p),
        reverse=True,
    )
    # Iterate over imports multiple times in case there's a failed import as the
    # result of dependency changes between multiple files. However, after 20
    # iterations give up and fail.
    outputs = []
    last_error = None
    for i in range(20):
        # Iterate over a copy: successful paths are removed from the list.
        for path in [*found_file_paths]:
            try:
                outputs.append(_reload_module(path, directory))
                # Remove the path if the reload operation succeeded.
                found_file_paths.remove(path)
            except Exception as error:
                # Ignore failures and hope they can be resolved in another pass.
                last_error = error
        if not found_file_paths:
            # If there's nothing left to reload, return the reloaded modules.
            return outputs
    # If 20 attempts to reload modules fail, it's time to error out.
    raise RuntimeError(
        "Failed to reload modified modules. This could be due to a circular import."
    ) from last_error
def reload_libraries(library_directories: list = None) -> list:
    """
    Reload the libraries stored in the project's local and shared library
    directories to ensure that any modifications since the previous load/reload
    have been refreshed.

    :param library_directories:
        Optional extra directories to reload. The caller's list is never
        mutated (the original ``directories += ...`` mutated it in place).
    :return:
        The reloaded module objects, with Nones filtered out.
    """
    # Copy so the += below cannot mutate a list passed in by the caller.
    directories = list(library_directories or [])
    project = cauldron.project.get_internal_project()
    if project:
        directories += project.library_directories
    if not directories:
        return []
    return [
        reloaded_module
        for directory in directories
        for reloaded_module in _reload_library(directory)
        if reloaded_module is not None
    ]
def section(
        response: Response,
        project: typing.Union[Project, None],
        starting: ProjectStep = None,
        limit: int = 1,
        force: bool = False,
        skips: typing.List[ProjectStep] = None
) -> list:
    """
    Runs a section of the project: at most *limit* steps beginning at
    *starting*, and returns the steps that were run.
    :param response:
        Response object passed through to each step execution
    :param project:
        Project to run; when None the internal project is used
    :param starting:
        First step eligible to run (defaults to the project's first step)
    :param limit:
        Maximum number of steps to run (clamped to at least 1)
    :param force:
        When True, the first step runs even if it is not dirty
    :param skips:
        Steps that should be skipped while running this section
    :return:
        The list of steps that were run (stops early on a failed step)
    """
    limit = max(1, limit)
    if project is None:
        project = cauldron.project.get_internal_project()
    starting_index = 0
    if starting:
        starting_index = project.steps.index(starting)
    count = 0
    steps_run = []
    for ps in project.steps:
        if count >= limit:
            break
        if ps.index < starting_index:
            continue
        if skips and ps in skips:
            continue
        # Dirtiness only gates the FIRST step to run (count == 0); once one
        # step has run, the following steps run unconditionally.
        if not force and count == 0 and not ps.is_dirty():
            continue
        steps_run.append(ps)
        # A failed step aborts the section; return what ran so far.
        if not source.run_step(response, project, ps, force=force):
            return steps_run
        count += 1
    return steps_run
def complete(
        response: Response,
        project: typing.Union[Project, None],
        starting: ProjectStep = None,
        force: bool = False,
        limit: int = -1
) -> list:
    """
    Runs the entire project, writes the results files, and returns the URL to
    the report file
    :param response:
        Response object passed through to each step execution
    :param project:
        Project to run; when None the internal project is used
    :param starting:
        First step eligible to run (defaults to the project's first step)
    :param force:
        When True, run steps even when they are not dirty
    :param limit:
        Maximum number of steps to run; non-positive means no limit
    :return:
        Local URL to the report path
    """
    if project is None:
        project = cauldron.project.get_internal_project()
    starting_index = 0
    if starting:
        starting_index = project.steps.index(starting)
    count = 0
    steps_run = []
    for ps in project.steps:
        # A positive limit caps how many steps are executed.
        if 0 < limit <= count:
            break
        if ps.index < starting_index:
            continue
        if not force and not ps.is_dirty():
            if limit < 1:
                environ.log(
                    '[{}]: Nothing to update'.format(ps.definition.name)
                )
            continue
        count += 1
        steps_run.append(ps)
        # Steps are forced here regardless of the 'force' flag; a failure
        # or a stop condition ends the run early.
        success = source.run_step(response, project, ps, force=True)
        if not success or project.stop_condition.halt:
            return steps_run
    return steps_run
|
|
import copy
from itertools import groupby, starmap, zip_longest
from .content import Content
from .printing import Printing
from .color import Color
from .multipart import CardClass
from ._util import wrapLines, cheap_repr, for_json
# Separator used between the values of a multipart card's parts.
sep = ' // '

# Maps Card attribute names to the display tag used by Card.showField1.
fields = {
    "name": 'Name:',
    "cost": 'Cost:',
    "cmc": 'CMC:',
    "color_indicator": 'Color:',
    "supertypes": 'Super:',
    "types": 'Types:',
    "subtypes": 'Sub:',
    "type": 'Type:',
    "text": 'Text:',
    "power": 'Power:',
    "toughness": 'Tough:',
    "loyalty": 'Loyalty:',
    "hand": 'Hand:',
    "life": 'Life:',
    "PT": 'P/T:',
    "HandLife": 'H/L:',
    #"printings": 'Printings:',
}

def scalarField(field):
    """Build a read-only property joining *field* across all content parts.

    Returns None when the field is None on every part; otherwise joins the
    per-part values with ``sep``, rendering unset (None) parts as ''.
    """
    def getter(self):
        # Renamed from 'fields' to avoid shadowing the module-level dict.
        values = [getattr(c, field) for c in self.content]
        if all(v is None for v in values):
            return None
        # Bug fix: the old ``str(f) or ''`` rendered None parts as the
        # literal string 'None', because str(None) is truthy.
        return sep.join('' if v is None else str(v) for v in values)
    return property(getter)
class Card:
def __init__(self, cardClass, content, printings=None, rulings=None):
self.cardClass = cardClass
self.content = list(content) # list of envec.content objects
self.printings = list(printings or []) # list of envec.printing objects
self.rulings = list(rulings or [])
# list of dicts with the following fields:
# - date
# - ruling
# - subcard - 0 or 1 (optional)
# TODO: Create a `namedtuple` Rulings class?
@classmethod
def newCard(cls, **attrs):
content = {}
for field in ("name", "cost", "text", "power", "toughness", "loyalty",
"hand", "life", "color_indicator", "supertypes", "types",
"subtypes"):
if field in attrs:
content[field] = attrs[field]
attrs2 = attrs.copy()
attrs2["content"] = [content]
return cls.fromDict(attrs2)
@classmethod
def fromDict(cls, obj):
if isinstance(obj, cls):
return copy.deepcopy(obj)
### TODO: Move all of these transformations to __init__?
cardClass = CardClass[obj.get("cardClass", "normal")]
content = obj["content"]
if isinstance(content, (list, tuple)):
if not content:
raise ValueError("'content' field must be a nonempty list")
content = map(Content.fromDict, content)
else:
content = [Content.fromDict(content)]
printings = map(Printing.fromDict, obj.get("printings", ()))
rulings = obj.get("rulings", [])
return cls(cardClass, content, printings, rulings)
@property
def color(self):
return sum([c.color for c in self.content], Color.COLORLESS)
@property
def colorID(self):
return sum([c.colorID for c in self.content], Color.COLORLESS)
@property
def cmc(self):
if self.cardClass == CardClass.flip:
return self.part1.cmc
else:
return sum(c.cmc for c in self.content)
def devotion(self, to_color):
return tuple(c.devotion(to_color) for c in self.content)
@property
def parts(self):
return len(self.content)
@property
def part1(self):
return self.content[0]
@property
def part2(self):
return self.content[1] if len(self.content) > 1 else None
def isMultipart(self):
return self.parts > 1
def isNormal(self):
return self.cardClass == CardClass.normal
def isSplit(self):
return self.cardClass == CardClass.split
def isFlip(self):
return self.cardClass == CardClass.flip
def isDouble(self):
return self.cardClass == CardClass.double_faced
def sets(self):
return tuple(set(p.set for p in self.printings))
def firstSet(self):
return min(self.sets())
def inSet(self, set_):
return [p for p in self.printings if p.set == set_]
name = scalarField("name")
text = scalarField("text")
power = scalarField("power")
toughness = scalarField("toughness")
loyalty = scalarField("loyalty")
hand = scalarField("hand")
life = scalarField("life")
color_indicator = scalarField("color_indicator")
type = scalarField("type")
PT = scalarField("PT")
HandLife = scalarField("HandLife")
baseText = scalarField("baseText")
@property
def supertypes(self):
return (t for c in self.content for t in c.supertypes)
@property
def types(self):
return (t for c in self.content for t in c.types)
@property
def subtypes(self):
return (t for c in self.content for t in c.subtypes)
@property
def cost(self):
if self.isNormal() or self.isSplit():
return sep.join(c.cost or '' for c in self.content)
else:
return self.part1.cost
def isSupertype(self, type_):
return type_ in self.supertypes
def isType(self, type_):
return type_ in self.types
def isSubtype(self, type_):
return type_ in self.subtypes
def hasType(self, type_):
return self.isType(type_) or self.isSubtype(type_) \
or self.isSupertype(type_)
def isNontraditional(self):
return self.isType('Vanguard') or self.isType('Plane') \
or self.isType('Phenomenon') or self.isType('Scheme')
tagwidth = 8 # may be modified externally
    def showField1(self, field, width=None):
        """Render one labelled field of this card as text.

        @param field: 'sets', 'cardClass', a key of the module-level
            ``fields`` mapping, or a falsy value (renders as '')
        @param width: total line width; defaults to 79 columns
        @return: formatted text ending in a newline, or '' when the field
            is empty/unknown
        """
        if not width:
            width = 79
        # Reserve space for the label column plus the separating blank.
        width = width - Card.tagwidth - 1
        if not field:
            return ''
        elif field == 'sets':
            def showPrnt(prnt):
                # Rarity may expose a shortname; fall back to its str() form.
                try:
                    rare = prnt.rarity.shortname
                except AttributeError:
                    rare = str(prnt.rarity)
                return prnt.set.name + ' (' + rare + ')'
            # groupby collapses consecutive duplicates (printings sort so
            # equal set/rarity entries are adjacent).
            text = ', '.join(k for k,_ in groupby(map(showPrnt,
                sorted(self.printings))))
            lines = wrapLines(text, width, 2)
            (first, rest) = (lines[0], lines[1:]) if lines else ('', [])
            # First line carries the 'Sets:' label; continuation lines are
            # indented to the same column.
            return ''.join(["%-*s %s\n" % (Card.tagwidth, 'Sets:', first)] \
                + [' ' * Card.tagwidth + ' ' + r + "\n" for r in rest])
        elif field == 'cardClass':
            return "%-*s %s\n" % (Card.tagwidth, 'Format:',
                self.cardClass.name.title())
        elif field in fields:
            # Split the width evenly among the card's parts, leaving room
            # for the column separators between faces.
            width = (width - (self.parts - 1) * len(sep)) // self.parts
            def lineify(c):
                # Normalize the face's field value to a string, then wrap
                # and left-pad each wrapped line to the column width.
                val = getattr(c, field)
                if val is None:
                    val = ''
                elif isinstance(val, (list, tuple)):
                    val = ' '.join(map(str, val))
                else:
                    val = str(val)
                return ['%-*s' % (width, s) for s in wrapLines(val, width, 2)]
            def joining(tag, *ls):
                # Join one output row across all faces; only the first row
                # gets the field label (later rows get '' via fillvalue).
                line = sep.join(s or ' ' * width for s in ls).rstrip()
                return "%-*s %s\n" % (Card.tagwidth, tag, line)
            # NOTE(review): Python 2 semantics assumed — map() returns a list
            # here so it can be star-unpacked into zip_longest.
            lines = map(lineify, self.content)
            return ''.join(starmap(joining, zip_longest([fields[field]], *lines, fillvalue='')))
        else:
            return ''
def toText1(self, width=None, showSets=False):
txt = self.showField1('name', width)
txt += self.showField1('type', width)
if self.cost:
txt += self.showField1('cost', width)
if self.color_indicator is not None:
txt += self.showField1('color_indicator', width)
if self.text:
txt += self.showField1('text', width)
if self.power is not None:
txt += self.showField1('PT', width)
if self.loyalty is not None:
txt += self.showField1('loyalty', width)
if self.hand is not None:
txt += self.showField1('HandLife', width)
if self.isMultipart():
txt += self.showField1('cardClass', width)
if showSets:
txt += self.showField1('sets', width)
return txt
    def __repr__(self):
        # Delegate to the module-level cheap_repr helper for a compact,
        # inexpensive representation.
        return cheap_repr(self)
    def for_json(self):
        # Serialize this card's instance attributes via the module-level
        # for_json helper; trim=True presumably drops empty values — see the
        # helper's definition for exact semantics.
        return for_json(vars(self), trim=True)
|
|
"""
This demo demonstrates how to embed a matplotlib (mpl) plot
into a PyQt4 GUI application, including:
* Using the navigation toolbar
* Adding data to the plot
* Dynamically modifying the plot's properties
* Processing mpl events
* Saving the plot to a file from a menu
The main goal is to serve as a basis for developing rich PyQt GUI
applications featuring mpl plots (using the mpl OO API).
Eli Bendersky (eliben@gmail.com)
License: this code is in the public domain
Last modified: 19.01.2009
"""
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class AppForm(QMainWindow):
    """Main window embedding a matplotlib figure in a PyQt4 application.

    Hosts a canvas with navigation toolbar, a text box of integer values,
    a grid checkbox and a bar-width slider, plus File/Help menus for
    saving the plot and quitting.
    """
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('Demo: PyQt with matplotlib')
        self.create_menu()
        self.create_main_frame()
        self.create_status_bar()
        # Seed the text box and render the initial plot.
        self.textbox.setText('1 2 3 4')
        self.on_draw()

    def save_plot(self):
        """Prompt for a file name and save the current figure as a PNG."""
        file_choices = "PNG (*.png)|*.png"
        path = unicode(QFileDialog.getSaveFileName(self,
                        'Save file', '',
                        file_choices))
        if path:
            self.canvas.print_figure(path, dpi=self.dpi)
            self.statusBar().showMessage('Saved to %s' % path, 2000)

    def on_about(self):
        """Show the About dialog describing the demo's features."""
        msg = """ A demo of using PyQt with matplotlib:
         * Use the matplotlib navigation bar
         * Add values to the text box and press Enter (or click "Draw")
         * Show or hide the grid
         * Drag the slider to modify the width of the bars
         * Save the plot to a file using the File menu
         * Click on a bar to receive an informative message
        """
        QMessageBox.about(self, "About the demo", msg.strip())

    def on_pick(self, event):
        """Report the bounding box of a clicked bar.

        The event is a matplotlib.backend_bases.PickEvent; only the
        artist's bbox corner coordinates are used here.
        """
        box_points = event.artist.get_bbox().get_points()
        msg = "You've clicked on a bar with coords:\n %s" % box_points
        QMessageBox.information(self, "Click!", msg)

    def on_draw(self):
        """Redraw the bar chart from the values in the text box."""
        # BUGFIX: renamed local 'str' -> 'text'; the original shadowed the
        # builtin str() inside this method.
        text = unicode(self.textbox.text())
        self.data = map(int, text.split())  # Python 2: map returns a list
        x = range(len(self.data))
        # Clear the axes and redraw the plot anew.
        self.axes.clear()
        self.axes.grid(self.grid_cb.isChecked())
        self.axes.bar(
            left=x,
            height=self.data,
            width=self.slider.value() / 100.0,
            align='center',
            alpha=0.44,
            picker=5)
        self.canvas.draw()

    def create_main_frame(self):
        """Build the central widget: canvas, toolbar and control row."""
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects
        # (5x4 inches, 100 dots-per-inch).
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # add_subplot (rather than add_axes) keeps the subplot configuration
        # tool in the navigation toolbar working.
        self.axes = self.fig.add_subplot(111)
        # Bind the 'pick' event for clicking on one of the bars.
        self.canvas.mpl_connect('pick_event', self.on_pick)
        # Create the navigation toolbar, tied to the canvas.
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        # Other GUI controls.
        self.textbox = QLineEdit()
        self.textbox.setMinimumWidth(200)
        # BUGFIX: removed the stray space in the signal signature
        # ('editingFinished ()'); relying on Qt's signature normalization
        # to repair it was fragile.
        self.connect(self.textbox, SIGNAL('editingFinished()'), self.on_draw)
        self.draw_button = QPushButton("&Draw")
        self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)
        self.grid_cb = QCheckBox("Show &Grid")
        self.grid_cb.setChecked(False)
        self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_draw)
        slider_label = QLabel('Bar width (%):')
        self.slider = QSlider(Qt.Horizontal)
        self.slider.setRange(1, 100)
        self.slider.setValue(20)
        self.slider.setTracking(True)
        self.slider.setTickPosition(QSlider.TicksBothSides)
        self.connect(self.slider, SIGNAL('valueChanged(int)'), self.on_draw)
        # Layout with box sizers.
        hbox = QHBoxLayout()
        for w in [self.textbox, self.draw_button, self.grid_cb,
                  slider_label, self.slider]:
            hbox.addWidget(w)
            hbox.setAlignment(w, Qt.AlignVCenter)
        vbox = QVBoxLayout()
        vbox.addWidget(self.canvas)
        vbox.addWidget(self.mpl_toolbar)
        vbox.addLayout(hbox)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

    def create_status_bar(self):
        """Install a permanent status label in the status bar."""
        self.status_text = QLabel("This is a demo")
        self.statusBar().addWidget(self.status_text, 1)

    def create_menu(self):
        """Create the File (save/quit) and Help (about) menus."""
        self.file_menu = self.menuBar().addMenu("&File")
        load_file_action = self.create_action("&Save plot",
            shortcut="Ctrl+S", slot=self.save_plot,
            tip="Save the plot")
        quit_action = self.create_action("&Quit", slot=self.close,
            shortcut="Ctrl+Q", tip="Close the application")
        self.add_actions(self.file_menu,
            (load_file_action, None, quit_action))
        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.create_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')
        self.add_actions(self.help_menu, (about_action,))

    def add_actions(self, target, actions):
        """Add each action to *target*; a None entry inserts a separator."""
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)

    def create_action(self, text, slot=None, shortcut=None,
                      icon=None, tip=None, checkable=False,
                      signal="triggered()"):
        """Build a QAction, wiring icon/shortcut/tooltip/slot as given.

        @param text: the action's display text
        @param slot: callable connected to *signal* when provided
        @param shortcut: key sequence, e.g. "Ctrl+S"
        @param icon: resource base name for an optional icon
        @param tip: tooltip and status-tip text
        @param checkable: make the action checkable when True
        @param signal: old-style signal signature to connect *slot* to
        @return: the configured QAction
        """
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    application = QApplication(sys.argv)
    window = AppForm()
    window.show()
    application.exec_()
if __name__ == "__main__":
    main()
|
|
"""
@package mi.instrument.noaa.lily.ooicore.test.test_driver
@file marine-integrations/mi/instrument/noaa/ooicore/test/test_driver.py
@author Pete Cable
@brief Test cases for ooicore driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
import time
import ntplib
import unittest
import mi.instrument.noaa.botpt.ooicore.particles as particles
from mi.core.instrument.port_agent_client import PortAgentPacket
from mock import Mock, call
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.exceptions import InstrumentDataException, SampleException, InstrumentProtocolException
from mi.instrument.noaa.botpt.ooicore.driver import Prompt, ScheduledJob
from mi.instrument.noaa.botpt.ooicore.driver import Parameter
from mi.instrument.noaa.botpt.ooicore.driver import ProtocolState
from mi.instrument.noaa.botpt.ooicore.driver import ProtocolEvent
from mi.instrument.noaa.botpt.ooicore.driver import InstrumentDriver
from mi.instrument.noaa.botpt.ooicore.driver import Protocol
from mi.instrument.noaa.botpt.ooicore.driver import ParameterConstraint
from mi.instrument.noaa.botpt.ooicore.driver import Capability
from mi.instrument.noaa.botpt.ooicore.driver import InstrumentCommand
from mi.instrument.noaa.botpt.ooicore.driver import NEWLINE
import mi.instrument.noaa.botpt.ooicore.test.test_samples as samples
from mi.core.exceptions import BadRequest, ResourceError
__author__ = 'Pete Cable'
__license__ = 'Apache 2.0'
# Module-level logger shared by all test classes in this file.
log = get_logger()
# Startup configuration applied to the driver under test; values here must
# agree with the expected VALUEs in BotptTestMixinSub._driver_parameters.
botpt_startup_config = {
    DriverConfigKey.PARAMETERS: {
        Parameter.AUTO_RELEVEL: True,      # re-level automatically on tilt trigger
        Parameter.LEVELING_TIMEOUT: 600,   # seconds before leveling is abandoned
        Parameter.XTILT_TRIGGER: 300.0,    # X tilt threshold (microradians assumed — confirm)
        Parameter.YTILT_TRIGGER: 300.0,    # Y tilt threshold (microradians assumed — confirm)
        Parameter.HEAT_DURATION: 1,        # heater run time
        Parameter.OUTPUT_RATE: 40,         # NANO output rate
    }
}
# ##
# Driver parameters for the tests
# ##
# Registers the driver module/class and agent metadata with the IDK test
# framework; all test classes below pick this configuration up implicitly.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.noaa.botpt.ooicore.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='1D644T',
    instrument_agent_name='noaa_botpt_ooicore',
    instrument_agent_packet_config=particles.DataParticleType(),
    driver_startup_config=botpt_startup_config
)
GO_ACTIVE_TIMEOUT = 180  # seconds to wait for the agent to go active in qualification tests
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class BotptTestMixinSub(DriverTestMixin):
    """
    Mixin defining expected parameter/capability/particle values plus common
    assert helpers, shared by the unit/integration/qualification test classes.
    """
    # Shorthand aliases for the parameter-test configuration keys.
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES
    # Expected driver parameters: type, visibility and startup value.  The
    # VALUEs must match botpt_startup_config at module top.
    _driver_parameters = {
        # Parameters defined in the IOS
        # RW
        Parameter.AUTO_RELEVEL: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, VALUE: True},
        Parameter.XTILT_TRIGGER: {TYPE: float, READONLY: False, DA: False, STARTUP: True, VALUE: 300},
        Parameter.YTILT_TRIGGER: {TYPE: float, READONLY: False, DA: False, STARTUP: True, VALUE: 300},
        Parameter.LEVELING_TIMEOUT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 600},
        Parameter.OUTPUT_RATE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 40},
        Parameter.HEAT_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 1},
        # RO
        Parameter.LILY_LEVELING: {TYPE: bool, READONLY: True, DA: False, STARTUP: False, VALUE: False},
        Parameter.HEATER_ON: {TYPE: bool, READONLY: True, DA: False, STARTUP: False, VALUE: False},
        Parameter.LEVELING_FAILED: {TYPE: bool, READONLY: True, DA: False, STARTUP: False, VALUE: False},
    }
    # All raw sample strings used to exercise the chunker in test_chunker.
    _samples = [samples.LILY_VALID_SAMPLE_01, samples.LILY_VALID_SAMPLE_02, samples.HEAT_VALID_SAMPLE_01,
                samples.HEAT_VALID_SAMPLE_02, samples.IRIS_VALID_SAMPLE_01, samples.IRIS_VALID_SAMPLE_02,
                samples.NANO_VALID_SAMPLE_01, samples.NANO_VALID_SAMPLE_02, samples.LEVELING_STATUS,
                samples.SWITCHING_STATUS, samples.LEVELED_STATUS, samples.X_OUT_OF_RANGE, samples.Y_OUT_OF_RANGE]
    # Capabilities and the protocol states from which each may be invoked.
    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
        Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE]},
        Capability.START_LEVELING: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.STOP_LEVELING: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.START_HEATER: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.STOP_HEATER: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
    }
    # Complete set of events the protocol FSM is expected to accept per state.
    _capabilities = {
        ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.COMMAND: ['DRIVER_EVENT_ACQUIRE_STATUS',
                                'DRIVER_EVENT_GET',
                                'DRIVER_EVENT_SET',
                                'DRIVER_EVENT_START_AUTOSAMPLE',
                                'DRIVER_EVENT_START_DIRECT',
                                'PROTOCOL_EVENT_START_LEVELING',
                                'PROTOCOL_EVENT_STOP_LEVELING',
                                'PROTOCOL_EVENT_LEVELING_TIMEOUT',
                                'PROTOCOL_EVENT_HEATER_TIMEOUT',
                                'PROTOCOL_EVENT_START_HEATER',
                                'PROTOCOL_EVENT_STOP_HEATER',
                                'PROTOCOL_EVENT_NANO_TIME_SYNC'],
        ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_GET',
                                   'DRIVER_EVENT_STOP_AUTOSAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_STATUS',
                                   'PROTOCOL_EVENT_START_LEVELING',
                                   'PROTOCOL_EVENT_STOP_LEVELING',
                                   'PROTOCOL_EVENT_LEVELING_TIMEOUT',
                                   'PROTOCOL_EVENT_HEATER_TIMEOUT',
                                   'PROTOCOL_EVENT_START_HEATER',
                                   'PROTOCOL_EVENT_STOP_HEATER',
                                   'PROTOCOL_EVENT_NANO_TIME_SYNC'],
        ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT',
                                      'EXECUTE_DIRECT'],
    }
    # --- Expected particle values; each dict corresponds to one of the raw
    # --- sample strings in mi.instrument.noaa.botpt.ooicore.test.test_samples.
    lily_sample_parameters_01 = {
        particles.LilySampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
        particles.LilySampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/24 23:36:02', REQUIRED: True},
        particles.LilySampleParticleKey.X_TILT: {TYPE: float, VALUE: -235.500, REQUIRED: True},
        particles.LilySampleParticleKey.Y_TILT: {TYPE: float, VALUE: 25.930, REQUIRED: True},
        particles.LilySampleParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 194.30, REQUIRED: True},
        particles.LilySampleParticleKey.TEMP: {TYPE: float, VALUE: 26.04, REQUIRED: True},
        particles.LilySampleParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.96, REQUIRED: True},
        particles.LilySampleParticleKey.SN: {TYPE: unicode, VALUE: 'N9655', REQUIRED: True},
    }
    lily_sample_parameters_02 = {
        particles.LilySampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
        particles.LilySampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/24 23:36:04', REQUIRED: True},
        particles.LilySampleParticleKey.X_TILT: {TYPE: float, VALUE: -235.349, REQUIRED: True},
        particles.LilySampleParticleKey.Y_TILT: {TYPE: float, VALUE: 26.082, REQUIRED: True},
        particles.LilySampleParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 194.26, REQUIRED: True},
        particles.LilySampleParticleKey.TEMP: {TYPE: float, VALUE: 26.04, REQUIRED: True},
        particles.LilySampleParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.96, REQUIRED: True},
        particles.LilySampleParticleKey.SN: {TYPE: unicode, VALUE: 'N9655', REQUIRED: True},
    }
    nano_sample_parameters_01 = {
        particles.NanoSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'NANO', REQUIRED: True},
        particles.NanoSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/08/22 22:48:36.013', REQUIRED: True},
        particles.NanoSampleParticleKey.PRESSURE: {TYPE: float, VALUE: 13.888533, REQUIRED: True},
        particles.NanoSampleParticleKey.TEMP: {TYPE: float, VALUE: 26.147947328, REQUIRED: True},
        particles.NanoSampleParticleKey.PPS_SYNC: {TYPE: unicode, VALUE: u'V', REQUIRED: True},
    }
    nano_sample_parameters_02 = {
        particles.NanoSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'NANO', REQUIRED: True},
        particles.NanoSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/08/22 23:13:36.000', REQUIRED: True},
        particles.NanoSampleParticleKey.PRESSURE: {TYPE: float, VALUE: 13.884067, REQUIRED: True},
        particles.NanoSampleParticleKey.TEMP: {TYPE: float, VALUE: 26.172926006, REQUIRED: True},
        particles.NanoSampleParticleKey.PPS_SYNC: {TYPE: unicode, VALUE: u'P', REQUIRED: True},
    }
    iris_sample_parameters_01 = {
        particles.IrisSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'IRIS', REQUIRED: True},
        particles.IrisSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/05/29 00:25:34', REQUIRED: True},
        particles.IrisSampleParticleKey.X_TILT: {TYPE: float, VALUE: -0.0882, REQUIRED: True},
        particles.IrisSampleParticleKey.Y_TILT: {TYPE: float, VALUE: -0.7524, REQUIRED: True},
        particles.IrisSampleParticleKey.TEMP: {TYPE: float, VALUE: 28.45, REQUIRED: True},
        particles.IrisSampleParticleKey.SN: {TYPE: unicode, VALUE: 'N8642', REQUIRED: True}
    }
    iris_sample_parameters_02 = {
        particles.IrisSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'IRIS', REQUIRED: True},
        particles.IrisSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/05/29 00:25:36', REQUIRED: True},
        particles.IrisSampleParticleKey.X_TILT: {TYPE: float, VALUE: -0.0885, REQUIRED: True},
        particles.IrisSampleParticleKey.Y_TILT: {TYPE: float, VALUE: -0.7517, REQUIRED: True},
        particles.IrisSampleParticleKey.TEMP: {TYPE: float, VALUE: 28.49, REQUIRED: True},
        particles.IrisSampleParticleKey.SN: {TYPE: unicode, VALUE: 'N8642', REQUIRED: True}
    }
    heat_sample_parameters_01 = {
        particles.HeatSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'HEAT', REQUIRED: True},
        particles.HeatSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/04/19 22:54:11', REQUIRED: True},
        particles.HeatSampleParticleKey.X_TILT: {TYPE: int, VALUE: -1, REQUIRED: True},
        particles.HeatSampleParticleKey.Y_TILT: {TYPE: int, VALUE: 1, REQUIRED: True},
        particles.HeatSampleParticleKey.TEMP: {TYPE: int, VALUE: 25, REQUIRED: True}
    }
    heat_sample_parameters_02 = {
        particles.HeatSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'HEAT', REQUIRED: True},
        particles.HeatSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/04/19 22:54:11', REQUIRED: True},
        particles.HeatSampleParticleKey.X_TILT: {TYPE: int, VALUE: 1, REQUIRED: True},
        particles.HeatSampleParticleKey.Y_TILT: {TYPE: int, VALUE: 1, REQUIRED: True},
        particles.HeatSampleParticleKey.TEMP: {TYPE: int, VALUE: 25, REQUIRED: True}
    }
    botpt_status_parameters_01 = {
        particles.BotptStatusParticleKey.LILY1: {TYPE: unicode, VALUE: samples.LILY_FILTERED_STATUS1, REQUIRED: True},
        particles.BotptStatusParticleKey.LILY2: {TYPE: unicode, VALUE: samples.LILY_FILTERED_STATUS2, REQUIRED: True},
        particles.BotptStatusParticleKey.IRIS1: {TYPE: unicode, VALUE: samples.IRIS_FILTERED_STATUS1, REQUIRED: True},
        particles.BotptStatusParticleKey.IRIS2: {TYPE: unicode, VALUE: samples.IRIS_FILTERED_STATUS2, REQUIRED: True},
        particles.BotptStatusParticleKey.NANO: {TYPE: unicode, VALUE: samples.NANO_FILTERED_STATUS, REQUIRED: True},
        particles.BotptStatusParticleKey.SYST: {TYPE: unicode, VALUE: samples.SYST_FILTERED_STATUS, REQUIRED: True},
    }
    lily_leveling_parameters_01 = {
        particles.LilyLevelingParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
        particles.LilyLevelingParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/07/24 20:36:27', REQUIRED: True},
        particles.LilyLevelingParticleKey.X_TILT: {TYPE: float, VALUE: 14.667, REQUIRED: True},
        particles.LilyLevelingParticleKey.Y_TILT: {TYPE: float, VALUE: 81.642, REQUIRED: True},
        particles.LilyLevelingParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 185.21, REQUIRED: True},
        particles.LilyLevelingParticleKey.TEMP: {TYPE: float, VALUE: 33.67, REQUIRED: True},
        particles.LilyLevelingParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.59, REQUIRED: True},
        particles.LilyLevelingParticleKey.SN: {TYPE: unicode, VALUE: u'N9651', REQUIRED: True},
        particles.LilyLevelingParticleKey.STATUS: {TYPE: unicode, VALUE: u'None', REQUIRED: True}
    }
    lily_leveling_parameters_02 = {
        particles.LilyLevelingParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
        particles.LilyLevelingParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/28 17:29:21', REQUIRED: True},
        particles.LilyLevelingParticleKey.X_TILT: {TYPE: float, VALUE: -2.277, REQUIRED: True},
        particles.LilyLevelingParticleKey.Y_TILT: {TYPE: float, VALUE: -2.165, REQUIRED: True},
        particles.LilyLevelingParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 190.81, REQUIRED: True},
        particles.LilyLevelingParticleKey.TEMP: {TYPE: float, VALUE: 25.69, REQUIRED: True},
        particles.LilyLevelingParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.87, REQUIRED: True},
        particles.LilyLevelingParticleKey.SN: {TYPE: unicode, VALUE: u'N9651', REQUIRED: True},
        particles.LilyLevelingParticleKey.STATUS: {TYPE: unicode, VALUE: u'Leveled', REQUIRED: True}
    }
    lily_leveling_parameters_03 = {
        particles.LilyLevelingParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
        particles.LilyLevelingParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/28 18:04:41', REQUIRED: True},
        particles.LilyLevelingParticleKey.X_TILT: {TYPE: float, VALUE: -7.390, REQUIRED: True},
        particles.LilyLevelingParticleKey.Y_TILT: {TYPE: float, VALUE: -14.063, REQUIRED: True},
        particles.LilyLevelingParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 190.91, REQUIRED: True},
        particles.LilyLevelingParticleKey.TEMP: {TYPE: float, VALUE: 25.83, REQUIRED: True},
        particles.LilyLevelingParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.87, REQUIRED: True},
        particles.LilyLevelingParticleKey.SN: {TYPE: unicode, VALUE: u'N9651', REQUIRED: True},
        particles.LilyLevelingParticleKey.STATUS: {TYPE: unicode, VALUE: u'Switching to Y', REQUIRED: True}
    }
    def assert_driver_parameters(self, current_parameters, verify_values=False):
        """
        Verify that all driver parameters are correct and potentially verify values.
        @param current_parameters: driver parameters read from the driver instance
        @param verify_values: should we verify values against definition?
        """
        self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
    def assert_particle(self, data_particle, particle_type, particle_keys, sample_data, verify_values=False):
        """
        Verify sample particle
        @param data_particle: data particle
        @param particle_type: particle type
        @param particle_keys: particle data keys
        @param sample_data: sample values to verify against
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(particle_keys, sample_data)
        self.assert_data_particle_header(data_particle, particle_type, require_instrument_timestamp=True)
        self.assert_data_particle_parameters(data_particle, sample_data, verify_values)
    # --- Particle-specific assert helpers; each delegates to assert_particle
    # --- with the matching expected-value table defined above.
    def assert_particle_lily_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_SAMPLE,
                             particles.LilySampleParticleKey, self.lily_sample_parameters_01, verify_values)
    def assert_particle_lily_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_SAMPLE,
                             particles.LilySampleParticleKey, self.lily_sample_parameters_02, verify_values)
    def assert_particle_nano_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.NANO_SAMPLE,
                             particles.NanoSampleParticleKey, self.nano_sample_parameters_01, verify_values)
    def assert_particle_nano_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.NANO_SAMPLE,
                             particles.NanoSampleParticleKey, self.nano_sample_parameters_02, verify_values)
    def assert_particle_iris_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.IRIS_SAMPLE,
                             particles.IrisSampleParticleKey, self.iris_sample_parameters_01, verify_values)
    def assert_particle_iris_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.IRIS_SAMPLE,
                             particles.IrisSampleParticleKey, self.iris_sample_parameters_02, verify_values)
    def assert_particle_heat_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.HEAT_SAMPLE,
                             particles.HeatSampleParticleKey, self.heat_sample_parameters_01, verify_values)
    def assert_particle_heat_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.HEAT_SAMPLE,
                             particles.HeatSampleParticleKey, self.heat_sample_parameters_02, verify_values)
    def assert_particle_botpt_status(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.BOTPT_STATUS,
                             particles.BotptStatusParticleKey, self.botpt_status_parameters_01, verify_values)
    def assert_particle_lily_leveling_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_LEVELING,
                             particles.LilyLevelingParticleKey, self.lily_leveling_parameters_01, verify_values)
    def assert_particle_lily_leveling_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_LEVELING,
                             particles.LilyLevelingParticleKey, self.lily_leveling_parameters_02, verify_values)
    def assert_particle_lily_leveling_03(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_LEVELING,
                             particles.LilyLevelingParticleKey, self.lily_leveling_parameters_03, verify_values)
    def _create_port_agent_packet(self, data_item):
        # Wrap raw instrument data in a PortAgentPacket stamped with the
        # current NTP time, as the real port agent would.
        ts = ntplib.system_to_ntp_time(time.time())
        port_agent_packet = PortAgentPacket()
        port_agent_packet.attach_data(data_item)
        port_agent_packet.attach_timestamp(ts)
        port_agent_packet.pack_header()
        return port_agent_packet
    def _send_port_agent_packet(self, driver, data_item):
        # Deliver one packet of data to the driver's protocol, bypassing any
        # real connection.
        driver._protocol.got_data(self._create_port_agent_packet(data_item))
    def send_side_effect(self, driver):
        # Return a callable suitable as Mock.side_effect for the connection's
        # send(): looks up a canned response in _responses and feeds it back
        # to the driver as if the instrument had replied.
        def inner(data):
            response = self._responses.get(data)
            if response is not None:
                log.debug("my_send: data: %s, my_response: %s", data, response)
                self._send_port_agent_packet(driver, response + samples.NEWLINE)
            else:
                log.debug('No response found for %r', data)
        return inner
    # Canned instrument responses keyed by the exact command string sent.
    _responses = {
        'NANO,*0100IF\n': samples.NANO_STATUS,  # need this for _update_params
        'LILY,*9900XYC2\n': 'LILY,2013/06/28 18:04:41,*9900XYC2',  # lily on
        'IRIS,*9900XYC2\n': 'IRIS,2013/06/28 18:04:41,*9900XYC2',  # iris on
        'LILY,*9900XY-LEVEL,0\n': 'LILY,2013/06/28 18:04:41,*9900XY-LEVEL,0',  # level off
        'LILY,*9900XYC-OFF\n': 'LILY,2013/06/28 18:04:41,*9900XYC-OFF',  # lily off
        'IRIS,*9900XYC-OFF\n': 'IRIS,2013/06/28 18:04:41,*9900XYC-OFF',  # iris off
        'SYST,1\n': samples.SYST_STATUS,
        'LILY,*9900XY-DUMP-SETTINGS\n': samples.LILY_STATUS1,
        'LILY,*9900XY-DUMP2\n': samples.LILY_STATUS2,
        'IRIS,*9900XY-DUMP-SETTINGS\n': samples.IRIS_STATUS1,
        'IRIS,*9900XY-DUMP2\n': samples.IRIS_STATUS2,
        'LILY,*9900XY-LEVEL,1\n': 'LILY,2013/06/28 18:04:41,*9900XY-LEVEL,1',
        'HEAT,1\n': 'HEAT,2013/06/28 18:04:41,*1',
        'HEAT,0\n': 'HEAT,2013/06/28 18:04:41,*0',
        'NANO,*0100E4\n': samples.NANO_VALID_SAMPLE_01,
        'NANO,TS': samples.NANO_VALID_SAMPLE_01,
    }
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
# noinspection PyProtectedMember,PyUnusedLocal,PyUnresolvedReferences
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, BotptTestMixinSub):
    def setUp(self):
        # No extra fixtures needed; defer to the base unit-test setup.
        InstrumentDriverUnitTestCase.setUp(self)
    def test_connect(self, initial_protocol_state=ProtocolState.COMMAND):
        """
        Verify we can initialize the driver. Set up mock events for other tests.
        @param initial_protocol_state: target protocol state for driver
        @return: driver instance
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver, initial_protocol_state)
        driver._protocol.set_init_params(botpt_startup_config)
        # Route connection writes through the canned-response table so the
        # protocol sees instrument replies without real hardware.
        driver._connection.send.side_effect = self.send_side_effect(driver)
        # Wrap the FSM's on_event in a Mock that still calls the real
        # implementation, so tests can assert on the event sequence.
        driver._protocol._protocol_fsm.on_event_actual = driver._protocol._protocol_fsm.on_event
        driver._protocol._protocol_fsm.on_event = Mock()
        driver._protocol._protocol_fsm.on_event.side_effect = driver._protocol._protocol_fsm.on_event_actual
        driver._protocol._init_params()
        return driver
    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        driver = self.test_connect()
        # Plain samples: verify both structure and values (final True flag).
        self.assert_particle_published(driver, samples.LILY_VALID_SAMPLE_01, self.assert_particle_lily_sample_01, True)
        self.assert_particle_published(driver, samples.LILY_VALID_SAMPLE_02, self.assert_particle_lily_sample_02, True)
        self.assert_particle_published(driver, samples.NANO_VALID_SAMPLE_01, self.assert_particle_nano_sample_01, True)
        self.assert_particle_published(driver, samples.NANO_VALID_SAMPLE_02, self.assert_particle_nano_sample_02, True)
        self.assert_particle_published(driver, samples.IRIS_VALID_SAMPLE_01, self.assert_particle_iris_sample_01, True)
        self.assert_particle_published(driver, samples.IRIS_VALID_SAMPLE_02, self.assert_particle_iris_sample_02, True)
        self.assert_particle_published(driver, samples.HEAT_VALID_SAMPLE_01, self.assert_particle_heat_sample_01, True)
        self.assert_particle_published(driver, samples.HEAT_VALID_SAMPLE_02, self.assert_particle_heat_sample_02, True)
        # disable leveling-related methods to avoid handling these messages (will raise exception)
        driver._protocol._check_completed_leveling = Mock()
        driver._protocol._check_for_autolevel = Mock()
        self.assert_particle_published(driver, samples.LEVELING_STATUS, self.assert_particle_lily_leveling_01, True)
        self.assert_particle_published(driver, samples.LEVELED_STATUS, self.assert_particle_lily_leveling_02, True)
        self.assert_particle_published(driver, samples.SWITCHING_STATUS, self.assert_particle_lily_leveling_03, True)
        # Out-of-range samples: structure only, not values (final False flag).
        self.assert_particle_published(driver, samples.X_OUT_OF_RANGE, self.assert_particle_lily_leveling_02, False)
        self.assert_particle_published(driver, samples.Y_OUT_OF_RANGE, self.assert_particle_lily_leveling_02, False)
    def test_corrupt_data(self):
        """
        Verify corrupt data generates a SampleException
        """
        driver = self.test_connect()
        for sample, p_type in [
            (samples.LILY_VALID_SAMPLE_01, particles.LilySampleParticle),
            (samples.IRIS_VALID_SAMPLE_01, particles.IrisSampleParticle),
            (samples.NANO_VALID_SAMPLE_01, particles.NanoSampleParticle),
            (samples.HEAT_VALID_SAMPLE_01, particles.HeatSampleParticle),
            (samples.LEVELING_STATUS, particles.LilyLevelingParticle),
            (samples.LILY_STATUS1, particles.LilyStatusParticle1),
            (samples.LILY_STATUS2, particles.LilyStatusParticle2),
            (samples.IRIS_STATUS1, particles.IrisStatusParticle1),
            (samples.IRIS_STATUS2, particles.IrisStatusParticle2),
            (samples.NANO_STATUS, particles.NanoStatusParticle),
            (samples.SYST_STATUS, particles.SystStatusParticle),
        ]:
            # Inject garbage just past the sensor-ID header so the particle
            # regex/parse fails, then confirm generate() raises.
            sample = sample[:8] + 'GARBAGE123123124' + sample[8:]
            with self.assertRaises(SampleException):
                p_type(sample).generate()
    def test_status_particle(self):
        """
        This particle is not generated via the chunker (because it may contain embedded samples)
        so we will test it by manually generating the particle.
        """
        ts = ntplib.system_to_ntp_time(time.time())
        # Assemble a full combined status message from the per-sensor pieces.
        status = NEWLINE.join([samples.SYST_STATUS, samples.LILY_STATUS1, samples.LILY_STATUS2,
                               samples.IRIS_STATUS1, samples.IRIS_STATUS2, samples.NANO_STATUS])
        self.assert_particle_botpt_status(particles.BotptStatusParticle(status, port_timestamp=ts), verify_values=True)
def test_combined_samples(self):
"""
Verify combined samples produce the correct number of chunks
"""
chunker = StringChunker(Protocol.sieve_function)
ts = self.get_ntp_timestamp()
my_samples = [(samples.BOTPT_FIREHOSE_01, 6),
(samples.BOTPT_FIREHOSE_02, 7)]
for data, num_samples in my_samples:
chunker.add_chunk(data, ts)
results = []
while True:
timestamp, result = chunker.get_next_data()
if result:
results.append(result)
self.assertTrue(result in data)
self.assertEqual(timestamp, ts)
else:
break
self.assertEqual(len(results), num_samples)
def test_chunker(self):
"""
Test the chunker against all input samples
"""
chunker = StringChunker(Protocol.sieve_function)
ts = self.get_ntp_timestamp()
for sample in self._samples:
chunker.add_chunk(sample, ts)
(timestamp, result) = chunker.get_next_data()
self.assertEqual(result, sample)
self.assertEqual(timestamp, ts)
(timestamp, result) = chunker.get_next_data()
self.assertEqual(result, None)
def test_start_stop_autosample(self):
"""
Test starting/stopping autosample, verify state transitions
"""
driver = self.test_connect()
driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_AUTOSAMPLE)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.AUTOSAMPLE)
driver._protocol._protocol_fsm.on_event(ProtocolEvent.STOP_AUTOSAMPLE)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
def test_status_handler(self):
"""
Test the acquire status handler
"""
driver = self.test_connect()
driver._protocol._protocol_fsm.on_event(ProtocolEvent.ACQUIRE_STATUS)
    @unittest.skip('times out when run with other tests')
    def test_leveling_timeout(self):
        """
        Test that leveling times out, is stopped, and the appropriate flags are set.
        """
        driver = self.test_connect()
        # expected event sequence raised on the protocol FSM
        expected = [call(ProtocolEvent.GET, Parameter.ALL),  # startup get ALL
                    call(ProtocolEvent.START_LEVELING),  # start leveling
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.LEVELING_TIMEOUT),  # leveling timed out
                    call(ProtocolEvent.GET, Parameter.ALL)]  # config change get ALL
        try:
            # set the leveling timeout to 1 to speed up timeout
            driver._protocol._param_dict.set_value(Parameter.LEVELING_TIMEOUT, 1)
            driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_LEVELING)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.LILY_LEVELING), True)
            # sleep for longer than the length of timeout
            time.sleep(driver._protocol._param_dict.get(Parameter.LEVELING_TIMEOUT) + 1)
        except InstrumentProtocolException:
            # the timeout handler raises InstrumentProtocolException; only then
            # are the event list and flags checked
            # assert that we raised the expected events
            self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.LILY_LEVELING), False)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL), False)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.LEVELING_FAILED), True)
    def test_leveling_complete(self):
        """
        Test the driver processes a leveling complete particle
        """
        driver = self.test_connect()
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_LEVELING)
        # feed in a leveling complete status message
        self._send_port_agent_packet(driver, samples.LEVELED_STATUS)
        # Assert we have returned to the command state
        self.assertEquals(driver._protocol.get_current_state(), ProtocolState.COMMAND)
        expected = [call(ProtocolEvent.GET, Parameter.ALL),  # startup get ALL
                    call(ProtocolEvent.START_LEVELING),  # start leveling
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.STOP_LEVELING),  # leveling stopped on LEVELED status
                    call(ProtocolEvent.GET, Parameter.ALL)]  # config change get ALL
        # give the asynchronous event handlers a moment to run
        time.sleep(.5)
        # assert that we raised the expected events
        self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)
    def test_leveling_failure(self):
        """
        Test the driver processes a leveling failure particle, sets the correct flags.
        """
        driver = self.test_connect()
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_LEVELING)
        # assert we have entered a leveling state
        self.assertTrue(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL))
        # feed in a leveling failed status message
        try:
            self._send_port_agent_packet(driver, samples.X_OUT_OF_RANGE + samples.NEWLINE)
            time.sleep(1)
        except InstrumentDataException:
            # X axis out of range must disable auto relevel
            self.assertFalse(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL))
        try:
            self._send_port_agent_packet(driver, samples.Y_OUT_OF_RANGE + samples.NEWLINE)
            time.sleep(1)
        except InstrumentDataException:
            # Y axis out of range must disable auto relevel as well
            self.assertFalse(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL))
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
        expected = [call(ProtocolEvent.GET, Parameter.ALL),  # startup get ALL
                    call(ProtocolEvent.START_LEVELING),  # start leveling
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.STOP_LEVELING),  # leveling stopped after failure
                    call(ProtocolEvent.GET, Parameter.ALL)]  # config change get ALL
        # assert that we raised the expected events
        self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)
        # assert the correct flags are set
        self.assertEqual(driver._protocol._param_dict.get(Parameter.LILY_LEVELING), False)
        self.assertEqual(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL), False)
        self.assertEqual(driver._protocol._param_dict.get(Parameter.LEVELING_FAILED), True)
    def test_pps_time_sync(self):
        """
        Test that the time sync event is raised when PPS is regained.
        """
        driver = self.test_connect()
        self._send_port_agent_packet(driver, samples.NANO_VALID_SAMPLE_01)  # PPS lost
        self._send_port_agent_packet(driver, samples.NANO_VALID_SAMPLE_02)  # PPS regained
        # NOTE: string event names are used here (rather than the enum
        # constants used elsewhere in this class) but compare equal to them
        expected = [call('DRIVER_EVENT_GET', 'DRIVER_PARAMETER_ALL'),  # startup get ALL
                    call('PROTOCOL_EVENT_NANO_TIME_SYNC')]  # Time sync event when PPS regained
        # assert that we raised the expected events
        self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)
def test_heat_on(self):
"""
Test turning the heater on/off
"""
driver = self.test_connect()
driver._protocol._handler_start_heater()
self.assertEqual(driver._protocol._param_dict.get(Parameter.HEATER_ON), True)
driver._protocol._handler_stop_heater()
self.assertEqual(driver._protocol._param_dict.get(Parameter.HEATER_ON), False)
    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
        """
        self.assert_enum_has_no_duplicates(particles.DataParticleType)
        self.assert_enum_has_no_duplicates(ProtocolState)
        self.assert_enum_has_no_duplicates(ProtocolEvent)
        self.assert_enum_has_no_duplicates(Parameter)
        # NOTE(review): InstrumentCommand check deliberately disabled --
        # presumably it contains intentional duplicates; confirm before re-enabling
        # self.assert_enum_has_no_duplicates(InstrumentCommand())
        # Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
        self.assert_enum_has_no_duplicates(Capability)
        self.assert_enum_complete(Capability, ProtocolEvent)
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self._capabilities)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, samples.NEWLINE, mock_callback)
driver_capabilities = Capability.list()
test_capabilities = Capability.list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, BotptTestMixinSub):
    """
    Integration tests: exercise the driver against a (real or simulated)
    instrument via zeromq. Tests here depend on instrument timing, so
    statement order and sleeps are significant.
    """
    def setUp(self):
        InstrumentDriverIntegrationTestCase.setUp(self)
    def assert_acquire_status(self):
        """
        Verify all status particles generated
        """
        self.clear_events()
        self.assert_async_particle_generation(particles.DataParticleType.BOTPT_STATUS,
                                              self.assert_particle_botpt_status, timeout=20)
    def assert_time_sync(self):
        """
        Verify all status particles generated
        """
        self.clear_events()
        self.assert_async_particle_generation(particles.DataParticleType.NANO_SAMPLE,
                                              self.assert_particle_nano_sample_01, timeout=20)
    def test_connect(self):
        # basic smoke test: driver initializes and connects
        self.assert_initialize_driver()
    def test_get(self):
        # every declared parameter must be gettable with its expected value
        self.assert_initialize_driver()
        for param in self._driver_parameters:
            self.assert_get(param, self._driver_parameters[param][self.VALUE])
    def test_set(self):
        """
        Test all set commands. Verify all exception cases.
        """
        self.assert_initialize_driver()
        constraints = ParameterConstraint.dict()
        parameters = Parameter.dict()
        startup_config = self.test_config.driver_startup_config['parameters']
        for key in constraints:
            # constraint is (type, minimum, maximum); key is translated from
            # the constraint name to the actual parameter name
            _type, minimum, maximum = constraints[key]
            key = parameters[key]
            if _type in [int, float]:
                # assert we can set in range
                self.assert_set(key, maximum - 1)
                # assert exception when out of range
                self.assert_set_exception(key, maximum + 1)
            elif _type == bool:
                # assert we can toggle a boolean parameter
                if startup_config[key]:
                    self.assert_set(key, False)
                else:
                    self.assert_set(key, True)
            # assert bad types throw an exception
            self.assert_set_exception(key, 'BOGUS')
    def test_set_bogus_parameter(self):
        """
        Verify setting a bad parameter raises an exception
        """
        self.assert_initialize_driver()
        self.assert_set_exception('BOGUS', 'CHEESE')
    def test_startup_parameters(self):
        # values deliberately different from the startup config defaults
        new_values = {
            Parameter.AUTO_RELEVEL: True,
            Parameter.LEVELING_TIMEOUT: 601,
            Parameter.XTILT_TRIGGER: 301,
            Parameter.YTILT_TRIGGER: 301,
            Parameter.HEAT_DURATION: 2,
            Parameter.OUTPUT_RATE: 1,
        }
        self.assert_initialize_driver()
        self.assert_startup_parameters(self.assert_driver_parameters, new_values,
                                       self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS])
    def test_incomplete_config(self):
        """
        Break our startup config, then verify the driver raises an exception
        """
        # grab the old config
        startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
        old_value = startup_params[Parameter.LEVELING_TIMEOUT]
        # 'failed' means initialization unexpectedly SUCCEEDED with the broken
        # config; the expected path is the ResourceError branch below
        failed = False
        try:
            # delete a required parameter
            del (startup_params[Parameter.LEVELING_TIMEOUT])
            # re-init to take our broken config
            self.init_driver_process_client()
            self.assert_initialize_driver()
            failed = True
        except ResourceError as e:
            log.info('Exception thrown, test should pass: %r', e)
        finally:
            # always restore the config for subsequent tests
            startup_params[Parameter.LEVELING_TIMEOUT] = old_value
            if failed:
                self.fail('Failed to throw exception on missing parameter')
    def test_auto_relevel(self):
        """
        Test for verifying auto relevel
        """
        self.assert_initialize_driver()
        # set the leveling timeout low, so we're not here for long
        self.assert_set(Parameter.LEVELING_TIMEOUT, 60, no_get=True)
        # Set the XTILT to a low threshold so that the driver will
        # automatically start the re-leveling operation
        # NOTE: This test MAY fail if the instrument completes
        # leveling before the triggers have been reset to 300
        self.assert_set(Parameter.XTILT_TRIGGER, 0, no_get=True)
        self.assert_driver_command(Capability.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE)
        self.assert_async_particle_generation(particles.DataParticleType.LILY_LEVELING,
                                              self.assert_particle_lily_leveling_01)
        # verify the flag is set
        self.assert_get(Parameter.LILY_LEVELING, True)
        self.assert_driver_command(Capability.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND)
    def test_autosample(self):
        """
        Test for turning data on
        """
        self.assert_initialize_driver()
        self.assert_driver_command(Capability.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE)
        rate = int(self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS][Parameter.OUTPUT_RATE])
        # autosample for 10 seconds, then count the samples...
        # we can't test "inline" because the nano data rate is too high.
        time.sleep(10)
        self.assert_driver_command(Capability.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
        # expected counts: LILY/HEAT/IRIS at 1 Hz for ~5s of captured data;
        # NANO scales with the configured output rate
        for particle_type, assert_func, count in [
            (particles.DataParticleType.LILY_SAMPLE, self.assert_particle_lily_sample_01, 5),
            (particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01, 5),
            (particles.DataParticleType.IRIS_SAMPLE, self.assert_particle_iris_sample_01, 5),
            (particles.DataParticleType.NANO_SAMPLE, self.assert_particle_nano_sample_01, 5 * rate)
        ]:
            self.assert_async_particle_generation(particle_type, assert_func, particle_count=count, timeout=1)
    def test_commanded_acquire_status(self):
        """
        Test for acquiring status
        """
        self.assert_initialize_driver()
        # Issue acquire status command
        self.assert_particle_generation(Capability.ACQUIRE_STATUS, particles.DataParticleType.BOTPT_STATUS,
                                        self.assert_particle_botpt_status)
    def test_leveling_complete(self):
        """
        Test for leveling complete
        """
        self.assert_initialize_driver()
        # go to autosample
        self.assert_driver_command(Capability.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=5)
        # Issue start leveling command
        self.assert_driver_command(Capability.START_LEVELING)
        # Verify the flag is set
        self.assert_get(Parameter.LILY_LEVELING, True)
        # Leveling should complete or abort after DEFAULT_LEVELING_TIMEOUT seconds
        timeout = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS][Parameter.LEVELING_TIMEOUT]
        # wait for a sample particle to indicate leveling is complete
        self.clear_events()
        self.assert_async_particle_generation(particles.DataParticleType.LILY_SAMPLE,
                                              self.assert_particle_lily_sample_01,
                                              timeout=timeout+10)
        # Verify the flag is unset
        self.assert_get(Parameter.LILY_LEVELING, False)
        self.assert_driver_command(Capability.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
    def test_scheduled_acquire_status(self):
        """
        Verify we can schedule an acquire status event
        """
        self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, delay=20)
    def test_scheduled_time_sync(self):
        """
        Verify we can schedule a time sync event.
        If we sync time in command mode, we will generate at least one NANO sample particle.
        """
        self.assert_scheduled_event(ScheduledJob.NANO_TIME_SYNC, self.assert_time_sync, delay=20)
    def test_heat_on(self):
        """
        Test turning the heater on and off.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(Capability.START_HEATER)
        self.assert_get(Parameter.HEATER_ON, True)
        self.assert_driver_command(Capability.STOP_HEATER)
        self.assert_get(Parameter.HEATER_ON, False)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, BotptTestMixinSub):
    """
    Qualification tests: exercise the driver through the instrument agent,
    covering command/streaming/direct-access/uninitialized agent states.
    """
    def setUp(self):
        InstrumentDriverQualificationTestCase.setUp(self)
    def assert_cycle(self):
        """
        Assert we can enter autosample, acquire all particles, acquire status,
        stop autosample, acquire heat particle, acquire_status.
        """
        self.assert_start_autosample()
        # verify all particles in autosample
        self.assert_particle_async(particles.DataParticleType.LILY_SAMPLE, self.assert_particle_lily_sample_01)
        self.assert_particle_async(particles.DataParticleType.IRIS_SAMPLE, self.assert_particle_iris_sample_01)
        self.assert_particle_async(particles.DataParticleType.NANO_SAMPLE, self.assert_particle_nano_sample_01)
        self.assert_particle_async(particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01)
        self.assert_particle_polled(Capability.ACQUIRE_STATUS, self.assert_particle_botpt_status,
                                    particles.DataParticleType.BOTPT_STATUS, timeout=60)
        # verify streaming continues after the polled status request
        self.assert_particle_async(particles.DataParticleType.LILY_SAMPLE, self.assert_particle_lily_sample_01)
        self.assert_particle_async(particles.DataParticleType.IRIS_SAMPLE, self.assert_particle_iris_sample_01)
        self.assert_particle_async(particles.DataParticleType.NANO_SAMPLE, self.assert_particle_nano_sample_01)
        self.assert_particle_async(particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01)
        self.assert_stop_autosample()
        # verify all particles in command
        self.assert_particle_async(particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01)
        self.assert_particle_polled(Capability.ACQUIRE_STATUS, self.assert_particle_botpt_status,
                                    particles.DataParticleType.BOTPT_STATUS, timeout=60)
    def test_cycle(self):
        """
        Verify we can run through the test cycle 4 times
        """
        self.assert_enter_command_mode()
        for x in xrange(4):
            log.debug('test_cycle -- PASS %d', x + 1)
            self.assert_cycle()
    def test_direct_access_telnet_mode(self):
        """
        This test manually tests that the Instrument Driver properly supports
        direct access to the physical instrument. (telnet mode)
        """
        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)
        # send a raw instrument command over the telnet session and check the
        # instrument's dump-settings response comes back
        self.tcp_client.send_data(InstrumentCommand.LILY_DUMP1 + samples.NEWLINE)
        result = self.tcp_client.expect('-DUMP-SETTINGS')
        self.assertTrue(result, msg='Failed to receive expected response in direct access mode.')
        self.assert_direct_access_stop_telnet()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 10)
    def test_leveling(self):
        """
        Verify we can stop/start leveling
        """
        self.assert_enter_command_mode()
        self.assert_resource_command(Capability.START_LEVELING)
        self.assert_get_parameter(Parameter.LILY_LEVELING, True)
        self.assert_particle_async(particles.DataParticleType.LILY_LEVELING, self.assert_particle_lily_leveling_01)
        self.assert_resource_command(Capability.STOP_LEVELING)
        self.assert_get_parameter(Parameter.LILY_LEVELING, False)
    def test_get_set_parameters(self):
        """
        verify that all parameters can be get set properly, this includes
        ensuring that read only parameters fail on set.
        """
        self.assert_enter_command_mode()
        constraints = ParameterConstraint.dict()
        parameters = Parameter.dict()
        reverse_param = Parameter.reverse_dict()
        startup_config = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
        for key in self._driver_parameters:
            if self._driver_parameters[key][self.READONLY]:
                self.assert_read_only_parameter(key)
            else:
                # map the parameter value back to its constraint name
                name = reverse_param.get(key)
                if name in constraints:
                    _type, minimum, maximum = constraints[name]
                    if _type in [int, float]:
                        # assert we can set in range
                        self.assert_set_parameter(key, maximum - 1)
                        # assert exception when out of range
                        with self.assertRaises(BadRequest):
                            self.assert_set_parameter(key, maximum + 1)
                    elif _type == bool:
                        # assert we can toggle a boolean parameter
                        if startup_config[key]:
                            self.assert_set_parameter(key, False)
                        else:
                            self.assert_set_parameter(key, True)
                    # assert bad types throw an exception
                    with self.assertRaises(BadRequest):
                        self.assert_set_parameter(key, 'BOGUS')
        # NOTE(review): dead assignment -- 'startup_config' is rebound after the
        # loop and never used again; safe to delete
        startup_config = self.test_config.driver_startup_config['parameters']
    def test_get_capabilities(self):
        """
        Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        """
        self.assert_enter_command_mode()
        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.GET,
                ProtocolEvent.SET,
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.ACQUIRE_STATUS,
                ProtocolEvent.START_LEVELING,
                ProtocolEvent.STOP_LEVELING,
                ProtocolEvent.START_HEATER,
                ProtocolEvent.STOP_HEATER,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_capabilities(capabilities)
        ##################
        #  Streaming Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.GET,
            ProtocolEvent.STOP_AUTOSAMPLE,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.START_LEVELING,
            ProtocolEvent.STOP_LEVELING,
            ProtocolEvent.START_HEATER,
            ProtocolEvent.STOP_HEATER,
        ]
        self.assert_start_autosample()
        self.assert_capabilities(capabilities)
        self.assert_stop_autosample()
        ##################
        #  DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()
        #######################
        #  Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        self.assert_reset()
        self.assert_capabilities(capabilities)
    def test_direct_access_exit_from_autosample(self):
        """
        Overridden. This driver always discovers to command
        """
    def test_discover(self):
        """
        Overridden. The driver always discovers to command
        """
        # Verify the agent is in command mode
        self.assert_enter_command_mode()
        # Now reset and try to discover.  This will stop the driver which holds the current
        # instrument state.
        self.assert_reset()
        self.assert_discover(ResourceAgentState.COMMAND)
|
|
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
import os
import os_vif
from os_vif import exception as osv_exception
from os_vif.objects import fields as osv_fields
from oslo_concurrency import processutils
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import linux_net
from nova.network import model as network_model
from nova.network import os_vif_util
from nova import objects
from nova import profiler
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt import osinfo
# Module-level logger and nova configuration handle.
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# vhostuser queues support (minimum libvirt version as a tuple)
MIN_LIBVIRT_VHOSTUSER_MQ = (1, 2, 17)
# vlan tag for macvtap passthrough mode on SRIOV VFs
MIN_LIBVIRT_MACVTAP_PASSTHROUGH_VLAN = (1, 3, 5)
def is_vif_model_valid_for_virt(virt_type, vif_model):
    """Return True if *vif_model* may be used with hypervisor *virt_type*.

    A vif_model of None is always considered valid (libvirt chooses).
    Raises UnsupportedVirtType for an unrecognized virt_type.
    """
    # NIC models shared by the x86 emulated hypervisors
    common = [network_model.VIF_MODEL_NE2K_PCI,
              network_model.VIF_MODEL_PCNET,
              network_model.VIF_MODEL_RTL8139,
              network_model.VIF_MODEL_E1000]
    valid_models = {
        'qemu': [network_model.VIF_MODEL_VIRTIO,
                 network_model.VIF_MODEL_LAN9118,
                 network_model.VIF_MODEL_SPAPR_VLAN] + common,
        'kvm': [network_model.VIF_MODEL_VIRTIO,
                network_model.VIF_MODEL_SPAPR_VLAN] + common,
        'xen': [network_model.VIF_MODEL_NETFRONT] + common,
        'lxc': [],
        'uml': [],
        'parallels': [network_model.VIF_MODEL_VIRTIO,
                      network_model.VIF_MODEL_RTL8139,
                      network_model.VIF_MODEL_E1000],
    }
    if vif_model is None:
        return True
    if virt_type not in valid_models:
        raise exception.UnsupportedVirtType(virt=virt_type)
    return vif_model in valid_models[virt_type]
@profiler.trace_cls("vif_driver")
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
    def get_base_config(self, instance, mac, image_meta,
                        inst_type, virt_type, vnic_type):
        """Build the common guest interface config (model/driver/queues).

        Returns a LibvirtConfigGuestInterface with the frontend (guest-visible)
        settings populated; backend settings are filled in by the per-vif-type
        get_config_* methods.
        """
        conf = vconfig.LibvirtConfigGuestInterface()
        # Default to letting libvirt / the hypervisor choose the model
        model = None
        driver = None
        vhost_queues = None
        # If the user has specified a 'vif_model' against the
        # image then honour that model
        if image_meta:
            model = osinfo.HardwareProperties(image_meta).network_model
        # Else if the virt type is KVM/QEMU/VZ(Parallels), then use virtio
        # according to the global config parameter
        if (model is None and
                virt_type in ('kvm', 'qemu', 'parallels') and
                    CONF.libvirt.use_virtio_for_bridges):
            model = network_model.VIF_MODEL_VIRTIO
        # Workaround libvirt bug, where it mistakenly
        # enables vhost mode, even for non-KVM guests
        if (model == network_model.VIF_MODEL_VIRTIO and
                virt_type == "qemu"):
            driver = "qemu"
        if not is_vif_model_valid_for_virt(virt_type,
                                           model):
            raise exception.UnsupportedHardware(model=model,
                                                virt=virt_type)
        # multiqueue virtio only applies to non-SRIOV vnics on kvm/parallels
        if (virt_type in ('kvm', 'parallels') and
            model == network_model.VIF_MODEL_VIRTIO and
            vnic_type not in network_model.VNIC_TYPES_SRIOV):
            vhost_drv, vhost_queues = self._get_virtio_mq_settings(image_meta,
                                                                   inst_type)
            driver = vhost_drv or driver
        designer.set_vif_guest_frontend_config(
            conf, mac, model, driver, vhost_queues)
        return conf
def get_base_hostdev_pci_config(self, vif):
conf = vconfig.LibvirtConfigGuestHostdevPCI()
pci_slot = vif['profile']['pci_slot']
designer.set_vif_host_backend_hostdev_pci_config(conf, pci_slot)
return conf
def _is_multiqueue_enabled(self, image_meta, flavor):
_, vhost_queues = self._get_virtio_mq_settings(image_meta, flavor)
return vhost_queues > 1 if vhost_queues is not None else False
def _get_virtio_mq_settings(self, image_meta, flavor):
"""A methods to set the number of virtio queues,
if it has been requested in extra specs.
"""
driver = None
vhost_queues = None
if not isinstance(image_meta, objects.ImageMeta):
image_meta = objects.ImageMeta.from_dict(image_meta)
img_props = image_meta.properties
if img_props.get('hw_vif_multiqueue_enabled'):
driver = 'vhost'
max_tap_queues = self._get_max_tap_queues()
if max_tap_queues:
vhost_queues = (max_tap_queues if flavor.vcpus > max_tap_queues
else flavor.vcpus)
else:
vhost_queues = flavor.vcpus
return (driver, vhost_queues)
def _get_max_tap_queues(self):
# NOTE(kengo.sakai): In kernels prior to 3.0,
# multiple queues on a tap interface is not supported.
# In kernels 3.x, the number of queues on a tap interface
# is limited to 8. From 4.0, the number is 256.
# See: https://bugs.launchpad.net/nova/+bug/1570631
kernel_version = int(os.uname()[2].split(".")[0])
if kernel_version <= 2:
return 1
elif kernel_version == 3:
return 8
elif kernel_version == 4:
return 256
else:
return None
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
@staticmethod
def is_no_op_firewall():
return CONF.firewall_driver == "nova.virt.firewall.NoopFirewallDriver"
def get_firewall_required(self, vif):
if vif.is_neutron_filtering_enabled():
return False
if self.is_no_op_firewall():
return False
return True
def get_firewall_required_os_vif(self, vif):
if vif.has_traffic_filtering:
return False
if self.is_no_op_firewall():
return False
return True
    def get_config_bridge(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
        """Get VIF configurations for bridge type."""
        conf = self.get_base_config(instance, vif['address'], image_meta,
                                    inst_type, virt_type, vif['vnic_type'])
        designer.set_vif_host_backend_bridge_config(
            conf, self.get_bridge_name(vif),
            self.get_vif_devname(vif))
        # filter name embeds the MAC (colons stripped) so it is unique per VIF
        mac_id = vif['address'].replace(':', '')
        name = "nova-instance-" + instance.name + "-" + mac_id
        if self.get_firewall_required(vif):
            conf.filtername = name
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf
def get_config_ivs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type, host):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
inst_type,
virt_type,
host)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance,
vif['address'],
image_meta,
inst_type,
virt_type,
vif['vnic_type'])
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
return conf
def get_config_ivs(self, instance, vif, image_meta,
inst_type, virt_type, host):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type,
host)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
inst_type,
virt_type,
host)
    def get_config_802qbg(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
        """Get VIF config for an 802.1Qbg (VEPA) port profile."""
        conf = self.get_base_config(instance, vif['address'], image_meta,
                                    inst_type, virt_type, vif['vnic_type'])
        # qbg_params carries the VSI manager/type/instance identifiers
        params = vif["qbg_params"]
        designer.set_vif_host_backend_802qbg_config(
            conf, vif['network'].get_meta('interface'),
            params['managerid'],
            params['typeid'],
            params['typeidversion'],
            params['instanceid'])
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf
    def get_config_802qbh(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
        """Get VIF config for an 802.1Qbh (VN-Link) port profile."""
        conf = self.get_base_config(instance, vif['address'], image_meta,
                                    inst_type, virt_type, vif['vnic_type'])
        profile = vif["profile"]
        vif_details = vif["details"]
        # VNIC_TYPE_DIRECT means PCI passthrough -> 'hostdev' backend;
        # otherwise use a macvtap-style 'direct' backend
        net_type = 'direct'
        if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
            net_type = 'hostdev'
        designer.set_vif_host_backend_802qbh_config(
            conf, net_type, profile['pci_slot'],
            vif_details[network_model.VIF_DETAILS_PROFILEID])
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf
    def get_config_hw_veb(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
        """Get VIF config for a hardware VEB (SR-IOV VF) port."""
        conf = self.get_base_config(instance, vif['address'], image_meta,
                                    inst_type, virt_type, vif['vnic_type'])
        profile = vif["profile"]
        vif_details = vif["details"]
        # direct (macvtap) backend by default; PCI passthrough uses 'hostdev'
        net_type = 'direct'
        if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
            net_type = 'hostdev'
        designer.set_vif_host_backend_hw_veb(
            conf, net_type, profile['pci_slot'],
            vif_details[network_model.VIF_DETAILS_VLAN])
        # NOTE(vladikr): Not setting vlan tags for macvtap on SR-IOV VFs
        # as vlan tag is not supported in Libvirt until version 1.3.5
        if (vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP and not
                host.has_min_version(MIN_LIBVIRT_MACVTAP_PASSTHROUGH_VLAN)):
            conf.vlan = None
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf
def get_config_hostdev_physical(self, instance, vif, image_meta,
inst_type, virt_type, host):
return self.get_base_hostdev_pci_config(vif)
def get_config_macvtap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'])
vif_details = vif['details']
macvtap_src = vif_details.get(network_model.VIF_DETAILS_MACVTAP_SOURCE)
macvtap_mode = vif_details.get(network_model.VIF_DETAILS_MACVTAP_MODE)
phys_interface = vif_details.get(
network_model.VIF_DETAILS_PHYS_INTERFACE)
missing_params = []
if macvtap_src is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_SOURCE)
if macvtap_mode is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_MODE)
if phys_interface is None:
missing_params.append(network_model.VIF_DETAILS_PHYS_INTERFACE)
if len(missing_params) > 0:
raise exception.VifDetailsMissingMacvtapParameters(
vif_id=vif['id'],
missing_params=missing_params)
designer.set_vif_host_backend_direct_config(
conf, macvtap_src, macvtap_mode)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'])
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'])
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
return conf
def get_config_tap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'])
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
return conf
def _get_vhostuser_settings(self, vif):
vif_details = vif['details']
mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
'server')
sock_path = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_SOCKET)
if sock_path is None:
raise exception.VifDetailsMissingVhostuserSockPath(
vif_id=vif['id'])
return mode, sock_path
def get_config_vhostuser(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'])
mode, sock_path = self._get_vhostuser_settings(vif)
designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
# (vladikr) Not setting up driver and queues for vhostuser
# as queues are not supported in Libvirt until version 1.2.17
if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
LOG.debug('Queues are not a vhostuser supported feature.')
conf.driver_name = None
conf.vhost_queues = None
return conf
def get_config_ib_hostdev(self, instance, vif, image_meta,
inst_type, virt_type, host):
return self.get_base_hostdev_pci_config(vif)
def get_config_vrouter(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'])
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def _set_config_VIFBridge(self, instance, vif, conf, host=None):
conf.net_type = "bridge"
conf.source_dev = vif.bridge_name
conf.target_dev = vif.vif_name
if self.get_firewall_required_os_vif(vif):
mac_id = vif.address.replace(':', '')
name = "nova-instance-" + instance.name + "-" + mac_id
conf.filtername = name
def _set_config_VIFOpenVSwitch(self, instance, vif, conf, host=None):
conf.net_type = "bridge"
conf.source_dev = vif.bridge_name
conf.target_dev = vif.vif_name
self._set_config_VIFPortProfile(instance, vif, conf)
def _set_config_VIFVHostUser(self, instance, vif, conf, host=None):
designer.set_vif_host_backend_vhostuser_config(
conf, vif.mode, vif.path)
if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
LOG.debug('Queues are not a vhostuser supported feature.')
conf.driver_name = None
conf.vhost_queues = None
def _set_config_VIFHostDevice(self, instance, vif, conf, host=None):
if vif.dev_type == osv_fields.VIFHostDeviceDevType.ETHERNET:
# This sets the required fields for an <interface type='hostdev'>
# section in a libvirt domain (by using a subset of hw_veb's
# options).
designer.set_vif_host_backend_hw_veb(
conf, 'hostdev', vif.dev_address, None)
else:
# TODO(jangutter): dev_type == VIFHostDeviceDevType.GENERIC
# is currently unsupported under os-vif. The corresponding conf
# class would be: LibvirtConfigGuestHostdevPCI
# but os-vif only returns a LibvirtConfigGuestInterface object
raise exception.InternalError(
_("Unsupported os-vif VIFHostDevice dev_type %(type)s") %
{'type': vif.dev_type})
def _set_config_VIFPortProfileOpenVSwitch(self, profile, conf):
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid",
profile.interface_id)
def _set_config_VIFPortProfile(self, instance, vif, conf):
# Set any port profile that may be required
profilefunc = "_set_config_" + vif.port_profile.obj_name()
func = getattr(self, profilefunc, None)
if not func:
raise exception.InternalError(
_("Unsupported VIF port profile type %(obj)s func %(func)s") %
{'obj': vif.port_profile.obj_name(), 'func': profilefunc})
func(vif.port_profile, conf)
def _get_config_os_vif(self, instance, vif, image_meta, inst_type,
virt_type, host, vnic_type):
"""Get the domain config for a VIF
:param instance: nova.objects.Instance
:param vif: os_vif.objects.vif.VIFBase subclass
:param image_meta: nova.objects.ImageMeta
:param inst_type: nova.objects.Flavor
:param virt_type: virtualization type
:param host: nova.virt.libvirt.host.Host
:param vnic_type: vnic type
:returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface
"""
# Do the config that's common to all vif types
conf = self.get_base_config(instance, vif.address, image_meta,
inst_type, virt_type, vnic_type)
# Do the VIF type specific config
viffunc = "_set_config_" + vif.obj_name()
func = getattr(self, viffunc, None)
if not func:
raise exception.InternalError(
_("Unsupported VIF type %(obj)s func %(func)s") %
{'obj': vif.obj_name(), 'func': viffunc})
func(instance, vif, conf, host)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type, host):
vif_type = vif['type']
vnic_type = vif['vnic_type']
# instance.display_name could be unicode
instance_repr = utils.get_obj_repr_unicode(instance)
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s virt_type=%(virt_type)s',
{'vif_type': vif_type, 'instance': instance_repr,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.InternalError(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
return self._get_config_os_vif(instance, vif_obj, image_meta,
inst_type, virt_type, host,
vnic_type)
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.InternalError(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type, host)
def _plug_bridge_with_port(self, instance, vif, port):
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
disv6 = '/proc/sys/net/ipv6/conf/%s/disable_ipv6' % br_name
if os.path.exists(disv6):
utils.execute('tee',
disv6,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
mtu = vif['network'].get_meta('mtu')
linux_net._create_veth_pair(v1_name, v2_name, mtu)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
if port == 'ovs':
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id,
vif['address'], instance.uuid,
mtu)
elif port == 'ivs':
linux_net.create_ivs_vif_port(v2_name, iface_id,
vif['address'], instance.uuid)
    def plug_ovs_hybrid(self, instance, vif):
        """Plug using hybrid strategy

        Create a per-VIF linux bridge, then link that bridge to the OVS
        integration bridge via a veth device, setting up the other end
        of the veth device just like a normal OVS port. Then boot the
        VIF on the linux bridge using standard libvirt mechanisms.
        The per-VIF bridge exists so iptables-based firewall rules can
        be applied to the port.
        """
        self._plug_bridge_with_port(instance, vif, port='ovs')
def plug_ivs_ethernet(self, instance, vif):
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance.uuid)
    def plug_ivs_hybrid(self, instance, vif):
        """Plug using hybrid strategy (same as OVS)

        Create a per-VIF linux bridge, then link that bridge to the OVS
        integration bridge via a veth device, setting up the other end
        of the veth device just like a normal IVS port. Then boot the
        VIF on the linux bridge using standard libvirt mechanisms.
        The per-VIF bridge exists so iptables-based firewall rules can
        be applied to the port.
        """
        self._plug_bridge_with_port(instance, vif, port='ivs')
def plug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ivs_hybrid(instance, vif)
else:
self.plug_ivs_ethernet(instance, vif)
    def plug_ib_hostdev(self, instance, vif):
        """Plug an InfiniBand hostdev VIF by registering its port via ebrctl.

        Requires the VIF to carry a physical network (fabric) name.
        Failures of the external command are logged, not raised.
        """
        fabric = vif.get_physical_network()
        if not fabric:
            raise exception.NetworkMissingPhysicalNetwork(
                network_uuid=vif['network']['id']
            )
        pci_slot = vif['profile']['pci_slot']
        device_id = instance['uuid']
        vnic_mac = vif['address']
        try:
            utils.execute('ebrctl', 'add-port', vnic_mac, device_id,
                          fabric, network_model.VIF_TYPE_IB_HOSTDEV,
                          pci_slot, run_as_root=True)
        except processutils.ProcessExecutionError:
            # Best-effort: log and continue rather than failing the boot.
            LOG.exception(_("Failed while plugging ib hostdev vif"),
                          instance=instance)
    def plug_802qbg(self, instance, vif):
        # 802.1Qbg ports are configured by the adjacent switch; no host-side
        # plumbing is required.
        pass

    def plug_802qbh(self, instance, vif):
        # 802.1Qbh ports are configured by the adjacent switch; no host-side
        # plumbing is required.
        pass
    def plug_hw_veb(self, instance, vif):
        """Set the VLAN on the VF backing a macvtap hw_veb VIF."""
        # TODO(vladikr): This code can be removed once the minimum version of
        # Libvirt is increased above 1.3.5, as vlan will be set by libvirt
        if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
            linux_net.set_vf_interface_vlan(
                vif['profile']['pci_slot'],
                mac_addr=vif['address'],
                vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
    def plug_hostdev_physical(self, instance, vif):
        # Whole-device PCI passthrough needs no host-side plumbing.
        pass
def plug_macvtap(self, instance, vif):
vif_details = vif['details']
vlan = vif_details.get(network_model.VIF_DETAILS_VLAN)
if vlan:
vlan_name = vif_details.get(
network_model.VIF_DETAILS_MACVTAP_SOURCE)
phys_if = vif_details.get(network_model.VIF_DETAILS_PHYS_INTERFACE)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(
vlan, phys_if, interface=vlan_name)
    def plug_midonet(self, instance, vif):
        """Plug into MidoNet's network port

        Bind the vif to a MidoNet virtual port: create the tap device,
        then register it with mm-ctl. Failures are logged, not raised.
        """
        dev = self.get_vif_devname(vif)
        port_id = vif['id']
        try:
            linux_net.create_tap_dev(dev)
            utils.execute('mm-ctl', '--bind-port', port_id, dev,
                          run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while plugging vif"), instance=instance)
    def plug_iovisor(self, instance, vif):
        """Plug using PLUMgrid IO Visor Driver

        Connect a network device to their respective
        Virtual Domain in PLUMgrid Platform: create the tap device, add it
        as a gateway port, then bring it up bound to the network/tenant.
        Failures of the external commands are logged, not raised.
        """
        dev = self.get_vif_devname(vif)
        iface_id = vif['id']
        linux_net.create_tap_dev(dev)
        net_id = vif['network']['id']
        tenant_id = instance.project_id
        try:
            utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
                          run_as_root=True)
            utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
                          'access_vm', iface_id, vif['address'],
                          'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id,
                          run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
mac = vif['details'].get(network_model.VIF_DETAILS_TAP_MAC_ADDRESS)
linux_net.create_tap_dev(dev, mac)
network = vif.get('network')
mtu = network.get_meta('mtu') if network else None
linux_net._set_device_mtu(dev, mtu)
    def plug_vhostuser(self, instance, vif):
        # vhostuser ports are set up by the external switch/agent; nothing
        # to do on the host from nova's side.
        pass
    def plug_vrouter(self, instance, vif):
        """Plug into Contrail's network port

        Bind the vif to a Contrail virtual port: create the tap device,
        then register it with vrouter-port-control.
        Failures of the external command are logged, not raised.
        """
        dev = self.get_vif_devname(vif)
        # Defaults when the subnets carry no usable fixed IPs.
        ip_addr = '0.0.0.0'
        ip6_addr = None
        subnets = vif['network']['subnets']
        # Pick the first fixed IP of each family across the subnets.
        for subnet in subnets:
            if not subnet['ips']:
                continue
            ips = subnet['ips'][0]
            if not ips['address']:
                continue
            if (ips['version'] == 4):
                if ips['address'] is not None:
                    ip_addr = ips['address']
            if (ips['version'] == 6):
                if ips['address'] is not None:
                    ip6_addr = ips['address']
        ptype = 'NovaVMPort'
        if (CONF.libvirt.virt_type == 'lxc'):
            ptype = 'NameSpacePort'
        # NOTE(review): when no IPv6 address exists, %s renders ip6_addr as
        # the literal string "None" — presumably vrouter-port-control
        # accepts that; confirm against the tool's CLI contract.
        cmd_args = ("--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s "
                    "--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s"
                    " --vm_name=%s --mac=%s --tap_name=%s --port_type=%s "
                    "--tx_vlan_id=%d --rx_vlan_id=%d" % (vif['id'],
                    instance.uuid, vif['network']['id'],
                    instance.project_id, ip_addr, ip6_addr,
                    instance.display_name, vif['address'],
                    vif['devname'], ptype, -1, -1))
        try:
            multiqueue = self._is_multiqueue_enabled(instance.image_meta,
                                                     instance.flavor)
            linux_net.create_tap_dev(dev, multiqueue=multiqueue)
            utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while plugging vif"), instance=instance)
def _plug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
try:
os_vif.plug(vif, instance_info)
except osv_exception.ExceptionBase as ex:
msg = (_("Failure running os_vif plugin plug method: %(ex)s")
% {'ex': ex})
raise exception.InternalError(msg)
def plug(self, instance, vif):
vif_type = vif['type']
# instance.display_name could be unicode
instance_repr = utils.get_obj_repr_unicode(instance)
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance_repr,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
self._plug_os_vif(instance, vif_obj)
return
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
    def unplug_ovs_hybrid(self, instance, vif):
        """UnPlug using hybrid strategy

        Unhook port from OVS, unhook port from bridge, delete
        bridge, and delete both veth devices.
        Failures of the external commands are logged, not raised.
        """
        try:
            br_name = self.get_br_name(vif['id'])
            v1_name, v2_name = self.get_veth_pair_names(vif['id'])
            # Tear down the per-VIF bridge only if it still exists.
            if linux_net.device_exists(br_name):
                utils.execute('brctl', 'delif', br_name, v1_name,
                              run_as_root=True)
                utils.execute('ip', 'link', 'set', br_name, 'down',
                              run_as_root=True)
                utils.execute('brctl', 'delbr', br_name,
                              run_as_root=True)
            # Removing the OVS port also removes the veth pair.
            linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                          v2_name)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
    def unplug_ivs_hybrid(self, instance, vif):
        """UnPlug using hybrid strategy (same as OVS)

        Unhook port from IVS, unhook port from bridge, delete
        bridge, and delete both veth devices.
        Failures of the external commands are logged, not raised.
        """
        try:
            br_name = self.get_br_name(vif['id'])
            v1_name, v2_name = self.get_veth_pair_names(vif['id'])
            utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name, run_as_root=True)
            # Removing the IVS port also removes the veth pair.
            linux_net.delete_ivs_vif_port(v2_name)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
    def unplug_ib_hostdev(self, instance, vif):
        """Unplug an InfiniBand hostdev VIF by removing its port via ebrctl.

        Requires the VIF to carry a physical network (fabric) name.
        Any failure of the external command is logged, not raised.
        """
        fabric = vif.get_physical_network()
        if not fabric:
            raise exception.NetworkMissingPhysicalNetwork(
                network_uuid=vif['network']['id']
            )
        vnic_mac = vif['address']
        try:
            utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
                          run_as_root=True)
        except Exception:
            # Deliberately broad: unplug is best-effort during teardown.
            LOG.exception(_("Failed while unplugging ib hostdev vif"))
    def unplug_802qbg(self, instance, vif):
        # 802.1Qbg ports are managed by the adjacent switch; nothing to undo.
        pass

    def unplug_802qbh(self, instance, vif):
        # 802.1Qbh ports are managed by the adjacent switch; nothing to undo.
        pass
    def unplug_hw_veb(self, instance, vif):
        """Clear the VLAN on the VF backing a macvtap hw_veb VIF."""
        # TODO(vladikr): This code can be removed once the minimum version of
        # Libvirt is increased above 1.3.5, as vlan will be set by libvirt
        if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
            # The ip utility doesn't accept the MAC 00:00:00:00:00:00.
            # Therefore, keep the MAC unchanged. Later operations on
            # the same VF will not be affected by the existing MAC.
            linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
                                            mac_addr=vif['address'])
    def unplug_hostdev_physical(self, instance, vif):
        # Whole-device PCI passthrough has no host-side plumbing to undo.
        pass

    def unplug_macvtap(self, instance, vif):
        # The VLAN device created by plug_macvtap is left in place; it can
        # be shared by other ports on the same physical interface.
        pass
    def unplug_midonet(self, instance, vif):
        """Unplug from MidoNet network port

        Unbind the vif from a MidoNet virtual port, then delete the tap
        device. Failures are logged, not raised.
        """
        dev = self.get_vif_devname(vif)
        port_id = vif['id']
        try:
            utils.execute('mm-ctl', '--unbind-port', port_id,
                          run_as_root=True)
            linux_net.delete_net_dev(dev)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
try:
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
    def unplug_iovisor(self, instance, vif):
        """Unplug using PLUMgrid IO Visor Driver

        Delete network device and to their respective
        connection to the Virtual Domain in PLUMgrid Platform:
        bring the gateway port down, remove it, then delete the tap device.
        Failures of the external commands are logged, not raised.
        """
        dev = self.get_vif_devname(vif)
        try:
            utils.execute('ifc_ctl', 'gateway', 'ifdown',
                          dev, run_as_root=True)
            utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
                          run_as_root=True)
            linux_net.delete_net_dev(dev)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)
    def unplug_vhostuser(self, instance, vif):
        # vhostuser ports are torn down by the external switch/agent;
        # nothing to do on the host from nova's side.
        pass
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
cmd_args = ("--oper=delete --uuid=%s" % (vif['id']))
try:
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def _unplug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
try:
os_vif.unplug(vif, instance_info)
except osv_exception.ExceptionBase as ex:
msg = (_("Failure running os_vif plugin unplug method: %(ex)s")
% {'ex': ex})
raise exception.InternalError(msg)
def unplug(self, instance, vif):
vif_type = vif['type']
# instance.display_name could be unicode
instance_repr = utils.get_obj_repr_unicode(instance)
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance_repr,
'vif': vif})
if vif_type is None:
msg = _("vif_type parameter must be present for this vif_driver "
"implementation")
raise exception.InternalError(msg)
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
self._unplug_os_vif(instance, vif_obj)
return
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
msg = _("Unexpected vif_type=%s") % vif_type
raise exception.InternalError(msg)
func(instance, vif)
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import asyncio
import logging
import re
import ssl
import sys
import time
from datetime import timedelta
# External imports
import mock
import tornado
from _util_server import (
http_get,
url,
websocket_open,
ws_url,
)
from flaky import flaky
from tornado.httpclient import HTTPError
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
# Bokeh imports
from bokeh._testing.plugins.managed_server_loop import MSL
from bokeh.application import Application
from bokeh.application.handlers import Handler
from bokeh.client import pull_session
from bokeh.core.properties import List, String
from bokeh.core.types import ID
from bokeh.model import Model
from bokeh.server.server import BaseServer, Server
from bokeh.server.tornado import BokehTornado
from bokeh.util.token import (
check_token_signature,
generate_jwt_token,
get_session_id,
get_token_payload,
)
# Module under test
import bokeh.server.server as server # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
logging.basicConfig(level=logging.DEBUG)
async def async_value(value):
    """Return *value* after yielding to the event loop exactly once."""
    await asyncio.sleep(0)  # this ensures we actually return to the loop
    return value
class HookListModel(Model):
    """Minimal Bokeh model used to record hook names inside a document."""
    # Names of lifecycle hooks appended by the tests below.
    hooks = List(String)
class HookTestHandler(Handler):
    """Application handler that records every lifecycle hook invocation.

    Hook names are appended to ``self.hooks`` (and, for document-scoped
    hooks, to the ``hooks`` list of the document's root model) so tests
    can assert the exact ordering of server/session lifecycle events.
    """
    def __init__(self) -> None:
        super().__init__()
        # Invocation counters for the server-level hooks.
        self.load_count = 0
        self.unload_count = 0
        # Stepped through 1..3 during session creation (and 4..6 during
        # destruction, which is expected to be a no-op; see below).
        self.session_creation_async_value = 0
        self.hooks = []
        self.periodic_remover = None
    def modify_document(self, doc):
        # checks that session created hook has run, and session destroyed has not.
        assert self.session_creation_async_value == 3
        doc.title = "Modified"
        doc.roots[0].hooks.append("modify")
        self.hooks.append("modify")
        # Schedule one of each callback flavor so the test can observe them.
        doc.add_next_tick_callback(self.on_next_tick)
        doc.add_timeout_callback(self.on_timeout, 2)
        periodic_cb = doc.add_periodic_callback(self.on_periodic, 3)
        self.periodic_remover = lambda: doc.remove_periodic_callback(periodic_cb)
    def on_server_loaded(self, server_context):
        assert len(server_context.sessions) == 0
        self.load_count += 1
        self.hooks.append("server_loaded")
    def on_server_unloaded(self, server_context):
        self.unload_count += 1
        self.hooks.append("server_unloaded")
    # important to test that this can be async
    async def on_session_created(self, session_context):
        async def setup_document(doc):
            # session creation hook is allowed to init the document before modify_document
            from bokeh.document import DEFAULT_TITLE
            hook_list = HookListModel()
            assert doc.title == DEFAULT_TITLE
            assert len(doc.roots) == 0
            hook_list.hooks.append("session_created")
            doc.add_root(hook_list)
        # Multiple awaits prove the hook really runs asynchronously.
        self.session_creation_async_value = await async_value(1)
        self.session_creation_async_value = await async_value(2)
        self.session_creation_async_value = await async_value(3)
        await session_context.with_locked_document(setup_document)
        self.hooks.append("session_created")
    # this has to be async too
    async def on_session_destroyed(self, session_context):
        # this should be no-op'd, because the session is already destroyed
        async def shutdown_document(doc):
            doc.roots[0].hooks.append("session_destroyed")
        self.session_creation_async_value = await async_value(4)
        self.session_creation_async_value = await async_value(5)
        self.session_creation_async_value = await async_value(6)
        await session_context.with_locked_document(shutdown_document)
        self.hooks.append("session_destroyed")
    def on_next_tick(self):
        self.hooks.append("next_tick")
    def on_timeout(self):
        self.hooks.append("timeout")
    def on_periodic(self):
        # Remove the periodic callback after the first firing so the hook
        # list stays deterministic.
        self.hooks.append("periodic")
        self.periodic_remover()
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_prefix(ManagedServerLoop: MSL) -> None:
    """The default prefix is empty; a given prefix gains a leading slash."""
    app = Application()
    with ManagedServerLoop(app) as server:
        assert server.prefix == ""
    with ManagedServerLoop(app, prefix="foo") as server:
        assert server.prefix == "/foo"
def test_index(ManagedServerLoop: MSL) -> None:
    """The index is None unless explicitly configured."""
    app = Application()
    with ManagedServerLoop(app) as server:
        assert server.index is None
    with ManagedServerLoop(app, index="foo") as server:
        assert server.index == "foo"
async def test_get_sessions(ManagedServerLoop: MSL) -> None:
    """Sessions are counted per application path and in aggregate."""
    application = Application()
    with ManagedServerLoop(application) as server:
        server_sessions = server.get_sessions('/')
        assert len(server_sessions) == 0
        # Each GET of the app page creates one new session.
        await http_get(server.io_loop, url(server))
        server_sessions = server.get_sessions('/')
        assert len(server_sessions) == 1
        await http_get(server.io_loop, url(server))
        server_sessions = server.get_sessions('/')
        assert len(server_sessions) == 2
        # Without a path argument, all sessions are returned.
        server_sessions = server.get_sessions()
        assert len(server_sessions) == 2
        # Unknown paths are rejected.
        with pytest.raises(ValueError):
            server.get_sessions("/foo")
    # With multiple apps, sessions are tracked independently per path.
    with ManagedServerLoop({"/foo": application, "/bar": application}) as server:
        await http_get(server.io_loop, url(server) + "foo")
        server_sessions = server.get_sessions('/foo')
        assert len(server_sessions) == 1
        server_sessions = server.get_sessions('/bar')
        assert len(server_sessions) == 0
        server_sessions = server.get_sessions()
        assert len(server_sessions) == 1
        await http_get(server.io_loop, url(server) + "foo")
        server_sessions = server.get_sessions('/foo')
        assert len(server_sessions) == 2
        server_sessions = server.get_sessions('/bar')
        assert len(server_sessions) == 0
        server_sessions = server.get_sessions()
        assert len(server_sessions) == 2
        await http_get(server.io_loop, url(server) + "bar")
        server_sessions = server.get_sessions('/foo')
        assert len(server_sessions) == 2
        server_sessions = server.get_sessions('/bar')
        assert len(server_sessions) == 1
        server_sessions = server.get_sessions()
        assert len(server_sessions) == 3
token_in_json = re.compile("""["']token["'] *: *["']([^"]+)["']""")
def extract_token_from_json(html):
    """Pull the session token value out of rendered session-page JSON."""
    if not isinstance(html, str):
        import codecs
        html = codecs.decode(html, 'utf-8')
    return token_in_json.search(html).group(1)
use_for_title_in_json = re.compile("""["']use_for_title["'] *: *(false|true)""")
def extract_use_for_title_from_json(html):
    """Pull the use_for_title boolean (as text) out of rendered page JSON."""
    if not isinstance(html, str):
        import codecs
        html = codecs.decode(html, 'utf-8')
    return use_for_title_in_json.search(html).group(1)
def autoload_url(server):
    """Return the autoload.js URL for *server* with a dummy element id."""
    return url(server) + "autoload.js?bokeh-autoload-element=foo"
def resource_files_requested(response, requested=True):
    """Assert the bokeh JS resource files are (or are not) in *response*."""
    if not isinstance(response, str):
        import codecs
        response = codecs.decode(response, 'utf-8')
    resources = ('static/js/bokeh.min.js', 'static/js/bokeh-widgets.min.js')
    for file in resources:
        if requested:
            assert file in response
        else:
            assert file not in response
def test_use_xheaders(ManagedServerLoop: MSL) -> None:
    """use_xheaders=True must be forwarded to the underlying HTTPServer."""
    app = Application()
    with ManagedServerLoop(app, use_xheaders=True) as server:
        assert server._http.xheaders == True
def test_ssl_args_plumbing(ManagedServerLoop: MSL) -> None:
    """ssl_certfile/ssl_keyfile/ssl_password must reach load_cert_chain."""
    cases = [
        (dict(ssl_certfile="foo"),
         dict(certfile='foo', keyfile=None, password=None)),
        (dict(ssl_certfile="foo", ssl_keyfile="baz"),
         dict(certfile='foo', keyfile="baz", password=None)),
        (dict(ssl_certfile="foo", ssl_keyfile="baz", ssl_password="bar"),
         dict(certfile='foo', keyfile="baz", password="bar")),
    ]
    for kwargs, expected in cases:
        with mock.patch.object(ssl, 'SSLContext'):
            with ManagedServerLoop({}, **kwargs) as server:
                call_args = server._http.ssl_options.load_cert_chain.call_args
                assert call_args[0] == ()
                assert call_args[1] == expected
def test_base_server() -> None:
    """BaseServer wires together an IOLoop, a BokehTornado app and an HTTPServer."""
    app = BokehTornado(Application())
    httpserver = HTTPServer(app)
    httpserver.start()
    loop = IOLoop()
    # NOTE(review): IOLoop.make_current() is deprecated in newer Tornado
    # releases — confirm the pinned Tornado version still supports it.
    loop.make_current()
    server = BaseServer(loop, app, httpserver)
    server.start()
    # Both the server and its tornado app must share the explicit loop.
    assert server.io_loop == loop
    assert server._tornado.io_loop == loop
    httpserver.stop()
    server.stop()
    server.io_loop.close()
async def test_server_applications_callable_arg(ManagedServerLoop: MSL) -> None:
    """A bare callable is accepted as an application, alone or in a dict."""
    def set_title(doc):
        doc.title = "Hello, world!"
    with ManagedServerLoop(set_title, port=0) as server:
        await http_get(server.io_loop, url(server))
        session = server.get_sessions('/')[0]
        assert session.document.title == "Hello, world!"
    with ManagedServerLoop({"/foo": set_title}, port=0) as server:
        await http_get(server.io_loop, url(server) + "foo")
        session = server.get_sessions('/foo')[0]
        assert session.document.title == "Hello, world!"
async def test__include_headers(ManagedServerLoop: MSL) -> None:
    """Only headers named in include_headers end up in the token payload."""
    app = Application()
    with ManagedServerLoop(app, include_headers=['Custom']) as server:
        assert len(server.get_sessions('/')) == 0
        response = await http_get(server.io_loop, url(server),
                                  headers={'Custom': 'Test'})
        token = extract_token_from_json(response.body)
        payload = get_token_payload(token)
        assert 'headers' in payload
        assert payload['headers'] == {'Custom': 'Test'}
async def test__exclude_headers(ManagedServerLoop: MSL) -> None:
    """Headers named in exclude_headers are dropped; the rest remain."""
    app = Application()
    with ManagedServerLoop(app, exclude_headers=['Connection', 'Host']) as server:
        assert len(server.get_sessions('/')) == 0
        response = await http_get(server.io_loop, url(server))
        token = extract_token_from_json(response.body)
        payload = get_token_payload(token)
        assert 'headers' in payload
        assert payload["headers"].get("Accept-Encoding") == "gzip"
async def test__include_cookies(ManagedServerLoop: MSL) -> None:
    """Only cookies named in include_cookies end up in the token payload."""
    app = Application()
    with ManagedServerLoop(app, include_cookies=['custom']) as server:
        assert len(server.get_sessions('/')) == 0
        response = await http_get(server.io_loop, url(server),
                                  headers={'Cookie': 'custom = test ; custom2 = test2'})
        token = extract_token_from_json(response.body)
        payload = get_token_payload(token)
        assert 'cookies' in payload
        assert payload['cookies'] == {'custom': 'test'}
async def test__exclude_cookies(ManagedServerLoop: MSL) -> None:
    """Cookies named in exclude_cookies are dropped; the rest remain."""
    app = Application()
    with ManagedServerLoop(app, exclude_cookies=['custom']) as server:
        assert len(server.get_sessions('/')) == 0
        response = await http_get(server.io_loop, url(server),
                                  headers={'Cookie': 'custom = test ; custom2 = test2'})
        token = extract_token_from_json(response.body)
        payload = get_token_payload(token)
        assert 'cookies' in payload
        assert payload['cookies'] == {'custom2': 'test2'}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
@pytest.mark.skipif(sys.platform == "win32",
                    reason="Lifecycle hooks order different on Windows (TODO open issue)")
@flaky(max_runs=10)
def test__lifecycle_hooks(ManagedServerLoop: MSL) -> None:
    """End-to-end check of the full application lifecycle hook sequence.

    Creates a client session against a server, closes it, forces session
    expiration, and verifies the exact order in which the lifecycle hooks
    fired, plus the document state observed on both client and server sides.
    """
    application = Application()
    handler = HookTestHandler()
    application.add(handler)
    with ManagedServerLoop(application, check_unused_sessions_milliseconds=30) as server:
        # wait for the server to expire the session quickly rather than after
        # the usual timeout
        client_session = pull_session(session_id=ID("test__lifecycle_hooks"),
                                      url=url(server),
                                      io_loop=server.io_loop)
        client_doc = client_session.document
        assert len(client_doc.roots) == 1

        server_session = server.get_session('/', client_session.id)
        server_doc = server_session.document
        assert len(server_doc.roots) == 1

        # save for later, since doc.roots will be emptied after the session is closed
        client_hook_list = list(client_doc.roots[0].hooks)
        server_hook_list = list(server_doc.roots[0].hooks)

        client_session.close()
        # expire the session quickly rather than after the usual timeout
        server_session.request_expiration()
        server.io_loop.call_later(0.1, lambda: server.io_loop.stop())
        server.io_loop.start()

    # The hooks must have fired exactly once each, in lifecycle order.
    assert handler.hooks == [
        "server_loaded",
        "session_created",
        "modify",
        "next_tick",
        "timeout",
        "periodic",
        "session_destroyed",
        "server_unloaded",
    ]

    assert handler.load_count == 1
    assert handler.unload_count == 1
    # 3 instead of 6, because locked callbacks on destroyed sessions become no-ops
    assert handler.session_creation_async_value == 3
    assert client_doc.title == "Modified"
    assert server_doc.title == "Modified"

    # only the handler sees "session_destroyed" since the session is shut down at that point.
    assert client_hook_list == ["session_created", "modify"]
    assert server_hook_list == ["session_created", "modify"]
async def test__request_in_session_context(ManagedServerLoop: MSL) -> None:
    """The HTTP request that created a session must be available on its
    session context."""
    application = Application()
    with ManagedServerLoop(application) as server:
        response = await http_get(server.io_loop, url(server) + "?foo=10")
        html = response.body
        token = extract_token_from_json(html)
        sessionid = get_session_id(token)

        server_session = server.get_session('/', sessionid)
        server_doc = server_session.document
        session_context = server_doc.session_context
        # do we have a request
        assert session_context.request is not None
async def test__request_in_session_context_has_arguments(ManagedServerLoop: MSL) -> None:
    """Query-string arguments of the creating request must be exposed via the
    session context."""
    application = Application()
    with ManagedServerLoop(application) as server:
        response = await http_get(server.io_loop, url(server) + "?foo=10")
        html = response.body
        token = extract_token_from_json(html)
        sessionid = get_session_id(token)

        server_session = server.get_session('/', sessionid)
        server_doc = server_session.document
        session_context = server_doc.session_context
        # test if we can get the argument from the request
        assert session_context.request.arguments['foo'] == [b'10']
async def test__no_request_arguments_in_session_context(ManagedServerLoop: MSL) -> None:
    """A request without a query string yields an empty arguments dict on the
    session context."""
    application = Application()
    with ManagedServerLoop(application) as server:
        response = await http_get(server.io_loop, url(server))
        html = response.body
        token = extract_token_from_json(html)
        sessionid = get_session_id(token)

        server_session = server.get_session('/', sessionid)
        server_doc = server_session.document
        session_context = server_doc.session_context
        # if we do not pass any arguments to the url, the request arguments
        # should be empty
        assert len(session_context.request.arguments) == 0
@pytest.mark.parametrize("querystring,requested", [
    ("", True),
    ("&resources=default", True),
    ("&resources=whatever", True),
    ("&resources=none", False),  # only the literal "none" suppresses resources
])
async def test__resource_files_requested(querystring, requested, ManagedServerLoop: MSL) -> None:
    """
    Checks if the loading of resource files is requested by the autoload.js
    response based on the value of the "resources" parameter.
    """
    application = Application()
    with ManagedServerLoop(application) as server:
        response = await http_get(server.io_loop, autoload_url(server) + querystring)
        resource_files_requested(response.body, requested=requested)
async def test__autocreate_session_autoload(ManagedServerLoop: MSL) -> None:
    """An autoload.js request with no session id auto-creates one session."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, autoload_url(server))
        js = response.body
        token = extract_token_from_json(js)
        sessionid = get_session_id(token)

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        # the session returned in the token is the one the server created
        assert sessionid == sessions[0].id
async def test__no_set_title_autoload(ManagedServerLoop: MSL) -> None:
    """autoload.js responses must not ask the embedder to set the page title."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, autoload_url(server))
        js = response.body
        use_for_title = extract_use_for_title_from_json(js)
        assert use_for_title == "false"
async def test__autocreate_session_doc(ManagedServerLoop: MSL) -> None:
    """A document (HTML) request with no session id auto-creates one session."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, url(server))
        html = response.body
        token = extract_token_from_json(html)
        sessionid = get_session_id(token)

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert sessionid == sessions[0].id
async def test__no_autocreate_session_websocket(ManagedServerLoop: MSL) -> None:
    """Opening a websocket with an empty session id must NOT create a session
    (unlike document/autoload requests)."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        token = generate_jwt_token("")
        await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token])

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__use_provided_session_autoload(ManagedServerLoop: MSL) -> None:
    """A ``bokeh-session-id`` query argument on autoload.js is honored."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        response = await http_get(server.io_loop, autoload_url(server) + "&bokeh-session-id=" + expected)
        js = response.body
        token = extract_token_from_json(js)
        sessionid = get_session_id(token)
        assert expected == sessionid

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert expected == sessions[0].id
async def test__use_provided_session_header_autoload(ManagedServerLoop: MSL) -> None:
    """A ``Bokeh-Session-Id`` HTTP header on autoload.js is honored."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        response = await http_get(server.io_loop, autoload_url(server), headers={'Bokeh-Session-Id': expected})
        js = response.body
        token = extract_token_from_json(js)
        sessionid = get_session_id(token)
        assert expected == sessionid

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert expected == sessions[0].id
async def test__use_provided_session_autoload_token(ManagedServerLoop: MSL) -> None:
    """A full pre-generated token passed via ``bokeh-token`` is used verbatim."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        expected_token = generate_jwt_token(expected)
        response = await http_get(server.io_loop, autoload_url(server) + "&bokeh-token=" + expected_token)
        js = response.body
        token = extract_token_from_json(js)
        # the token is passed through unchanged, not re-generated
        assert expected_token == token
        sessionid = get_session_id(token)
        assert expected == sessionid

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert expected == sessions[0].id
async def test__use_provided_session_doc(ManagedServerLoop: MSL) -> None:
    """A ``bokeh-session-id`` query argument on a document request is honored."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        response = await http_get(server.io_loop, url(server) + "?bokeh-session-id=" + expected)
        html = response.body
        token = extract_token_from_json(html)
        sessionid = get_session_id(token)
        assert expected == sessionid

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert expected == sessions[0].id
async def test__use_provided_session_websocket(ManagedServerLoop: MSL) -> None:
    """A session id carried in the websocket subprotocol token is honored."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        token = generate_jwt_token(expected)
        await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token])

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert expected == sessions[0].id
async def test__autocreate_signed_session_autoload(ManagedServerLoop: MSL) -> None:
    """With ``sign_sessions=True`` the auto-created autoload session token must
    carry a valid signature for the configured secret key."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, autoload_url(server))
        js = response.body
        token = extract_token_from_json(js)
        sessionid = get_session_id(token)

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert sessionid == sessions[0].id

        assert check_token_signature(token, signed=True, secret_key='foo')
async def test__autocreate_signed_session_doc(ManagedServerLoop: MSL) -> None:
    """With ``sign_sessions=True`` the auto-created document session token must
    carry a valid signature for the configured secret key."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, url(server))
        html = response.body
        token = extract_token_from_json(html)
        sessionid = get_session_id(token)

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)
        assert sessionid == sessions[0].id

        assert check_token_signature(token, signed=True, secret_key='foo')
@flaky(max_runs=10)
async def test__accept_session_websocket(ManagedServerLoop: MSL) -> None:
    """A websocket opened with a fresh, unexpired token is accepted (ACK)."""
    application = Application()
    with ManagedServerLoop(application, session_token_expiration=10) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, url(server))
        html = response.body
        token = extract_token_from_json(html)

        ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token])
        msg = await ws.read_queue.get()
        assert isinstance(msg, str)
        assert 'ACK' in msg
async def test__reject_expired_session_websocket(ManagedServerLoop: MSL) -> None:
    """A websocket opened with an expired token is rejected (connection closed)."""
    application = Application()
    with ManagedServerLoop(application, session_token_expiration=1) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, url(server))
        html = response.body
        token = extract_token_from_json(html)

        # let the 1-second token expire before connecting
        time.sleep(1.1)
        ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token])
        # None from the read queue means the server closed the connection
        assert await ws.read_queue.get() is None
async def test__reject_wrong_subprotocol_websocket(ManagedServerLoop: MSL) -> None:
    """A websocket using a subprotocol other than "bokeh" is rejected."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        response = await http_get(server.io_loop, url(server))
        html = response.body
        token = extract_token_from_json(html)

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)

        ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["foo", token])
        # None from the read queue means the server closed the connection
        assert await ws.read_queue.get() is None
async def test__reject_no_token_websocket(ManagedServerLoop: MSL) -> None:
    """A websocket that supplies no token subprotocol at all is rejected."""
    application = Application()
    with ManagedServerLoop(application) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        await http_get(server.io_loop, url(server))

        sessions = server.get_sessions('/')
        assert 1 == len(sessions)

        ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["foo"])
        # None from the read queue means the server closed the connection
        assert await ws.read_queue.get() is None
async def test__reject_unsigned_session_autoload(ManagedServerLoop: MSL) -> None:
    """With session signing enabled, a bare (unsigned) session id on autoload.js
    is rejected with an HTTP error and no session is created."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        with (pytest.raises(HTTPError)) as info:
            await http_get(server.io_loop, autoload_url(server) + "&bokeh-session-id=" + expected)
        assert 'Invalid token or session ID' in repr(info.value)

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__reject_unsigned_token_autoload(ManagedServerLoop: MSL) -> None:
    """With session signing enabled, a token generated without the server's
    secret key is rejected on autoload.js and no session is created."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        token = generate_jwt_token(expected)
        with (pytest.raises(HTTPError)) as info:
            await http_get(server.io_loop, autoload_url(server) + "&bokeh-token=" + token)
        assert 'Invalid token or session ID' in repr(info.value)

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__reject_unsigned_session_doc(ManagedServerLoop: MSL) -> None:
    """With session signing enabled, a bare session id in the document URL is
    rejected with an HTTP error and no session is created."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        with (pytest.raises(HTTPError)) as info:
            await http_get(server.io_loop, url(server) + "?bokeh-session-id=" + expected)
        assert 'Invalid token or session ID' in repr(info.value)

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__reject_unsigned_session_header_doc(ManagedServerLoop: MSL) -> None:
    """With session signing enabled, a bare session id in the request header is
    rejected with an HTTP error and no session is created."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        with (pytest.raises(HTTPError)) as info:
            await http_get(server.io_loop, url(server), headers={"Bokeh-Session-Id": expected})
        assert 'Invalid token or session ID' in repr(info.value)

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__reject_unsigned_session_websocket(ManagedServerLoop: MSL) -> None:
    """With session signing enabled, a websocket token generated without the
    server's secret key is rejected and no session is created."""
    application = Application()
    with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        expected = 'foo'
        token = generate_jwt_token(expected)
        await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token])

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__no_generate_session_autoload(ManagedServerLoop: MSL) -> None:
    """With ``generate_session_ids=False`` an autoload.js request that carries
    no session id fails instead of auto-creating one."""
    application = Application()
    with ManagedServerLoop(application, generate_session_ids=False) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        with (pytest.raises(HTTPError)) as info:
            await http_get(server.io_loop, autoload_url(server))
        assert 'No bokeh-session-id provided' in repr(info.value)

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
async def test__no_generate_session_doc(ManagedServerLoop: MSL) -> None:
    """With ``generate_session_ids=False`` a document request that carries no
    session id fails instead of auto-creating one."""
    application = Application()
    with ManagedServerLoop(application, generate_session_ids=False) as server:
        sessions = server.get_sessions('/')
        assert 0 == len(sessions)

        with (pytest.raises(HTTPError)) as info:
            await http_get(server.io_loop, url(server))
        assert 'No bokeh-session-id provided' in repr(info.value)

        sessions = server.get_sessions('/')
        assert 0 == len(sessions)
@pytest.mark.skipif(sys.platform == "win32",
                    reason="multiple processes not supported on Windows")
def test__server_multiple_processes() -> None:
    """``num_procs`` is forwarded to Tornado's ``fork_processes`` (whose
    signature grew an extra argument in Tornado 6)."""
    # Can't use an ioloop in this test
    with mock.patch('tornado.httpserver.HTTPServer.add_sockets'):
        with mock.patch('tornado.process.fork_processes') as tornado_fp:
            application = Application()
            server.Server(application, num_procs=3, port=0)

        assert tornado_fp.mock_calls == [
            mock.call(3, None)
            if tornado.version_info >= (6,)
            else mock.call(3)
        ]
def test__existing_ioloop_with_multiple_processes_exception(ManagedServerLoop, event_loop) -> None:
    """Passing an already-running IOLoop together with ``num_procs > 1`` is an
    error: forked processes cannot share an existing loop."""
    application = Application()
    loop = IOLoop.current()
    with pytest.raises(RuntimeError):
        with ManagedServerLoop(application, io_loop=loop, num_procs=3):
            pass
async def test__actual_port_number(ManagedServerLoop: MSL) -> None:
    """When started with ``port=0`` the server picks a real ephemeral port and
    exposes it via ``server.port``."""
    application = Application()
    with ManagedServerLoop(application, port=0) as server:
        port = server.port
        assert port > 0
        await http_get(server.io_loop, url(server))
def test__ioloop_not_forcibly_stopped() -> None:
    """``Server.stop()`` must not stop the caller-owned Tornado IO loop."""
    # Issue #5494
    application = Application()
    loop = IOLoop()
    loop.make_current()
    server = Server(application, io_loop=loop)
    server.start()
    result = []

    def f():
        server.unlisten()
        server.stop()
        # If server.stop() were to stop the Tornado IO loop,
        # g() wouldn't be called and `result` would remain empty.
        loop.add_timeout(timedelta(seconds=0.01), g)

    def g():
        result.append(None)
        loop.stop()

    loop.add_callback(f)
    loop.start()
    assert result == [None]
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division
import numpy as np
from PIL import Image
from nnabla.logger import logger
from .common import _imread_before, _imread_after, _imsave_before, _imresize_before, _imresize_after
from .image_utils_backend import ImageUtilsBackend
class PilBackend(ImageUtilsBackend):
    """Image read/write/resize backend implemented on top of PIL/Pillow.

    Pillow only supports uint8 for RGB images, so any uint16 request is
    delegated to the next available backend (pypng or cv2).
    """

    # Baseline interpolation names -> PIL resampling filters.
    # Extended per-instance in __init__ depending on the Pillow version.
    _interpolations_map = {
        "nearest": Image.NEAREST,
        "bilinear": Image.BILINEAR,
        "bicubic": Image.BICUBIC,
    }

    def __init__(self):
        ImageUtilsBackend.__init__(self)
        # Copy to an instance attribute first: the original code mutated the
        # class-level dict through `self`, silently sharing the additions
        # across every instance of the class.
        self._interpolations_map = dict(self._interpolations_map)
        if hasattr(Image, "HAMMING"):  # version >3.4.0
            self._interpolations_map["hamming"] = Image.HAMMING

        if hasattr(Image, "BOX"):  # version >3.4.0
            self._interpolations_map["box"] = Image.BOX

        if hasattr(Image, "LANCZOS"):  # version >1.1.3
            self._interpolations_map["lanczos"] = Image.LANCZOS

    @staticmethod
    def convert_pil(pil_image, grayscale, num_channels, return_palette_indices):
        """Convert a PIL image to the requested color mode / channel count.

        Returns a numpy array in the grayscale case, otherwise a PIL image.
        Raises ValueError for 32-bit integer ("I" mode) inputs.
        """
        if pil_image.mode == "I":
            raise ValueError(
                "Input img type seems int32. Currently we don`t support int32 image in pillow backend.")

        # Note:
        # This code block below is copied from
        # https://github.com/scipy/scipy/blob/maintenance/1.2.x/scipy/misc/pilutil.py.
        if pil_image.mode == 'P' and not return_palette_indices:
            # Mode 'P' means there is an indexed "palette". If we leave the mode
            # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
            # containing the indices into the palette, and not a 3-D array
            # containing the RGB or RGBA values.
            if 'transparency' in pil_image.info:
                pil_image = pil_image.convert('RGBA')
            else:
                pil_image = pil_image.convert('RGB')

        if grayscale:
            ret = np.asarray(pil_image.convert("L"))
            if num_channels > 0:
                # Replicate the single gray channel to the requested depth.
                ret = np.broadcast_to(
                    ret[..., np.newaxis], ret.shape + (num_channels,))
            return ret
        elif num_channels == 3:
            return pil_image.convert("RGB")
        elif num_channels == 4:
            return pil_image.convert("RGBA")

        return pil_image

    @staticmethod
    def pil_image_to_ndarray(pil_image, grayscale, num_channels, return_palette_indices):
        """Convert a PIL image to a uint8 numpy array with the requested layout."""
        ret = PilBackend.convert_pil(pil_image, grayscale, num_channels,
                                     return_palette_indices)

        return np.asarray(ret).astype(np.uint8)

    @staticmethod
    def pil_resize_from_ndarray(arr, size, resample):
        """Resize a 2-D ndarray (uint8 or float32) through PIL and return an ndarray."""
        # "F" mode keeps float32 precision; None lets PIL infer uint8 modes.
        mode = "F" if arr.dtype == np.float32 else None
        pil_image = Image.fromarray(arr, mode=mode)
        resized_image = pil_image.resize(size, resample=resample)

        return np.asarray(resized_image)

    def accept(self, path, ext, operator):
        """Return "OK" if this backend can handle `operator` on `path`, else "NG"."""
        if operator in ['resize', 'save']:
            return "OK"
        else:
            if ext in ['.bmp', '.dib', '.eps', '.gif', '.icns', '.ico', '.jpeg', '.jpg', '.msp', '.png', '.ppm', '.pbm', '.pgm', '.pnm', '.tif', '.tiff']:
                return "OK"
            else:
                return "NG"

    def imread(self, path, grayscale=False, size=None, interpolate="bilinear",
               channel_first=False, as_uint16=False, num_channels=-1, return_palette_indices=False):
        """
        Read image by PIL module.

        Notice that PIL only supports uint8 for RGB (not uint16).
        So this imread function returns only uint8 array for both RGB and gray-scale.
        (Currently ignore "I" mode for gray-scale (32bit integer).)

        Args:
            path (str or 'file object'): File path or object to read.
            grayscale (bool):
            size (tuple of int):
                (width, height).
                If None, output img shape depends on the files to read.
            channel_first (bool):
                This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
                Default value is False, which means the img shape is (height, width, channel).
            interpolate (str):
                must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
            as_uint16 (bool):
                If you specify this argument, you can use only False for pil backend.
            num_channels (int):
                channel size of output array.
                Default is -1 which preserves raw image shape.
            return_palette_indices (bool):
                Whether to return a raw palette indices without any conversion or not.
                If this flag is True and read Image has the mode "P",
                then this function returns 2-D array containing the indices into palette.
                We recommend that this flag should be False unless you intend to use the raw palette indices.

        Returns:
            numpy.ndarray
        """
        if as_uint16:
            # uint16 cannot be represented by pillow; hand off to a capable backend.
            logger.warning("pillow only supports uint8 for RGB image."
                           " If you want to load image as uint16,"
                           " install pypng or cv2 and"
                           " nnabla.utils.image_utils automatically change backend to use these module.")
            return self.next_available(path).imread(path, grayscale=grayscale, size=size, interpolate=interpolate,
                                                    channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels)

        _imread_before(grayscale, num_channels)

        pil_img = Image.open(path, mode="r")

        try:
            img = self.pil_image_to_ndarray(
                pil_img, grayscale, num_channels, return_palette_indices)
        except Exception:
            # Was a bare `except:` (which also swallowed KeyboardInterrupt /
            # SystemExit); the intent is only to fall back when pillow cannot
            # decode the image.
            return self.next_available(path).imread(path, grayscale=grayscale, size=size, interpolate=interpolate,
                                                    channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels)

        return _imread_after(img, size, interpolate, channel_first, self.imresize)

    def imsave(self, path, img, channel_first=False, as_uint16=False, auto_scale=True):
        """
        Save image by pillow module.
        Currently, pillow supports only uint8 to save.

        Args:
            path (str): output filename
            img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
            channel_first (bool):
                This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
                Default value is False, which means the img shape is considered as (height, width, channel)
            as_uint16 (bool):
                In this backend, this argument is always False because pillow does not support uint16.
                If True, the request is delegated to another backend.
            auto_scale (bool) :
                Whether upscale pixel values or not.
                If you want to save float image, this argument must be True.
                In pillow backend, only float ([0, 1]) to uint8 ([0, 255]) is supported.
        """
        img = _imsave_before(img, channel_first, auto_scale)

        if img.dtype == np.uint16 or as_uint16:
            # uint16 output cannot be produced by pillow; hand off.
            logger.warning("Pillow only supports uint8 image to save. Cast img to uint8."
                           "If you want to save image as uint16, install pypng or cv2 "
                           "and nnabla.utils.image_utils automatically change backend to use these module.")
            return self.next_available(path).imsave(path, img, channel_first=channel_first, as_uint16=as_uint16, auto_scale=auto_scale)

        if auto_scale and img.dtype != np.uint8:
            # scale float [0, 1] up to uint8 [0, 255]
            img = (img * 255).astype(np.uint8)

        if len(img.shape) == 3 and img.shape[-1] == 1:
            # (h, w, 1) -> (h, w): pillow cannot save a 1-channel 3-D array.
            img = np.squeeze(img, axis=-1)

        Image.fromarray(img).save(path)

    def imresize(self, img, size, interpolate="bilinear", channel_first=False):
        """
        Resize image by pil module.

        Args:
            img (numpy.ndarray): Image array to save.
                Image shape is considered as (height, width, channel) for RGB or (height, width) for gray-scale by default.
            size (tuple of int): (width, height).
            channel_first (bool):
                This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
                Default value is False, which means the img shape is (height, width, channels)
            interpolate (str):
                must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]

        Returns:
            numpy.ndarray whose shape is ('size'[1], 'size'[0], channel) or (size[1], size[0])
        """
        img = _imresize_before(img, size, channel_first,
                               interpolate, list(self._interpolations_map.keys()))

        expand_flag = False
        if len(img.shape) == 3 and img.shape[-1] == 1:
            # (h, w, 1) can not be handled by pil.Image, temporally reshape to (h, w)
            img = img.reshape(img.shape[0], img.shape[1])
            expand_flag = True

        resample = self._interpolations_map[interpolate]

        if img.dtype == np.uint8:
            resized = self.pil_resize_from_ndarray(img, size, resample)
        else:
            # Non-uint8 input: resize each channel as float32, then cast back.
            dtype = img.dtype
            img_float32 = np.asarray(img, np.float32)

            if len(img.shape) == 3:
                resized = np.stack([self.pil_resize_from_ndarray(img_float32[..., i], size, resample)
                                    for i in range(img.shape[-1])], axis=2)
            else:
                resized = self.pil_resize_from_ndarray(
                    img_float32, size, resample)

            resized = np.asarray(resized, dtype)

        if expand_flag:
            # restore the temporarily dropped channel axis
            resized = resized[..., np.newaxis]

        return _imresize_after(resized, channel_first)
|
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Batch normalization module for Sonnet.
This contains the module BatchNorm, which performs batch normalization on
its inputs. It has an optional post-normalization scale and offset, and it
maintains moving averages of the statistics for use at test time.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow as tf
from tensorflow.python.layers import utils
from tensorflow.python.training import moving_averages
def create_beta_initializer():
  """Returns a default initializer for the `beta` in batch norm."""
  # beta is the learned offset; starting at zero applies no initial shift.
  return tf.zeros_initializer()
def create_gamma_initializer():
  """Returns a default initializer for the `gamma` in batch norm."""
  # gamma is the learned scale; starting at one applies no initial scaling.
  return tf.ones_initializer()
def create_mean_initializer():
  """Returns a default initializer for the `moving_mean` in batch norm."""
  # the moving mean starts at zero, matching a standardized distribution.
  return tf.zeros_initializer()
def create_variance_initializer():
  """Returns a default initializer for the `moving_variance` in batch norm."""
  # the moving variance starts at one, matching a standardized distribution.
  return tf.ones_initializer()
class BatchNorm(base.AbstractModule):
"""Batch normalization module, including optional affine transformation.
This module maintains exponential moving averages of the mean and
variance, which can be optionally used to normalize at test time.
At training time, batch statistics (mean, variance) are not shared between
separate connections. The moving averages are shared between separate
connections. At both training and test time, the optional affine
transformation (`* gamma + beta`) is shared between separate connections.
This is also the case for distributed replica training, where the batch
statistics are not aggregated across replicas, but the moving averages are
shared globally.
When connecting the module to the graph, `is_training=True` means that
- Update ops are created to update the moving averages with the current
batch's statistics.
- Features are normalized using the *current batch's statistics*. The
`test_local_stats` setting is ignored. The moving averages are
**not** used.
whereas `is_training=False` means that
- Update ops are not created.
- Features are normalized using either:
- The test batch statistics if `test_local_stats=True` (default).
- The moving averages if `test_local_stats=False`.
Local batch statistics are used by default at test time, but the moving
averages can be used by specifying a flag when connecting. One often wants
to use local batch statistics at test time to track the progress while the
model is trained as it would ensure that moving average updates do not affect
the training curves. Once the training is finished, it's often advantageous
to use moving average statistics, since it would make evaluation agnostic to
the batch size, and might even lead to small improvements over the local
batch statistics.
You can either update the moving averages automatically by setting
`update_ops_collection=None` or by running the ops in the given collection,
by default tf.GraphKeys.UPDATE_OPS.
For example, to run the updates automatically:
bn = BatchNorm(update_ops_collection=None)
train_net = bn(train_inputs, is_training=True)
this does, however, have the effect of blocking the forwards pass of the
network until the update ops have been run and may have a small performance
penalty.
For example, to run the updates manually:
bn = BatchNorm()
train_net = bn(train_inputs, is_training=True)
...
update_ops = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
train_op = tf.group(train_op, update_ops)
Then, whenever `train_op` is run so also are the moving average update ops.
Some batch normalization caveats:
- Batch normalization will remove the effect of adding a bias, so e.g.
`use_bias=False` should be used for an immediately preceding snt.Linear
module.
- If your data batches aren't i.i.d. then batch normalization can allow your
network to 'cheat' by using the batch statistics to peek at the rest of
the batch. This can exhibit itself as a higher test score with
`test_local_stats=True` than `test_local_stats=False`.
"""
GAMMA = "gamma"
BETA = "beta"
MOVING_MEAN = "moving_mean"
MOVING_VARIANCE = "moving_variance"
POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE}
POSSIBLE_PARTITIONER_KEYS = {GAMMA, BETA}
POSSIBLE_REGULARIZER_KEYS = {GAMMA, BETA}
def __init__(self, axis=None, offset=True, scale=False,
decay_rate=0.999, eps=1e-3, initializers=None,
partitioners=None, regularizers=None,
update_ops_collection="update_ops", fused=False,
name="batch_norm"):
"""Constructs a BatchNorm module.
By default reduces over all input tensor dimensions apart from the final
dimension. This has the effect of treating pixels in 1D/2D/3D images as
additional elements of the minibatch.
If this is not the desired behaviour, the user can specify the tensor
indices to reduce over with `axis`.
Args:
axis: Optional iterable of indices of dimensions to reduce over. By
default `None` and all dimensions except the last are reduced over.
offset: Optional boolean to specify whether or not to apply a trained
component-wise bias after the batch normalization and scaling.
scale: Optional boolean to specify whether or not to apply a trained
component-wise scale after the batch normalization.
decay_rate: Decay rate of the exponential moving averages of the mean
and variance.
eps: Small number to avoid dividing by zero when diving by the standard
deviation.
initializers: Optional dict containing ops to initialize the weights of
the affine transform (`gamma` and `beta`).
partitioners: Optional dict containing partitioners to partition the
weights of the affine transform (`gamma` and `beta`).
regularizers: Optional dict containing regularizers for the weights of the
affine transform ('gamma' and 'beta'). As a default, no regularizers are
used. A regularizer should be a function that takes a single `Tensor` as
an input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
update_ops_collection: Name of TensorFlow variable collection to add the
moving average update ops to. If `None`, we instead add the update ops
as control dependencies of the output of the module. This may result in
some slowdown, as the feed-forward of the network is now blocked. By
default, `tf.GraphKeys.UPDATE_OPS`.
fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
name: Name of the module.
Raises:
KeyError: If `initializers` contains any keys other than `gamma`, `beta`,
`moving_mean` or `moving_variance`.
KeyError: If `partitioners` or `regularizers` contains any keys other
than `gamma` or `beta`.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
super(BatchNorm, self).__init__(name=name)
self._axis = axis
self._offset = offset
self._scale = scale
self._decay_rate = decay_rate
self._eps = eps
self._update_ops_collection = update_ops_collection
self._fused = fused
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_PARTITIONER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_REGULARIZER_KEYS)
def _build_statistics(self, input_batch, axis, use_batch_stats, stat_dtype):
    """Builds the statistics part of the graph when using moving variance.

    Args:
      input_batch: Input batch Tensor.
      axis: Indices of `input_batch` to reduce over.
      use_batch_stats: Boolean to indicate if batch statistics should be
        calculated, otherwise moving averages are returned.
      stat_dtype: TensorFlow datatype to use for the moving mean and variance.

    Returns:
      Tuple of (mean, variance), each of the same datatype as `input_batch`.
    """
    # Set up our moving statistics. When connecting in parallel, this is shared.
    if self.MOVING_MEAN not in self._initializers:
        self._initializers[self.MOVING_MEAN] = create_mean_initializer()
    # Non-trainable: these are updated by the ops from `_build_update_ops`,
    # not by the optimizer. Also registered in MOVING_AVERAGE_VARIABLES so
    # EMA/checkpoint tooling can find them.
    self._moving_mean = tf.get_variable(
        "moving_mean",
        dtype=stat_dtype,
        shape=self._mean_shape,
        collections=[
            tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
            tf.GraphKeys.GLOBAL_VARIABLES,
        ],
        initializer=self._initializers[self.MOVING_MEAN],
        trainable=False)

    if self.MOVING_VARIANCE not in self._initializers:
        self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()
    self._moving_variance = tf.get_variable(
        "moving_variance",
        dtype=stat_dtype,
        shape=self._mean_shape,
        collections=[
            tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
            tf.GraphKeys.GLOBAL_VARIABLES,
        ],
        initializer=self._initializers[self.MOVING_VARIANCE],
        trainable=False)

    def build_batch_stats():
        """Builds the batch statistics calculation ops."""
        mean, variance = tf.nn.moments(input_batch, axis,
                                       keep_dims=True, name="normalize_moments")
        return mean, variance

    def build_moving_stats():
        """Retrieves the moving statistics."""
        # If necessary, cast the moving statistics to match the input type.
        # This is required by tf.nn.batch_normalization.
        input_dtype = input_batch.dtype.base_dtype
        if stat_dtype == input_dtype:
            return (
                tf.identity(self._moving_mean),
                tf.identity(self._moving_variance),
            )
        else:
            return (
                tf.cast(self._moving_mean, input_dtype),
                tf.cast(self._moving_variance, input_dtype),
            )

    # smart_cond folds the branch at graph-construction time when
    # `use_batch_stats` is a Python bool, and emits tf.cond otherwise.
    mean, variance = utils.smart_cond(
        use_batch_stats,
        build_batch_stats,
        build_moving_stats,
    )

    return mean, variance
def _build_update_ops(self, mean, variance, is_training):
    """Builds the moving average update ops when using moving variance.

    Args:
      mean: The mean value to update with.
      variance: The variance value to update with.
      is_training: Boolean Tensor to indicate if we're currently in
        training mode.

    Returns:
      Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or
      could be `True`. Returns `None` when `is_training=False`.
    """

    def _make_update_ops():
        """Builds the exponential moving average update ops."""
        mean_update = moving_averages.assign_moving_average(
            variable=self._moving_mean,
            value=mean,
            decay=self._decay_rate,
            zero_debias=False,
            name="update_moving_mean").op
        variance_update = moving_averages.assign_moving_average(
            variable=self._moving_variance,
            value=variance,
            decay=self._decay_rate,
            zero_debias=False,
            name="update_moving_variance").op
        return mean_update, variance_update

    def _make_no_ops():
        return (tf.no_op(), tf.no_op())

    # When `is_training` is statically known to be False, skip creating any
    # update ops at all; otherwise defer the choice to graph time.
    training_static = utils.constant_value(is_training)
    if training_static is not None and not training_static:
        return None

    mean_update, variance_update = utils.smart_cond(
        is_training,
        _make_update_ops,
        _make_no_ops,
    )
    return (mean_update, variance_update)
def _infer_fused_data_format(self, input_batch):
"""Infers the data format for the fused batch norm.
It uses the axis option to infer this information. Specifically, the
axis value (0, 1, 2) corresponds to data format NHWC and the
axis value (0, 2, 3) to data format NCHW.
Args:
input_batch: A Tensor of arbitrary dimension.
Returns:
A string description of the data format NHWC or NCHW.
Raises:
NotImplementedError: for input of dimensionality different from 4.
ValueError: for axis configuration different from (0, 1, 2) and (0, 2, 3).
"""
input_shape = input_batch.get_shape().as_list()
input_shape_len = len(input_shape)
if input_shape_len != 4:
raise NotImplementedError("fused batch norm supports only input with "
"4 dimensions, it received input of "
"dimensionality {:d}".format(input_shape_len))
axis = range(input_shape_len)[:-1] if self._axis is None else self._axis
axis = tuple(axis)
if axis == (0, 1, 2):
# Reduce over the last dimension.
return "NHWC"
elif axis == (0, 2, 3):
# Reduce over the second dimension.
return "NCHW"
else:
raise ValueError("Invalid axis option {}. This does not correspond to"
" either the NHWC format (0, 1, 2) or the NCHW "
"(0, 2, 3).".format(axis))
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
    """Creates a fused batch normalization op."""
    # Store the original shape of the mean and variance.
    mean_shape = mean.get_shape()
    variance_shape = variance.get_shape()
    # The fused batch norm expects the mean, variance, gamma and beta
    # tensors to have dimension 1, so we flatten them to remove the
    # extra dimensions.
    gamma_flatten = tf.reshape(self._gamma, shape=(-1,))
    beta_flatten = tf.reshape(self._beta, shape=(-1,))
    flatten_mean = tf.reshape(mean, shape=(-1,))
    flatten_variance = tf.reshape(variance, shape=(-1,))
    use_batch_stats = tf.convert_to_tensor(use_batch_stats)

    common_args = {
        "scale": gamma_flatten,
        "offset": beta_flatten,
        "epsilon": self._eps,
        "data_format": self._infer_fused_data_format(input_batch),
        "name": "batch_norm"
    }

    def use_batch_stats_fused_batch_norm():
        # With is_training=True, fused_batch_norm computes (and returns) the
        # batch statistics itself; the `mean`/`variance` arguments to this
        # method are not used on this branch.
        return tf.nn.fused_batch_norm(input_batch, mean=None, variance=None,
                                      is_training=True, **common_args)

    def moving_average_fused_batch_norm():
        return tf.nn.fused_batch_norm(input_batch, mean=flatten_mean,
                                      variance=flatten_variance,
                                      is_training=False, **common_args)

    batch_norm_op, mean, variance = utils.smart_cond(
        use_batch_stats, use_batch_stats_fused_batch_norm,
        moving_average_fused_batch_norm)

    # Restore the original (un-flattened) shape so that callers can keep
    # using the statistics, e.g. for the moving-average update ops.
    mean = tf.reshape(mean, mean_shape)
    variance = tf.reshape(variance, variance_shape)
    return batch_norm_op, mean, variance
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats,
                   stat_dtype):
    """Creates a batch normalization op.

    It uses the tf.nn.batch_normalization op by default and the
    tf.nn.fused_batch_norm op to support fused batch normalization.

    Args:
      input_batch: A input Tensor of arbitrary dimension.
      mean: A mean tensor, of the same dtype as `input_batch`.
      variance: A variance tensor, of the same dtype as `input_batch`.
      use_batch_stats: A bool value that indicates whether the operation should
        use the batch statistics.
      stat_dtype: TensorFlow datatype used for the moving mean and variance.

    Returns:
      A batch normalization operation.
      The current mean tensor, of datatype `stat_dtype`.
      The current variance tensor, of datatype `stat_dtype`.
    """
    if self._fused:
        # For the non-training case where not using batch stats,
        # pass in the moving statistic variables directly.
        # These will already be in the correct dtype, even for float16 input.
        # NOTE: the `mean`/`variance` parameters are intentionally ignored on
        # this path; the fused op either computes batch stats itself or reads
        # the moving variables, and returns the stats it actually used.
        batch_norm_op, mean, variance = self._fused_batch_norm_op(
            input_batch,
            self._moving_mean, self._moving_variance, use_batch_stats)
    else:
        batch_norm_op = tf.nn.batch_normalization(
            input_batch,
            mean,
            variance,
            self._beta,
            self._gamma,
            self._eps,
            name="batch_norm")
        # We'll echo the supplied mean and variance so that they can also be used
        # to update the moving statistics. Cast to matching type if necessary.
        if input_batch.dtype.base_dtype != stat_dtype:
            mean = tf.cast(mean, stat_dtype)
            variance = tf.cast(variance, stat_dtype)

    return batch_norm_op, mean, variance
def _build_scale_offset(self, dtype):
    """Sets up optional scale and offset factors."""
    # tf.nn.fused_batch_norm accepts float16 batch data, but not scale/offset.
    if self._fused and dtype == tf.float16:
        dtype = tf.float32

    def _affine_param(name, default_initializer, trainable):
        """Creates one affine-transform variable (`beta` or `gamma`)."""
        if name not in self._initializers:
            self._initializers[name] = default_initializer()
        return tf.get_variable(
            name,
            dtype=dtype,
            shape=self._mean_shape,
            initializer=self._initializers[name],
            partitioner=self._partitioners.get(name, None),
            regularizer=self._regularizers.get(name, None),
            trainable=trainable)

    # The fused batch norm operation needs the beta and gamma variables even
    # when they are not requested, so build them whenever `fused` is set and
    # mark them trainable only according to `_offset`/`_scale`.
    self._beta = None
    if self._offset or self._fused:
        self._beta = _affine_param(
            self.BETA, create_beta_initializer, self._offset)

    self._gamma = None
    if self._scale or self._fused:
        self._gamma = _affine_param(
            self.GAMMA, create_gamma_initializer, self._scale)
def _build(self, input_batch, is_training, test_local_stats=True):
    """Connects the BatchNorm module into the graph.

    Args:
      input_batch: A Tensor of arbitrary dimension. By default, the final
        dimension is not reduced over when computing the minibatch statistics.
      is_training: A boolean to indicate if the module should be connected in
        training mode, meaning the moving averages are updated. Can be a Tensor.
      test_local_stats: A boolean to indicate if local batch statistics should
        be used when `is_training=False`. If not, moving averages are used.
        By default `True`. Can be a Tensor.

    Returns:
      A tensor with the same shape as `input_batch`.

    Raises:
      base.IncompatibleShapeError: If `axis` is not valid for the
        input shape or has negative entries.
      base.NotSupportedError: If `input_batch` has data type of `tf.float16`.
    """
    input_shape = input_batch.get_shape()

    # Validate a user-supplied axis before using it for reductions.
    if self._axis is not None:
        if len(self._axis) > len(input_shape):
            raise base.IncompatibleShapeError(
                "Too many indices specified in axis: len({}) > len({}).".format(
                    self._axis, input_shape))

        if max(self._axis) >= len(input_shape):
            raise base.IncompatibleShapeError(
                "One or more index in axis is too large for "
                "input shape: {} >= {:d}.".format(self._axis, len(input_shape)))

        if min(self._axis) < 0:
            raise base.IncompatibleShapeError(
                "Indices in axis must be non-negative: {} < 0.".format(
                    self._axis))

        axis = self._axis
    else:
        # Reduce over all dimensions except the last.
        axis = tuple(range(len(input_shape))[:-1])

    dtype = input_batch.dtype.base_dtype
    # Maintain moving averages at a minimum precision of tf.float32.
    stat_dtype = tf.float32 if dtype == tf.float16 else dtype

    # Statistics keep a broadcastable shape: reduced axes become size 1.
    self._mean_shape = input_batch.get_shape().as_list()
    for index in axis:
        self._mean_shape[index] = 1

    # `|` works for both Python bools and tf bool Tensors here.
    use_batch_stats = is_training | test_local_stats

    mean, variance = self._build_statistics(input_batch, axis,
                                            use_batch_stats, stat_dtype)

    # Sets up optional gamma and beta parameters
    self._build_scale_offset(dtype)
    # Sets up the batch normalization op.
    out, mean, variance = self._batch_norm_op(input_batch, mean, variance,
                                              use_batch_stats, stat_dtype)
    # Sets up the update op.
    update_ops = self._build_update_ops(mean, variance, is_training)

    # Put update ops in the update ops collection if given, otherwise add as
    # control dependencies of the output.
    if update_ops:
        if self._update_ops_collection:
            for update_op in update_ops:
                tf.add_to_collection(self._update_ops_collection, update_op)
        else:
            # Blocking the feed-forward on the updates guarantees they run,
            # at the cost of some slowdown (see class docstring).
            with tf.control_dependencies(update_ops):
                out = tf.identity(out)

    return out
@property
def initializers(self):
    # Initializer dict as validated in the constructor
    # (keys among gamma/beta/moving_mean/moving_variance).
    return self._initializers
@property
def partitioners(self):
    # Partitioner dict for the affine parameters (gamma/beta).
    return self._partitioners
@property
def regularizers(self):
    # Regularizer dict for the affine parameters (gamma/beta).
    return self._regularizers
@property
def moving_mean(self):
    # Only valid once the module has been connected to the graph.
    self._ensure_is_connected()
    return self._moving_mean
@property
def moving_variance(self):
    # Only valid once the module has been connected to the graph.
    self._ensure_is_connected()
    return self._moving_variance
@property
def beta(self):
    # Offset variable; exists only when constructed with offset=True (or
    # fused mode, which always builds it). Requires a connected module.
    self._ensure_is_connected()
    if self._beta is None:
        raise base.Error(
            "Batch normalization doesn't have an offset, so no beta")
    else:
        return self._beta
@property
def gamma(self):
    # Scale variable; exists only when constructed with scale=True (or
    # fused mode, which always builds it). Requires a connected module.
    self._ensure_is_connected()
    if self._gamma is None:
        raise base.Error(
            "Batch normalization doesn't have a scale, so no gamma")
    else:
        return self._gamma
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2DaemonSetStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Generated from the Kubernetes v1.8.2 OpenAPI spec; prefer regenerating
    # over hand-editing.
    swagger_types = {
        'collision_count': 'int',
        'current_number_scheduled': 'int',
        'desired_number_scheduled': 'int',
        'number_available': 'int',
        'number_misscheduled': 'int',
        'number_ready': 'int',
        'number_unavailable': 'int',
        'observed_generation': 'int',
        'updated_number_scheduled': 'int'
    }

    # Maps snake_case Python attribute names to the camelCase JSON keys used
    # in the API definition.
    attribute_map = {
        'collision_count': 'collisionCount',
        'current_number_scheduled': 'currentNumberScheduled',
        'desired_number_scheduled': 'desiredNumberScheduled',
        'number_available': 'numberAvailable',
        'number_misscheduled': 'numberMisscheduled',
        'number_ready': 'numberReady',
        'number_unavailable': 'numberUnavailable',
        'observed_generation': 'observedGeneration',
        'updated_number_scheduled': 'updatedNumberScheduled'
    }

    def __init__(self, collision_count=None, current_number_scheduled=None, desired_number_scheduled=None, number_available=None, number_misscheduled=None, number_ready=None, number_unavailable=None, observed_generation=None, updated_number_scheduled=None):
        """
        V1beta2DaemonSetStatus - a model defined in Swagger
        """

        self._collision_count = None
        self._current_number_scheduled = None
        self._desired_number_scheduled = None
        self._number_available = None
        self._number_misscheduled = None
        self._number_ready = None
        self._number_unavailable = None
        self._observed_generation = None
        self._updated_number_scheduled = None
        self.discriminator = None

        # Optional fields are assigned only when provided; required fields
        # (current/desired_number_scheduled, number_misscheduled, number_ready)
        # go through their setters unconditionally, which raise on None.
        if collision_count is not None:
          self.collision_count = collision_count
        self.current_number_scheduled = current_number_scheduled
        self.desired_number_scheduled = desired_number_scheduled
        if number_available is not None:
          self.number_available = number_available
        self.number_misscheduled = number_misscheduled
        self.number_ready = number_ready
        if number_unavailable is not None:
          self.number_unavailable = number_unavailable
        if observed_generation is not None:
          self.observed_generation = observed_generation
        if updated_number_scheduled is not None:
          self.updated_number_scheduled = updated_number_scheduled

    @property
    def collision_count(self):
        """
        Gets the collision_count of this V1beta2DaemonSetStatus.
        Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.

        :return: The collision_count of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._collision_count

    @collision_count.setter
    def collision_count(self, collision_count):
        """
        Sets the collision_count of this V1beta2DaemonSetStatus.
        Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.

        :param collision_count: The collision_count of this V1beta2DaemonSetStatus.
        :type: int
        """

        self._collision_count = collision_count

    @property
    def current_number_scheduled(self):
        """
        Gets the current_number_scheduled of this V1beta2DaemonSetStatus.
        The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

        :return: The current_number_scheduled of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._current_number_scheduled

    @current_number_scheduled.setter
    def current_number_scheduled(self, current_number_scheduled):
        """
        Sets the current_number_scheduled of this V1beta2DaemonSetStatus.
        The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

        :param current_number_scheduled: The current_number_scheduled of this V1beta2DaemonSetStatus.
        :type: int
        """
        # Required field in the API schema: reject None.
        if current_number_scheduled is None:
            raise ValueError("Invalid value for `current_number_scheduled`, must not be `None`")

        self._current_number_scheduled = current_number_scheduled

    @property
    def desired_number_scheduled(self):
        """
        Gets the desired_number_scheduled of this V1beta2DaemonSetStatus.
        The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

        :return: The desired_number_scheduled of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._desired_number_scheduled

    @desired_number_scheduled.setter
    def desired_number_scheduled(self, desired_number_scheduled):
        """
        Sets the desired_number_scheduled of this V1beta2DaemonSetStatus.
        The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

        :param desired_number_scheduled: The desired_number_scheduled of this V1beta2DaemonSetStatus.
        :type: int
        """
        # Required field in the API schema: reject None.
        if desired_number_scheduled is None:
            raise ValueError("Invalid value for `desired_number_scheduled`, must not be `None`")

        self._desired_number_scheduled = desired_number_scheduled

    @property
    def number_available(self):
        """
        Gets the number_available of this V1beta2DaemonSetStatus.
        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)

        :return: The number_available of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._number_available

    @number_available.setter
    def number_available(self, number_available):
        """
        Sets the number_available of this V1beta2DaemonSetStatus.
        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)

        :param number_available: The number_available of this V1beta2DaemonSetStatus.
        :type: int
        """

        self._number_available = number_available

    @property
    def number_misscheduled(self):
        """
        Gets the number_misscheduled of this V1beta2DaemonSetStatus.
        The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

        :return: The number_misscheduled of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._number_misscheduled

    @number_misscheduled.setter
    def number_misscheduled(self, number_misscheduled):
        """
        Sets the number_misscheduled of this V1beta2DaemonSetStatus.
        The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

        :param number_misscheduled: The number_misscheduled of this V1beta2DaemonSetStatus.
        :type: int
        """
        # Required field in the API schema: reject None.
        if number_misscheduled is None:
            raise ValueError("Invalid value for `number_misscheduled`, must not be `None`")

        self._number_misscheduled = number_misscheduled

    @property
    def number_ready(self):
        """
        Gets the number_ready of this V1beta2DaemonSetStatus.
        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.

        :return: The number_ready of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._number_ready

    @number_ready.setter
    def number_ready(self, number_ready):
        """
        Sets the number_ready of this V1beta2DaemonSetStatus.
        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.

        :param number_ready: The number_ready of this V1beta2DaemonSetStatus.
        :type: int
        """
        # Required field in the API schema: reject None.
        if number_ready is None:
            raise ValueError("Invalid value for `number_ready`, must not be `None`")

        self._number_ready = number_ready

    @property
    def number_unavailable(self):
        """
        Gets the number_unavailable of this V1beta2DaemonSetStatus.
        The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)

        :return: The number_unavailable of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._number_unavailable

    @number_unavailable.setter
    def number_unavailable(self, number_unavailable):
        """
        Sets the number_unavailable of this V1beta2DaemonSetStatus.
        The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)

        :param number_unavailable: The number_unavailable of this V1beta2DaemonSetStatus.
        :type: int
        """

        self._number_unavailable = number_unavailable

    @property
    def observed_generation(self):
        """
        Gets the observed_generation of this V1beta2DaemonSetStatus.
        The most recent generation observed by the daemon set controller.

        :return: The observed_generation of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, observed_generation):
        """
        Sets the observed_generation of this V1beta2DaemonSetStatus.
        The most recent generation observed by the daemon set controller.

        :param observed_generation: The observed_generation of this V1beta2DaemonSetStatus.
        :type: int
        """

        self._observed_generation = observed_generation

    @property
    def updated_number_scheduled(self):
        """
        Gets the updated_number_scheduled of this V1beta2DaemonSetStatus.
        The total number of nodes that are running updated daemon pod

        :return: The updated_number_scheduled of this V1beta2DaemonSetStatus.
        :rtype: int
        """
        return self._updated_number_scheduled

    @updated_number_scheduled.setter
    def updated_number_scheduled(self, updated_number_scheduled):
        """
        Sets the updated_number_scheduled of this V1beta2DaemonSetStatus.
        The total number of nodes that are running updated daemon pod

        :param updated_number_scheduled: The updated_number_scheduled of this V1beta2DaemonSetStatus.
        :type: int
        """

        self._updated_number_scheduled = updated_number_scheduled

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta2DaemonSetStatus):
            return False

        # Attribute-wise comparison via the instance dicts.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
|
#!/usr/bin/env python2
# coding:utf-8
import binascii
import os
import socket
import struct
import sys
import time
current_path = os.path.dirname(os.path.abspath(__file__))

if __name__ == "__main__":
    # Running stand-alone: put the project's bundled python27 libraries on
    # sys.path and use the real project logger.
    root = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, os.pardir))
    # NOTE(review): data_path is computed but unused in this module — confirm
    # whether it is still needed.
    data_path = os.path.abspath(os.path.join(root, os.pardir, "data", "x_tunnel"))
    python_path = os.path.join(root, 'python27', '1.0')
    noarch_lib = os.path.abspath(os.path.join(python_path, 'lib', 'noarch'))
    #sys.path.insert(0, noarch_lib)
    sys.path.append(noarch_lib)

    if sys.platform == "win32":
        win32_lib = os.path.abspath(os.path.join(python_path, 'lib', 'win32'))
        sys.path.append(win32_lib)
    elif sys.platform.startswith("linux"):
        linux_lib = os.path.abspath(os.path.join(python_path, 'lib', 'linux'))
        #sys.path.insert(0, linux_lib)
        sys.path.append(linux_lib)

    from xlog import getLogger
    xlog = getLogger("heroku_front")
else:
    # Imported as a library: fall back to a no-op logger so the module works
    # without configuring xlog. NOTE: this stub has no `error` method.
    class xlog():
        @staticmethod
        def debug(fmt, *args, **kwargs):
            pass

        @staticmethod
        def info(fmt, *args, **kwargs):
            pass

        @staticmethod
        def warn(fmt, *args, **kwargs):
            pass

        @staticmethod
        def exception(fmt, *args, **kwargs):
            pass

import OpenSSL
SSLError = OpenSSL.SSL.WantReadError

import socks
import check_local_network
from config import config
import cert_util
import openssl_wrap
import simple_http_client
import sni_generater
import hyper

# http://docs.python.org/dev/library/ssl.html
# http://blog.ivanristic.com/2009/07/examples-of-the-information-collected-from-ssl-handshakes.html
# http://src.chromium.org/svn/trunk/src/net/third_party/nss/ssl/sslenum.c
# openssl s_server -accept 443 -key CA.crt -cert CA.crt
# ref: http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html

# Shared TLS context for all probe connections, validating against the
# bundled CA file.
g_cacertfile = os.path.join(current_path, "cacert.pem")
openssl_context = openssl_wrap.SSLConnection.context_builder(ca_certs=g_cacertfile)

try:
    # Random per-process session-id context; best effort — not every OpenSSL
    # build exposes this, hence the blanket except.
    openssl_context.set_session_id(binascii.b2a_hex(os.urandom(10)))
except:
    pass

if hasattr(OpenSSL.SSL, 'SESS_CACHE_BOTH'):
    openssl_context.set_session_cache_mode(OpenSSL.SSL.SESS_CACHE_BOTH)

# Upper bound (seconds) used by the probe entry point when connecting.
max_timeout = 5

default_socket = socket.socket
def load_proxy_config():
    """Installs the upstream proxy from `config` as the socks default proxy.

    Reads PROXY_ENABLE, PROXY_TYPE, PROXY_HOST, PROXY_PORT, PROXY_USER and
    PROXY_PASSWD from the project config. Does nothing when the proxy is
    disabled, and leaves the proxy disabled (with a warning) when the
    configured type is unknown — matching the log message.

    Bug fix: the previous code used a bare `raise` with no active exception
    for an unknown proxy type, which crashed with an unrelated
    RuntimeError; it also called `xlog.error`, which the fallback stub
    logger does not define.
    """
    global default_socket

    if not int(config.PROXY_ENABLE):
        return

    # Dispatch table instead of an if/elif chain.
    proxy_types = {
        "HTTP": socks.HTTP,
        "SOCKS4": socks.SOCKS4,
        "SOCKS5": socks.SOCKS5,
    }
    proxy_type = proxy_types.get(config.PROXY_TYPE)
    if proxy_type is None:
        # Honor the message: log and keep the proxy disabled instead of
        # raising a confusing exception at import time.
        xlog.warn("proxy type %s unknown, disable proxy", config.PROXY_TYPE)
        return

    socks.set_default_proxy(proxy_type, config.PROXY_HOST, config.PROXY_PORT,
                            config.PROXY_USER, config.PROXY_PASSWD)
# Apply the configured upstream proxy (if any) at import time, before any
# sockets are created.
load_proxy_config()

import threading
# NOTE(review): this lock is unused within this module — presumably shared
# with importers for network-failure bookkeeping; confirm before removing.
network_fail_lock = threading.Lock()
def connect_ssl(ip, port=443, timeout=5, top_domain=None, on_close=None):
    """Opens a TLS connection to `ip` using a generated SNI.

    Args:
        ip: IPv4 or IPv6 address string.
        port: TCP port, 443 by default.
        timeout: socket timeout in seconds.
        top_domain: reported top domain; defaults to the generated SNI.
        on_close: callback forwarded to openssl_wrap.SSLConnection.

    Returns:
        The connected SSLConnection, annotated with ip, h2, sni, top_domain
        and timing attributes (create_time, connect_time, handshake_time).

    Raises:
        socket.error: when the peer presents no certificate, or the issuer
            CN does not start with 'DigiCert'.
    """
    sni = sni_generater.get()
    if not top_domain:
        top_domain = sni

    xlog.debug("top_domain:%s sni:%s", top_domain, sni)

    # Use the socks-wrapped socket when an upstream proxy is configured.
    if int(config.PROXY_ENABLE):
        sock = socks.socksocket(socket.AF_INET if ':' not in ip else socket.AF_INET6)
    else:
        sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6)

    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
    # resize socket recv buffer 8K->32K to improve browser related application performance
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32 * 1024)
    sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
    sock.settimeout(timeout)

    ssl_sock = openssl_wrap.SSLConnection(openssl_context, sock, ip, on_close=on_close)
    ssl_sock.set_connect_state()
    ssl_sock.set_tlsext_host_name(sni)

    time_begin = time.time()
    ip_port = (ip, port)
    ssl_sock.connect(ip_port)
    time_connected = time.time()
    ssl_sock.do_handshake()

    try:
        # Detect HTTP/2 via ALPN when the OpenSSL build supports it.
        h2 = ssl_sock.get_alpn_proto_negotiated()
        ssl_sock.h2 = (h2 == "h2")
    except Exception as e:
        #xlog.exception("alpn:%r", e)
        # ALPN unavailable: fall back to the NPN-style `protos` attribute.
        if hasattr(ssl_sock._connection, "protos") and ssl_sock._connection.protos == "h2":
            ssl_sock.h2 = True
        else:
            ssl_sock.h2 = False

    time_handshaked = time.time()

    # report network ok
    check_local_network.network_stat = "OK"
    check_local_network.last_check_time = time_handshaked
    check_local_network.continue_fail_count = 0

    cert = ssl_sock.get_peer_certificate()
    if not cert:
        raise socket.error(' certificate is none')

    issuer_commonname = next((v for k, v in cert.get_issuer().get_components() if k == 'CN'), '')
    if not issuer_commonname.startswith('DigiCert'):
        # and issuer_commonname not in ['DigiCert ECC Extended Validation Server CA']
        # Bug fix: the message used to say "not COMODO" although the check
        # accepts only DigiCert-issued certificates.
        raise socket.error(' certificate is issued by %r, not DigiCert' % (issuer_commonname))

    connect_time = int((time_connected - time_begin) * 1000)
    handshake_time = int((time_handshaked - time_begin) * 1000)

    if __name__ == "__main__":
        xlog.debug("h2:%s", ssl_sock.h2)
        xlog.debug("issued by:%s", issuer_commonname)
        xlog.debug("conn: %d handshake:%d", connect_time, handshake_time)

    # sometimes, we want to use raw tcp socket directly(select/epoll), so setattr it to ssl socket.
    ssl_sock.ip = ip
    ssl_sock._sock = sock
    ssl_sock.fd = sock.fileno()
    ssl_sock.create_time = time_begin
    ssl_sock.connect_time = connect_time
    ssl_sock.handshake_time = handshake_time
    ssl_sock.sni = sni
    ssl_sock.top_domain = top_domain

    return ssl_sock
def get_ssl_cert_domain(ssl_sock):
    """Reads the peer certificate's CN and stores it on `ssl_sock.domain`.

    Raises SSLError when the peer presented no certificate.
    """
    peer_cert = ssl_sock.get_peer_certificate()
    if not peer_cert:
        raise SSLError("no cert")

    parsed_cert = cert_util.SSLCert(peer_cert)
    xlog.info("%s CN:%s", ssl_sock.ip, parsed_cert.cn)
    ssl_sock.domain = parsed_cert.cn
def check_xtunnel_http1(ssl_sock, host):
    """Probes for the x-tunnel service over HTTP/1.1.

    Sends `GET /` with the given Host header. Sets
    `ssl_sock.support_xtunnel = True` (and `request_time` in ms) only when
    the server answers 200 with body "OK". Returns `ssl_sock` for every
    probe outcome except a non-200/non-403 status, where it returns False.
    """
    xlog.warn("ip:%s use http/1.1", ssl_sock.ip)
    start_time = time.time()
    try:
        request_data = 'GET / HTTP/1.1\r\nHost: %s\r\n\r\n' % host
        ssl_sock.send(request_data.encode())

        response = simple_http_client.Response(ssl_sock)
        response.begin(timeout=5)

        server_type = response.getheader('Server', "")
        xlog.debug("status:%d", response.status)
        xlog.debug("Server type:%s", server_type)
        if response.status == 403:
            # Forbidden: keep the socket but leave support_xtunnel False.
            xlog.warn("check status:%d", response.status)
            return ssl_sock

        if response.status != 200:
            xlog.warn("ip:%s status:%d", ssl_sock.ip, response.status)
            return False

        content = response.read(timeout=1)
        if content != "OK":
            # Some other app answered on this host; not the x-tunnel backend.
            xlog.warn("app check content:%s", content)
            return ssl_sock
    except Exception as e:
        xlog.debug("check ip %s http1 e:%r", ssl_sock.ip, e)
        return ssl_sock

    time_cost = (time.time() - start_time) * 1000
    ssl_sock.request_time = time_cost
    xlog.info("check_xtunnel ok, time:%d", time_cost)
    ssl_sock.support_xtunnel = True
    return ssl_sock
def check_xtunnel_http2(ssl_sock, host):
    """Probes for the x-tunnel service over HTTP/2 (via hyper).

    Sets `ssl_sock.support_xtunnel = True` (and `request_time` in ms) only
    when the server answers 200 with body "OK". Always returns `ssl_sock`,
    with the flag indicating the outcome.
    """
    xlog.warn("ip:%s use http/2", ssl_sock.ip)
    start_time = time.time()
    try:
        # Reuse the already-established TLS socket for the h2 connection.
        conn = hyper.HTTP20Connection(ssl_sock, host=host, ip=ssl_sock.ip, port=443)
        conn.request('GET', '/')
    except Exception as e:
        #xlog.exception("xtunnel %r", e)
        xlog.debug("ip:%s http/1.1:%r", ssl_sock.ip, e )
        return ssl_sock

    try:
        response = conn.get_response()
    except Exception as e:
        xlog.exception("http2 get response fail:%r", e)
        return ssl_sock

    xlog.debug("ip:%s http/2", ssl_sock.ip)
    if response.status != 200:
        xlog.warn("app check ip:%s status:%d", ssl_sock.ip, response.status)
        return ssl_sock

    content = response.read()
    if content != "OK":
        # Some other app answered on this host; not the x-tunnel backend.
        xlog.warn("app check content:%s", content)
        return ssl_sock

    ssl_sock.support_xtunnel = True
    time_cost = (time.time() - start_time) * 1000
    ssl_sock.request_time = time_cost
    xlog.info("check_xtunnel ok, time:%d", time_cost)
    return ssl_sock
def test_xtunnel_ip2(ip, top_domain=None, wait_time=0):
    """Connects to `ip` over TLS and probes whether it serves x-tunnel.

    Returns the probed ssl_sock (its `support_xtunnel` attribute reflects
    the result), or False when the TLS connection could not be established.
    """
    try:
        ssl_sock = connect_ssl(ip, timeout=max_timeout, top_domain=top_domain)
        get_ssl_cert_domain(ssl_sock)
    except socket.timeout:
        xlog.warn("connect timeout")
        return False
    except Exception as e:
        xlog.exception("test_xtunnel_ip %s e:%r",ip, e)
        return False

    ssl_sock.support_xtunnel = False
    host = "xxnet4.herokuapp.com"
    xlog.info("host:%s", host)
    time.sleep(wait_time)

    # Pick the probe matching the negotiated protocol.
    if ssl_sock.h2:
        return check_xtunnel_http2(ssl_sock, host)
    return check_xtunnel_http1(ssl_sock, host)
if __name__ == "__main__":
    # CLI: check_ip.py [ip] [top_domain] [wait_time=0]
    # case 1: only ip
    # case 2: ip + domain
    #    connect use domain
    if len(sys.argv) > 1:
        ip = sys.argv[1]
    else:
        # No argument: fall back to a default test IP and print usage.
        ip = "107.21.125.200"
        print("Usage: check_ip.py [ip] [top_domain] [wait_time=0]")
    xlog.info("test ip:%s", ip)

    if len(sys.argv) > 2:
        top_domain = sys.argv[2]
    else:
        top_domain = None

    if len(sys.argv) > 3:
        wait_time = int(sys.argv[3])
    else:
        wait_time = 0

    res = test_xtunnel_ip2(ip, top_domain=top_domain, wait_time=wait_time)
    if not res:
        print("connect fail")
    elif res.support_xtunnel:
        print("success, domain:%s handshake:%d" % (res.domain, res.handshake_time))
    else:
        print("not support")
|
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from corepy.spre.spe import Instruction, DispatchInstruction
from vmx_insts import *
__doc__="""
VMX/AltiVec Instruction Set Architecture (ISA)
To use, import this module and call the Instructions as Python
functions to generate a properly coded version. For example, to
create an addx instruction:
import vmx_isa as av
# add the vectors of unsigned bytes in v2 and v3 and put the result in v3
inst = av.vaddubs(1, 2, 3)
Operands are in the same order as presented in the Programming
Environments Manual.
For a complete reference and details for all instructions, please
refer to:
'AltiVec Technology Programming Environments Manual' from Freescale
Semiconductor.
URL (valid as of June 1, 2006):
http://www.freescale.com/files/32bit/doc/ref_manual/ALTIVECPEM.pdf
"""
# Common base for all VMX/AltiVec instructions; exists so the whole ISA can
# be identified/filtered by a single type.  Encoding behavior comes from
# Instruction plus each subclass's machine_inst/params.
class VMXInstruction(Instruction): pass
# --- VA-form instructions (vD, vA, vB, vC; OPCD=4) -------------------------
# Multiply-(high-)add, multiply-sum, select, permute, shift-left-double and
# the fused floating-point multiply-add family.  'XO' is the extended opcode
# from the AltiVec Programming Environments Manual (PEM).
class vmhaddshs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':32}
class vmhraddshs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':33}
class vmladduhm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':34}
class vmsumubm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':36}
class vmsummbm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':37}
class vmsumuhm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':38}
class vmsumuhs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':39}
class vmsumshm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':40}
class vmsumshs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':41}
class vsel(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':42}
class vperm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':43}
# vsldoi takes a 4-bit shift amount (SH) instead of a vC register.
class vsldoi(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_SH_XO
    params = {'OPCD':4, 'XO':44}
class vmaddfp(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':46}
class vnmsubfp(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_vC_XO
    params = {'OPCD':4, 'XO':47}
# --- Integer add (VX-form: vD, vA, vB) -------------------------------------
# Modulo adds (XO 0/64/128), carry-out (384), then unsigned- and
# signed-saturating adds.  XO increases by 64 per element size (b/h/w).
class vaddubm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':0}
class vadduhm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':64}
class vadduwm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':128}
class vaddcuw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':384}
class vaddubs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':512}
class vadduhs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':576}
class vadduws(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':640}
class vaddsbs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':768}
class vaddshs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':832}
class vaddsws(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':896}
# BUG FIX: this class was originally named `vaddubm`, duplicating (and
# shadowing) the earlier, correct vaddubm (XO=0).  Per the AltiVec PEM,
# XO=1024 encodes Vector Subtract Unsigned Byte Modulo, i.e. vsububm.
class vsububm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1024}
# BUG FIX: originally misnamed `vadduhm`, shadowing the correct vadduhm
# (XO=64).  Per the AltiVec PEM, XO=1088 encodes Vector Subtract Unsigned
# Halfword Modulo, i.e. vsubuhm.
class vsubuhm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1088}
# BUG FIX: originally misnamed `vadduwm`, shadowing the correct vadduwm
# (XO=128).  Per the AltiVec PEM, XO=1152 encodes Vector Subtract Unsigned
# Word Modulo, i.e. vsubuwm.
class vsubuwm(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1152}
# --- Integer subtract (VX-form) --------------------------------------------
# Carry-out (1408) and the unsigned-/signed-saturating subtract family.
class vsubcuw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1408}
class vsububs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1536}
class vsubuhs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1600}
class vsubuws(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1664}
class vsubsbs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1792}
class vsubshs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1856}
class vsubsws(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1920}
# --- Integer max / min / average (VX-form) ---------------------------------
class vmaxub(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':2}
class vmaxuh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':66}
class vmaxuw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':130}
class vmaxsb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':258}
class vmaxsh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':322}
class vmaxsw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':386}
class vminub(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':514}
class vminuh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':578}
class vminuw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':642}
class vminsb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':770}
class vminsh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':834}
class vminsw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':898}
class vavgub(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1026}
class vavguh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1090}
class vavguw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1154}
class vavgsb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1282}
class vavgsh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1346}
class vavgsw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1410}
# --- Rotate / shift / bitwise logical (VX-form) ----------------------------
# Per-element rotates and shifts, whole-register shifts (vsl/vsr), shift
# right algebraic, and the logical ops (and/andc/or/nor).
class vrlb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':4}
class vrlh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':68}
class vrlw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':132}
class vslb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':260}
class vslh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':324}
class vslw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':388}
class vsl(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':452}
class vsrb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':516}
class vsrh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':580}
class vsrw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':644}
class vsr(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':708}
class vsrab(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':772}
class vsrah(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':836}
class vsraw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':900}
class vand(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1028}
class vandc(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1092}
class vor(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1156}
class vnor(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1284}
# --- Vector Status and Control Register moves ------------------------------
# mfvscr reads VSCR into vD; mtvscr writes vB into VSCR.
class mfvscr(VMXInstruction):
    machine_inst = OPCD_vD_XO
    params = {'OPCD':4, 'XO':1540}
class mtvscr(VMXInstruction):
    machine_inst = OPCD_vB_XO
    params = {'OPCD':4, 'XO':1604}
# --- Multiply odd/even, sum-across, and basic float arithmetic -------------
class vmuloub(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':8}
class vmulouh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':72}
class vmulosb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':264}
class vmulosh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':328}
class vmuleub(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':520}
class vmuleuh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':584}
class vmulesb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':776}
class vmulesh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':840}
class vsum4ubs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1544}
class vsum4sbs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1800}
class vsum4shs(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1608}
class vsum2sws(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1672}
class vsumsws(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1928}
class vaddfp(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':10}
class vsubfp(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':74}
# vrefp: per-element reciprocal estimate (vD, vB only).
class vrefp(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':266}
# BUG FIX: XO=330 is Vector Reciprocal Square Root Estimate Floating Point,
# whose PEM mnemonic is "vrsqrtefp".  There is no "vsqrtefp" in AltiVec;
# the original class name dropped the "r".
class vrsqrtefp(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':330}

# Backward-compatible alias for the original (misspelled) class name.
vsqrtefp = vrsqrtefp
# --- Float estimates, rounds, and fixed-point conversions ------------------
# vexptefp/vlogefp: 2^x and log2 estimates; vrfi*: round to integral
# (nearest/zero/+inf/-inf); vcf*/vct*: convert between fixed point and
# float using a UIMM scale factor.
class vexptefp(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':394}
class vlogefp(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':458}
class vrfin(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':522}
class vrfiz(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':586}
class vrfip(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':650}
class vrfim(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':714}
class vcfux(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':778}
class vcfsx(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':842}
class vctuxs(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':906}
# BUG FIX: XO=970 is Vector Convert To Signed Fixed-Point Word Saturate,
# whose PEM mnemonic is "vctsxs" (matching vctuxs above); the original
# class name dropped the trailing "s".
class vctsxs(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':970}

# Backward-compatible alias for the original (misspelled) class name.
vctsx = vctsxs
# --- Float max/min, merge, splat, octet shifts, and pack/unpack-high -------
class vmaxfp(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1034}
class vminfp(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1098}
class vmrghb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':12}
class vmrghh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':76}
class vmrghw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':140}
class vmrglb(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':268}
class vmrglh(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':332}
class vmrglw(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':396}
# vsplt*: replicate element UIMM of vB; vspltis*: replicate a 5-bit
# sign-extended immediate (SIMM).
class vspltb(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':524}
class vsplth(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':588}
class vspltw(VMXInstruction):
    machine_inst = OPCD_vD_UIMM_vB_XO
    params = {'OPCD':4, 'XO':652}
class vspltisb(VMXInstruction):
    machine_inst = OPCD_vD_SIMM_XO
    params = {'OPCD':4, 'XO':780}
class vspltish(VMXInstruction):
    machine_inst = OPCD_vD_SIMM_XO
    params = {'OPCD':4, 'XO':844}
class vspltisw(VMXInstruction):
    machine_inst = OPCD_vD_SIMM_XO
    params = {'OPCD':4, 'XO':908}
class vslo(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1036}
class vsro(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1100}
class vpkuhum(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':14}
class vpkuwum(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':78}
class vpkuhus(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':142}
class vpkuwus(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':206}
class vpkshus(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':270}
class vpkswus(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':334}
class vpkshss(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':398}
class vpkswss(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':462}
class vupkhsb(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':526}
class vupkhsh(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':590}
# BUG FIX: XO=654 is Vector Unpack Low Signed Byte, PEM mnemonic "vupklsb"
# (lowercase L, pairing with vupkhsb above); the original name "vupkisb"
# used "i" in place of "l".
class vupklsb(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':654}

# Backward-compatible alias for the original (misspelled) class name.
vupkisb = vupklsb
# BUG FIX: XO=718 is Vector Unpack Low Signed Halfword, PEM mnemonic
# "vupklsh" (lowercase L, pairing with vupkhsh above); the original name
# "vupkish" used "i" in place of "l".
class vupklsh(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':718}

# Backward-compatible alias for the original (misspelled) class name.
vupkish = vupklsh
# --- Pixel pack/unpack and xor ---------------------------------------------
class vpkpx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':782}
class vupkhpx(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':846}
class vupklpx(VMXInstruction):
    machine_inst = OPCD_vD_vB_XO
    params = {'OPCD':4, 'XO':974}
class vxor(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_XO
    params = {'OPCD':4, 'XO':1220}
# --- Data stream touch instructions (OPCD=31) ------------------------------
# The *t variants set the T (transient) bit; dss/dssall stop one/all
# streams and share XO=822, distinguished by the T field.
class dst(VMXInstruction):
    machine_inst = OPCD_T_STRM_A_B_XO
    params = {'OPCD':31, 'T':0, 'XO':342}
class dstt(VMXInstruction):
    machine_inst = OPCD_T_STRM_A_B_XO
    params = {'OPCD':31, 'T':1, 'XO':342}
class dstst(VMXInstruction):
    machine_inst = OPCD_T_STRM_A_B_XO
    params = {'OPCD':31, 'T':0, 'XO':374}
class dststt(VMXInstruction):
    machine_inst = OPCD_T_STRM_A_B_XO
    params = {'OPCD':31, 'T':1, 'XO':374}
class dss(VMXInstruction):
    machine_inst = OPCD_T_STRM_XO
    params = {'OPCD':31, 'T':0, 'XO':822}
class dssall(VMXInstruction):
    machine_inst = OPCD_T_XO
    params = {'OPCD':31, 'T':1, 'XO':822}
# --- Vector load/store (X-form, OPCD=31; EA = (rA|0) + rB) -----------------
# lvsl/lvsr produce permute control vectors for unaligned access.
class lvebx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':7}
class lvehx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':39}
class lvewx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':71}
class lvsl(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':6}
class lvsr(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':38}
class lvx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':103}
class lvxl(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':359}
class stvebx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':135}
class stvehx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':167}
class stvewx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':199}
class stvx(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':231}
class stvxl(VMXInstruction):
    machine_inst = OPCD_vD_A_B_XO
    params = {'OPCD':31, 'XO':487}
# --- Vector compares (VXR-form: vD, vA, vB plus Rc record bit) -------------
# NOTE(review): vcmpgtsbx (XO=774) and vcmpgtshx (XO=838) appear to be
# absent from this table -- confirm against the AltiVec PEM opcode list.
class vcmpbfpx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':966}
class vcmpeqfpx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':198}
class vcmpequbx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':6}
class vcmpequhx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':70}
class vcmpequwx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':134}
class vcmpgefpx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':454}
class vcmpgtfpx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':710}
class vcmpgtswx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':902}
class vcmpgtubx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':518}
class vcmpgtuhx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':582}
class vcmpgtuwx(VMXInstruction):
    machine_inst = OPCD_vD_vA_vB_RC_XO
    params = {'OPCD':4, 'XO':646}
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
class LinearOperator(object):
"""Base class defining a [batch of] linear operator[s].
Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
* Operators that take advantage of special structure, while providing a
consistent API to users.
#### Subclassing
To enable a public method, subclasses should implement the leading-underscore
version of the method. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable
`apply(x, adjoint=False, name="apply")` a subclass should implement
`_apply(x, adjoint=False)`.
#### Performance contract
Subclasses should implement a method only if it can be done with a reasonable
performance increase over generic dense operations, either in time, parallel
scalability, or memory usage. For example, if the determinant can only be
computed using `tf.matrix_determinant(self.to_dense())`, then determinants
should not be implemented.
Class docstrings should contain an explanation of computational complexity.
Since this is a high-performance library, attention should be paid to detail,
and explanations can include constants as well as Big-O notation.
#### Shape compatibility
`LinearOperator` sub classes should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
shape. Some sub-classes may not support batching.
An example is:
`x` is a batch matrix with compatible shape for `apply` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
x.shape = [B1,...,Bb] + [N, R]
```
`rhs` is a batch matrix with compatible shape for `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
rhs.shape = [B1,...,Bb] + [M, R]
```
#### Example docstring for subclasses.
This operator acts like a (batch) matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
purposes of identifying and working with compatible arguments the shape is
relevant.
Examples:
```python
some_tensor = ... shape = ????
operator = MyLinOp(some_tensor)
operator.shape()
==> [2, 4, 4]
operator.log_determinant()
==> Shape [2] Tensor
x = ... Shape [2, 4, 5] Tensor
operator.apply(x)
==> Shape [2, 4, 5] Tensor
```
#### Shape compatibility
This operator acts on batch matrices with compatible shape.
FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
#### Performance
FILL THIS IN
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
  def __init__(self,
               dtype,
               graph_parents=None,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name=None):
    r"""Initialize the `LinearOperator`.
    **This is a private method for subclass use.**
    **Subclasses should copy-paste this `__init__` documentation.**
    Args:
      dtype: The type of this `LinearOperator`.  Arguments to `apply` and
        `solve` will have to be this type.
      graph_parents: Python list of graph prerequisites of this `LinearOperator`
        Typically tensors that are passed during initialization.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. If `dtype` is real, this is equivalent to being symmetric.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix\
            #Extension_for_non_symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.
    Raises:
      ValueError: If any member of graph_parents is `None` or not a `Tensor`.
      ValueError: If hints are set incorrectly.
    """
    # Check and auto-set flags.  A non-square operator cannot be non-singular
    # or positive definite, so is_square=False contradicts those hints.
    if is_square is False:
      if is_non_singular or is_positive_definite:
        raise ValueError(
            "A non-singular or positive definite operator is always square.")
    self._is_square_set_by_user = is_square
    # Positive definiteness implies non-singularity; auto-set that hint and
    # reject an explicit contradiction.
    if is_positive_definite:
      if is_non_singular is False:
        raise ValueError("A positive definite matrix is always non-singular.")
      is_non_singular = True
    graph_parents = [] if graph_parents is None else graph_parents
    for i, t in enumerate(graph_parents):
      if t is None or not contrib_framework.is_tensor(t):
        raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
    self._dtype = dtype
    self._graph_parents = graph_parents
    self._is_non_singular = is_non_singular
    self._is_self_adjoint = is_self_adjoint
    self._is_positive_definite = is_positive_definite
    self._name = name or type(self).__name__
    # We will cache some tensors to avoid repeatedly adding shape
    # manipulation ops to the graph.
    # Naming convention:
    #   self._cached_X_tensor is the cached version of self._X_tensor.
    self._cached_shape_tensor = None
    self._cached_batch_shape_tensor = None
    self._cached_domain_dimension_tensor = None
    self._cached_range_dimension_tensor = None
    self._cached_tensor_rank_tensor = None

  @contextlib.contextmanager
  def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope."""
    # Nest the requested scope under this operator's name so every op created
    # by the public methods is grouped under self.name in the graph.
    with ops.name_scope(self.name):
      with ops.name_scope(
          name, values=((values or []) + self._graph_parents)) as scope:
        yield scope
  @property
  def dtype(self):
    """The `DType` of `Tensor`s handled by this `LinearOperator`."""
    return self._dtype

  @property
  def name(self):
    """Name prepended to all ops created by this `LinearOperator`."""
    return self._name

  @property
  def graph_parents(self):
    """List of graph dependencies of this `LinearOperator`."""
    return self._graph_parents

  @property
  def is_non_singular(self):
    """Hint (`True`/`False`/`None`) that this operator is non-singular."""
    return self._is_non_singular

  @property
  def is_self_adjoint(self):
    """Hint (`True`/`False`/`None`) that this operator is self-adjoint."""
    return self._is_self_adjoint

  @property
  def is_positive_definite(self):
    """Hint (`True`/`False`/`None`) that this operator is positive definite."""
    return self._is_positive_definite

  @property
  def is_square(self):
    """Return `True/False` depending on if this operator is square."""
    # Static checks done after __init__.  Why?  Because domain/range dimension
    # sometimes requires lots of work done in the derived class after init.
    static_square_check = self.domain_dimension == self.range_dimension
    if self._is_square_set_by_user is False and static_square_check:
      raise ValueError(
          "User set is_square hint to False, but the operator was square.")
    if self._is_square_set_by_user is None:
      return static_square_check
    return self._is_square_set_by_user
  def _shape(self):
    # Write this in derived class to enable all static shape methods.
    raise NotImplementedError("_shape is not implemented.")

  @property
  def shape(self):
    """`TensorShape` of this `LinearOperator`.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns
    `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.
    Returns:
      `TensorShape`, statically determined, may be undefined.
    """
    return self._shape()

  def _shape_tensor(self):
    # Write this in derived class to enable the dynamic shape methods.
    raise NotImplementedError("_shape_tensor is not implemented.")

  def shape_tensor(self, name="shape_tensor"):
    """Shape of this `LinearOperator`, determined at runtime.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
    `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.
    Args:
      name: A name for this `Op`.
    Returns:
      `int32` `Tensor`
    """
    with self._name_scope(name):
      # Be clean by avoiding adding shape Ops to the graph too many times.
      if self._cached_shape_tensor is None:
        # Prefer to use statically defined shape if available.
        if self.shape.is_fully_defined():
          self._cached_shape_tensor = linear_operator_util.shape_tensor(
              self.shape.as_list())
        else:
          self._cached_shape_tensor = self._shape_tensor()
      return self._cached_shape_tensor
  @property
  def batch_shape(self):
    """`TensorShape` of batch dimensions of this `LinearOperator`.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns
    `TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`
    Returns:
      `TensorShape`, statically determined, may be undefined.
    """
    # Derived classes get this "for free" once .shape is implemented.
    return self.shape[:-2]

  def batch_shape_tensor(self, name="batch_shape_tensor"):
    """Shape of batch dimensions of this operator, determined at runtime.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
    `[B1,...,Bb]`.
    Args:
      name: A name for this `Op`.
    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      if self._cached_batch_shape_tensor is None:
        # Prefer to use statically defined shape if available.
        if self.batch_shape.is_fully_defined():
          self._cached_batch_shape_tensor = linear_operator_util.shape_tensor(
              self.batch_shape.as_list(), name="batch_shape")
        else:
          self._cached_batch_shape_tensor = self.shape_tensor()[:-2]
      return self._cached_batch_shape_tensor
  @property
  def tensor_rank(self, name="tensor_rank"):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
    Args:
      name: A name for this `Op`.
    Returns:
      Python integer, or None if the tensor rank is undefined.
    """
    # NOTE(review): because this is a @property, callers can never supply
    # `name` -- the parameter (and the Args doc above) is unreachable via
    # attribute access.  Confirm whether the decorator or the parameter is
    # the intended API.
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      return self.shape.ndims

  def tensor_rank_tensor(self, name="tensor_rank_tensor"):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
    Args:
      name: A name for this `Op`.
    Returns:
      `int32` `Tensor`, determined at runtime.
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      if self._cached_tensor_rank_tensor is None:
        # Prefer to use statically defined shape if available.
        if self.tensor_rank is not None:
          self._cached_tensor_rank_tensor = ops.convert_to_tensor(
              self.tensor_rank)
        else:
          self._cached_tensor_rank_tensor = array_ops.size(
              self.shape_tensor())
      return self._cached_tensor_rank_tensor
  @property
  def domain_dimension(self):
    """Dimension (in the sense of vector spaces) of the domain of this operator.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
    Returns:
      `Dimension` object.
    """
    # Derived classes get this "for free" once .shape is implemented.
    return self.shape[-1]

  def domain_dimension_tensor(self, name="domain_dimension_tensor"):
    """Dimension (in the sense of vector spaces) of the domain of this operator.
    Determined at runtime.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
    Args:
      name: A name for this `Op`.
    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      if self._cached_domain_dimension_tensor is None:
        # Prefer to use statically defined shape if available.
        if self.domain_dimension.value is not None:
          self._cached_domain_dimension_tensor = ops.convert_to_tensor(
              self.domain_dimension.value)
        else:
          self._cached_domain_dimension_tensor = self.shape_tensor()[-1]
      return self._cached_domain_dimension_tensor

  @property
  def range_dimension(self):
    """Dimension (in the sense of vector spaces) of the range of this operator.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
    Returns:
      `Dimension` object.
    """
    # Derived classes get this "for free" once .shape is implemented.
    return self.shape[-2]

  def range_dimension_tensor(self, name="range_dimension_tensor"):
    """Dimension (in the sense of vector spaces) of the range of this operator.
    Determined at runtime.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
    Args:
      name: A name for this `Op`.
    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      if self._cached_range_dimension_tensor is None:
        # Prefer to use statically defined shape if available.
        if self.range_dimension.value is not None:
          self._cached_range_dimension_tensor = ops.convert_to_tensor(
              self.range_dimension.value)
        else:
          self._cached_range_dimension_tensor = self.shape_tensor()[-2]
      return self._cached_range_dimension_tensor
  # Subclasses implement the leading-underscore hooks; the public methods
  # only wrap them in this operator's name scope.
  def _assert_non_singular(self):
    raise NotImplementedError("assert_non_singular is not implemented.")

  def assert_non_singular(self, name="assert_non_singular"):
    """Returns an `Op` that asserts this operator is non singular."""
    with self._name_scope(name):
      return self._assert_non_singular()

  def _assert_positive_definite(self):
    raise NotImplementedError("assert_positive_definite is not implemented.")

  def assert_positive_definite(self, name="assert_positive_definite"):
    """Returns an `Op` that asserts this operator is positive definite.
    Here, positive definite means that the quadratic form `x^H A x` has positive
    real part for all nonzero `x`. Note that we do not require the operator to
    be self-adjoint to be positive definite.
    Args:
      name: A name to give this `Op`.
    Returns:
      An `Op` that asserts this operator is positive definite.
    """
    with self._name_scope(name):
      return self._assert_positive_definite()

  def _assert_self_adjoint(self):
    raise NotImplementedError("assert_self_adjoint is not implemented.")

  def assert_self_adjoint(self, name="assert_self_adjoint"):
    """Returns an `Op` that asserts this operator is self-adjoint."""
    with self._name_scope(name):
      return self._assert_self_adjoint()
def _check_input_dtype(self, arg):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != self.dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s"
% (self.dtype, arg.dtype, arg))
  def _apply(self, x, adjoint=False, adjoint_arg=False):
    # Subclasses implement the actual matrix-vector/matrix product here.
    raise NotImplementedError("_apply is not implemented.")

  def apply(self, x, adjoint=False, adjoint_arg=False, name="apply"):
    """Transform `x` with left multiplication:  `x --> Ax`.
    Args:
      x: `Tensor` with compatible shape and same `dtype` as `self`.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
      adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
        the hermitian transpose (transposition and complex conjugation).
      name: A name for this `Op`.
    Returns:
      A `Tensor` with shape `[..., M, R]` and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)
      # Which dimensions must agree for the product to be defined depends on
      # the adjoint flags: adjoint flips self's contracting dim, adjoint_arg
      # flips x's.
      self_dim = -2 if adjoint else -1
      arg_dim = -1 if adjoint_arg else -2
      self.shape[self_dim].assert_is_compatible_with(x.get_shape()[arg_dim])
      return self._apply(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
  def _determinant(self):
    # Subclass hook backing the public `determinant`.
    # NOTE(review): message says "_det" while the method is `_determinant` —
    # runtime string kept as-is.
    raise NotImplementedError("_det is not implemented.")
  def determinant(self, name="det"):
    """Determinant for every batch member.

    Args:
      name: A name for this `Op`.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError: If `self.is_square` is `False`.
    """
    # `is False` deliberately lets a hint of `None` (unknown) through.
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):
      return self._determinant()
  def _log_abs_determinant(self):
    # Subclass hook backing the public `log_abs_determinant`.
    raise NotImplementedError("_log_abs_det is not implemented.")
  def log_abs_determinant(self, name="log_abs_det"):
    """Log absolute value of determinant for every batch member.

    Args:
      name: A name for this `Op`.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError: If `self.is_square` is `False`.
    """
    # `is False` deliberately lets a hint of `None` (unknown) through.
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):
      return self._log_abs_determinant()
  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # Since this is an exact solve method for all rhs, this will only be
    # available for non-singular (batch) operators, in particular the operator
    # must be square.
    raise NotImplementedError("_solve is not implemented.")
  def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
    """Solve `R` (batch) systems of equations exactly: `A X = rhs`.

    Examples:

    ```python
    # Create an operator acting like a 10 x 2 x 2 matrix.
    operator = LinearOperator(...)
    operator.shape # = 10 x 2 x 2

    # Solve one linear system (R = 1) for every member of the length 10 batch.
    RHS = ... # shape 10 x 2 x 1
    X = operator.solve(RHS) # shape 10 x 2 x 1

    # Solve five linear systems (R = 5) for every member of the length 10 batch.
    RHS = ... # shape 10 x 2 x 5
    X = operator.solve(RHS)
    X[3, :, 2] # Solution to the linear system A[3, :, :] X = RHS[3, :, 2]
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`. If `True`, solve the system involving the adjoint
        of this `LinearOperator`: `A^H X = rhs`.
      adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is False.
    """
    # An exact solve needs a (believed) non-singular, square operator.
    # `is False` deliberately lets a hint of `None` (unknown) through.
    if self.is_non_singular is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "be singular.")
    if self.is_square is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name, values=[rhs]):
      rhs = ops.convert_to_tensor(rhs, name="rhs")
      self._check_input_dtype(rhs)
      # Row count of A (or A^H when adjoint) must match rhs's contracted dim.
      self_dim = -1 if adjoint else -2
      arg_dim = -1 if adjoint_arg else -2
      self.shape[self_dim].assert_is_compatible_with(rhs.get_shape()[arg_dim])
      return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
  def _to_dense(self):
    """Generic and often inefficient implementation.  Override often."""
    # Materialize A as a dense Tensor by multiplying it against the identity:
    # A @ I = A.  Prefer static shape info when available.
    if self.batch_shape.is_fully_defined():
      batch_shape = self.batch_shape
    else:
      batch_shape = self.batch_shape_tensor()
    if self.domain_dimension.value is not None:
      n = self.domain_dimension.value
    else:
      n = self.domain_dimension_tensor()
    eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
    return self.apply(eye)
def to_dense(self, name="to_dense"):
"""Return a dense (batch) matrix representing this operator."""
with self._name_scope(name):
return self._to_dense()
  def _diag_part(self):
    """Generic and often inefficient implementation.  Override often."""
    # Densify then extract; subclasses with structure should do better.
    return array_ops.matrix_diag_part(self.to_dense())
  def diag_part(self, name="diag_part"):
    """Efficiently get the [batch] diagonal part of this operator.

    If this operator has shape `[B1,...,Bb, M, N]`, this returns a
    `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
    `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.

    ```python
    my_operator = LinearOperatorDiag([1., 2.])

    # Efficiently get the diagonal
    my_operator.diag_part()
    ==> [1., 2.]

    # Equivalent, but inefficient method
    tf.matrix_diag_part(my_operator.to_dense())
    ==> [1., 2.]
    ```

    Args:
      name:  A name for this `Op`.

    Returns:
      diag_part:  A `Tensor` of same `dtype` as self.
    """
    with self._name_scope(name):
      return self._diag_part()
  def _add_to_tensor(self, x):
    # Override if a more efficient implementation is available.
    return self.to_dense() + x
  def add_to_tensor(self, x, name="add_to_tensor"):
    """Add matrix represented by this operator to `x`.  Equivalent to `A + x`.

    Args:
      x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
      name: A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)
      return self._add_to_tensor(x)
|
|
# -*- coding: utf-8 -*-
import os
import warnings
from datetime import date
from django.conf import settings
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.db.models.base import model_unpickle
from django.db.models.query_utils import DeferredAttribute
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import DontUsePageAttributeWarning
from cms.models.placeholdermodel import Placeholder
from cms.plugin_rendering import PluginContext, render_plugin
from cms.utils.helpers import reversion_register
from cms.utils import get_cms_setting
from mptt.models import MPTTModel, MPTTModelBase
class BoundRenderMeta(object):
    """Mutable, per-class snapshot of a plugin's RenderMeta options.

    A fresh instance is attached to every plugin model class so that render
    state (index/total counters) never leaks between classes.
    """

    def __init__(self, meta):
        # Default counters describe a single-item render pass.
        self.index, self.total = 0, 1
        # Fall back to False when the source meta omits the flag (or is None).
        self.text_enabled = getattr(meta, 'text_enabled', False)
class PluginModelBase(MPTTModelBase):
    """
    Metaclass for all CMSPlugin subclasses. This class should not be used for
    any other type of models.
    """
    def __new__(cls, name, bases, attrs):
        # remove RenderMeta from the plugin class
        attr_meta = attrs.pop('RenderMeta', None)
        # create a new class (using the super-metaclass)
        new_class = super(PluginModelBase, cls).__new__(cls, name, bases, attrs)
        # if there is a RenderMeta in attrs, use this one
        if attr_meta:
            meta = attr_meta
        else:
            # else try to use the one from the superclass (if present)
            meta = getattr(new_class, '_render_meta', None)
        # set a new BoundRenderMeta to prevent leaking of state
        new_class._render_meta = BoundRenderMeta(meta)
        # turn 'myapp_mymodel' into 'cmsplugin_mymodel' by removing the
        # 'myapp_' bit from the db_table name.
        # Only done for real subclasses: bases contain a PluginModelBase-built
        # class only once CMSPlugin itself exists.
        if [base for base in bases if isinstance(base, PluginModelBase)]:
            splitter = '%s_' % new_class._meta.app_label
            if splitter in new_class._meta.db_table:
                splitted = new_class._meta.db_table.split(splitter, 1)
                table_name = 'cmsplugin_%s' % splitted[1]
            else:
                table_name = new_class._meta.db_table
            new_class._meta.db_table = table_name
        return new_class
class CMSPlugin(MPTTModel):
    '''
    The base class for a CMS plugin model. When defining a new custom plugin, you should
    store plugin-instance specific information on a subclass of this class.
    An example for this would be to store the number of pictures to display in a gallery.
    Two restrictions apply when subclassing this to use in your own models:
    1. Subclasses of CMSPlugin *cannot be further subclassed*
    2. Subclasses of CMSPlugin cannot define a "text" field.
    '''
    __metaclass__ = PluginModelBase
    # Placeholder is nullable so a plugin can be detached before deletion
    # (see delete_with_public below).
    placeholder = models.ForeignKey(Placeholder, editable=False, null=True)
    parent = models.ForeignKey('self', blank=True, null=True, editable=False)
    position = models.PositiveSmallIntegerField(_("position"), blank=True, null=True, editable=False)
    language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False)
    plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False)
    #plugin_tips = models.CharField(_("plugin_tips"), max_length=300, blank=True)
    creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
    changed_date = models.DateTimeField(auto_now=True)
    # MPTT bookkeeping columns (managed by django-mptt).
    level = models.PositiveIntegerField(db_index=True, editable=False)
    lft = models.PositiveIntegerField(db_index=True, editable=False)
    rght = models.PositiveIntegerField(db_index=True, editable=False)
    tree_id = models.PositiveIntegerField(db_index=True, editable=False)
    # Filled externally with child plugin instances when rendering trees.
    child_plugin_instances = None
    class Meta:
        app_label = 'cms'
    class RenderMeta:
        index = 0
        total = 1
        text_enabled = False
    def __reduce__(self):
        """
        Provide pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        model = self.__class__
        # The obvious thing to do here is to invoke super().__reduce__()
        # for the non-deferred case. Don't do that.
        # On Python 2.4, there is something wierd with __reduce__,
        # and as a result, the super call will cause an infinite recursion.
        # See #10547 and #12121.
        defers = []
        pk_val = None
        if self._deferred:
            factory = deferred_class_factory
            for field in self._meta.fields:
                if isinstance(self.__class__.__dict__.get(field.attname),
                              DeferredAttribute):
                    defers.append(field.attname)
                    if pk_val is None:
                        # The pk_val and model values are the same for all
                        # DeferredAttribute classes, so we only need to do this
                        # once.
                        obj = self.__class__.__dict__[field.attname]
                        model = obj.model_ref()
        else:
            factory = lambda x, y: x
        return (model_unpickle, (model, defers, factory), data)
    def __unicode__(self):
        return unicode(self.id)
    def get_plugin_name(self):
        # Human-readable name registered with the plugin pool.
        from cms.plugin_pool import plugin_pool
        return plugin_pool.get_plugin(self.plugin_type).name
    def get_short_description(self):
        instance = self.get_plugin_instance()[0]
        if instance is not None:
            return unicode(instance)
        return _("<Empty>")
    def get_plugin_class(self):
        from cms.plugin_pool import plugin_pool
        return plugin_pool.get_plugin(self.plugin_type)
    def get_plugin_instance(self, admin=None):
        """Return (instance, plugin) where instance is the concrete subclass
        row for this CMSPlugin (or None if it no longer exists)."""
        plugin_class = self.get_plugin_class()
        plugin = plugin_class(plugin_class.model, admin) # needed so we have the same signature as the original ModelAdmin
        if plugin.model != self.__class__: # and self.__class__ == CMSPlugin:
            # (if self is actually a subclass, getattr below would break)
            try:
                instance = plugin_class.model.objects.get(cmsplugin_ptr=self)
                instance._render_meta = self._render_meta
            except (AttributeError, ObjectDoesNotExist):
                instance = None
        else:
            instance = self
        return instance, plugin
    def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
        """Render this plugin to HTML; returns "" when there is nothing to
        render (missing instance, or admin preview disabled)."""
        instance, plugin = self.get_plugin_instance()
        if instance and not (admin and not plugin.admin_preview):
            if not isinstance(placeholder, Placeholder):
                placeholder = instance.placeholder
            placeholder_slot = placeholder.slot
            current_app = context.current_app if context else None
            context = PluginContext(context, instance, placeholder, current_app=current_app)
            context = plugin.render(context, instance, placeholder_slot)
            if plugin.render_plugin:
                template = hasattr(instance, 'render_template') and instance.render_template or plugin.render_template
                if not template:
                    raise ValidationError("plugin has no render_template: %s" % plugin.__class__)
            else:
                template = None
            return render_plugin(context, instance, placeholder, template, processors, context.current_app)
        return ""
    def get_media_path(self, filename):
        pages = self.placeholder.page_set.all()
        if pages.count():
            return pages[0].get_media_path(filename)
        else: # django 1.0.2 compatibility
            today = date.today()
            return os.path.join(get_cms_setting('PAGE_MEDIA_PATH'),
                                str(today.year), str(today.month), str(today.day), filename)
    @property
    def page(self):
        warnings.warn(
            "Don't use the page attribute on CMSPlugins! CMSPlugins are not "
            "guaranteed to have a page associated with them!",
            DontUsePageAttributeWarning)
        return self.placeholder.page if self.placeholder_id else None
    def get_instance_icon_src(self):
        """
        Get src URL for instance's icon
        """
        instance, plugin = self.get_plugin_instance()
        if instance:
            return plugin.icon_src(instance)
        else:
            return u''
    def get_instance_icon_alt(self):
        """
        Get alt text for instance's icon
        """
        instance, plugin = self.get_plugin_instance()
        if instance:
            return unicode(plugin.icon_alt(instance))
        else:
            return u''
    def save(self, no_signals=False, *args, **kwargs):
        if no_signals: # ugly hack because of mptt
            super(CMSPlugin, self).save_base(cls=self.__class__)
        else:
            super(CMSPlugin, self).save()
    def set_base_attr(self, plugin):
        # Copy the shared CMSPlugin columns onto another plugin object.
        for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'level', 'lft', 'rght', 'position', 'tree_id']:
            setattr(plugin, attr, getattr(self, attr))
    def copy_plugin(self, target_placeholder, target_language, plugin_trail):
        """
        Copy this plugin and return the new plugin.
        """
        try:
            plugin_instance, cls = self.get_plugin_instance()
        except KeyError: # plugin type not found anymore
            return
        # set up some basic attributes on the new_plugin
        new_plugin = CMSPlugin()
        new_plugin.placeholder = target_placeholder
        # MPTT fields are reset so save() re-inserts the node into the tree.
        new_plugin.tree_id = None
        new_plugin.lft = None
        new_plugin.rght = None
        new_plugin.level = None
        # In the block below, we use plugin_trail as a kind of breadcrumb trail
        # through the tree.
        #
        # we assign a parent to our new plugin
        if not self.parent:
            # We're lucky; we don't need to find a parent. We'll just put
            # new_plugin into the plugin_trail for potential children to use,
            # and move on.
            plugin_trail[:] = [new_plugin]
        else:
            # We will need to find a parent for our new_plugin.
            marker = plugin_trail.pop()
            # are we going up or down?
            level_difference = self.level - marker.level
            if level_difference == 1:
                # going up; put the marker back
                plugin_trail.append(marker)
            else:
                # going down; remove more items from plugin_trail
                if level_difference < 0:
                    plugin_trail[:] = plugin_trail[:level_difference]
            # assign new_plugin.parent
            new_plugin.parent = plugin_trail[-1]
            # new_plugin becomes the last item in the tree for the next round
            plugin_trail.append(new_plugin)
        new_plugin.level = None
        new_plugin.language = target_language
        new_plugin.plugin_type = self.plugin_type
        new_plugin.position = self.position
        new_plugin.save()
        if plugin_instance:
            # Mirror the freshly-saved CMSPlugin row onto the concrete
            # subclass instance, then persist and copy its relations.
            plugin_instance.pk = new_plugin.pk
            plugin_instance.id = new_plugin.pk
            plugin_instance.placeholder = target_placeholder
            plugin_instance.tree_id = new_plugin.tree_id
            plugin_instance.lft = new_plugin.lft
            plugin_instance.rght = new_plugin.rght
            plugin_instance.level = new_plugin.level
            plugin_instance.cmsplugin_ptr = new_plugin
            plugin_instance.language = target_language
            plugin_instance.parent = new_plugin.parent
            plugin_instance.position = new_plugin.position # added to retain the position when creating a public copy of a plugin
            plugin_instance.save()
            old_instance = plugin_instance.__class__.objects.get(pk=self.pk)
            plugin_instance.copy_relations(old_instance)
        return new_plugin
    def post_copy(self, old_instance, new_old_ziplist):
        """
        Handle more advanced cases (eg Text Plugins) after the original is
        copied
        """
        pass
    def copy_relations(self, old_instance):
        """
        Handle copying of any relations attached to this plugin. Custom plugins
        have to do this themselves!
        """
        pass
    def delete_with_public(self):
        """
        Delete the public copy of this plugin if it exists,
        then delete the draft
        """
        position = self.position
        slot = self.placeholder.slot
        page = self.placeholder.page
        if page and getattr(page, 'publisher_public'):
            try:
                placeholder = Placeholder.objects.get(page=page.publisher_public, slot=slot)
            except Placeholder.DoesNotExist:
                pass
            else:
                public_plugin = CMSPlugin.objects.filter(placeholder=placeholder, position=position)
                public_plugin.delete()
        # Detach from the placeholder before deleting the draft row.
        self.placeholder = None
        self.delete()
    def has_change_permission(self, request):
        # Fall back through page -> placeholder -> parent for a permission
        # source; deny when none exists.
        page = self.placeholder.page if self.placeholder else None
        if page:
            return page.has_change_permission(request)
        elif self.placeholder:
            return self.placeholder.has_change_permission(request)
        elif self.parent:
            return self.parent.has_change_permission(request)
        return False
    def is_first_in_placeholder(self):
        return self.position == 0
    def is_last_in_placeholder(self):
        """
        WARNING: this is a rather expensive call compared to is_first_in_placeholder!
        """
        return self.placeholder.cmsplugin_set.filter(parent__isnull=True).order_by('-position')[0].pk == self.pk
    def get_position_in_placeholder(self):
        """
        1 based position!
        """
        return self.position + 1
    def num_children(self):
        # NOTE(review): returns None (not 0) when child_plugin_instances is
        # empty or unset -- callers appear to rely on truthiness; verify
        # before changing.
        if self.child_plugin_instances:
            return len(self.child_plugin_instances)
# Register CMSPlugin with django-reversion (via the cms.utils.helpers wrapper)
# so plugin rows participate in revision tracking.
reversion_register(CMSPlugin)
def deferred_class_factory(model, attrs):
    """
    Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects. The "pk_value" ties the
    deferred attributes to a particular instance of the model.
    """
    # Proxy Meta so the deferred class shares the concrete model's table.
    class Meta:
        pass
    setattr(Meta, "proxy", True)
    setattr(Meta, "app_label", model._meta.app_label)
    # Carry the render options over so _render_meta behaves identically.
    class RenderMeta:
        pass
    setattr(RenderMeta, "index", model._render_meta.index)
    setattr(RenderMeta, "total", model._render_meta.total)
    setattr(RenderMeta, "text_enabled", model._render_meta.text_enabled)
    # The app_cache wants a unique name for each model, otherwise the new class
    # won't be created (we get an old one back). Therefore, we generate the
    # name using the passed in attrs. It's OK to reuse an old case if the attrs
    # are identical.
    name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
    overrides = dict([(attr, DeferredAttribute(attr, model))
                      for attr in attrs])
    # FIX: "Meta" must be the proxy options class built above. Assigning
    # RenderMeta here (as the previous code did) dropped proxy=True/app_label
    # and installed the render options as the model's Meta.
    overrides["Meta"] = Meta
    overrides["RenderMeta"] = RenderMeta
    overrides["__module__"] = model.__module__
    overrides["_deferred"] = True
    return type(name, (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module dedicated functions/classes dealing with rate limiting requests.
"""
import collections
import copy
import httplib
import math
import re
import time
from oslo_serialization import jsonutils
from oslo_utils import importutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder.api.views import limits as limits_views
from cinder.api import xmlutil
from cinder.i18n import _
from cinder import quota
from cinder import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1
PER_MINUTE = 60
PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the limits resource.

    Produces a <limits> root holding per-verb <rate>/<limit> elements and a
    flat <absolute> section of name/value quota pairs.
    """
    def construct(self):
        root = xmlutil.TemplateElement('limits', selector='limits')
        # Rate-limit section: one <rate> per (uri, regex), each with <limit>s.
        rates = xmlutil.SubTemplateElement(root, 'rates')
        rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
        rate.set('uri', 'uri')
        rate.set('regex', 'regex')
        limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
        limit.set('value', 'value')
        limit.set('verb', 'verb')
        limit.set('remaining', 'remaining')
        limit.set('unit', 'unit')
        limit.set('next-available', 'next-available')
        # Absolute-limit section: (name, value) items from the quota dict.
        absolute = xmlutil.SubTemplateElement(root, 'absolute',
                                              selector='absolute')
        limit = xmlutil.SubTemplateElement(absolute, 'limit',
                                           selector=xmlutil.get_items)
        limit.set('name', 0)
        limit.set('value', 1)
        return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(wsgi.Controller):
    """Controller for accessing limits in the OpenStack API."""
    @wsgi.serializers(xml=LimitsTemplate)
    def index(self, req):
        """Return all global and rate limit information."""
        context = req.environ['cinder.context']
        # Absolute limits come from the quota engine for the caller's project.
        quotas = QUOTAS.get_project_quotas(context, context.project_id,
                                           usages=False)
        abs_limits = dict((k, v['limit']) for k, v in quotas.items())
        # Rate limits were stashed in the environ by RateLimitingMiddleware.
        rate_limits = req.environ.get("cinder.limits", [])
        builder = self._get_view_builder(req)
        return builder.build(rate_limits, abs_limits)
    def _get_view_builder(self, req):
        # Hook point so subclasses can swap in a different view builder.
        return limits_views.ViewBuilder()
def create_resource():
    """Build the WSGI resource wrapping a fresh `LimitsController`."""
    return wsgi.Resource(LimitsController())
class Limit(object):
    """Stores information about a limit for HTTP requests."""
    # Mapping of unit duration in seconds -> display name.
    UNITS = {
        1: "SECOND",
        60: "MINUTE",
        60 * 60: "HOUR",
        60 * 60 * 24: "DAY",
    }
    # Reverse mapping: display name -> seconds.
    UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
    def __init__(self, verb, uri, regex, value, unit):
        """Initialize a new `Limit`.

        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)
        if value <= 0:
            raise ValueError("Limit value must be > 0")
        # Leaky-bucket state: water_level rises per request and drains over
        # time; capacity is the bucket size (one unit interval in seconds).
        self.last_request = None
        self.next_request = None
        self.water_level = 0
        self.capacity = self.unit
        # Seconds of "water" each request adds: capacity spread over `value`.
        self.request_value = float(self.capacity) / float(self.value)
        msg = _("Only %(value)s %(verb)s request(s) can be "
                "made to %(uri)s every %(unit_string)s.")
        self.error_message = msg % self.__dict__
    def __call__(self, verb, url):
        """Represent a call to this limit from a relevant request.

        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        @return: seconds to wait when over the limit, else None
        """
        # Ignore requests this limit does not cover.
        if self.verb != verb or not re.match(self.regex, url):
            return
        now = self._get_time()
        if self.last_request is None:
            self.last_request = now
        # Drain the bucket for the elapsed time, then add this request.
        leak_value = now - self.last_request
        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value
        difference = self.water_level - self.capacity
        self.last_request = now
        if difference > 0:
            # Overflow: reject the request (undo its water) and report the
            # delay until it would fit.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference
        cap = self.capacity
        water = self.water_level
        val = self.value
        # Remaining requests proportional to the bucket's free space.
        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now
    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()
    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")
    def display(self):
        """Return a useful representation of this class."""
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(self.next_request or self._get_time()),
        }
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
# Used by the limiter classes below when no explicit limits are configured.
DEFAULT_LIMITS = [
    Limit("POST", "*", ".*", 10, PER_MINUTE),
    Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
    Limit("PUT", "*", ".*", 10, PER_MINUTE),
    Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
    Limit("DELETE", "*", ".*", 100, PER_MINUTE),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
    """Rate-limits requests passing through this middleware.

    All limit information is stored in memory for this implementation.
    """
    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize new `RateLimitingMiddleware`

        This wraps the given WSGI application and sets up the given limits.

        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits

        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)
        # Select the limiter class
        if limiter is None:
            limiter = Limiter
        else:
            # Dotted-path string -> class (allows pluggable limiters).
            limiter = importutils.import_class(limiter)
        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)
        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Represent a single call through this middleware.

        We should record the request if we have a limit relevant to it.
        If no limit is relevant to the request, ignore it.
        If the request should be rate limited, return a fault telling the user
        they are over the limit and need to retry later.
        """
        verb = req.method
        url = req.url
        context = req.environ.get("cinder.context")
        if context:
            username = context.user_id
        else:
            username = None
        delay, error = self._limiter.check_for_delay(verb, url, username)
        if delay:
            msg = _("This request was rate-limited.")
            retry = time.time() + delay
            return wsgi.OverLimitFault(msg, error, retry)
        # Expose current limits so LimitsController.index can report them.
        req.environ["cinder.limits"] = self._limiter.get_limits(username)
        return self.application
class Limiter(object):
    """Rate-limit checking class which handles limits in memory."""
    def __init__(self, limits, **kwargs):
        """Initialize the new `Limiter`.

        @param limits: List of `Limit` objects
        """
        self.limits = copy.deepcopy(limits)
        # Per-user buckets; unseen users lazily get a deep copy of the
        # default limits so state is never shared between users.
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
        # Pick up any per-user limit information
        for key, value in kwargs.items():
            if key.startswith(LIMITS_PREFIX):
                username = key[len(LIMITS_PREFIX):]
                self.levels[username] = self.parse_limits(value)
    def get_limits(self, username=None):
        """Return the limits for a given user."""
        return [limit.display() for limit in self.levels[username]]
    def check_for_delay(self, verb, url, username=None):
        """Check the given verb/url/username triplet against its limits.

        @return: Tuple of delay (in seconds) and error message (or None, None)
        """
        delays = []
        for limit in self.levels[username]:
            delay = limit(verb, url)
            if delay:
                delays.append((delay, limit.error_message))
        if delays:
            # Report the shortest delay that would satisfy every limit hit.
            delays.sort()
            return delays[0]
        return None, None
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor. We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """Convert a string into a list of Limit instances.

        This implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure. Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".

        @return: List of Limit instances.
        """
        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []
        # Split up the limits by semicolon
        result = []
        for group in limits.split(';'):
            group = group.strip()
            if group[:1] != '(' or group[-1:] != ')':
                raise ValueError("Limit rules must be surrounded by "
                                 "parentheses")
            group = group[1:-1]
            # Extract the Limit arguments
            args = [a.strip() for a in group.split(',')]
            if len(args) != 5:
                raise ValueError("Limit rules must contain the following "
                                 "arguments: verb, uri, regex, value, unit")
            # Pull out the arguments
            verb, uri, regex, value, unit = args
            # Upper-case the verb
            verb = verb.upper()
            # Convert value--raises ValueError if it's not integer
            value = int(value)
            # Convert unit
            unit = unit.upper()
            if unit not in Limit.UNIT_MAP:
                raise ValueError("Invalid units specified")
            unit = Limit.UNIT_MAP[unit]
            # Build a limit
            result.append(Limit(verb, uri, regex, value, unit))
        return result
class WsgiLimiter(object):
    """Rate-limit checking from a WSGI application.

    Uses an in-memory `Limiter`.

    To use, POST ``/<username>`` with JSON data such as::

        {
            "verb" : GET,
            "path" : "/servers"
        }

    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """
    def __init__(self, limits=None):
        """Initialize the new `WsgiLimiter`.

        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """Handles a call to this application.

        Returns 204 if the request is acceptable to the limiter, else a 403
        is returned with a relevant header indicating when the request
        *will* succeed.
        """
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()
        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()
        # Username is the first path segment; verb/path come from the body.
        username = request.path_info_pop()
        verb = info.get("verb")
        path = info.get("path")
        delay, error = self._limiter.check_for_delay(verb, path, username)
        if delay:
            headers = {"X-Wait-Seconds": "%.2f" % delay}
            return webob.exc.HTTPForbidden(headers=headers, explanation=error)
        else:
            return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
    """Rate-limit requests based on answers from a remote source."""
    def __init__(self, limiter_address):
        """Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address
    def check_for_delay(self, verb, path, username=None):
        """Ask the remote WsgiLimiter whether this request may proceed.

        @return: Tuple of delay (seconds, as a header string) and error body,
                 or (None, None) when the request is within limits.
        """
        body = jsonutils.dumps({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}
        conn = httplib.HTTPConnection(self.limiter_address)
        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)
        resp = conn.getresponse()
        # FIX: accept any 2xx status. The previous chained comparison
        # `200 >= resp.status < 300` was true for status <= 200 (e.g. 1xx)
        # and FALSE for 201-299, inverting the success check.
        if 200 <= resp.status < 300:
            return None, None
        return resp.getheader("X-Wait-Seconds"), resp.read() or None
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """Ignore a limits string--simply doesn't apply for the limit proxy.

        @return: Empty list.
        """
        return []
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table import expressions as expr, ListView
from pyflink.table.types import DataTypes
from pyflink.table.udf import udf, udtf, udaf, AggregateFunction, TableAggregateFunction, udtaf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase
class RowBasedOperationTests(object):
    def test_map(self):
        """Chained Table.map with a general (non-pandas) Row-returning UDF."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b'],
            [DataTypes.BIGINT(), DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        # Maps x -> (x + 1, x * x); applied twice: first on column b, then on
        # the first output column of the previous map.
        func = udf(lambda x: Row(x + 1, x * x), result_type=DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT()),
             DataTypes.FIELD("b", DataTypes.BIGINT())]))
        t.map(func(t.b)).alias("a", "b") \
            .map(func(t.a)).alias("a", "b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(
            actual, ["+I[4, 9]", "+I[3, 4]", "+I[7, 36]", "+I[10, 81]", "+I[5, 16]"])
def test_map_with_pandas_udf(self):
t = self.t_env.from_elements(
[(1, Row(2, 3)), (2, Row(1, 3)), (1, Row(5, 4)), (1, Row(8, 6)), (2, Row(3, 4))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b",
DataTypes.ROW([DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("d", DataTypes.INT())]))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
def func(x):
import pandas as pd
res = pd.concat([x.a, x.c + x.d], axis=1)
return res
def func2(x):
return x * 2
def func3(x):
assert isinstance(x, Row)
return x
pandas_udf = udf(func,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
pandas_udf_2 = udf(func2,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
general_udf = udf(func3,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]))
t.map(pandas_udf).map(pandas_udf_2).map(general_udf).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[4, 8]", "+I[2, 10]", "+I[2, 28]", "+I[2, 18]", "+I[4, 14]"])
def test_flat_map(self):
t = self.t_env.from_elements(
[(1, "2,3"), (2, "1"), (1, "5,6,7")],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.STRING())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f'],
[DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(),
DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
@udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
def split(x):
for s in x[1].split(","):
yield x[0], s
t.flat_map(split) \
.flat_map(split) \
.join_lateral(split.alias("a", "b")) \
.left_outer_join_lateral(split.alias("c", "d")) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[1, 2, 1, 2, 1, 2]", "+I[1, 3, 1, 3, 1, 3]", "+I[2, 1, 2, 1, 2, 1]",
"+I[1, 5, 1, 5, 1, 5]", "+I[1, 6, 1, 6, 1, 6]", "+I[1, 7, 1, 7, 1, 7]"])
class BatchRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkBatchTableTestCase):
    """Batch-mode IT cases; adds pandas UDAF aggregate tests."""

    def test_aggregate_with_pandas_udaf(self):
        """group_by + aggregate using a pandas UDAF (tuple result)."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))

        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT()])
        self.t_env.register_table_sink("Results", table_sink)
        # The UDAF receives each group as a pandas DataFrame.
        pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.a.max()),
                           result_type=DataTypes.ROW(
                               [DataTypes.FIELD("a", DataTypes.FLOAT()),
                                DataTypes.FIELD("b", DataTypes.INT())]),
                           func_type="pandas")
        t.select(t.a, t.b) \
            .group_by(t.a) \
            .aggregate(pandas_udaf) \
            .select("*") \
            .execute_insert("Results") \
            .wait()

        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 5.0, 1]", "+I[2, 2.0, 2]"])

    def test_aggregate_with_pandas_udaf_without_keys(self):
        """Global (keyless) aggregate using a pandas UDAF (Row result)."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))

        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b'],
            [DataTypes.FLOAT(), DataTypes.INT()])
        self.t_env.register_table_sink("Results", table_sink)
        pandas_udaf = udaf(lambda pd: Row(pd.b.mean(), pd.b.max()),
                           result_type=DataTypes.ROW(
                               [DataTypes.FIELD("a", DataTypes.FLOAT()),
                                DataTypes.FIELD("b", DataTypes.INT())]),
                           func_type="pandas")
        t.select(t.b) \
            .aggregate(pandas_udaf.alias("a", "b")) \
            .select("a, b") \
            .execute_insert("Results") \
            .wait()

        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[3.8, 8]"])

    def test_window_aggregate_with_pandas_udaf(self):
        """Tumbling event-time window aggregate using a pandas UDAF."""
        import datetime
        from pyflink.table.window import Tumble
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))

        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT(),
                DataTypes.INT()
            ])
        self.t_env.register_table_sink("Results", table_sink)
        pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.b.max()),
                           result_type=DataTypes.ROW(
                               [DataTypes.FIELD("a", DataTypes.FLOAT()),
                                DataTypes.FIELD("b", DataTypes.INT())]),
                           func_type="pandas")
        # One-hour tumbling windows keyed on the rowtime attribute.
        tumble_window = Tumble.over(expr.lit(1).hours) \
            .on(expr.col("rowtime")) \
            .alias("w")
        t.select(t.b, t.rowtime) \
            .window(tumble_window) \
            .group_by("w") \
            .aggregate(pandas_udaf.alias("d", "e")) \
            .select("w.rowtime, d, e") \
            .execute_insert("Results") \
            .wait()

        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[2018-03-11 03:59:59.999, 2.2, 3]",
                            "+I[2018-03-11 04:59:59.999, 8.0, 8]"])
class StreamRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkStreamTableTestCase):
    """Stream-mode IT cases; general and table aggregate functions."""

    def test_aggregate(self):
        """group_by + aggregate with a general (non-pandas) AggregateFunction."""
        import pandas as pd
        t = self.t_env.from_elements(
            [(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.BIGINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        function = CountAndSumAggregateFunction()
        agg = udaf(function,
                   result_type=function.get_result_type(),
                   accumulator_type=function.get_accumulator_type(),
                   name=str(function.__class__.__name__))
        result = t.group_by(t.a) \
            .aggregate(agg.alias("c", "d")) \
            .select("a, c, d") \
            .to_pandas()
        assert_frame_equal(result.sort_values('a').reset_index(drop=True),
                           pd.DataFrame([[1, 3, 15], [2, 2, 4]], columns=['a', 'c', 'd']))

    def test_flat_aggregate(self):
        """flat_aggregate with a table aggregate (Top2), chained twice."""
        import pandas as pd
        mytop = udtaf(Top2())
        t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
                                      (3, 'Hi', 'hi'),
                                      (5, 'Hi2', 'hi'),
                                      (7, 'Hi', 'Hello'),
                                      (2, 'Hi', 'Hello')], ['a', 'b', 'c'])
        result = t.select(t.a, t.c) \
            .group_by(t.c) \
            .flat_aggregate(mytop) \
            .select(t.a) \
            .flat_aggregate(mytop.alias("b")) \
            .select("b") \
            .to_pandas()
        assert_frame_equal(result, pd.DataFrame([[7], [5]], columns=['b']))

    def test_flat_aggregate_list_view(self):
        """Table aggregate backed by a ListView accumulator, with a tiny
        state cache so that cache eviction happens within a bundle."""
        import pandas as pd
        my_concat = udtaf(ListViewConcatTableAggregateFunction())
        self.t_env.get_config().get_configuration().set_string(
            "python.fn-execution.bundle.size", "2")
        # trigger the cache eviction in a bundle.
        self.t_env.get_config().get_configuration().set_string(
            "python.state.cache-size", "2")
        t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
                                      (3, 'Hi', 'hi'),
                                      (3, 'Hi2', 'hi'),
                                      (3, 'Hi', 'hi'),
                                      (2, 'Hi', 'Hello'),
                                      (1, 'Hi2', 'Hello'),
                                      (3, 'Hi3', 'hi'),
                                      (3, 'Hi2', 'Hello'),
                                      (3, 'Hi3', 'hi'),
                                      (2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
        result = t.group_by(t.c) \
            .flat_aggregate(my_concat(t.b, ',').alias("b")) \
            .select(t.b, t.c) \
            .alias("a, c")
        # Each concatenation appears twice because the UDTAF emits it twice.
        assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
                           pd.DataFrame([["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
                                         ["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
                                         ["Hi,Hi2,Hi,Hi3,Hi3", "hi"],
                                         ["Hi,Hi2,Hi,Hi3,Hi3", "hi"]],
                                        columns=['a', 'c']))
class CountAndSumAggregateFunction(AggregateFunction):
    """Aggregate tracking (row count, sum of the second input field) per group.

    Accumulator layout: index 0 holds the count, index 1 holds the sum.
    """

    def get_value(self, accumulator):
        from pyflink.common import Row
        count, total = accumulator[0], accumulator[1]
        return Row(count, total)

    def create_accumulator(self):
        from pyflink.common import Row
        # Start with zero rows seen and a zero sum.
        return Row(0, 0)

    def accumulate(self, accumulator, *args):
        row = args[0]
        accumulator[0] += 1
        accumulator[1] += row[1]

    def retract(self, accumulator, *args):
        row = args[0]
        accumulator[0] -= 1
        accumulator[1] -= row[1]

    def merge(self, accumulator, accumulators):
        for other in accumulators:
            accumulator[0] += other[0]
            accumulator[1] += other[1]

    def get_accumulator_type(self):
        return DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT()),
             DataTypes.FIELD("b", DataTypes.BIGINT())])

    def get_result_type(self):
        return DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT()),
             DataTypes.FIELD("b", DataTypes.BIGINT())])
class Top2(TableAggregateFunction):
    """Table aggregate emitting the two largest values seen for a column.

    Accumulator layout: index 0 is the maximum, index 1 the runner-up;
    ``None`` marks a slot that has not been filled yet.
    """

    def emit_value(self, accumulator):
        yield Row(accumulator[0])
        yield Row(accumulator[1])

    def create_accumulator(self):
        return [None, None]

    def accumulate(self, accumulator, *args):
        value = args[0][0]
        if value is None:
            return
        if accumulator[0] is None or value > accumulator[0]:
            # New maximum: previous maximum becomes the runner-up.
            accumulator[0], accumulator[1] = value, accumulator[0]
        elif accumulator[1] is None or value > accumulator[1]:
            accumulator[1] = value

    def retract(self, accumulator, *args):
        # NOTE(review): decrements the stored maximum rather than removing a
        # retracted value; looks like a test placeholder — confirm intent.
        accumulator[0] -= 1

    def merge(self, accumulator, accumulators):
        for other in accumulators:
            self.accumulate(accumulator, other[0])
            self.accumulate(accumulator, other[1])

    def get_accumulator_type(self):
        return DataTypes.ARRAY(DataTypes.BIGINT())

    def get_result_type(self):
        return DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT())])
class ListViewConcatTableAggregateFunction(TableAggregateFunction):
    """Table aggregate joining collected strings with a separator.

    Accumulator layout: index 0 is a ListView of collected elements,
    index 1 the separator last seen.  The joined result is emitted twice.
    """

    def emit_value(self, accumulator):
        joined = accumulator[1].join(accumulator[0])
        yield Row(joined)
        yield Row(joined)

    def create_accumulator(self):
        return Row(ListView(), '')

    def accumulate(self, accumulator, *args):
        # args[0] is the element to collect, args[1] the separator.
        accumulator[1] = args[1]
        accumulator[0].add(args[0])

    def retract(self, accumulator, *args):
        raise NotImplementedError

    def get_accumulator_type(self):
        # NOTE(review): f1 is declared BIGINT but holds the string
        # separator at runtime — confirm against the Flink type mapping.
        return DataTypes.ROW([
            DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
            DataTypes.FIELD("f1", DataTypes.BIGINT())])

    def get_result_type(self):
        return DataTypes.ROW([DataTypes.FIELD("a", DataTypes.STRING())])
if __name__ == '__main__':
    import unittest

    try:
        # Prefer XML test reports when xmlrunner is installed (CI-friendly).
        import xmlrunner
        test_runner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        test_runner = None
    unittest.main(testRunner=test_runner, verbosity=2)
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import os
import testtools
import webob.exc
from quantum import context
from quantum.api.extensions import ExtensionMiddleware
from quantum.api.extensions import PluginAwareExtensionManager
from quantum.common import config
from quantum.db.loadbalancer import loadbalancer_db as ldb
import quantum.extensions
from quantum.extensions import loadbalancer
from quantum.plugins.common import constants
from quantum.plugins.services.agent_loadbalancer import (
plugin as loadbalancer_plugin
)
from quantum.tests.unit import test_db_plugin
# Module-level test fixtures: plugin class paths and config file locations.
LOG = logging.getLogger(__name__)

DB_CORE_PLUGIN_KLASS = 'quantum.db.db_base_plugin_v2.QuantumDbPluginV2'
DB_LB_PLUGIN_KLASS = (
    "quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
)
# Bug fix: the original concatenated the directory name with '../../../..'
# without a path separator (e.g. '/pkg/tests../../../..'), which does not
# resolve to the repository root.  Join the components instead.
ROOTDIR = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
ETCDIR = os.path.join(ROOTDIR, 'etc')
extensions_path = ':'.join(quantum.extensions.__path__)
def etcdir(*parts):
    """Build a path rooted at the test ``etc`` directory."""
    return os.path.join(ETCDIR, *parts)
class LoadBalancerPluginDbTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
    """Base test case wiring up the load balancer plugin and extension API.

    Provides ``_create_*`` helpers that POST resources through the extension
    API, and context managers (``vip``/``pool``/``member``/``health_monitor``)
    that create a resource, yield it, and delete it again on exit unless
    ``no_delete`` is requested.
    """

    # Every load balancer resource is served under the LOADBALANCER prefix.
    resource_prefix_map = dict(
        (k, constants.COMMON_PREFIXES[constants.LOADBALANCER])
        for k in loadbalancer.RESOURCE_ATTRIBUTE_MAP.keys()
    )

    def setUp(self, core_plugin=None, lb_plugin=None):
        """Start the core plugin with the LB service plugin and the ext API."""
        service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_KLASS}

        super(LoadBalancerPluginDbTestCase, self).setUp(
            service_plugins=service_plugins
        )

        self._subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14"

        self.plugin = loadbalancer_plugin.LoadBalancerPlugin()

        # Load the loadbalancer extension and expose it through a test app.
        ext_mgr = PluginAwareExtensionManager(
            extensions_path,
            {constants.LOADBALANCER: self.plugin}
        )
        app = config.load_paste_app('extensions_test_app')
        self.ext_api = ExtensionMiddleware(app, ext_mgr=ext_mgr)

    def _create_vip(self, fmt, name, pool_id, protocol, protocol_port,
                    admin_state_up, expected_res_status=None, **kwargs):
        """Create a vip via the extension API and return the raw response.

        Optional attributes are forwarded from ``kwargs`` only when not None;
        when ``expected_res_status`` is given the response status is asserted.
        """
        data = {'vip': {'name': name,
                        'pool_id': pool_id,
                        'protocol': protocol,
                        'protocol_port': protocol_port,
                        'admin_state_up': admin_state_up,
                        'tenant_id': self._tenant_id}}
        for arg in ('description', 'subnet_id', 'address',
                    'session_persistence', 'connection_limit'):
            if arg in kwargs and kwargs[arg] is not None:
                data['vip'][arg] = kwargs[arg]

        vip_req = self.new_create_request('vips', data, fmt)
        vip_res = vip_req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(vip_res.status_int, expected_res_status)

        return vip_res

    def _create_pool(self, fmt, name, lb_method, protocol, admin_state_up,
                     expected_res_status=None, **kwargs):
        """Create a pool via the extension API and return the raw response."""
        data = {'pool': {'name': name,
                         'subnet_id': self._subnet_id,
                         'lb_method': lb_method,
                         'protocol': protocol,
                         'admin_state_up': admin_state_up,
                         'tenant_id': self._tenant_id}}
        # Bug fix: the original iterated over ('description'), which is a
        # plain parenthesized string, so the loop walked its characters and a
        # 'description' kwarg was never copied into the request body.  A
        # one-element tuple needs the trailing comma.
        for arg in ('description',):
            if arg in kwargs and kwargs[arg] is not None:
                data['pool'][arg] = kwargs[arg]
        pool_req = self.new_create_request('pools', data, fmt)
        pool_res = pool_req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(pool_res.status_int, expected_res_status)

        return pool_res

    def _create_member(self, fmt, address, protocol_port, admin_state_up,
                       expected_res_status=None, **kwargs):
        """Create a member via the extension API and return the raw response."""
        data = {'member': {'address': address,
                           'protocol_port': protocol_port,
                           'admin_state_up': admin_state_up,
                           'tenant_id': self._tenant_id}}
        for arg in ('weight', 'pool_id'):
            if arg in kwargs and kwargs[arg] is not None:
                data['member'][arg] = kwargs[arg]

        member_req = self.new_create_request('members', data, fmt)
        member_res = member_req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(member_res.status_int, expected_res_status)

        return member_res

    def _create_health_monitor(self, fmt, type, delay, timeout, max_retries,
                               admin_state_up, expected_res_status=None,
                               **kwargs):
        """Create a health monitor via the extension API; return the response.

        NOTE(review): the optional attribute names here ('path',
        'expected_code') differ from the HTTP attributes checked by the
        ``health_monitor`` context manager ('url_path', 'expected_codes');
        confirm against the loadbalancer extension's attribute map.
        """
        data = {'health_monitor': {'type': type,
                                   'delay': delay,
                                   'timeout': timeout,
                                   'max_retries': max_retries,
                                   'admin_state_up': admin_state_up,
                                   'tenant_id': self._tenant_id}}
        for arg in ('http_method', 'path', 'expected_code'):
            if arg in kwargs and kwargs[arg] is not None:
                data['health_monitor'][arg] = kwargs[arg]

        req = self.new_create_request('health_monitors', data, fmt)
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(res.status_int, expected_res_status)

        return res

    def _api_for_resource(self, resource):
        """Route core resources to the core API, LB resources to the ext API."""
        if resource in ['networks', 'subnets', 'ports']:
            return self.api
        else:
            return self.ext_api

    @contextlib.contextmanager
    def vip(self, fmt=None, name='vip1', pool=None, subnet=None,
            protocol='HTTP', protocol_port=80, admin_state_up=True,
            no_delete=False, **kwargs):
        """Yield a created vip (creating a subnet/pool when not supplied);
        delete it on exit unless ``no_delete`` is set."""
        if not fmt:
            fmt = self.fmt

        with test_db_plugin.optional_ctx(subnet, self.subnet) as tmp_subnet:
            with test_db_plugin.optional_ctx(pool, self.pool) as tmp_pool:
                pool_id = tmp_pool['pool']['id']
                res = self._create_vip(fmt,
                                       name,
                                       pool_id,
                                       protocol,
                                       protocol_port,
                                       admin_state_up,
                                       subnet_id=tmp_subnet['subnet']['id'],
                                       **kwargs)
                vip = self.deserialize(fmt or self.fmt, res)
                if res.status_int >= 400:
                    raise webob.exc.HTTPClientError(code=res.status_int)
                try:
                    yield vip
                finally:
                    if not no_delete:
                        self._delete('vips', vip['vip']['id'])

    @contextlib.contextmanager
    def pool(self, fmt=None, name='pool1', lb_method='ROUND_ROBIN',
             protocol='HTTP', admin_state_up=True, no_delete=False,
             **kwargs):
        """Yield a created pool; delete it on exit unless ``no_delete``."""
        if not fmt:
            fmt = self.fmt

        res = self._create_pool(fmt,
                                name,
                                lb_method,
                                protocol,
                                admin_state_up,
                                **kwargs)
        pool = self.deserialize(fmt or self.fmt, res)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        try:
            yield pool
        finally:
            if not no_delete:
                self._delete('pools', pool['pool']['id'])

    @contextlib.contextmanager
    def member(self, fmt=None, address='192.168.1.100', protocol_port=80,
               admin_state_up=True, no_delete=False, **kwargs):
        """Yield a created member; delete it on exit unless ``no_delete``."""
        if not fmt:
            fmt = self.fmt

        res = self._create_member(fmt,
                                  address,
                                  protocol_port,
                                  admin_state_up,
                                  **kwargs)
        member = self.deserialize(fmt or self.fmt, res)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        try:
            yield member
        finally:
            if not no_delete:
                self._delete('members', member['member']['id'])

    @contextlib.contextmanager
    def health_monitor(self, fmt=None, type='TCP',
                       delay=30, timeout=10, max_retries=3,
                       admin_state_up=True,
                       no_delete=False, **kwargs):
        """Yield a created health monitor; delete on exit unless ``no_delete``.

        Additionally verifies the presence/absence of the HTTP-related
        attributes depending on the monitor type.
        """
        if not fmt:
            fmt = self.fmt

        res = self._create_health_monitor(fmt,
                                          type,
                                          delay,
                                          timeout,
                                          max_retries,
                                          admin_state_up,
                                          **kwargs)
        health_monitor = self.deserialize(fmt or self.fmt, res)
        the_health_monitor = health_monitor['health_monitor']
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        # make sure:
        # 1. When the type is HTTP/S we have HTTP related attributes in
        #    the result
        # 2. When the type is not HTTP/S we do not have HTTP related
        #    attributes in the result
        http_related_attributes = ('http_method', 'url_path', 'expected_codes')
        if type in ['HTTP', 'HTTPS']:
            for arg in http_related_attributes:
                self.assertIsNotNone(the_health_monitor.get(arg))
        else:
            for arg in http_related_attributes:
                self.assertIsNone(the_health_monitor.get(arg))
        try:
            yield health_monitor
        finally:
            if not no_delete:
                self._delete('health_monitors', the_health_monitor['id'])
class TestLoadBalancer(LoadBalancerPluginDbTestCase):
    def test_create_vip(self, **extras):
        """Create a vip and verify its attributes; returns the created vip.

        ``extras`` overrides/extends the expected attribute dict, which lets
        other tests reuse this method as a parameterized helper.
        """
        expected = {
            'name': 'vip1',
            'description': '',
            'protocol_port': 80,
            'protocol': 'HTTP',
            'connection_limit': -1,
            'admin_state_up': True,
            'status': 'PENDING_CREATE',
            'tenant_id': self._tenant_id,
        }

        expected.update(extras)

        with self.subnet() as subnet:
            expected['subnet_id'] = subnet['subnet']['id']
            name = expected['name']

            with self.vip(name=name, subnet=subnet, **extras) as vip:
                # Generated fields only need to be present and truthy.
                for k in ('id', 'address', 'port_id', 'pool_id'):
                    self.assertTrue(vip['vip'].get(k, None))

                self.assertEqual(
                    dict((k, v)
                         for k, v in vip['vip'].items() if k in expected),
                    expected
                )
            return vip
    def test_create_vip_twice_for_same_pool(self):
        """Test loadbalancer db plugin via extension and directly.

        A pool can only carry one vip: creating a second vip for the same
        pool directly through the plugin must raise VipExists.
        """
        with self.subnet() as subnet:
            with self.pool(name="pool1") as pool:
                with self.vip(name='vip1', subnet=subnet, pool=pool):
                    vip_data = {
                        'name': 'vip1',
                        'pool_id': pool['pool']['id'],
                        'description': '',
                        'protocol_port': 80,
                        'protocol': 'HTTP',
                        'connection_limit': -1,
                        'admin_state_up': True,
                        'status': 'PENDING_CREATE',
                        'tenant_id': self._tenant_id,
                        'session_persistence': ''
                    }
                    self.assertRaises(loadbalancer.VipExists,
                                      self.plugin.create_vip,
                                      context.get_admin_context(),
                                      {'vip': vip_data})
    def test_update_vip_raises_vip_exists(self):
        """Re-pointing a vip at a pool that already has a vip must raise."""
        with self.subnet() as subnet:
            with contextlib.nested(
                self.pool(name="pool1"),
                self.pool(name="pool2")
            ) as (pool1, pool2):
                with contextlib.nested(
                    self.vip(name='vip1', subnet=subnet, pool=pool1),
                    self.vip(name='vip2', subnet=subnet, pool=pool2)
                ) as (vip1, vip2):
                    vip_data = {
                        'id': vip2['vip']['id'],
                        'name': 'vip1',
                        'pool_id': pool1['pool']['id'],
                    }
                    # pool1 already has vip1, so moving vip2 there must fail.
                    self.assertRaises(loadbalancer.VipExists,
                                      self.plugin.update_vip,
                                      context.get_admin_context(),
                                      vip2['vip']['id'],
                                      {'vip': vip_data})
    def test_update_vip_change_pool(self):
        """Moving a vip to another pool must update both pools' vip links."""
        with self.subnet() as subnet:
            with contextlib.nested(
                self.pool(name="pool1"),
                self.pool(name="pool2")
            ) as (pool1, pool2):
                with self.vip(name='vip1', subnet=subnet, pool=pool1) as vip:
                    # change vip from pool1 to pool2
                    vip_data = {
                        'id': vip['vip']['id'],
                        'name': 'vip1',
                        'pool_id': pool2['pool']['id'],
                    }
                    ctx = context.get_admin_context()
                    self.plugin.update_vip(ctx,
                                           vip['vip']['id'],
                                           {'vip': vip_data})
                    db_pool2 = (ctx.session.query(ldb.Pool).
                                filter_by(id=pool2['pool']['id']).one())
                    db_pool1 = (ctx.session.query(ldb.Pool).
                                filter_by(id=pool1['pool']['id']).one())
                    # check that pool1.vip became None
                    self.assertIsNone(db_pool1.vip)
                    # and pool2 got vip
                    self.assertEqual(db_pool2.vip.id, vip['vip']['id'])
def test_create_vip_with_invalid_values(self):
invalid = {
'protocol': 'UNSUPPORTED',
'protocol_port': 'NOT_AN_INT',
'protocol_port': 1000500,
'subnet': {'subnet': {'id': 'invalid-subnet'}}
}
for param, value in invalid.items():
kwargs = {'name': 'the-vip', param: value}
with testtools.ExpectedException(webob.exc.HTTPClientError):
with self.vip(**kwargs):
pass
    def test_create_vip_with_address(self):
        """An explicit address inside the subnet is accepted."""
        self.test_create_vip(address='10.0.0.7')
    def test_create_vip_with_address_outside_subnet(self):
        """An address outside the subnet must be rejected with a client error."""
        with testtools.ExpectedException(webob.exc.HTTPClientError):
            self.test_create_vip(address='9.9.9.9')
    def test_create_vip_with_session_persistence(self):
        """HTTP_COOKIE session persistence is accepted without a cookie name."""
        self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'})
def test_create_vip_with_session_persistence_with_app_cookie(self):
sp = {'type': 'APP_COOKIE', 'cookie_name': 'sessionId'}
self.test_create_vip(session_persistence=sp)
    def test_create_vip_with_session_persistence_unsupported_type(self):
        """An unknown persistence type must be rejected with a client error."""
        with testtools.ExpectedException(webob.exc.HTTPClientError):
            self.test_create_vip(session_persistence={'type': 'UNSUPPORTED'})
def test_create_vip_with_unnecessary_cookie_name(self):
sp = {'type': "SOURCE_IP", 'cookie_name': 'sessionId'}
with testtools.ExpectedException(webob.exc.HTTPClientError):
self.test_create_vip(session_persistence=sp)
    def test_create_vip_with_session_persistence_without_cookie_name(self):
        """APP_COOKIE persistence requires a cookie name; omitting it fails."""
        sp = {'type': "APP_COOKIE"}
        with testtools.ExpectedException(webob.exc.HTTPClientError):
            self.test_create_vip(session_persistence=sp)
    def test_create_vip_with_protocol_mismatch(self):
        """A vip whose protocol differs from its pool's must be rejected."""
        with self.pool(protocol='TCP') as pool:
            with testtools.ExpectedException(webob.exc.HTTPClientError):
                self.test_create_vip(pool=pool, protocol='HTTP')
    def test_update_vip_with_protocol_mismatch(self):
        """Updating a vip onto a pool with a different protocol returns 400."""
        with self.pool(protocol='TCP') as pool:
            with self.vip(protocol='HTTP') as vip:
                data = {'vip': {'pool_id': pool['pool']['id']}}
                req = self.new_update_request('vips', data, vip['vip']['id'])
                res = req.get_response(self.ext_api)
                self.assertEqual(res.status_int, 400)
    def test_reset_session_persistence(self):
        """Updating session_persistence to None clears it from the vip."""
        name = 'vip4'
        session_persistence = {'type': "HTTP_COOKIE"}

        update_info = {'vip': {'session_persistence': None}}

        with self.vip(name=name, session_persistence=session_persistence) as v:
            # Ensure that vip has been created properly
            self.assertEqual(v['vip']['session_persistence'],
                             session_persistence)

            # Try resetting session_persistence
            req = self.new_update_request('vips', update_info, v['vip']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))

            # If session persistence has been removed, it won't be present in
            # the response.
            self.assertNotIn('session_persistence', res['vip'])
    def test_update_vip(self):
        """Updating vip fields is reflected in the response; status moves to
        PENDING_UPDATE."""
        name = 'new_vip'
        keys = [('name', name),
                ('address', "10.0.0.2"),
                ('protocol_port', 80),
                ('connection_limit', 100),
                ('admin_state_up', False),
                ('status', 'PENDING_UPDATE')]

        with self.vip(name=name) as vip:
            keys.append(('subnet_id', vip['vip']['subnet_id']))
            data = {'vip': {'name': name,
                            'connection_limit': 100,
                            'session_persistence':
                            {'type': "APP_COOKIE",
                             'cookie_name': "jesssionId"},
                            'admin_state_up': False}}
            req = self.new_update_request('vips', data, vip['vip']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            for k, v in keys:
                self.assertEqual(res['vip'][k], v)
    def test_delete_vip(self):
        """Deleting a vip returns 204 No Content."""
        with self.pool():
            # no_delete: this test performs the delete itself.
            with self.vip(no_delete=True) as vip:
                req = self.new_delete_request('vips',
                                              vip['vip']['id'])
                res = req.get_response(self.ext_api)
                self.assertEqual(res.status_int, 204)
    def test_show_vip(self):
        """GET on a single vip returns its attributes."""
        name = "vip_show"
        keys = [('name', name),
                ('address', "10.0.0.10"),
                ('protocol_port', 80),
                ('protocol', 'HTTP'),
                ('connection_limit', -1),
                ('admin_state_up', True),
                ('status', 'PENDING_CREATE')]
        with self.vip(name=name, address='10.0.0.10') as vip:
            req = self.new_show_request('vips',
                                        vip['vip']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            for k, v in keys:
                self.assertEqual(res['vip'][k], v)
    def test_list_vips(self):
        """Listing vips returns the single created vip with its attributes."""
        name = "vips_list"
        keys = [('name', name),
                ('address', "10.0.0.2"),
                ('protocol_port', 80),
                ('protocol', 'HTTP'),
                ('connection_limit', -1),
                ('admin_state_up', True),
                ('status', 'PENDING_CREATE')]
        with self.vip(name=name) as vip:
            keys.append(('subnet_id', vip['vip']['subnet_id']))
            req = self.new_list_request('vips')
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            # NOTE(review): len(res) counts top-level keys of the response
            # dict (i.e. 'vips'), not the number of vips — presumably
            # len(res['vips']) was intended; confirm.
            self.assertEqual(len(res), 1)
            for k, v in keys:
                self.assertEqual(res['vips'][0][k], v)
    def test_list_vips_with_sort_emulated(self):
        """Emulated sorting on (protocol_port asc, name desc)."""
        with self.subnet() as subnet:
            with contextlib.nested(
                self.vip(name='vip1', subnet=subnet, protocol_port=81),
                self.vip(name='vip2', subnet=subnet, protocol_port=82),
                self.vip(name='vip3', subnet=subnet, protocol_port=82)
            ) as (vip1, vip2, vip3):
                self._test_list_with_sort(
                    'vip',
                    (vip1, vip3, vip2),
                    [('protocol_port', 'asc'), ('name', 'desc')]
                )
    def test_list_vips_with_pagination_emulated(self):
        """Emulated pagination over vips, page size 2."""
        with self.subnet() as subnet:
            with contextlib.nested(self.vip(name='vip1', subnet=subnet),
                                   self.vip(name='vip2', subnet=subnet),
                                   self.vip(name='vip3', subnet=subnet)
                                   ) as (vip1, vip2, vip3):
                self._test_list_with_pagination('vip',
                                                (vip1, vip2, vip3),
                                                ('name', 'asc'), 2, 2)
    def test_list_vips_with_pagination_reverse_emulated(self):
        """Emulated reverse pagination over vips, page size 2."""
        with self.subnet() as subnet:
            with contextlib.nested(self.vip(name='vip1', subnet=subnet),
                                   self.vip(name='vip2', subnet=subnet),
                                   self.vip(name='vip3', subnet=subnet)
                                   ) as (vip1, vip2, vip3):
                self._test_list_with_pagination_reverse('vip',
                                                        (vip1, vip2, vip3),
                                                        ('name', 'asc'), 2, 2)
    def test_create_pool_with_invalid_values(self):
        """Unsupported protocol / lb_method must fail pool creation.

        __enter__ is called directly so the failing creation raises without
        needing a with-block body.
        """
        name = 'pool3'

        pool = self.pool(name=name, protocol='UNSUPPORTED')
        self.assertRaises(webob.exc.HTTPClientError, pool.__enter__)

        pool = self.pool(name=name, lb_method='UNSUPPORTED')
        self.assertRaises(webob.exc.HTTPClientError, pool.__enter__)
    def test_create_pool(self):
        """Creating a pool returns the expected default attributes."""
        name = "pool1"
        keys = [('name', name),
                ('subnet_id', self._subnet_id),
                ('tenant_id', self._tenant_id),
                ('protocol', 'HTTP'),
                ('lb_method', 'ROUND_ROBIN'),
                ('admin_state_up', True),
                ('status', 'PENDING_CREATE')]
        with self.pool(name=name) as pool:
            for k, v in keys:
                self.assertEqual(pool['pool'][k], v)
    def test_create_pool_with_members(self):
        """A member created for a pool shows up in the pool's member list."""
        name = "pool2"
        with self.pool(name=name) as pool:
            pool_id = pool['pool']['id']
            # protocol_port passed as a string; the API is expected to
            # coerce it to the integer compared below.
            res1 = self._create_member(self.fmt,
                                       '192.168.1.100',
                                       '80',
                                       True,
                                       pool_id=pool_id,
                                       weight=1)

            req = self.new_show_request('pools',
                                        pool_id,
                                        fmt=self.fmt)
            pool_updated = self.deserialize(
                self.fmt,
                req.get_response(self.ext_api)
            )

            member1 = self.deserialize(self.fmt, res1)
            self.assertEqual(member1['member']['id'],
                             pool_updated['pool']['members'][0])
            self.assertEqual(len(pool_updated['pool']['members']), 1)

            keys = [('address', '192.168.1.100'),
                    ('protocol_port', 80),
                    ('weight', 1),
                    ('pool_id', pool_id),
                    ('admin_state_up', True),
                    ('status', 'PENDING_CREATE')]
            for k, v in keys:
                self.assertEqual(member1['member'][k], v)
            # Manual cleanup: the member was created outside a context manager.
            self._delete('members', member1['member']['id'])
    def test_delete_pool(self):
        """Deleting a pool that still has a member returns 204."""
        with self.pool(no_delete=True) as pool:
            with self.member(no_delete=True,
                             pool_id=pool['pool']['id']):
                req = self.new_delete_request('pools',
                                              pool['pool']['id'])
                res = req.get_response(self.ext_api)
                self.assertEqual(res.status_int, 204)
    def test_show_pool(self):
        """GET on a single pool returns its attributes."""
        name = "pool1"
        keys = [('name', name),
                ('subnet_id', self._subnet_id),
                ('tenant_id', self._tenant_id),
                ('protocol', 'HTTP'),
                ('lb_method', 'ROUND_ROBIN'),
                ('admin_state_up', True),
                ('status', 'PENDING_CREATE')]
        with self.pool(name=name) as pool:
            req = self.new_show_request('pools',
                                        pool['pool']['id'],
                                        fmt=self.fmt)
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            for k, v in keys:
                self.assertEqual(res['pool'][k], v)
    def test_list_pools_with_sort_emulated(self):
        """Emulated sorting of pools by name descending."""
        with contextlib.nested(self.pool(name='p1'),
                               self.pool(name='p2'),
                               self.pool(name='p3')
                               ) as (p1, p2, p3):
            self._test_list_with_sort('pool', (p3, p2, p1),
                                      [('name', 'desc')])
    def test_list_pools_with_pagination_emulated(self):
        """Emulated pagination over pools, page size 2."""
        with contextlib.nested(self.pool(name='p1'),
                               self.pool(name='p2'),
                               self.pool(name='p3')
                               ) as (p1, p2, p3):
            self._test_list_with_pagination('pool',
                                            (p1, p2, p3),
                                            ('name', 'asc'), 2, 2)
    def test_list_pools_with_pagination_reverse_emulated(self):
        """Emulated reverse pagination over pools, page size 2."""
        with contextlib.nested(self.pool(name='p1'),
                               self.pool(name='p2'),
                               self.pool(name='p3')
                               ) as (p1, p2, p3):
            self._test_list_with_pagination_reverse('pool',
                                                    (p1, p2, p3),
                                                    ('name', 'asc'), 2, 2)
    def test_create_member(self):
        """Two members created for a pool both appear in the pool's list."""
        with self.pool() as pool:
            pool_id = pool['pool']['id']
            with self.member(address='192.168.1.100',
                             protocol_port=80,
                             pool_id=pool_id) as member1:
                with self.member(address='192.168.1.101',
                                 protocol_port=80,
                                 pool_id=pool_id) as member2:
                    req = self.new_show_request('pools',
                                                pool_id,
                                                fmt=self.fmt)
                    pool_update = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )
                    self.assertIn(member1['member']['id'],
                                  pool_update['pool']['members'])
                    self.assertIn(member2['member']['id'],
                                  pool_update['pool']['members'])
    def test_update_member(self):
        """Updating a member's pool_id moves it from pool1 to pool2."""
        with self.pool(name="pool1") as pool1:
            with self.pool(name="pool2") as pool2:
                keys = [('address', "192.168.1.100"),
                        ('tenant_id', self._tenant_id),
                        ('protocol_port', 80),
                        ('weight', 10),
                        ('pool_id', pool2['pool']['id']),
                        ('admin_state_up', False),
                        ('status', 'PENDING_UPDATE')]
                with self.member(pool_id=pool1['pool']['id']) as member:
                    # Before the update: pool1 has the member, pool2 is empty.
                    req = self.new_show_request('pools',
                                                pool1['pool']['id'],
                                                fmt=self.fmt)
                    pool1_update = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )
                    self.assertEqual(len(pool1_update['pool']['members']), 1)

                    req = self.new_show_request('pools',
                                                pool2['pool']['id'],
                                                fmt=self.fmt)
                    pool2_update = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )
                    self.assertEqual(len(pool1_update['pool']['members']), 1)
                    self.assertEqual(len(pool2_update['pool']['members']), 0)

                    data = {'member': {'pool_id': pool2['pool']['id'],
                                       'weight': 10,
                                       'admin_state_up': False}}
                    req = self.new_update_request('members',
                                                  data,
                                                  member['member']['id'])
                    res = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )
                    for k, v in keys:
                        self.assertEqual(res['member'][k], v)

                    # After the update: the member lives in pool2 only.
                    req = self.new_show_request('pools',
                                                pool1['pool']['id'],
                                                fmt=self.fmt)
                    pool1_update = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )

                    req = self.new_show_request('pools',
                                                pool2['pool']['id'],
                                                fmt=self.fmt)
                    pool2_update = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )

                    self.assertEqual(len(pool2_update['pool']['members']), 1)
                    self.assertEqual(len(pool1_update['pool']['members']), 0)
    def test_delete_member(self):
        """Deleting a member returns 204 and removes it from its pool."""
        with self.pool() as pool:
            pool_id = pool['pool']['id']
            with self.member(pool_id=pool_id,
                             no_delete=True) as member:
                req = self.new_delete_request('members',
                                              member['member']['id'])
                res = req.get_response(self.ext_api)
                self.assertEqual(res.status_int, 204)

                req = self.new_show_request('pools',
                                            pool_id,
                                            fmt=self.fmt)
                pool_update = self.deserialize(
                    self.fmt,
                    req.get_response(self.ext_api)
                )
                self.assertEqual(len(pool_update['pool']['members']), 0)
    def test_show_member(self):
        """GET on a single member returns its attributes."""
        with self.pool() as pool:
            keys = [('address', "192.168.1.100"),
                    ('tenant_id', self._tenant_id),
                    ('protocol_port', 80),
                    ('weight', 1),
                    ('pool_id', pool['pool']['id']),
                    ('admin_state_up', True),
                    ('status', 'PENDING_CREATE')]
            with self.member(pool_id=pool['pool']['id']) as member:
                req = self.new_show_request('members',
                                            member['member']['id'],
                                            fmt=self.fmt)
                res = self.deserialize(
                    self.fmt,
                    req.get_response(self.ext_api)
                )
                for k, v in keys:
                    self.assertEqual(res['member'][k], v)
def test_list_members_with_sort_emulated(self):
with self.pool() as pool:
with contextlib.nested(self.member(pool_id=pool['pool']['id'],
protocol_port=81),
self.member(pool_id=pool['pool']['id'],
protocol_port=82),
self.member(pool_id=pool['pool']['id'],
protocol_port=83)
) as (m1, m2, m3):
self._test_list_with_sort('member', (m3, m2, m1),
[('protocol_port', 'desc')])
def test_list_members_with_pagination_emulated(self):
with self.pool() as pool:
with contextlib.nested(self.member(pool_id=pool['pool']['id'],
protocol_port=81),
self.member(pool_id=pool['pool']['id'],
protocol_port=82),
self.member(pool_id=pool['pool']['id'],
protocol_port=83)
) as (m1, m2, m3):
self._test_list_with_pagination(
'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2
)
def test_list_members_with_pagination_reverse_emulated(self):
with self.pool() as pool:
with contextlib.nested(self.member(pool_id=pool['pool']['id'],
protocol_port=81),
self.member(pool_id=pool['pool']['id'],
protocol_port=82),
self.member(pool_id=pool['pool']['id'],
protocol_port=83)
) as (m1, m2, m3):
self._test_list_with_pagination_reverse(
'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2
)
def test_create_healthmonitor(self):
keys = [('type', "TCP"),
('tenant_id', self._tenant_id),
('delay', 30),
('timeout', 10),
('max_retries', 3),
('admin_state_up', True),
('status', 'PENDING_CREATE')]
with self.health_monitor() as monitor:
for k, v in keys:
self.assertEqual(monitor['health_monitor'][k], v)
def test_update_healthmonitor(self):
keys = [('type', "TCP"),
('tenant_id', self._tenant_id),
('delay', 20),
('timeout', 20),
('max_retries', 2),
('admin_state_up', False),
('status', 'PENDING_UPDATE')]
with self.health_monitor() as monitor:
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['health_monitor'][k], v)
def test_delete_healthmonitor(self):
with self.health_monitor(no_delete=True) as monitor:
req = self.new_delete_request('health_monitors',
monitor['health_monitor']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_healthmonitor(self):
with self.health_monitor() as monitor:
keys = [('type', "TCP"),
('tenant_id', self._tenant_id),
('delay', 30),
('timeout', 10),
('max_retries', 3),
('admin_state_up', True),
('status', 'PENDING_CREATE')]
req = self.new_show_request('health_monitors',
monitor['health_monitor']['id'],
fmt=self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['health_monitor'][k], v)
def test_list_healthmonitors_with_sort_emulated(self):
with contextlib.nested(self.health_monitor(delay=30),
self.health_monitor(delay=31),
self.health_monitor(delay=32)
) as (m1, m2, m3):
self._test_list_with_sort('health_monitor', (m3, m2, m1),
[('delay', 'desc')])
def test_list_healthmonitors_with_pagination_emulated(self):
with contextlib.nested(self.health_monitor(delay=30),
self.health_monitor(delay=31),
self.health_monitor(delay=32)
) as (m1, m2, m3):
self._test_list_with_pagination('health_monitor',
(m1, m2, m3),
('delay', 'asc'), 2, 2)
def test_list_healthmonitors_with_pagination_reverse_emulated(self):
with contextlib.nested(self.health_monitor(delay=30),
self.health_monitor(delay=31),
self.health_monitor(delay=32)
) as (m1, m2, m3):
self._test_list_with_pagination_reverse('health_monitor',
(m1, m2, m3),
('delay', 'asc'), 2, 2)
def test_get_pool_stats(self):
keys = [("bytes_in", 0),
("bytes_out", 0),
("active_connections", 0),
("total_connections", 0)]
with self.pool() as pool:
req = self.new_show_request("pools",
pool['pool']['id'],
subresource="stats",
fmt=self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['stats'][k], v)
    def test_create_healthmonitor_of_pool(self):
        """POSTing monitor ids to a pool's health_monitors subresource
        associates each monitor with the pool."""
        with self.health_monitor(type="TCP") as monitor1:
            with self.health_monitor(type="HTTP") as monitor2:
                with self.pool() as pool:
                    # Associate the first (TCP) monitor with the pool.
                    data = {"health_monitor": {
                            "id": monitor1['health_monitor']['id'],
                            'tenant_id': self._tenant_id}}
                    req = self.new_create_request(
                        "pools",
                        data,
                        fmt=self.fmt,
                        id=pool['pool']['id'],
                        subresource="health_monitors")
                    res = req.get_response(self.ext_api)
                    self.assertEqual(res.status_int, 201)
                    # Associate the second (HTTP) monitor with the pool.
                    data = {"health_monitor": {
                            "id": monitor2['health_monitor']['id'],
                            'tenant_id': self._tenant_id}}
                    req = self.new_create_request(
                        "pools",
                        data,
                        fmt=self.fmt,
                        id=pool['pool']['id'],
                        subresource="health_monitors")
                    res = req.get_response(self.ext_api)
                    self.assertEqual(res.status_int, 201)
                    # The pool view must now list both monitor ids.
                    req = self.new_show_request(
                        'pools',
                        pool['pool']['id'],
                        fmt=self.fmt)
                    res = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )
                    self.assertIn(monitor1['health_monitor']['id'],
                                  res['pool']['health_monitors'])
                    self.assertIn(monitor2['health_monitor']['id'],
                                  res['pool']['health_monitors'])
    def test_delete_healthmonitor_of_pool(self):
        """Deleting one of two associated monitors leaves the other attached."""
        with self.health_monitor(type="TCP") as monitor1:
            with self.health_monitor(type="HTTP") as monitor2:
                with self.pool() as pool:
                    # add the monitors to the pool
                    data = {"health_monitor": {
                            "id": monitor1['health_monitor']['id'],
                            'tenant_id': self._tenant_id}}
                    req = self.new_create_request(
                        "pools",
                        data,
                        fmt=self.fmt,
                        id=pool['pool']['id'],
                        subresource="health_monitors")
                    res = req.get_response(self.ext_api)
                    self.assertEqual(res.status_int, 201)
                    data = {"health_monitor": {
                            "id": monitor2['health_monitor']['id'],
                            'tenant_id': self._tenant_id}}
                    req = self.new_create_request(
                        "pools",
                        data,
                        fmt=self.fmt,
                        id=pool['pool']['id'],
                        subresource="health_monitors")
                    res = req.get_response(self.ext_api)
                    self.assertEqual(res.status_int, 201)
                    # remove one of healthmonitor from the pool
                    req = self.new_delete_request(
                        "pools",
                        fmt=self.fmt,
                        id=pool['pool']['id'],
                        sub_id=monitor1['health_monitor']['id'],
                        subresource="health_monitors")
                    res = req.get_response(self.ext_api)
                    self.assertEqual(res.status_int, 204)
                    # Only the second monitor should remain on the pool.
                    req = self.new_show_request(
                        'pools',
                        pool['pool']['id'],
                        fmt=self.fmt)
                    res = self.deserialize(
                        self.fmt,
                        req.get_response(self.ext_api)
                    )
                    self.assertNotIn(monitor1['health_monitor']['id'],
                                     res['pool']['health_monitors'])
                    self.assertIn(monitor2['health_monitor']['id'],
                                  res['pool']['health_monitors'])
    def test_create_loadbalancer(self):
        """End-to-end: build a pool with a vip, two members and a health
        monitor, then verify the pool and vip views reflect all of them."""
        vip_name = "vip3"
        pool_name = "pool3"
        with self.pool(name=pool_name) as pool:
            with self.vip(name=vip_name, pool=pool) as vip:
                pool_id = pool['pool']['id']
                vip_id = vip['vip']['id']
                # Add two members
                res1 = self._create_member(self.fmt,
                                           '192.168.1.100',
                                           '80',
                                           True,
                                           pool_id=pool_id,
                                           weight=1)
                res2 = self._create_member(self.fmt,
                                           '192.168.1.101',
                                           '80',
                                           True,
                                           pool_id=pool_id,
                                           weight=2)
                # Add a health_monitor
                req = self._create_health_monitor(self.fmt,
                                                  'HTTP',
                                                  '10',
                                                  '10',
                                                  '3',
                                                  True)
                health_monitor = self.deserialize(self.fmt, req)
                self.assertEqual(req.status_int, 201)
                # Associate the health_monitor to the pool
                data = {"health_monitor": {
                        "id": health_monitor['health_monitor']['id'],
                        'tenant_id': self._tenant_id}}
                req = self.new_create_request("pools",
                                              data,
                                              fmt=self.fmt,
                                              id=pool['pool']['id'],
                                              subresource="health_monitors")
                res = req.get_response(self.ext_api)
                self.assertEqual(res.status_int, 201)
                # Get pool and vip
                req = self.new_show_request('pools',
                                            pool_id,
                                            fmt=self.fmt)
                pool_updated = self.deserialize(
                    self.fmt,
                    req.get_response(self.ext_api)
                )
                member1 = self.deserialize(self.fmt, res1)
                member2 = self.deserialize(self.fmt, res2)
                # Both members and the monitor must appear on the pool view.
                self.assertIn(member1['member']['id'],
                              pool_updated['pool']['members'])
                self.assertIn(member2['member']['id'],
                              pool_updated['pool']['members'])
                self.assertIn(health_monitor['health_monitor']['id'],
                              pool_updated['pool']['health_monitors'])
                req = self.new_show_request('vips',
                                            vip_id,
                                            fmt=self.fmt)
                vip_updated = self.deserialize(
                    self.fmt,
                    req.get_response(self.ext_api)
                )
                # The vip must reference the pool it was created against.
                self.assertEqual(vip_updated['vip']['pool_id'],
                                 pool_updated['pool']['id'])
                # clean up
                self._delete('health_monitors',
                             health_monitor['health_monitor']['id'])
                self._delete('members', member1['member']['id'])
                self._delete('members', member2['member']['id'])
class TestLoadBalancerXML(TestLoadBalancer):
    """Re-runs the entire TestLoadBalancer suite over the XML wire format."""
    fmt = 'xml'
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core custom tags."""
__author__ = 'John Orr (jorr@google.com)'
import os
import re
import urllib
import urlparse
from xml.etree import cElementTree
import markdown
import appengine_config
from common import crypto
from common import jinja_utils
from common import schema_fields
from common import tags
from common import utils as common_utils
from controllers import utils
from models import courses
from models import custom_modules
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
# URL prefix under which this module's handlers and resources are served.
_RESOURCE_PREFIX = '/modules/core_tags'
RESOURCE_FOLDER = _RESOURCE_PREFIX + '/resources/'
_OEDITOR_RESOURCE_FOLDER = '/modules/oeditor/resources/'
# Client-side scripts supporting the Google Drive tag and iframe resizing.
_DRIVE_TAG_REFRESH_SCRIPT = RESOURCE_FOLDER + 'drive_tag_refresh.js'
_IFRAME_RESIZE_SCRIPT = _OEDITOR_RESOURCE_FOLDER + 'resize_iframes.js'
_PARENT_FRAME_SCRIPT = RESOURCE_FOLDER + 'drive_tag_parent_frame.js'
_SCRIPT_MANAGER_SCRIPT = RESOURCE_FOLDER + 'drive_tag_script_manager.js'
# On-disk locations of this module's static resources and templates.
_RESOURCE_ABSPATH = os.path.join(os.path.dirname(__file__), 'resources')
_TEMPLATES_ABSPATH = os.path.join(os.path.dirname(__file__), 'templates')
# Namespaced routes for the Google Drive REST handler and tag renderer.
_GOOGLE_DRIVE_TAG_PATH = _RESOURCE_PREFIX + '/googledrivetag'
_GOOGLE_DRIVE_TAG_RENDERER_PATH = _RESOURCE_PREFIX + '/googledrivetagrenderer'
def _escape_url(url, force_https=True):
    """Quote the path and query of *url*; optionally coerce scheme to https."""
    scheme, netloc, path, query, unused_fragment = urlparse.urlsplit(url)
    if force_https:
        scheme = 'https'
    return urlparse.urlunsplit((
        scheme,
        netloc,
        urllib.quote(path),
        urllib.quote_plus(query, '=?&;'),
        unused_fragment))
def _replace_url_query(url, new_query):
    """Return *url* with its query string swapped for *new_query*."""
    parts = urlparse.urlsplit(url)
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, parts.path, new_query, parts.fragment))
class _Runtime(object):
    """Derives runtime configuration state from CB application context."""

    def __init__(self, app_context):
        self._app_context = app_context
        self._environ = self._app_context.get_environ()

    def can_edit(self):
        """Whether the current user administers this course."""
        return roles.Roles.is_course_admin(self._app_context)

    def courses_can_use_google_apis(self):
        return courses.COURSES_CAN_USE_GOOGLE_APIS.value

    def configured(self):
        """True when Google APIs are enabled and both credentials are set."""
        return (
            self.courses_can_use_google_apis() and
            bool(self.get_api_key()) and
            bool(self.get_client_id()))

    def _environ_lookup(self, config_key):
        # Config keys are colon-delimited paths into the course environ dict;
        # missing levels fall through to the empty string.
        first, second, third = config_key.split(':')
        return self._environ.get(first, {}).get(second, {}).get(third, '')

    def get_api_key(self):
        return self._environ_lookup(courses.CONFIG_KEY_GOOGLE_API_KEY)

    def get_client_id(self):
        return self._environ_lookup(courses.CONFIG_KEY_GOOGLE_CLIENT_ID)

    def get_slug(self):
        return self._app_context.get_slug()
class CoreTag(tags.BaseTag):
    """All core custom tags derive from this class."""

    @classmethod
    def vendor(cls):
        # Vendor prefix for tag bindings (e.g. 'gcb-googledoc').
        return 'gcb'

    @classmethod
    def create_icon_url(cls, name):
        """Creates a URL for an icon with a specific name."""
        return os.path.join(RESOURCE_FOLDER, name)
class GoogleDoc(CoreTag):
    """Custom tag for a Google Doc."""

    @classmethod
    def name(cls):
        return 'Google Doc'

    def render(self, node, unused_handler):
        """Renders the published document inside a fixed-width iframe."""
        # Default to a 300px-tall embed when the author set no height.
        height = node.attrib.get('height') or '300'
        link = node.attrib.get('link')
        # Force the embedded view of the published document.
        url = _escape_url(_replace_url_query(link, 'embedded=true'))
        iframe = cElementTree.XML("""
<iframe class="google-doc" title="Google Doc" type="text/html" frameborder="0">
</iframe>""")
        iframe.set('src', url)
        # Width is fixed at 700px; only the height is author-configurable.
        iframe.set('style', 'width: %spx; height: %spx' % (700, height))
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('docs.png')

    def get_schema(self, unused_handler):
        """Editor schema: the published-document link and embed height."""
        reg = schema_fields.FieldRegistry(GoogleDoc.name())
        reg.add_property(
            # To get this value, users do File > Publish to the web..., click
            # 'Start publishing', and then copy and paste the Document link.
            # Changes to the publication status of a document or to its
            # contents do not appear instantly.
            schema_fields.SchemaField(
                'link', 'Document Link', 'string',
                optional=True,
                description=('Provide the "Document Link" from the Google Docs '
                             '"Publish to the web" dialog')))
        reg.add_property(
            schema_fields.SchemaField(
                'height', 'Height', 'string', i18n=False,
                optional=True,
                extra_schema_dict_values={'value': '300'},
                description=('Height of the document, in pixels. Width will be '
                             'set automatically')))
        return reg
class GoogleDrive(CoreTag, tags.ContextAwareTag):
    """Custom tag for Google Drive items."""

    # Type id used to key saved copies of Drive content in ContentChunk.
    CONTENT_CHUNK_TYPE = 'google-drive'

    @classmethod
    def additional_dirs(cls):
        return [_RESOURCE_ABSPATH]

    @classmethod
    def extra_css_files(cls):
        return ['google_drive_tag.css']

    @classmethod
    def extra_js_files(cls):
        return ['drive_tag_child_frame.js', 'google_drive_tag_lightbox.js']

    @classmethod
    def name(cls):
        return 'Google Drive'

    @classmethod
    def on_register(cls):
        # Hook extra <script> URLs into the object editor while enabled.
        oeditor.ObjectEditor.EXTRA_SCRIPT_TAG_URLS.append(
            cls._oeditor_extra_script_tags_urls)

    @classmethod
    def on_unregister(cls):
        oeditor.ObjectEditor.EXTRA_SCRIPT_TAG_URLS.remove(
            cls._oeditor_extra_script_tags_urls)

    @classmethod
    def _oeditor_extra_script_tags_urls(cls):
        """Returns the script URLs the object editor needs for this tag."""
        script_urls = []
        if courses.COURSES_CAN_USE_GOOGLE_APIS.value:
            # Order matters here because scripts are inserted in the order they
            # are found in this list, and later ones may refer to symbols from
            # earlier ones.
            script_urls.append(_SCRIPT_MANAGER_SCRIPT)
            script_urls.append(_PARENT_FRAME_SCRIPT)
        return script_urls

    def get_icon_url(self):
        return self.create_icon_url('drive.png')

    def get_schema(self, handler):
        """Editor schema; degrades to an 'unavailable' notice if unconfigured."""
        api_key = None
        client_id = None
        if handler:
            runtime = _Runtime(handler.app_context)
            if not runtime.configured():
                return self.unavailable_schema(
                    'Google Drive is not available. Please make sure the '
                    'global gcb_courses_can_use_google_apis setting is True '
                    'and set the Google API Key and Google Client ID in course '
                    'settings in order to use this tag.')
            api_key = runtime.get_api_key()
            client_id = runtime.get_client_id()
        reg = schema_fields.FieldRegistry(GoogleDrive.name())
        reg.add_property(
            schema_fields.SchemaField(
                'document-id', 'Document ID', 'string',
                optional=True,  # Validation enforced by JS code.
                description='The ID of the Google Drive item you want to '
                'use', i18n=False, extra_schema_dict_values={
                    'api-key': api_key,
                    'client-id': client_id,
                    'type-id': self.CONTENT_CHUNK_TYPE,
                    'xsrf-token': GoogleDriveRESTHandler.get_xsrf_token(),
                }))
        return reg

    def render(self, node, context):
        """Renders the saved Drive content in an iframe, plus admin controls."""
        runtime = _Runtime(context.handler.app_context)
        resource_id = node.attrib.get('document-id')
        src = self._get_tag_renderer_url(
            runtime.get_slug(), self.CONTENT_CHUNK_TYPE, resource_id)
        tag = cElementTree.Element('div')
        tag.set('class', 'google-drive google-drive-container')
        # Course admins additionally get a refresh-controls block carrying
        # the credentials and XSRF token the client-side scripts need.
        if runtime.can_edit():
            controls = cElementTree.Element('div')
            controls.set('class', 'google-drive google-drive-controls')
            controls.set('data-api-key', runtime.get_api_key())
            controls.set('data-client-id', runtime.get_client_id())
            controls.set('data-document-id', resource_id)
            controls.set(
                'data-xsrf-token', GoogleDriveRESTHandler.get_xsrf_token())
            tag.append(controls)
        iframe = cElementTree.Element('iframe')
        iframe.set(
            'class',
            'google-drive google-drive-content-iframe gcb-needs-resizing')
        iframe.set('frameborder', '0')
        iframe.set('scrolling', 'no')
        iframe.set('src', src)
        iframe.set('title', 'Google Drive')
        iframe.set('width', '100%')
        tag.append(iframe)
        return tag

    def rollup_header_footer(self, context):
        """Returns per-page (header, footer) script elements for this tag."""
        runtime = _Runtime(context.handler.app_context)
        can_edit = runtime.can_edit()
        srcs = [_IFRAME_RESIZE_SCRIPT]
        if can_edit:  # Harmless but wasteful to give to non-admins.
            srcs = [_SCRIPT_MANAGER_SCRIPT] + srcs
        header = cElementTree.Element('div')
        for src in srcs:
            script = cElementTree.Element('script')
            script.set('src', src)
            header.append(script)
        # Put in footer so other scripts will already be loaded when our main
        # fires. Give script to admins only (though note that even if non-admins
        # grab the script we won't give them the XSRF tokens they need to issue
        # CB AJAX ops).
        footer = cElementTree.Element('div')
        if can_edit:
            script = cElementTree.Element('script')
            script.set('src', _DRIVE_TAG_REFRESH_SCRIPT)
            footer.append(script)
        return (header, footer)

    def _get_tag_renderer_url(self, slug, type_id, resource_id):
        # Builds the course-relative URL of GoogleDriveTagRenderer.
        args = urllib.urlencode(
            {'type_id': type_id, 'resource_id': resource_id})
        return '%s%s?%s' % (slug, _GOOGLE_DRIVE_TAG_RENDERER_PATH, args)
class GoogleDriveRESTHandler(utils.BaseRESTHandler):
    """REST endpoint that saves fetched Drive content as a ContentChunk."""

    _XSRF_TOKEN_NAME = 'modules-core-tags-google-drive'
    XSRF_TOKEN_REQUEST_KEY = 'xsrf_token'

    @classmethod
    def get_xsrf_token(cls):
        return crypto.XsrfTokenManager.create_xsrf_token(cls._XSRF_TOKEN_NAME)

    def put(self):
        """Saves posted Drive contents; 404s when Google APIs are disabled."""
        if not courses.COURSES_CAN_USE_GOOGLE_APIS.value:
            self.error(404)
            return

        request = transforms.loads(self.request.get('request', ''))
        if not self.assert_xsrf_token_or_fail(
                request, self._XSRF_TOKEN_NAME, {}):
            return

        contents = request.get('contents')
        document_id = request.get('document_id')
        type_id = request.get('type_id')
        if not (contents and document_id):
            transforms.send_json_response(
                self, 400, 'Save failed; no Google Drive item chosen.')
            return
        if not type_id:
            transforms.send_json_response(
                self, 400, 'Save failed; type_id not set')
            return

        key = None
        try:
            key = self._save_content_chunk(contents, type_id, document_id)
        except Exception, e:  # On purpose. pylint: disable-msg=broad-except
            transforms.send_json_response(
                self, 500, 'Error when saving: %s' % e)
            return

        transforms.send_json_response(
            self, 200, 'Success.', payload_dict={'key': str(key)})

    def _save_content_chunk(self, contents, type_id, resource_id):
        """Creates or updates the ContentChunk for (type_id, resource_id)."""
        key = None
        uid = models.ContentChunkDAO.make_uid(type_id, resource_id)
        matches = models.ContentChunkDAO.get_by_uid(uid)

        if not matches:
            key = models.ContentChunkDAO.save(models.ContentChunkDTO({
                'content_type': 'text/html',
                'contents': contents,
                'resource_id': resource_id,
                'type_id': type_id,
            }))
        else:
            # There is a data race in the DAO -- it's possible to create two
            # entries at the same time with the same UID. If that happened,
            # use the first one saved.
            dto = matches[0]
            dto.contents = contents
            dto.content_type = 'text/html'
            key = models.ContentChunkDAO.save(dto)

        return key
class GoogleDriveTagRenderer(utils.BaseHandler):
    """Serves saved Drive content for display inside the tag's iframe."""

    def get(self):
        """Renders the ContentChunk for the requested (type_id, resource_id)."""
        if not courses.COURSES_CAN_USE_GOOGLE_APIS.value:
            self.error(404)
            return

        resource_id = self.request.get('resource_id')
        type_id = self.request.get('type_id')
        if not (resource_id and type_id):
            self._handle_error(400, 'Bad request')
            return

        matches = models.ContentChunkDAO.get_by_uid(
            models.ContentChunkDAO.make_uid(type_id, resource_id))

        if not matches:
            self._handle_error(404, 'Content chunk not found')
            return

        # There is a data race in the DAO -- it's possible to create two entries
        # at the same time with the same UID. If that happened, use the first
        # one saved.
        chunk = matches[0]
        template = jinja_utils.get_template(
            'drive_item.html', [_TEMPLATES_ABSPATH])
        self.response.out.write(template.render({'contents': chunk.contents}))

    def _handle_error(self, code, message):
        # Renders the error template with the given HTTP status and message.
        template = jinja_utils.get_template(
            'drive_error.html', [_TEMPLATES_ABSPATH])
        self.error(code)
        self.response.out.write(template.render({
            'code': code,
            'message': message,
        }))
class GoogleSpreadsheet(CoreTag):
    """Custom tag for a Google Spreadsheet."""

    @classmethod
    def name(cls):
        return 'Google Spreadsheet'

    def render(self, node, unused_handler):
        """Renders the published spreadsheet inside a fixed-width iframe."""
        # Default to a 300px-tall embed when the author set no height.
        height = node.attrib.get('height') or '300'
        link = node.attrib.get('link')
        # Drop any '&output=...' suffix and disable the spreadsheet chrome.
        url = _escape_url('%s&chrome=false' % link.split('&output')[0])
        iframe = cElementTree.XML("""
<iframe class="google-spreadsheet" title="Google Spreadsheet" type="text/html"
  frameborder="0">
</iframe>""")
        iframe.set('src', url)
        # Width is fixed at 700px; only the height is author-configurable.
        iframe.set('style', 'width: %spx; height: %spx' % (700, height))
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('spreadsheets.png')

    def get_schema(self, unused_handler):
        """Editor schema: the published-spreadsheet link and embed height."""
        reg = schema_fields.FieldRegistry(GoogleSpreadsheet.name())
        reg.add_property(
            # To get this value, users do File > Publish to the web..., click
            # 'Start publishing', and then copy and paste the link above 'Copy
            # and paste the link above'. Changes to the publication status of a
            # document or to its contents do not appear instantly.
            schema_fields.SchemaField(
                'link', 'Link', 'string',
                optional=True,
                description=('Provide the link from the Google Spreadsheets '
                             '"Publish to the web" dialog')))
        reg.add_property(
            schema_fields.SchemaField(
                'height', 'Height', 'string', i18n=False,
                optional=True,
                extra_schema_dict_values={'value': '300'},
                description=('Height of the spreadsheet, in pixels. Width will '
                             'be set automatically')))
        return reg
class YouTube(CoreTag):
    """Custom tag embedding a YouTube video, with optional event tracking."""

    @classmethod
    def name(cls):
        return 'YouTube Video'

    def render(self, node, unused_handler):
        # Tracking-enabled rendering is used only when the deployment can
        # persist tag events.
        video_id = node.attrib.get('videoid')
        if utils.CAN_PERSIST_TAG_EVENTS.value:
            return self._render_with_tracking(video_id)
        else:
            return self._render_no_tracking(video_id)

    def _render_no_tracking(self, video_id):
        """Embed video without event tracking support."""
        you_tube_url = (
            'https://www.youtube.com/embed/%s'
            '?feature=player_embedded&rel=0') % video_id
        iframe = cElementTree.XML("""
<div class="gcb-video-container">
  <iframe class="youtube-player" title="YouTube Video Player"
    type="text/html" frameborder="0" allowfullscreen="allowfullscreen">
  </iframe>
</div>""")
        iframe[0].set('src', you_tube_url)
        return iframe

    def _render_with_tracking(self, video_id):
        """Embed video and enable event tracking."""
        # The video id is embedded into a JS string, so escape it first.
        video_id = jinja_utils.js_string_raw(video_id)
        uid = common_utils.generate_instance_id()

        dom = cElementTree.XML("""
<p>
  <script></script>
  <script></script>
</p>""")
        dom.attrib['id'] = uid
        dom[0].attrib['src'] = os.path.join(RESOURCE_FOLDER, 'youtube_video.js')
        dom[1].text = 'gcbTagYoutubeEnqueueVideo("%s", "%s");' % (video_id, uid)
        return dom

    def get_icon_url(self):
        return self.create_icon_url('youtube.png')

    def get_schema(self, unused_handler):
        """Editor schema: just the YouTube video id."""
        reg = schema_fields.FieldRegistry(YouTube.name())
        reg.add_property(schema_fields.SchemaField(
            'videoid', 'Video Id', 'string',
            optional=True,
            description='Provide YouTube video ID (e.g. Kdg2drcUjYI)'))
        return reg
class Html5Video(CoreTag):
    """Custom tag embedding an HTML5 <video>, with optional event tracking."""

    @classmethod
    def name(cls):
        return 'HTML5 Video'

    def render(self, node, unused_handler):
        # Tracking scripts are only emitted when the deployment can persist
        # tag events; the instance id is escaped for embedding in JS.
        if utils.CAN_PERSIST_TAG_EVENTS.value:
            tracking_text = (
                '<script src="' + os.path.join(
                    RESOURCE_FOLDER, 'html5_video.js') + '">' +
                '</script>' +
                '<script>' +
                '  gcbTagHtml5TrackVideo("%s");' % (
                    jinja_utils.js_string_raw(node.attrib.get('instanceid'))) +
                '</script>')
        else:
            tracking_text = ''
        video_text = (
            '<div>' +
            '  <video></video>'
            '%s' % tracking_text +
            '</div>')
        video = cElementTree.XML(video_text)
        video[0].set('id', node.attrib.get('instanceid'))
        video[0].set('src', node.attrib.get('url'))
        # Width/height are only set when the author supplied them.
        if node.attrib.get('width'):
            video[0].set('width', node.attrib.get('width'))
        if node.attrib.get('height'):
            video[0].set('height', node.attrib.get('height'))
        video[0].set('controls', 'true')
        return video

    def get_icon_url(self):
        return self.create_icon_url('html5-badge-h-solo.png')

    def get_schema(self, unused_handler):
        """Editor schema: video URL plus optional width and height."""
        reg = schema_fields.FieldRegistry(Html5Video.name())
        reg.add_property(
            schema_fields.SchemaField(
                'url', 'Video URL', 'url',
                optional=False,
                description='URL of the video. Note that playing a video'
                'from Google Docs is supported; add "&export=download". E.g.,'
                'https://docs.google.com/a/google.com/uc?authuser=0'
                '&id=0B82t9jeypLokMERMQ1g5Q3NFU1E&export=download'))
        reg.add_property(schema_fields.SchemaField(
            'width', 'Width', 'integer',
            optional=True,
            description='Width, in pixels.'))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'integer',
            optional=True,
            description='Height, in pixels.'))
        return reg
class GoogleGroup(CoreTag):
    """Custom tag embedding a Google Groups forum category."""

    @classmethod
    def name(cls):
        return 'Google Group'

    def render(self, node, handler):
        # Note: in Firefox, this component requires a full hostname to work.
        # If you are working in the development environment and are accessing
        # this component at localhost, please replace 'localhost' with
        # '127.0.0.1' instead.
        _, netloc, _, _, _ = urlparse.urlsplit(handler.request.uri)
        parent_url_suffix = ''
        # Outside a localhost dev environment, pass the current page URL so
        # the embedded forum can communicate with its parent frame.
        if (appengine_config.PRODUCTION_MODE or
            not netloc.startswith('localhost')):
            parent_url_suffix = (
                '&parenturl=%s' % urllib.quote(handler.request.uri, safe=''))
        group_name = node.attrib.get('group')
        category_name = node.attrib.get('category')
        embedded_forum_url = (
            'https://groups.google.com/forum/embed/?hl=en%s'
            '#!categories/%s/%s' % (
                parent_url_suffix,
                urllib.quote(group_name),
                urllib.quote(category_name)
            ))
        iframe = cElementTree.XML("""
<p>
  <iframe class="forum-embed" title="Google Group Embed"
    type="text/html" width="700" height="300" frameborder="0">
  </iframe>
</p>""")
        iframe[0].set('src', embedded_forum_url)
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('forumembed.png')

    def get_schema(self, unused_handler):
        """Editor schema: the group and category to embed."""
        reg = schema_fields.FieldRegistry(GoogleGroup.name())
        reg.add_property(schema_fields.SchemaField(
            'group', 'Group Name', 'string', optional=True, i18n=False,
            description='Name of the Google Group (e.g. mapping-with-google)'))
        reg.add_property(schema_fields.SchemaField(
            'category', 'Category Name', 'string', optional=True, i18n=False,
            description='Name of the Category (e.g. unit5-2-annotation)'))
        return reg
class IFrame(CoreTag):
    """Custom tag embedding an arbitrary URL in an iframe."""

    def render(self, node, unused_handler):
        src = node.attrib.get('src')
        title = node.attrib.get('title')
        # Dimensions default to 650x400 when the author set none.
        height = node.attrib.get('height') or '400'
        width = node.attrib.get('width') or '650'
        iframe = cElementTree.XML(
            '<iframe style="border: 0;"></iframe>'
        )
        # The author may embed http content, so do not force https here.
        iframe.set('src', _escape_url(src, force_https=False))
        iframe.set('title', title)
        iframe.set('width', width)
        iframe.set('height', height)
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('iframe.png')

    def get_schema(self, unused_handler):
        """Editor schema: source URL, title, and dimensions."""
        reg = schema_fields.FieldRegistry(IFrame.name())
        reg.add_property(schema_fields.SchemaField(
            'src', 'Source URL', 'string',
            optional=True,
            description='Provide source URL for iframe (including http/https)'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            optional=True,
            description='Provide title of iframe'))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '400'},
            description=('Height of the iframe')))
        reg.add_property(schema_fields.SchemaField(
            'width', 'Width', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '650'},
            description=('Width of the iframe')))
        return reg
class FlashCard(CoreTag):
    """Custom tag embedding a flash-card widget page in an iframe.

    The widget page at ``src`` receives the URL of its question/answer
    content via a ``content`` query parameter.
    """

    def render(self, node, unused_handler):
        src = node.attrib.get('src')
        title = node.attrib.get('title')
        # Dimensions default to 650x400 when the author set none.
        height = node.attrib.get('height') or '400'
        width = node.attrib.get('width') or '650'
        content = node.attrib.get('content') or ''
        iframe = cElementTree.XML(
            '<iframe style="border: 0;"></iframe>'
        )
        # _escape_url quotes the query part, so the content URL gets encoded;
        # https is not forced so http widget pages keep working.
        iframe.set('src', _escape_url(src + '?content=' + content, force_https=False))
        iframe.set('title', title)
        iframe.set('width', width)
        iframe.set('height', height)
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('iframe.png')

    def get_schema(self, unused_handler):
        """Editor schema: source URL, title, content URL, and dimensions."""
        # Bug fix: the registry was titled with IFrame.name() -- a copy-paste
        # leftover from the IFrame tag this class was modeled on.
        reg = schema_fields.FieldRegistry(FlashCard.name())
        reg.add_property(schema_fields.SchemaField(
            'src', 'Source URL', 'string',
            optional=True,
            description='Provide source URL for iframe (including http/https)'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            optional=True,
            description='Provide title of iframe'))
        reg.add_property(schema_fields.SchemaField(
            'content', 'Content', 'string',
            optional=True,
            description='Provide url of the content (questions and answers)'))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '400'},
            description=('Height of the iframe')))
        reg.add_property(schema_fields.SchemaField(
            'width', 'Width', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '650'},
            description=('Width of the iframe')))
        return reg
class Include(CoreTag):
    """Custom tag that inlines an HTML file from the course's assets/html."""

    def render(self, node, handler):
        # Strip leading slashes so the path is relative to BUNDLE_ROOT.
        template_path = re.sub('^/+', '', node.attrib.get('path'))
        base_path = os.path.dirname(template_path)
        base_file = os.path.basename(template_path)
        handler.init_template_values(handler.app_context.get_environ())
        handler.template_value['base_path'] = base_path
        html_text = handler.render_template_to_html(
            handler.template_value, base_file,
            additional_dirs=[
                os.path.join(appengine_config.BUNDLE_ROOT, 'views'),
                appengine_config.BUNDLE_ROOT,
                os.path.join(appengine_config.BUNDLE_ROOT, base_path),
            ])
        return tags.html_string_to_element_tree(html_text)

    def get_icon_url(self):
        return self.create_icon_url('include.png')

    def get_schema(self, handler):
        """Editor schema: a select of includable files under assets/html."""
        expected_prefix = os.path.join(appengine_config.BUNDLE_ROOT,
                                       'assets/html')
        select_data = []
        if handler:
            all_files = handler.app_context.fs.list(expected_prefix,
                                                    include_inherited=True)
            for name in all_files:
                if name.startswith(expected_prefix):
                    # Display names are shown relative to /assets/html/.
                    name = name.replace(appengine_config.BUNDLE_ROOT, '')
                    select_data.append(
                        (name, name.replace('/assets/html/', '')))
        reg = schema_fields.FieldRegistry(Include.name())
        reg.add_property(schema_fields.SchemaField(
            'path', 'File Path', 'string', optional=False,
            select_data=select_data,
            description='Select a file from within assets/html.  '
            'The contents of this file will be inserted verbatim '
            'at this point.  Note: HTML files for inclusion may '
            'also be uploaded as assets.'))
        return reg
class Markdown(tags.ContextAwareTag, CoreTag):
    """Custom tag that renders Markdown text supplied in the tag body."""

    @classmethod
    def name(cls):
        return 'Markdown'

    def get_icon_url(self):
        return self.create_icon_url('markdown.png')

    def render(self, node, context):
        # The markdown is "text" type in the schema and so is presented in the
        # tag's body.
        html = ''
        if node.text:
            html = markdown.markdown(node.text)
        return tags.html_string_to_element_tree(
            '<div class="gcb-markdown">%s</div>' % html)

    def rollup_header_footer(self, context):
        """Include markdown css only when markdown tag is present."""
        header = tags.html_string_to_element_tree(
            '<link href="%s/markdown.css" rel="stylesheet" '
            'type="text/css">' % RESOURCE_FOLDER)
        footer = tags.html_string_to_element_tree('')
        return (header, footer)

    def get_schema(self, unused_handler):
        """Editor schema: the Markdown source text."""
        reg = schema_fields.FieldRegistry(Markdown.name())
        reg.add_property(schema_fields.SchemaField(
            'markdown', 'Markdown', 'text', optional=False,
            description='Provide '
            '<a target="_blank" '
            'href="http://daringfireball.net/projects/markdown/syntax">'
            'markdown</a> text'))
        return reg
# Module handle assigned by register_module(); None until registration.
custom_module = None


def register_module():
    """Registers this module in the registry."""

    custom_tags = [
        FlashCard, GoogleDoc, GoogleDrive, GoogleSpreadsheet, YouTube, Html5Video,
        GoogleGroup, IFrame, Include, Markdown]

    def make_binding_name(custom_tag):
        # Binding names are the lowercased class name with a 'gcb-' prefix.
        return 'gcb-%s' % custom_tag.__name__.lower()

    def on_module_disable():
        for custom_tag in custom_tags:
            tags.Registry.remove_tag_binding(make_binding_name(custom_tag))

        # Unregister extra libraries required by GoogleDrive
        GoogleDrive.on_unregister()

    def on_module_enable():
        for custom_tag in custom_tags:
            tags.Registry.add_tag_binding(
                make_binding_name(custom_tag), custom_tag)

        # Register extra libraries required by GoogleDrive
        GoogleDrive.on_register()

    global custom_module

    # Static resources are global; the Drive handlers are course-namespaced.
    global_routes = [(
        os.path.join(RESOURCE_FOLDER, '.*'), tags.ResourcesHandler)]

    namespaced_routes = [
        (_GOOGLE_DRIVE_TAG_PATH, GoogleDriveRESTHandler),
        (_GOOGLE_DRIVE_TAG_RENDERER_PATH, GoogleDriveTagRenderer),
    ]

    custom_module = custom_modules.Module(
        'Core Custom Tags Module',
        'A module that provides core custom tags.',
        global_routes, namespaced_routes,
        notify_module_enabled=on_module_enable,
        notify_module_disabled=on_module_disable)
    return custom_module
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BQL execution.
This module implements the main dispatcher for executing different
kinds of BQL phrases. Queries, as in ``SELECT``, ``ESTIMATE``, and so
on, are compiled into SQL; commands, as in ``CREATE TABLE``,
``INSERT``, and the rest of the DDL/DML (Data Definition/Modelling
language) are executed directly.
"""
import itertools
import apsw
import bayeslite.ast as ast
import bayeslite.bqlfn as bqlfn
import bayeslite.compiler as compiler
import bayeslite.core as core
import bayeslite.txn as txn
from bayeslite.exception import BQLError
from bayeslite.guess import bayesdb_guess_stattypes
from bayeslite.read_csv import bayesdb_read_csv_file
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import casefold
from bayeslite.util import cursor_value
def execute_phrase(bdb, phrase, bindings=()):
    """Execute the BQL AST phrase `phrase` and return a cursor of results.

    Queries (``SELECT``, ``ESTIMATE``, ...) are compiled into SQL and
    executed; commands (DDL/DML such as ``CREATE TABLE``, ``ALTER
    POPULATION``, ``ANALYZE``) are interpreted directly.  `bindings`
    supplies values for the parameters of a parametrized phrase.
    """
    if isinstance(phrase, ast.Parametrized):
        n_numpar = phrase.n_numpar
        nampar_map = phrase.nampar_map
        phrase = phrase.phrase
        assert 0 < n_numpar
    else:
        n_numpar = 0
        nampar_map = None
        # Ignore extraneous bindings. XXX Bad idea?

    if ast.is_query(phrase):
        # Compile the query in the transaction in case we need to
        # execute subqueries to determine column lists. Compiling is
        # a quick tree descent, so this should be fast.
        out = compiler.Output(n_numpar, nampar_map, bindings)
        with bdb.savepoint():
            compiler.compile_query(bdb, phrase, out)
        winders, unwinders = out.getwindings()
        return execute_wound(bdb, winders, unwinders, out.getvalue(),
            out.getbindings())

    if isinstance(phrase, ast.Begin):
        txn.bayesdb_begin_transaction(bdb)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.Rollback):
        txn.bayesdb_rollback_transaction(bdb)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.Commit):
        txn.bayesdb_commit_transaction(bdb)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.CreateTabAs):
        assert ast.is_query(phrase.query)
        with bdb.savepoint():
            if core.bayesdb_has_table(bdb, phrase.name):
                if phrase.ifnotexists:
                    return empty_cursor(bdb)
                else:
                    raise BQLError(bdb,
                        'Name already defined as table: %s' %
                            (repr(phrase.name),))
            out = compiler.Output(n_numpar, nampar_map, bindings)
            qt = sqlite3_quote_name(phrase.name)
            temp = 'TEMP ' if phrase.temp else ''
            ifnotexists = 'IF NOT EXISTS ' if phrase.ifnotexists else ''
            out.write('CREATE %sTABLE %s%s AS ' % (temp, ifnotexists, qt))
            compiler.compile_query(bdb, phrase.query, out)
            winders, unwinders = out.getwindings()
            with compiler.bayesdb_wind(bdb, winders, unwinders):
                bdb.sql_execute(out.getvalue(), out.getbindings())
        return empty_cursor(bdb)

    if isinstance(phrase, ast.CreateTabCsv):
        with bdb.savepoint():
            table_exists = core.bayesdb_has_table(bdb, phrase.name)
            if table_exists:
                if phrase.ifnotexists:
                    return empty_cursor(bdb)
                else:
                    raise BQLError(bdb, 'Table already exists: %s' %
                        (repr(phrase.name),))
            bayesdb_read_csv_file(
                bdb, phrase.name, phrase.csv, header=True, create=True)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.DropTab):
        with bdb.savepoint():
            sql = 'SELECT COUNT(*) FROM bayesdb_population WHERE tabname = ?'
            cursor = bdb.sql_execute(sql, (phrase.name,))
            if 0 < cursor_value(cursor):
                raise BQLError(bdb, 'Table still in use by populations: %s' %
                    (repr(phrase.name),))
            bdb.sql_execute('DELETE FROM bayesdb_column WHERE tabname = ?',
                (phrase.name,))
            ifexists = 'IF EXISTS ' if phrase.ifexists else ''
            qt = sqlite3_quote_name(phrase.name)
            return bdb.sql_execute('DROP TABLE %s%s' % (ifexists, qt))

    if isinstance(phrase, ast.AlterTab):
        with bdb.savepoint():
            table = phrase.table
            if not core.bayesdb_has_table(bdb, table):
                raise BQLError(bdb, 'No such table: %s' % (repr(table),))
            for cmd in phrase.commands:
                if isinstance(cmd, ast.AlterTabRenameTab):
                    # If the names differ only in case, we have to do
                    # some extra work because SQLite will reject the
                    # table rename. Note that we may even have table
                    # == cmd.name here, but if the stored table name
                    # differs in case from cmd.name, we want to update
                    # it anyway.
                    if casefold(table) == casefold(cmd.name):
                        # Go via a temporary table.
                        temp = table + '_temp'
                        while core.bayesdb_has_table(bdb, temp):
                            temp += '_temp'
                        rename_table(bdb, table, temp)
                        rename_table(bdb, temp, cmd.name)
                    else:
                        # Make sure nothing else has this name and
                        # rename it.
                        if core.bayesdb_has_table(bdb, cmd.name):
                            raise BQLError(bdb,
                                'Name already defined as table: %s'
                                    % (repr(cmd.name),))
                        rename_table(bdb, table, cmd.name)
                    # If table has implicit population, rename it too.
                    if core.bayesdb_table_has_implicit_population(
                            bdb, cmd.name):
                        populations = \
                            core.bayesdb_table_populations(bdb, cmd.name)
                        assert len(populations) == 1
                        population_name = core.bayesdb_population_name(
                            bdb, populations[0])
                        qt = sqlite3_quote_name(cmd.name)
                        qp = sqlite3_quote_name(population_name)
                        bdb.execute('ALTER POPULATION %s RENAME TO %s'
                            % (qp, qt))
                    # Remember the new name for subsequent commands.
                    table = cmd.name
                elif isinstance(cmd, ast.AlterTabRenameCol):
                    # XXX Need to deal with this in the compiler.
                    raise NotImplementedError('Renaming columns'
                        ' not yet implemented.')
                    # NOTE: everything below is intentionally dead until
                    # the compiler learns about column renames; kept so
                    # the eventual implementation is not lost.
                    # Make sure the old name exist and the new name does not.
                    old_folded = casefold(cmd.old)
                    new_folded = casefold(cmd.new)
                    if old_folded != new_folded:
                        if not core.bayesdb_table_has_column(bdb, table,
                                cmd.old):
                            raise BQLError(bdb, 'No such column in table %s'
                                ': %s' %
                                (repr(table), repr(cmd.old)))
                        if core.bayesdb_table_has_column(bdb, table, cmd.new):
                            raise BQLError(bdb, 'Column already exists'
                                ' in table %s: %s' %
                                (repr(table), repr(cmd.new)))
                    # Update bayesdb_column. Everything else refers
                    # to columns by (tabname, colno) pairs rather than
                    # by names.
                    update_column_sql = '''
                        UPDATE bayesdb_column SET name = :new
                            WHERE tabname = :table AND name = :old
                    '''
                    total_changes = bdb._sqlite3.totalchanges()
                    bdb.sql_execute(update_column_sql, {
                        'table': table,
                        'old': cmd.old,
                        'new': cmd.new,
                    })
                    assert bdb._sqlite3.totalchanges() - total_changes == 1
                    # ...except backends may have the (case-folded) name cached.
                    if old_folded != new_folded:
                        populations_sql = '''
                            SELECT id FROM bayesdb_population WHERE tabname = ?
                        '''
                        cursor = bdb.sql_execute(populations_sql, (table,))
                        # Each population may have several generators;
                        # flatten before deduplicating.  (set() of the
                        # per-population lists would raise TypeError on
                        # unhashable lists and yield lists, not ids.)
                        generator_ids = set(itertools.chain.from_iterable(
                            core.bayesdb_population_generators(
                                bdb, population_id)
                            for (population_id,) in cursor))
                        for generator_id in generator_ids:
                            backend = core.bayesdb_generator_backend(bdb,
                                generator_id)
                            backend.rename_column(bdb, generator_id,
                                old_folded, new_folded)
                else:
                    assert False, 'Invalid alter table command: %s' % \
                        (cmd,)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.GuessSchema):
        if not core.bayesdb_has_table(bdb, phrase.table):
            raise BQLError(bdb, 'No such table : %s' % phrase.table)
        out = compiler.Output(0, {}, {})
        with bdb.savepoint():
            qt = sqlite3_quote_name(phrase.table)
            temptable = bdb.temp_table_name()
            qtt = sqlite3_quote_name(temptable)
            cursor = bdb.sql_execute('SELECT * FROM %s' % (qt,))
            column_names = [d[0] for d in cursor.description]
            rows = cursor.fetchall()
            stattypes = bayesdb_guess_stattypes(column_names, rows)
            distinct_value_counts = [
                len(set([row[i] for row in rows]))
                for i in range(len(column_names))
            ]
            # Stage the guesses in a temp table wound around the query.
            out.winder('''
                CREATE TEMP TABLE %s (
                    column TEXT,
                    stattype TEXT,
                    num_distinct INTEGER,
                    reason TEXT
                )
            ''' % (qtt,), ())
            for cn, st, ct in zip(column_names, stattypes, distinct_value_counts):
                out.winder('''
                    INSERT INTO %s VALUES (?, ?, ?, ?)
                ''' % (qtt), (cn, st[0], ct, st[1]))
            out.write('SELECT * FROM %s' % (qtt,))
            out.unwinder('DROP TABLE %s' % (qtt,), ())
        winders, unwinders = out.getwindings()
        return execute_wound(
            bdb, winders, unwinders, out.getvalue(), out.getbindings())

    if isinstance(phrase, ast.CreatePop):
        with bdb.savepoint():
            _create_population(bdb, phrase)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.DropPop):
        with bdb.savepoint():
            if not core.bayesdb_has_population(bdb, phrase.name):
                if phrase.ifexists:
                    return empty_cursor(bdb)
                raise BQLError(bdb, 'No such population: %r' % (phrase.name,))
            population_id = core.bayesdb_get_population(bdb, phrase.name)
            generator_ids = core.bayesdb_population_generators(
                bdb, population_id)
            if generator_ids:
                generators = [core.bayesdb_generator_name(bdb, gid)
                    for gid in generator_ids]
                raise BQLError(bdb, 'Population %r still has generators: %r' %
                    (phrase.name, generators))
            # XXX helpful error checking if generators still exist
            # XXX check change counts
            bdb.sql_execute('''
                DELETE FROM bayesdb_variable WHERE population_id = ?
            ''', (population_id,))
            bdb.sql_execute('''
                DELETE FROM bayesdb_population WHERE id = ?
            ''', (population_id,))
        return empty_cursor(bdb)

    if isinstance(phrase, ast.AlterPop):
        with bdb.savepoint():
            population = phrase.population
            if not core.bayesdb_has_population(bdb, population):
                raise BQLError(bdb, 'No such population: %s' %
                    (repr(population),))
            population_id = core.bayesdb_get_population(bdb, population)
            for cmd in phrase.commands:
                if isinstance(cmd, ast.AlterPopRenamePop):
                    table = core.bayesdb_population_table(bdb, population_id)
                    # Prevent renaming of implicit population directly, unless
                    # being called by ast.AlterTabRenameTab in which case the
                    # table name and population name will not be matching.
                    if core.bayesdb_population_is_implicit(bdb, population_id) \
                            and casefold(population) == casefold(table):
                        # (Space after 'implicit' restored; the strings
                        # are concatenated into one message.)
                        raise BQLError(bdb, 'Cannot rename implicit '
                            'population %s; rename base table instead'
                            % (population,))
                    # Make sure nothing else has this name.
                    if casefold(population) != casefold(cmd.name):
                        if core.bayesdb_has_population(bdb, cmd.name):
                            raise BQLError(bdb,
                                'Name already defined as population' ': %s'
                                % (repr(cmd.name),))
                    # Update bayesdb_population. Everything else
                    # refers to it by id.
                    update_generator_sql = '''
                        UPDATE bayesdb_population SET name = ? WHERE id = ?
                    '''
                    total_changes = bdb._sqlite3.totalchanges()
                    bdb.sql_execute(update_generator_sql,
                        (cmd.name, population_id))
                    assert bdb._sqlite3.totalchanges() - total_changes == 1
                    # If population has implicit generator, rename it too.
                    if core.bayesdb_population_has_implicit_generator(
                            bdb, population_id):
                        generators = core.bayesdb_population_generators(
                            bdb, population_id)
                        assert len(generators) == 1
                        generator_name = core.bayesdb_generator_name(
                            bdb, generators[0])
                        qp = sqlite3_quote_name(cmd.name)
                        qg = sqlite3_quote_name(generator_name)
                        bdb.execute('ALTER GENERATOR %s RENAME TO %s'
                            % (qg, qp,))
                    # Remember the new name for subsequent commands.
                    population = cmd.name
                elif isinstance(cmd, ast.AlterPopAddVar):
                    # Ensure column exists in base table.
                    table = core.bayesdb_population_table(bdb, population_id)
                    if not core.bayesdb_table_has_column(
                            bdb, table, cmd.name):
                        raise BQLError(bdb,
                            'No such variable in base table: %s'
                            % (cmd.name))
                    # Ensure variable not already in population.
                    if core.bayesdb_has_variable(
                            bdb, population_id, None, cmd.name):
                        raise BQLError(bdb,
                            'Variable already in population: %s'
                            % (cmd.name))
                    # Ensure there is at least one observation in the column.
                    qt = sqlite3_quote_name(table)
                    qc = sqlite3_quote_name(cmd.name)
                    cursor = bdb.sql_execute(
                        'SELECT COUNT(*) FROM %s WHERE %s IS NOT NULL' %
                        (qt, qc))
                    if cursor_value(cursor) == 0:
                        raise BQLError(bdb,
                            'Cannot add variable without any values: %s'
                            % (cmd.name))
                    # If stattype is None, guess.
                    if cmd.stattype is None:
                        cursor = bdb.sql_execute(
                            'SELECT %s FROM %s' % (qc, qt))
                        rows = cursor.fetchall()
                        [stattype, reason] = bayesdb_guess_stattypes(
                            [cmd.name], rows)[0]
                        # Fail if trying to model a key.
                        if stattype == 'key':
                            raise BQLError(bdb,
                                'Values in column %s appear to be keys.'
                                % (cmd.name,))
                        # Fail if cannot determine a stattype.
                        elif stattype == 'ignore':
                            raise BQLError(bdb,
                                'Failed to determine a stattype for %s, '
                                'please specify one manually.' % (cmd.name,))
                    # If user specified stattype, ensure it exists.
                    elif not core.bayesdb_has_stattype(bdb, cmd.stattype):
                        raise BQLError(bdb,
                            'Invalid stattype: %s' % (cmd.stattype))
                    else:
                        stattype = cmd.stattype
                    # Check that strings are not being modeled as numerical.
                    if stattype == 'numerical' \
                            and _column_contains_string(bdb, table, cmd.name):
                        raise BQLError(bdb,
                            'Numerical column contains string values: %r '
                            % (qc,))
                    with bdb.savepoint():
                        # Add the variable to the population.
                        core.bayesdb_add_variable(
                            bdb, population_id, cmd.name, stattype)
                        colno = core.bayesdb_variable_number(
                            bdb, population_id, None, cmd.name)
                        # Add the variable to each (initialized) generator in
                        # the population.
                        generator_ids = filter(
                            lambda g: core.bayesdb_generator_modelnos(bdb, g),
                            core.bayesdb_population_generators(
                                bdb, population_id),
                        )
                        for generator_id in generator_ids:
                            backend = core.bayesdb_generator_backend(
                                bdb, generator_id)
                            backend.add_column(bdb, generator_id, colno)
                elif isinstance(cmd, ast.AlterPopStatType):
                    # Check that no generators are defined for this population.
                    generators = core.bayesdb_population_generators(
                        bdb, population_id)
                    if generators:
                        raise BQLError(bdb,
                            'Cannot update statistical types for population '
                            '%s, it has generators: %s'
                            % (repr(population), repr(generators),))
                    # Check all the variables are in the population.
                    unknown = [
                        c for c in cmd.names if not
                        core.bayesdb_has_variable(bdb, population_id, None, c)
                    ]
                    if unknown:
                        raise BQLError(bdb,
                            'No such variables in population: %s'
                            % (repr(unknown)))
                    # Check the statistical type is valid.
                    if not core.bayesdb_has_stattype(bdb, cmd.stattype):
                        raise BQLError(bdb,
                            'Invalid statistical type: %r'
                            % (repr(cmd.stattype),))
                    # Check that strings are not being modeled as numerical.
                    if cmd.stattype == 'numerical':
                        table = core.bayesdb_population_table(
                            bdb, population_id)
                        numerical_string_vars = [
                            col for col in cmd.names
                            if _column_contains_string(bdb, table, col)
                        ]
                        if numerical_string_vars:
                            raise BQLError(bdb,
                                'Columns with string values modeled as '
                                'numerical: %r' % (numerical_string_vars,))
                    # Perform the stattype update.
                    colnos = [
                        core.bayesdb_variable_number(
                            bdb, population_id, None, c) for c in cmd.names
                    ]
                    qcolnos = ','.join('%d' % (colno,) for colno in colnos)
                    update_stattype_sql = '''
                        UPDATE bayesdb_variable SET stattype = ?
                            WHERE population_id = ? AND colno IN (%s)
                    ''' % (qcolnos,)
                    bdb.sql_execute(
                        update_stattype_sql,
                        (casefold(cmd.stattype), population_id,))
                else:
                    assert False, 'Invalid ALTER POPULATION command: %s' % \
                        (repr(cmd),)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.CreateGen):
        # Find the population.
        if not core.bayesdb_has_population(bdb, phrase.population):
            raise BQLError(bdb, 'No such population: %r' %
                (phrase.population,))
        population_id = core.bayesdb_get_population(bdb, phrase.population)
        # Find the backend, or use the default.
        backend_name = phrase.backend
        if phrase.backend is None:
            backend_name = 'cgpm'
        if backend_name not in bdb.backends:
            raise BQLError(bdb, 'No such backend: %s' %
                (repr(backend_name),))
        backend = bdb.backends[backend_name]
        # Retrieve the (possibly implicit) generator name.
        generator_name = phrase.name or phrase.population
        implicit = 1 if phrase.name is None else 0
        with bdb.savepoint():
            if core.bayesdb_has_generator(bdb, population_id, generator_name):
                if not phrase.ifnotexists:
                    raise BQLError(
                        bdb, 'Name already defined as generator: %s' %
                        (repr(generator_name),))
            else:
                # Insert a record into bayesdb_generator and get the
                # assigned id.
                bdb.sql_execute('''
                    INSERT INTO bayesdb_generator
                        (name, population_id, backend, implicit)
                        VALUES (?, ?, ?, ?)
                ''', (generator_name, population_id, backend.name(), implicit))
                generator_id = core.bayesdb_get_generator(
                    bdb, population_id, generator_name)
                # Do any backend-specific initialization.
                backend.create_generator(bdb, generator_id, phrase.schema)
        # All done. Nothing to return.
        return empty_cursor(bdb)

    if isinstance(phrase, ast.DropGen):
        with bdb.savepoint():
            if not core.bayesdb_has_generator(bdb, None, phrase.name):
                if phrase.ifexists:
                    return empty_cursor(bdb)
                raise BQLError(bdb, 'No such generator: %s' %
                    (repr(phrase.name),))
            generator_id = core.bayesdb_get_generator(bdb, None, phrase.name)
            backend = core.bayesdb_generator_backend(bdb, generator_id)
            # Backend-specific destruction.
            backend.drop_generator(bdb, generator_id)
            # Drop latent variables, models, and, finally, generator.
            drop_columns_sql = '''
                DELETE FROM bayesdb_variable WHERE generator_id = ?
            '''
            bdb.sql_execute(drop_columns_sql, (generator_id,))
            drop_model_sql = '''
                DELETE FROM bayesdb_generator_model WHERE generator_id = ?
            '''
            bdb.sql_execute(drop_model_sql, (generator_id,))
            drop_generator_sql = '''
                DELETE FROM bayesdb_generator WHERE id = ?
            '''
            bdb.sql_execute(drop_generator_sql, (generator_id,))
        return empty_cursor(bdb)

    if isinstance(phrase, ast.AlterGen):
        with bdb.savepoint():
            generator = phrase.generator
            if not core.bayesdb_has_generator(bdb, None, generator):
                raise BQLError(bdb, 'No such generator: %s' %
                    (repr(generator),))
            generator_id = core.bayesdb_get_generator(bdb, None, generator)
            cmds_generic = []
            for cmd in phrase.commands:
                if isinstance(cmd, ast.AlterGenRenameGen):
                    population_id = core.bayesdb_generator_population(
                        bdb, generator_id)
                    population = core.bayesdb_population_name(
                        bdb, population_id)
                    # Prevent renaming of implicit generator directly, unless
                    # being called by ast.AlterPopRenamePop in which case the
                    # population name and generator name will not be matching.
                    # (Check the *generator's* implicit flag -- the id here
                    # is a generator id, not a population id.)
                    if core.bayesdb_generator_is_implicit(bdb, generator_id) \
                            and casefold(generator) == casefold(population):
                        raise BQLError(bdb, 'Cannot rename implicit '
                            'generator; rename base population instead')
                    # Disable modelnos with AlterGenRenameGen.
                    if phrase.modelnos is not None:
                        raise BQLError(bdb, 'Cannot specify models for RENAME')
                    # Make sure nothing else has this name.
                    if casefold(generator) != casefold(cmd.name):
                        if core.bayesdb_has_generator(bdb, None, cmd.name):
                            raise BQLError(bdb, 'Name already defined'
                                ' as generator: %s' %
                                (repr(cmd.name),))
                    # Update bayesdb_generator. Everything else
                    # refers to it by id.
                    update_generator_sql = '''
                        UPDATE bayesdb_generator SET name = ? WHERE id = ?
                    '''
                    total_changes = bdb._sqlite3.totalchanges()
                    bdb.sql_execute(update_generator_sql,
                        (cmd.name, generator_id))
                    assert bdb._sqlite3.totalchanges() - total_changes == 1
                    # Remember the new name for subsequent commands.
                    generator = cmd.name
                elif isinstance(cmd, ast.AlterGenGeneric):
                    cmds_generic.append(cmd.command)
                else:
                    assert False, 'Invalid ALTER GENERATOR command: %s' % \
                        (repr(cmd),)
            if cmds_generic:
                modelnos = phrase.modelnos
                modelnos_invalid = None if modelnos is None else [
                    modelno for modelno in modelnos if not
                    core.bayesdb_generator_has_model(bdb, generator_id, modelno)
                ]
                if modelnos_invalid:
                    raise BQLError(bdb,
                        'No such models in generator %s: %s' %
                        (repr(phrase.generator), repr(modelnos)))
                # Call generic alternations on the backend.
                backend = core.bayesdb_generator_backend(bdb, generator_id)
                backend.alter(bdb, generator_id, modelnos, cmds_generic)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.InitModels):
        if not core.bayesdb_has_generator(bdb, None, phrase.generator):
            raise BQLError(bdb, 'No such generator: %s' %
                (phrase.generator,))
        generator_id = core.bayesdb_get_generator(bdb, None, phrase.generator)
        modelnos = range(phrase.nmodels)
        with bdb.savepoint():
            # Find the model numbers. Omit existing ones for
            # ifnotexists; reject existing ones otherwise.
            if phrase.ifnotexists:
                modelnos = set(modelno for modelno in modelnos
                    if not core.bayesdb_generator_has_model(bdb, generator_id,
                        modelno))
            else:
                existing = set(modelno for modelno in modelnos
                    if core.bayesdb_generator_has_model(bdb, generator_id,
                        modelno))
                if 0 < len(existing):
                    raise BQLError(bdb, 'Generator %s already has models: %s' %
                        (repr(phrase.generator), sorted(existing)))
            # Stop now if there's nothing to initialize.
            if len(modelnos) == 0:
                # Return an empty cursor like every other command path.
                return empty_cursor(bdb)
            # Create the bayesdb_generator_model records.
            modelnos = sorted(modelnos)
            insert_model_sql = '''
                INSERT INTO bayesdb_generator_model
                    (generator_id, modelno)
                    VALUES (:generator_id, :modelno)
            '''
            for modelno in modelnos:
                bdb.sql_execute(insert_model_sql, {
                    'generator_id': generator_id,
                    'modelno': modelno,
                })
            # Do backend-specific initialization.
            backend = core.bayesdb_generator_backend(bdb, generator_id)
            backend.initialize_models(bdb, generator_id, modelnos)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.AnalyzeModels):
        # WARNING: It is the backend's responsibility to work in a
        # transaction.
        #
        # WARNING: It is the backend's responsibility to update the
        # iteration count in bayesdb_generator_model records.
        #
        # We do this so that the backend can save incremental
        # progress in case of ^C in the middle.
        #
        # XXX Put these warning somewhere more appropriate.
        if not core.bayesdb_has_generator(bdb, None, phrase.generator):
            raise BQLError(bdb, 'No such generator: %s' %
                (phrase.generator,))
        generator_id = core.bayesdb_get_generator(bdb, None, phrase.generator)
        backend = core.bayesdb_generator_backend(bdb, generator_id)
        # XXX Should allow parameters for iterations and ckpt/iter.
        backend.analyze_models(bdb, generator_id,
            modelnos=phrase.modelnos,
            iterations=phrase.iterations,
            max_seconds=phrase.seconds,
            ckpt_iterations=phrase.ckpt_iterations,
            ckpt_seconds=phrase.ckpt_seconds,
            program=phrase.program)
        return empty_cursor(bdb)

    if isinstance(phrase, ast.DropModels):
        with bdb.savepoint():
            generator_id = core.bayesdb_get_generator(
                bdb, None, phrase.generator)
            backend = core.bayesdb_generator_backend(bdb, generator_id)
            modelnos = None
            if phrase.modelnos is not None:
                lookup_model_sql = '''
                    SELECT COUNT(*) FROM bayesdb_generator_model
                        WHERE generator_id = :generator_id
                        AND modelno = :modelno
                '''
                modelnos = sorted(list(phrase.modelnos))
                for modelno in modelnos:
                    cursor = bdb.sql_execute(lookup_model_sql, {
                        'generator_id': generator_id,
                        'modelno': modelno,
                    })
                    if cursor_value(cursor) == 0:
                        raise BQLError(bdb, 'No such model'
                            ' in generator %s: %s' %
                            (repr(phrase.generator), repr(modelno)))
            backend.drop_models(bdb, generator_id, modelnos=modelnos)
            if modelnos is None:
                drop_models_sql = '''
                    DELETE FROM bayesdb_generator_model WHERE generator_id = ?
                '''
                bdb.sql_execute(drop_models_sql, (generator_id,))
            else:
                drop_model_sql = '''
                    DELETE FROM bayesdb_generator_model
                        WHERE generator_id = :generator_id
                        AND modelno = :modelno
                '''
                for modelno in modelnos:
                    bdb.sql_execute(drop_model_sql, {
                        'generator_id': generator_id,
                        'modelno': modelno,
                    })
        return empty_cursor(bdb)

    if isinstance(phrase, ast.Regress):
        # Retrieve the population.
        if not core.bayesdb_has_population(bdb, phrase.population):
            raise BQLError(bdb, 'No such population: %r' % (phrase.population,))
        population_id = core.bayesdb_get_population(bdb, phrase.population)
        # Retrieve the generator
        generator_id = None
        if phrase.generator:
            if not core.bayesdb_has_generator(bdb, population_id,
                    phrase.generator):
                raise BQLError(bdb,
                    'No such generator: %r' % (phrase.generator,))
            generator_id = core.bayesdb_get_generator(
                bdb, population_id, phrase.generator)
        # Retrieve the target variable.
        if not core.bayesdb_has_variable(
                bdb, population_id, None, phrase.target):
            raise BQLError(bdb, 'No such variable: %r' % (phrase.target,))
        colno_target = core.bayesdb_variable_number(
            bdb, population_id, None, phrase.target)
        stattype = core.bayesdb_variable_stattype(bdb, population_id,
            generator_id, colno_target)
        if stattype != 'numerical':
            raise BQLError(bdb,
                'Target variable is not numerical: %r' % (phrase.target,))
        # Build the given variables.
        if any(isinstance(col, ast.SelColAll) for col in phrase.givens):
            # Using * is not allowed to be mixed with other variables.
            if len(phrase.givens) > 1:
                raise BQLError(bdb, 'Cannot use (*) with other givens.')
            colno_givens = core.bayesdb_variable_numbers(
                bdb, population_id, None)
        else:
            if any(isinstance(col, ast.SelColSub) for col in phrase.givens):
                # Subexpression needs special compiling.
                out = compiler.Output(n_numpar, nampar_map, bindings)
                bql_compiler = compiler.BQLCompiler_None()
                givens = compiler.expand_select_columns(
                    bdb, phrase.givens, True, bql_compiler, out)
            else:
                givens = phrase.givens
            colno_givens = [
                core.bayesdb_variable_number(
                    bdb, population_id, None, given.expression.column)
                for given in givens
            ]
        # Build the arguments to bqlfn.bayesdb_simulate.
        colno_givens_unique = set(
            colno for colno in colno_givens if colno != colno_target
        )
        if len(colno_givens_unique) == 0:
            raise BQLError(bdb, 'No matching given columns.')
        constraints = []
        colnos = [colno_target] + list(colno_givens_unique)
        nsamp = 100 if phrase.nsamp is None else phrase.nsamp.value.value
        modelnos = None if phrase.modelnos is None else str(phrase.modelnos)
        rows = bqlfn.bayesdb_simulate(
            bdb, population_id, generator_id, modelnos, constraints,
            colnos, numpredictions=nsamp)
        # Retrieve the stattypes.
        stattypes = [
            core.bayesdb_variable_stattype(
                bdb, population_id, generator_id, colno_given)
            for colno_given in colno_givens_unique
        ]
        # Separate the target values from the given values.
        target_values = [row[0] for row in rows]
        given_values = [row[1:] for row in rows]
        given_names = [
            core.bayesdb_variable_name(bdb, population_id, generator_id, given)
            for given in colno_givens_unique
        ]
        # Compute the coefficients. The import to regress_ols is here since the
        # feature depends on pandas + sklearn, so avoid module-wide import.
        from bayeslite.regress import regress_ols
        coefficients = regress_ols(
            target_values, given_values, given_names, stattypes)
        # Store the results in a winder.
        temptable = bdb.temp_table_name()
        qtt = sqlite3_quote_name(temptable)
        out = compiler.Output(0, {}, {})
        out.winder('''
            CREATE TEMP TABLE %s (variable TEXT, coefficient REAL);
        ''' % (qtt,), ())
        for variable, coef in coefficients:
            out.winder('''
                INSERT INTO %s VALUES (?, ?)
            ''' % (qtt), (variable, coef,))
        out.write('SELECT * FROM %s ORDER BY variable' % (qtt,))
        out.unwinder('DROP TABLE %s' % (qtt,), ())
        winders, unwinders = out.getwindings()
        return execute_wound(
            bdb, winders, unwinders, out.getvalue(), out.getbindings())

    assert False  # XXX Unknown phrase type -- grammar/dispatch mismatch.
def _create_population(bdb, phrase):
    """Create the population described by the CREATE POPULATION `phrase`.

    Inserts the bayesdb_population record, resolves the MODEL / IGNORE /
    GUESS strategy for every column of the base table, validates names
    and statistical types, and inserts a bayesdb_variable record for
    each modeled variable.  Raises BQLError on any validation failure.
    Caller is expected to hold a savepoint.
    """
    # Retrieve the (possibly implicit) population name.
    population_name = phrase.name or phrase.table
    implicit = 1 if phrase.name is None else 0
    # Handle IF NOT EXISTS.
    if core.bayesdb_has_population(bdb, population_name):
        if phrase.ifnotexists:
            return
        else:
            raise BQLError(bdb, 'Name already defined as population: %r' %
                (population_name,))
    # Make sure the bayesdb_column table knows all the columns of the
    # underlying table.
    core.bayesdb_table_guarantee_columns(bdb, phrase.table)
    # Retrieve all columns from the base table. The user is required to provide
    # a strategy for each single variable, either MODEL, IGNORE, or GUESS.
    base_table_columns = core.bayesdb_table_column_names(bdb, phrase.table)
    # Create the population record and get the assigned id.
    bdb.sql_execute('''
        INSERT INTO bayesdb_population (name, tabname, implicit)
            VALUES (?, ?, ?)
    ''', (population_name, phrase.table, implicit))
    population_id = core.bayesdb_get_population(bdb, population_name)
    # Extract the population column names and stattypes as pairs.
    pop_model_vars = list(itertools.chain.from_iterable(
        [[(name, s.stattype) for name in s.names]
        for s in phrase.schema if isinstance(s, ast.PopModelVars)]))
    # Extract the ignored columns.
    pop_ignore_vars = list(itertools.chain.from_iterable(
        [[(name, 'ignore') for name in s.names]
        for s in phrase.schema if isinstance(s, ast.PopIgnoreVars)]))
    # Extract the columns to guess.
    pop_guess = list(itertools.chain.from_iterable(
        [s.names for s in phrase.schema if isinstance(s, ast.PopGuessVars)]))
    if '*' in pop_guess:
        # Do not allow * to coincide with other variables.
        if len(pop_guess) > 1:
            raise BQLError(
                bdb, 'Cannot use wildcard GUESS with variables names: %r'
                % (pop_guess, ))
        # Retrieve all variables in the base table.
        avoid = set(casefold(t[0]) for t in pop_model_vars + pop_ignore_vars)
        pop_guess = [t for t in base_table_columns if casefold(t) not in avoid]
    # Perform the guessing.
    if pop_guess:
        qt = sqlite3_quote_name(phrase.table)
        qcns = ','.join(map(sqlite3_quote_name, pop_guess))
        cursor = bdb.sql_execute('SELECT %s FROM %s' % (qcns, qt))
        rows = cursor.fetchall()
        # XXX This function returns a stattype called `key`, which we will add
        # to the pop_ignore_vars.
        pop_guess_stattypes = bayesdb_guess_stattypes(pop_guess, rows)
        # NOTE(review): relies on Python 2 zip returning a list (list
        # methods used below).
        pop_guess_vars = zip(pop_guess, [st[0] for st in pop_guess_stattypes])
        migrate = [(col, st) for col, st in pop_guess_vars if st == 'key']
        for col, st in migrate:
            pop_guess_vars.remove((col, st))
            pop_ignore_vars.append((col, 'ignore'))
    else:
        pop_guess_vars = []
    # Ensure no string-valued variables are being modeled as numerical.
    numerical_string_vars = [
        var for var, stattype in pop_model_vars
        if stattype == 'numerical'
            and _column_contains_string(bdb, phrase.table, var)
    ]
    if numerical_string_vars:
        raise BQLError(bdb,
            'Column(s) with string values modeled as numerical: %r'
            % (numerical_string_vars, ))
    # Pool all the variables and statistical types together.
    pop_all_vars = pop_model_vars + pop_ignore_vars + pop_guess_vars
    # Check that everyone in the population is modeled.
    # `known` contains all the variables for which a policy is known.
    known = [casefold(t[0]) for t in pop_all_vars]
    not_found = [t for t in base_table_columns if casefold(t) not in known]
    if not_found:
        raise BQLError(
            bdb, 'Cannot determine a modeling policy for variables: %r'
            % (not_found, ))
    # Check
    # - for duplicates,
    # - for nonexistent columns,
    # - for invalid statistical types.
    seen_variables = set()
    duplicates = set()
    missing = set()
    invalid = set()
    stattype_sql = '''
        SELECT COUNT(*) FROM bayesdb_stattype WHERE name = :stattype
    '''
    for nm, st in pop_all_vars:
        name = casefold(nm)
        stattype = casefold(st)
        if name in seen_variables:
            duplicates.add(name)
            continue
        if not core.bayesdb_table_has_column(bdb, phrase.table, nm):
            missing.add(name)
            continue
        cursor = bdb.sql_execute(stattype_sql, {'stattype': stattype})
        if cursor_value(cursor) == 0 and stattype != 'ignore':
            invalid.add(stattype)
            continue
        # Record the case-folded name: the membership probe above is
        # case-folded, so storing the raw `nm` would let names that
        # differ only in case slip past duplicate detection.
        seen_variables.add(name)
    # XXX Would be nice to report these simultaneously.
    if missing:
        raise BQLError(bdb, 'No such columns in table %r: %r' %
            (phrase.table, list(missing)))
    if duplicates:
        raise BQLError(bdb, 'Duplicate column names: %r' % (list(duplicates),))
    if invalid:
        raise BQLError(bdb, 'Invalid statistical types: %r' % (list(invalid),))
    # Insert variable records.
    for nm, st in pop_all_vars:
        name = casefold(nm)
        stattype = casefold(st)
        if stattype == 'ignore':
            continue
        core.bayesdb_add_variable(bdb, population_id, name, stattype)
def _column_contains_string(bdb, table, column):
    """Return True iff any value in `table`.`column` is a string."""
    qc = sqlite3_quote_name(column)
    qt = sqlite3_quote_name(table)
    for row in bdb.sql_execute('SELECT %s FROM %s' % (qc, qt)):
        if isinstance(row[0], unicode):
            return True
    return False
def rename_table(bdb, old, new):
    """Rename SQL table `old` to `new` and keep BayesDB metadata in sync.

    Preconditions (asserted): `old` exists and `new` does not.  Besides
    the `ALTER TABLE`, the `tabname` references in `bayesdb_column` and
    `bayesdb_population` are rewritten to point at the new name.
    """
    assert core.bayesdb_has_table(bdb, old)
    assert not core.bayesdb_has_table(bdb, new)
    quoted_old = sqlite3_quote_name(old)
    quoted_new = sqlite3_quote_name(new)
    # Rename the SQL table itself.
    bdb.sql_execute('ALTER TABLE %s RENAME TO %s' % (quoted_old, quoted_new))
    # Repoint bayesdb_column records at the new name.
    update_columns_sql = '''
        UPDATE bayesdb_column SET tabname = ? WHERE tabname = ?
    '''
    bdb.sql_execute(update_columns_sql, (new, old))
    # Repoint bayesdb_population records at the new name.
    update_populations_sql = '''
        UPDATE bayesdb_population SET tabname = ? WHERE tabname = ?
    '''
    bdb.sql_execute(update_populations_sql, (new, old))
def empty_cursor(bdb):
    """Return a placeholder "cursor" (None) for statements with no rows.

    `bdb` is accepted only for signature uniformity with the other
    cursor-producing helpers; it is intentionally unused.
    """
    return None
def execute_wound(bdb, winders, unwinders, sql, bindings):
    """Execute `sql`, bracketed by winder statements now and unwinder
    statements later.

    With no winders and no unwinders this degenerates to a plain
    `sql_execute`.  Otherwise the winders run first inside a savepoint,
    and the resulting cursor is wrapped in a `WoundCursor` that replays
    the unwinders when it is destroyed.  If executing `sql` itself
    fails, the unwinders run immediately before the error propagates.
    """
    if not winders and not unwinders:
        # Fast path: nothing to wind or unwind, so no savepoint needed.
        return bdb.sql_execute(sql, bindings)
    with bdb.savepoint():
        for winder_sql, winder_bindings in winders:
            bdb.sql_execute(winder_sql, winder_bindings)
        try:
            return WoundCursor(bdb, bdb.sql_execute(sql, bindings), unwinders)
        except:
            # Undo the winders' effects before letting the error escape.
            for unwinder_sql, unwinder_bindings in unwinders:
                bdb.sql_execute(unwinder_sql, unwinder_bindings)
            raise
class BayesDBCursor(object):
    """Cursor for a BQL or SQL query from a BayesDB.

    Wraps an underlying apsw cursor, delegating iteration and the
    fetch* methods to it while exposing DB-API-style conveniences
    (`description`, `connection`, `lastrowid`).
    """
    def __init__(self, bdb, cursor):
        # bdb: the owning BayesDB handle; cursor: the underlying apsw cursor.
        self._bdb = bdb
        self._cursor = cursor
        # XXX Must save the description early because apsw discards it
        # after we have iterated over all rows -- or if there are no
        # rows, discards it immediately!
        try:
            self._description = cursor.description
        except apsw.ExecutionCompleteError:
            # The statement produced no result rows at all; synthesize an
            # empty description instead of propagating apsw's error.
            self._description = []
        else:
            assert self._description is not None
            # Defensive fallback for `python -O`, where the assert above is
            # stripped: normalize a None description to an empty list.
            if self._description is None:
                self._description = []
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol; delegates directly to the apsw cursor.
        return self._cursor.next()
    def fetchone(self):
        # Return the next row, or None when exhausted.
        return self._cursor.fetchone()
    def fetchvalue(self):
        # Convenience: return the single scalar value of a one-row,
        # one-column result.
        return cursor_value(self)
    def fetchmany(self, size=1):
        # Wrap the bulk fetch in the BayesDB caching context so repeated
        # metadata lookups during the fetch are memoized.
        with txn.bayesdb_caching(self._bdb):
            return self._cursor.fetchmany(size=size)
    def fetchall(self):
        # Same caching treatment as fetchmany, for the full result set.
        with txn.bayesdb_caching(self._bdb):
            return self._cursor.fetchall()
    @property
    def connection(self):
        # DB-API-style alias for the owning BayesDB handle.
        return self._bdb
    @property
    def lastrowid(self):
        # Rowid of the most recent insert on the underlying connection.
        return self._bdb.last_insert_rowid()
    @property
    def description(self):
        # The column description captured eagerly in __init__ (see XXX note).
        return self._description
class WoundCursor(BayesDBCursor):
    """Cursor that replays `unwinders` (SQL cleanup statements) when it
    is garbage-collected, undoing the in-memory effects of the winders
    that `execute_wound` applied before the query ran."""
    def __init__(self, bdb, cursor, unwinders):
        # unwinders: list of (sql, bindings) pairs, executed in reverse
        # order at destruction time.
        self._unwinders = unwinders
        super(WoundCursor, self).__init__(bdb, cursor)
    def __del__(self):
        # Drop our reference to the underlying cursor first so it is
        # finalized before we run the unwinders.
        del self._cursor
        # If the database is still open, we need to undo the effects
        # of the cursor when done.  But the effects are (intended to
        # be) in-memory only, so otherwise, if the database is closed,
        # we need not do anything.
        #
        # XXX Name the question of whether it's closed a little less
        # kludgily.  (But that might encourage people outside to
        # depend on that, which is not such a great idea.)
        if self._bdb._sqlite3 is not None:
            # Reverse order: unwind in the opposite order of winding.
            for sql, bindings in reversed(self._unwinders):
                self._bdb.sql_execute(sql, bindings)
        # Apparently object doesn't have a __del__ method.
        #super(WoundCursor, self).__del__()
|
|
"""
A node and corresponding command to expose transform information about one
transform node with respect to another.
\b Creation \b Info:
\b Donations: http://adammechtley.com/donations/
\b License: The MIT License
Copyright (c) 2011 Adam Mechtley (http://adammechtley.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the 'Software'), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
\b Usage:
Add this file to your plug-in path and load it in Maya from the Plug-in
Manager (Window -> Settings/Preferences -> Plug-in Manager).
\namespace AM_ExposeTransform
"""
import math, sys, re
import maya.OpenMaya as OM
import maya.OpenMayaMPx as OMMPx
## current version of the plug-in
kVersionNumber = '1.01'
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
## a dictionary of possible rotation order values
# Maps both the mnemonic axis-order strings ('xyz'..'zyx') and their
# stringified enum indices ('0'..'5') to the corresponding
# MEulerRotation rotation-order constants, so command flags may supply
# either form (see AM_ExposeTransformCmd's -rotateOrder flag handling).
kRotateOrderMapping = {
    'xyz': OM.MEulerRotation.kXYZ,
    'yzx': OM.MEulerRotation.kYZX,
    'zxy': OM.MEulerRotation.kZXY,
    'xzy': OM.MEulerRotation.kXZY,
    'yxz': OM.MEulerRotation.kYXZ,
    'zyx': OM.MEulerRotation.kZYX,
    '0': OM.MEulerRotation.kXYZ,
    '1': OM.MEulerRotation.kYZX,
    '2': OM.MEulerRotation.kZXY,
    '3': OM.MEulerRotation.kXZY,
    '4': OM.MEulerRotation.kYXZ,
    '5': OM.MEulerRotation.kZYX
}
# -----------------------------------------------------------------------------
# Command Definition
# -----------------------------------------------------------------------------
class AM_ExposeTransformCmd(OMMPx.MPxCommand):
    """
    A command to quickly create, edit, or query an am_exposeTransform node.

    Modes:
        - Create: builds a new am_exposeTransform node wired to the
          selected (or -object) transform.
        - Edit (-e): modifies an existing am_exposeTransform node.
        - Query (-q): returns the current value of exactly one flag.
    """
    ## the name of the command
    kPluginCmdName = 'am_exposeTransform'
    ## specifies a name for the am_exposeTransform node (CE)
    kNameFlag = '-n'
    kNameFlagLong = '-name'
    ## specifies an exposed object (CEQ)
    kExposedObjectFlag = '-o'
    kExposedObjectFlagLong = '-object'
    ## specifies a reference object; otherwise the node exposes world-space info (CEQ)
    kReferenceObjectFlag = '-ref'
    kReferenceObjectFlagLong = '-referenceObject'
    ## specifies Euler rotation order that the node should use (CEQ)
    kRotateOrderFlag = '-ro'
    kRotateOrderFlagLong = '-rotateOrder'
    ## specifies whether incoming axes should be normalized for computation of dot products (CEQ)
    kNormalizeFlag = '-na'
    kNormalizeFlagLong = '-normalizeAxes'
    ## specifies an axis on the object for computing dot and angle (CEQ)
    kObjectAxisFlag = '-a'
    kObjectAxisFlagLong = '-axis'
    ## specifies an axis on the reference for computing dot and angle (CEQ)
    kReferenceAxisFlag = '-ra'
    kReferenceAxisFlagLong = '-referenceAxis'

    def __init__(self):
        OMMPx.MPxCommand.__init__(self)
        self.__isQueryUsed = True  # initialize to True so command is not added to queue if argument parsing fails
        self.__isEditUsed = False  # if the edit flag has been set, then undo will be enabled
        self.__exposeNodeArg = OM.MObject()  # the am_exposeTransform node selected for edit and query modes
        self.__exposeNodeFn = OM.MFnDependencyNode()
        self.__exposeNodeName = ''
        self.__exposedObjectArg = OM.MDagPath()  # the exposed object
        self.__referenceObjectArg = OM.MDagPath()  # the reference object
        self.__rotateOrderArg = kRotateOrderMapping['xyz']  # the Euler rotation order to use
        self.__normalizeArg = True  # should the incoming axes be normalized for computation of dot product?
        self.__objectAxisArg = OM.MVector(0, 0, 1)  # axis on the selected objects to use for dot and angle
        self.__referenceAxisArg = OM.MVector(0, 0, 1)  # axis on the reference object to use for dot and angle
        self.__dgModify = OM.MDGModifier()  # DG modifier used to create and modify nodes

    def doIt(self, args):
        """Parse the argument list, validate the target object, then
        dispatch to doItQuery() (query mode) or redoIt() (create/edit)."""
        # parse the arguments
        try:
            argData = OM.MArgDatabase(self.syntax(), args)  # if this fails, it will raise its own exception...
        except:
            pass  # ...so we can just pass here
        else:
            # read all of the arguments and store them to the appropriate data attributes
            # manually confirm the object list
            sel = OM.MSelectionList()
            argData.getObjects(sel)
            # ordinarily, the command is designed to only operate on a single object at a time
            if not sel.length() == 1:
                # if in create mode, use the argument specified with the -object flag as the exposed object
                if not argData.isEdit() and not argData.isQuery() and argData.isFlagSet(
                        AM_ExposeTransformCmd.kExposedObjectFlag):
                    sel.add(argData.flagArgumentString(AM_ExposeTransformCmd.kExposedObjectFlag, 0))
                    sel.getDagPath(sel.length() - 1, self.__exposedObjectArg)
                else:
                    raise Exception(
                        'This command requires exactly 1 argument to be specified or selected; found %i.' % sel.length())
            else:
                selectionIter = OM.MItSelectionList(sel, OM.MFn.kDependencyNode)
                selectedObject = OM.MObject()
                while not selectionIter.isDone():
                    # in edit or query mode, the object must be an am_exposeTransform node
                    if argData.isEdit() or argData.isQuery():
                        selectionIter.getDependNode(self.__exposeNodeArg)
                        # BUGFIX: bind the function set to the node before using
                        # typeId()/findPlug(); previously it was never attached
                        # in edit/query mode, so the type check (and every later
                        # findPlug call) operated on an unbound function set.
                        self.__exposeNodeFn.setObject(self.__exposeNodeArg)
                        if not (self.__exposeNodeFn.typeId() == AM_ExposeTransformNode.kPluginNodeId):
                            raise Exception('The provided dependency node %s is not of type %s.' % (
                                self.__exposeNodeFn.name(), AM_ExposeTransformNode.kPluginNodeTypeName))
                    # in create mode, only a transform node is accepted
                    else:
                        selectionIter.getDependNode(selectedObject)
                        if selectedObject.hasFn(OM.MFn.kTransform):
                            OM.MDagPath.getAPathTo(selectedObject, self.__exposedObjectArg)
                        else:
                            selectedObjectFn = OM.MFnDependencyNode(selectedObject)
                            raise Exception('%s is not a valid transform node.' % selectedObjectFn.name())
                    selectionIter.next()
            # perform the query
            if argData.isQuery():
                # in query mode the *Arg attributes are repurposed as booleans
                # recording which single flag is being queried
                self.__exposedObjectArg = argData.isFlagSet(AM_ExposeTransformCmd.kExposedObjectFlag)
                self.__referenceObjectArg = argData.isFlagSet(AM_ExposeTransformCmd.kReferenceObjectFlag)
                self.__rotateOrderArg = argData.isFlagSet(AM_ExposeTransformCmd.kRotateOrderFlag)
                self.__normalizeArg = argData.isFlagSet(AM_ExposeTransformCmd.kNormalizeFlag)
                self.__objectAxisArg = argData.isFlagSet(AM_ExposeTransformCmd.kObjectAxisFlag)
                self.__referenceAxisArg = argData.isFlagSet(AM_ExposeTransformCmd.kReferenceAxisFlag)
                self.doItQuery()
            # set up other arguments and call redoIt() for create or edit mode
            else:
                # validate the name flag
                if argData.isFlagSet(AM_ExposeTransformCmd.kNameFlag):
                    self.__exposeNodeName = argData.flagArgumentString(AM_ExposeTransformCmd.kNameFlag, 0)
                    if (len(self.__exposeNodeName) < 1 or self.__exposeNodeName[0].isalpha() is False or len(
                            re.split(r'\W+', self.__exposeNodeName)) > 1):
                        raise Exception(
                            '%s is not a valid node name. Did you type it correctly?' % self.__exposeNodeName)
                # manually specifying an object to expose using an argument will trump selection
                if argData.isFlagSet(AM_ExposeTransformCmd.kExposedObjectFlag):
                    sel = OM.MSelectionList()
                    sel.add(argData.flagArgumentString(AM_ExposeTransformCmd.kExposedObjectFlag, 0))
                    sel.getDagPath(0, self.__exposedObjectArg)
                if argData.isFlagSet(AM_ExposeTransformCmd.kReferenceObjectFlag):
                    sel = OM.MSelectionList()
                    sel.add(argData.flagArgumentString(AM_ExposeTransformCmd.kReferenceObjectFlag, 0))
                    sel.getDagPath(0, self.__referenceObjectArg)
                if argData.isFlagSet(AM_ExposeTransformCmd.kRotateOrderFlag):
                    rotateOrderStr = argData.flagArgumentString(AM_ExposeTransformCmd.kRotateOrderFlag, 0)
                    try:
                        self.__rotateOrderArg = kRotateOrderMapping[rotateOrderStr.lower()]
                    except KeyError:
                        # BUGFIX: the old code indexed kRotateOrderMapping with
                        # the current enum value (an int) -- every key is a
                        # string, so the warning itself raised KeyError.  Do a
                        # reverse lookup to report the retained order by name.
                        fallbackName = next(
                            (name for name, order in kRotateOrderMapping.items()
                             if order == self.__rotateOrderArg and not name.isdigit()),
                            str(self.__rotateOrderArg))
                        OM.MGlobal.displayWarning('%s is not a valid rotate order. %s is being used instead.' % (
                            rotateOrderStr, fallbackName))
                else:
                    self.__rotateOrderArg = None
                if argData.isFlagSet(AM_ExposeTransformCmd.kNormalizeFlag):
                    self.__normalizeArg = argData.flagArgumentBool(AM_ExposeTransformCmd.kNormalizeFlag, 0)
                else:
                    self.__normalizeArg = None
                if argData.isFlagSet(AM_ExposeTransformCmd.kObjectAxisFlag):
                    self.__objectAxisArg = OM.MVector(
                        argData.flagArgumentDouble(AM_ExposeTransformCmd.kObjectAxisFlag, 0),
                        argData.flagArgumentDouble(AM_ExposeTransformCmd.kObjectAxisFlag, 1),
                        argData.flagArgumentDouble(AM_ExposeTransformCmd.kObjectAxisFlag, 2))
                if argData.isFlagSet(AM_ExposeTransformCmd.kReferenceAxisFlag):
                    self.__referenceAxisArg = OM.MVector(
                        argData.flagArgumentDouble(AM_ExposeTransformCmd.kReferenceAxisFlag, 0),
                        argData.flagArgumentDouble(AM_ExposeTransformCmd.kReferenceAxisFlag, 1),
                        argData.flagArgumentDouble(AM_ExposeTransformCmd.kReferenceAxisFlag, 2))
                # set the isEditUsed flag only after all arguments have been stored to ensure command is not added to queue before it has done anything
                self.__isEditUsed = argData.isEdit()
                self.__isQueryUsed = False
                self.redoIt()

    def doItQuery(self):
        """Set the command result for the single queried flag.

        Query mode typically only supports one argument at a time; this
        principle ensures that the result can be stored in a variable.
        """
        path = OM.MDagPath()
        doubleArray = OM.MScriptUtil()
        if self.__exposedObjectArg:
            # walk upstream from the object-matrix plug to find the connected transform
            plug = OM.MPlug(self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kObjectMatrixAttrName, True))
            dgIter = OM.MItDependencyGraph(plug, OM.MFn.kTransform, OM.MItDependencyGraph.kUpstream)
            while not dgIter.isDone():
                OM.MDagPath.getAPathTo(dgIter.currentItem(), path)
                dgIter.next()
            self.setResult(path.partialPathName())
        elif self.__referenceObjectArg:
            plug = OM.MPlug(self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kReferenceMatrixAttrName, True))
            dgIter = OM.MItDependencyGraph(plug, OM.MFn.kTransform, OM.MItDependencyGraph.kUpstream)
            while not dgIter.isDone():
                OM.MDagPath.getAPathTo(dgIter.currentItem(), path)
                dgIter.next()
            self.setResult(path.partialPathName())
        elif self.__rotateOrderArg:
            self.setResult(
                OM.MPlug(self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kRotateOrderAttrName, True)).asInt())
        elif self.__normalizeArg:
            self.setResult(
                OM.MPlug(self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kNormalizeAttrName, True)).asBool())
        elif self.__objectAxisArg:
            doubleArray.createFromDouble(
                OM.MPlug(
                    self.__exposeNodeFn.findPlug('%s0' % AM_ExposeTransformNode.kObjectAxisAttrName, True)).asDouble(),
                OM.MPlug(
                    self.__exposeNodeFn.findPlug('%s1' % AM_ExposeTransformNode.kObjectAxisAttrName, True)).asDouble(),
                OM.MPlug(
                    self.__exposeNodeFn.findPlug('%s2' % AM_ExposeTransformNode.kObjectAxisAttrName, True)).asDouble())
            self.setResult(OM.MDoubleArray(doubleArray.asDoublePtr(), 3))
        elif self.__referenceAxisArg:
            doubleArray.createFromDouble(
                OM.MPlug(self.__exposeNodeFn.findPlug('%s0' % AM_ExposeTransformNode.kReferenceAxisAttrName,
                                                      True)).asDouble(),
                OM.MPlug(self.__exposeNodeFn.findPlug('%s1' % AM_ExposeTransformNode.kReferenceAxisAttrName,
                                                      True)).asDouble(),
                OM.MPlug(self.__exposeNodeFn.findPlug('%s2' % AM_ExposeTransformNode.kReferenceAxisAttrName,
                                                      True)).asDouble())
            self.setResult(OM.MDoubleArray(doubleArray.asDoublePtr(), 3))

    def redoIt(self):
        """Create the node (create mode) and/or apply the stored flag
        values to it via an MDGModifier so the edit is undoable."""
        # clear out the modifier so it doesn't accumulate old object names
        self.__dgModify = OM.MDGModifier()
        # create a new node if the command is in create mode
        if not self.__isEditUsed:
            self.__exposeNodeArg = OM.MObject(self.__dgModify.createNode(AM_ExposeTransformNode.kPluginNodeId))
            self.__exposeNodeFn.setObject(self.__exposeNodeArg)
            fn = OM.MFnDagNode(self.__exposedObjectArg)
            self.__dgModify.renameNode(self.__exposeNodeArg,
                                       '%s_exposeTransform' % fn.name())  # use fn.name() instead of partialPathName(), as the latter may contain invalid characters
        # assign the -name argument if provided
        if len(self.__exposeNodeName) > 0:
            self.__dgModify.renameNode(self.__exposeNodeArg, self.__exposeNodeName)
        # WARNING: must tell the DGModifier to doIt() now in order to let Maya's auto-rename kick in and ensure the name is unique
        # otherwise attempts to use commandToExecute below may end up using some other object
        self.__dgModify.doIt()
        # set the attributes on the node
        plug = OM.MPlug()
        if self.__exposedObjectArg.isValid():
            # connect the exposed object's worldMatrix attribute
            self.__dgModify.commandToExecute('connectAttr -f %s.worldMatrix %s.%s' % (
                self.__exposedObjectArg.fullPathName(), self.__exposeNodeFn.name(),
                AM_ExposeTransformNode.kObjectMatrixAttrName))
        if self.__referenceObjectArg.isValid():
            # connect the reference object's worldMatrix attribute
            self.__dgModify.commandToExecute('connectAttr -f %s.worldMatrix %s.%s' % (
                self.__referenceObjectArg.fullPathName(), self.__exposeNodeFn.name(),
                AM_ExposeTransformNode.kReferenceMatrixAttrName))
        if self.__rotateOrderArg is not None:
            # set the rotateOrder attribute if it is not connected
            plug = self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kRotateOrderAttrName, True)
            if not plug.isConnected():
                self.__dgModify.commandToExecute('setAttr %s.%s %i' % (
                    self.__exposeNodeFn.name(), AM_ExposeTransformNode.kRotateOrderAttrName,
                    self.__rotateOrderArg))
        if self.__normalizeArg is not None:
            # set the normalize attribute if it is not connected
            plug = self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kNormalizeAttrName, True)
            if not plug.isConnected():
                self.__dgModify.commandToExecute('setAttr %s.%s %s' % (
                    self.__exposeNodeFn.name(), AM_ExposeTransformNode.kNormalizeAttrName,
                    str(self.__normalizeArg).lower()))
        if self.__objectAxisArg:
            # set the objectAxis attribute if it is not connected
            plug = self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kObjectAxisAttrName, True)
            if not plug.isConnected():
                self.__dgModify.commandToExecute('setAttr %s.%s %f %f %f' % (
                    self.__exposeNodeFn.name(), AM_ExposeTransformNode.kObjectAxisAttrName,
                    self.__objectAxisArg.x, self.__objectAxisArg.y, self.__objectAxisArg.z))
        if self.__referenceAxisArg:
            # set the refAxis attribute if it is not connected
            plug = self.__exposeNodeFn.findPlug(AM_ExposeTransformNode.kReferenceAxisAttrName, True)
            if not plug.isConnected():
                self.__dgModify.commandToExecute('setAttr %s.%s %f %f %f' % (
                    self.__exposeNodeFn.name(), AM_ExposeTransformNode.kReferenceAxisAttrName,
                    self.__referenceAxisArg.x, self.__referenceAxisArg.y, self.__referenceAxisArg.z))
        # following Maya convention, select the newly created node if the command is in create mode
        if not self.__isEditUsed:
            self.__dgModify.commandToExecute('select %s' % self.__exposeNodeFn.name())
        self.__dgModify.doIt()
        self.setResult(self.__exposeNodeFn.name())

    def undoIt(self):
        """Revert all modifications performed by redoIt()."""
        self.__dgModify.undoIt()

    def isUndoable(self):
        # the command should only be undoable if edit or create mode was used
        return not self.__isQueryUsed

    @classmethod
    def cmdCreator(cls):
        """Factory required by Maya's registerCommand."""
        return OMMPx.asMPxPtr(cls())

    @classmethod
    def syntaxCreator(cls):
        """Build the MSyntax describing this command's flags and object list."""
        syntax = OM.MSyntax()
        syntax.enableQuery()  # BUG: including these modes has benefits, but it also breaks built-in object parsing
        syntax.enableEdit()
        syntax.addFlag(cls.kNameFlag, cls.kNameFlagLong, OM.MSyntax.kString)
        syntax.addFlag(cls.kExposedObjectFlag, cls.kExposedObjectFlagLong, OM.MSyntax.kSelectionItem)
        syntax.addFlag(cls.kReferenceObjectFlag, cls.kReferenceObjectFlagLong, OM.MSyntax.kSelectionItem)
        syntax.addFlag(cls.kRotateOrderFlag, cls.kRotateOrderFlagLong, OM.MSyntax.kString)
        syntax.addFlag(cls.kNormalizeFlag, cls.kNormalizeFlagLong, OM.MSyntax.kBoolean)
        syntax.addFlag(cls.kObjectAxisFlag, cls.kObjectAxisFlagLong, OM.MSyntax.kDouble, OM.MSyntax.kDouble,
                       OM.MSyntax.kDouble)
        syntax.addFlag(cls.kReferenceAxisFlag, cls.kReferenceAxisFlagLong, OM.MSyntax.kDouble, OM.MSyntax.kDouble,
                       OM.MSyntax.kDouble)
        syntax.useSelectionAsDefault(True)
        syntax.setObjectType(OM.MSyntax.kSelectionList)
        return syntax
# -----------------------------------------------------------------------------
# Node Definition
# -----------------------------------------------------------------------------
class AM_ExposeTransformNode(OMMPx.MPxNode):
    """
    A node to expose transform information about one transform node with
    respect to another.
    \par Input Attributes:
        - \em object: The worldMatrix attribute of the object being exposed.
        - \em reference: The worldMatrix attribute of the reference object.
        - \em rotateOrder: The rotation order for Euler output.
        - \em axis: An axis on the exposed object to use for dot and angle.
        - \em refAxis: An axis on the reference to use for dot and angle.
    \par Output Attributes:
        - \em position: Position of the exposed object relative to the
            reference.
        - \em distance: Distance between the exposed and reference objects.
        - \em rotation: Euler rotation of the exposed object in the space of
            the reference.
        - \em dot: Dot-product of the specified axes on the exposed object and
            the reference.
        - \em angle: Angle between the specified axes on the exposed object and
            the reference.
        - \em dotToTarget: Dot-product of the specified axis on the exposed
            object and the direction to the reference.
        - \em angleToTarget: Angle between the specified axis on the exposed
            object and the direction to the reference.
    """
    ## the name of the nodeType
    kPluginNodeTypeName = 'am_exposeTransform'
    ## the unique MTypeId for the node
    kPluginNodeId = OM.MTypeId(0x001138C2)
    # input attributes
    ## rotation order for Euler output
    rotateOrder = OM.MObject()
    kRotateOrderAttrName = 'rotateOrder'
    kRotateOrderAttrLongName = 'eulerOutputRotateOrder'
    ## should incoming axes be normalized for computing dot products?
    normalize = OM.MObject()
    kNormalizeAttrName = 'normalize'
    # NOTE(review): 'nomalizeAxes' is a typo for 'normalizeAxes', but fixing
    # it would rename the attribute and break scenes saved with this plug-in,
    # so it is left as-is and only flagged here.
    kNormalizeAttrLongName = 'nomalizeAxes'
    ## axis on the object for computing dot product and angle
    objectAxis = OM.MObject()
    kObjectAxisAttrName = 'axis'
    kObjectAxisAttrLongName = 'objectAxis'
    ## axis on the reference object for computing dot product and angle
    referenceAxis = OM.MObject()
    kReferenceAxisAttrName = 'refAxis'
    kReferenceAxisAttrLongName = 'referenceAxis'
    ## worldMatrix of the object
    objectMatrix = OM.MObject()
    kObjectMatrixAttrName = 'object'
    kObjectMatrixAttrLongName = 'objectWorldMatrix'
    ## worldMatrix of the reference object
    referenceMatrix = OM.MObject()
    kReferenceMatrixAttrName = 'reference'
    kReferenceMatrixAttrLongName = 'referenceWorldMatrix'
    # output attributes
    ## position of the object with respect to the reference
    position = OM.MObject()
    kPositionAttrName = 'position'
    kPositionAttrLongName = 'position'
    ## distance between the two objects
    distance = OM.MObject()
    kDistanceAttrName = 'distance'
    kDistanceAttrLongName = 'distance'
    ## rotation of the object with respect to the reference
    rotation = OM.MObject()
    kRotationAttrName = 'rotation'
    kRotationAttrLongName = 'eulerRotation'
    ## dot product of specified axes on object and reference
    dot = OM.MObject()
    kDotAttrName = 'dotProduct'
    kDotAttrLongName = 'dot'
    ## angle between specified axes on object and reference
    angle = OM.MObject()
    kAngleAttrName = 'angle'
    kAngleAttrLongName = 'angle'
    ## dot product of specified axes on object and direction to reference
    dotToTarget = OM.MObject()
    kDotToTargetAttrName = 'dotTo'
    kDotToTargetAttrLongName = 'dotToTarget'
    ## angle between specified axes on object and direction to reference
    angleToTarget = OM.MObject()
    # NOTE(review): the short/long names below appear swapped relative to the
    # dotToTarget convention ('dotTo'/'dotToTarget'); changing them now would
    # break saved scenes, so they are left as-is and only flagged here.
    kAngleToTargetAttrName = 'angleToTarget'
    kAngleToTargetAttrLongName = 'angleTo'

    def __init__(self):
        OMMPx.MPxNode.__init__(self)

    def compute(self, plug, dataBlock):
        """Compute an exposed object's transformations with respect to a reference object."""
        # Recompute when any of this node's outputs (or their children) is
        # requested; otherwise defer to Maya with kUnknownParameter.
        if (plug == AM_ExposeTransformNode.position or
                (
                        plug.isChild() and plug.parent() == AM_ExposeTransformNode.position) or  # WARNING: without this, position always initializes to 0, 0, 0 when connection is made
                plug == AM_ExposeTransformNode.distance or
                plug == AM_ExposeTransformNode.rotation or
                (
                        plug.isChild() and plug.parent() == AM_ExposeTransformNode.rotation) or  # WARNING: without this, setting rotateOrder attribute manually won't push a compute()
                plug == AM_ExposeTransformNode.dot or
                plug == AM_ExposeTransformNode.angle or
                plug == AM_ExposeTransformNode.dotToTarget or
                plug == AM_ExposeTransformNode.angleToTarget):
            # get the incoming data
            # rotation order for Euler output
            dataHandle = OM.MDataHandle(dataBlock.inputValue(AM_ExposeTransformNode.rotateOrder))
            eRotateOrder = dataHandle.asShort()
            # should the incoming axes be normalized for computing dot products?
            dataHandle = OM.MDataHandle(dataBlock.inputValue(AM_ExposeTransformNode.normalize))
            bNormalizeInputAxes = dataHandle.asBool()
            # axis on the object for computing dot product and angle
            dataHandle = OM.MDataHandle(dataBlock.inputValue(AM_ExposeTransformNode.objectAxis))
            vObjectAxis = OM.MVector(dataHandle.asVector())
            if bNormalizeInputAxes:
                vObjectAxis.normalize()
            # axis on the reference object for computing dot product and angle
            dataHandle = OM.MDataHandle(dataBlock.inputValue(AM_ExposeTransformNode.referenceAxis))
            vReferenceAxis = OM.MVector(dataHandle.asVector())
            if bNormalizeInputAxes:
                vReferenceAxis.normalize()
            # worldMatrix of the object
            dataHandle = OM.MDataHandle(dataBlock.inputValue(AM_ExposeTransformNode.objectMatrix))
            mObjectMatrix = OM.MMatrix(dataHandle.asMatrix())
            # worldMatrix of the reference
            dataHandle = OM.MDataHandle(dataBlock.inputValue(AM_ExposeTransformNode.referenceMatrix))
            mReferenceMatrix = OM.MMatrix(dataHandle.asMatrix())
            # compute the output values
            # object's transform expressed in the reference's space
            mOutputMatrix = OM.MTransformationMatrix(mObjectMatrix * mReferenceMatrix.inverse())
            vOutPosition = OM.MVector(mOutputMatrix.getTranslation(OM.MSpace.kTransform))
            vOutRotation = OM.MEulerRotation(mOutputMatrix.eulerRotation().reorder(eRotateOrder))
            vObjectAxis *= mObjectMatrix  # rotate objectAxis into world space
            vReferenceAxis *= mReferenceMatrix  # rotate referenceAxis into world space
            vToTarget = OM.MVector(
                mReferenceMatrix(3, 0) - mObjectMatrix(3, 0),
                mReferenceMatrix(3, 1) - mObjectMatrix(3, 1),
                mReferenceMatrix(3, 2) - mObjectMatrix(3,
                                                       2))  # the vector from the object's position to the reference object's position
            if bNormalizeInputAxes:
                # re-normalize after the matrix multiplications, which may scale
                vObjectAxis.normalize()
                vReferenceAxis.normalize()
                vToTarget.normalize()
            # set the outgoing plugs
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.position)
            outputHandle.set3Double(vOutPosition.x, vOutPosition.y, vOutPosition.z)
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.distance)
            outputHandle.setDouble(vOutPosition.length())
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.rotation)
            outputHandle.set3Double(math.degrees(vOutRotation.x), math.degrees(vOutRotation.y),
                                    math.degrees(vOutRotation.z))
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.dot)
            outputHandle.setDouble(vObjectAxis * vReferenceAxis)
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.angle)
            outputHandle.setDouble(math.degrees(vObjectAxis.angle(vReferenceAxis)))
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.dotToTarget)
            outputHandle.setDouble(vObjectAxis * vToTarget)
            outputHandle = dataBlock.outputValue(AM_ExposeTransformNode.angleToTarget)
            outputHandle.setDouble(math.degrees(vObjectAxis.angle(vToTarget)))
            dataBlock.setClean(plug)
        else:
            return OM.kUnknownParameter

    # -----------------------------------------------------------------------------
    # Node Creator
    # -----------------------------------------------------------------------------
    @classmethod
    def nodeCreator(cls):
        # factory required by Maya's registerNode
        return OMMPx.asMPxPtr(cls())

    # -----------------------------------------------------------------------------
    # Node Initializer
    # -----------------------------------------------------------------------------
    @classmethod
    def nodeInitializer(cls):
        # Creates all attributes, adds them to the node type, and declares
        # which inputs affect which outputs.
        # input attributes
        # rotation order for Euler output
        eAttr = OM.MFnEnumAttribute()
        cls.rotateOrder = eAttr.create(cls.kRotateOrderAttrLongName, cls.kRotateOrderAttrName, 0)
        field0 = 'xyz'
        field1 = 'yzx'
        field2 = 'zxy'
        field3 = 'xzy'
        field4 = 'yxz'
        field5 = 'zyx'
        eAttr.addField(field0, kRotateOrderMapping[field0.lower()])
        eAttr.addField(field1, kRotateOrderMapping[field1.lower()])
        eAttr.addField(field2, kRotateOrderMapping[field2.lower()])
        eAttr.addField(field3, kRotateOrderMapping[field3.lower()])
        eAttr.addField(field4, kRotateOrderMapping[field4.lower()])
        eAttr.addField(field5, kRotateOrderMapping[field5.lower()])
        eAttr.setWritable(True)
        eAttr.setStorable(True)
        eAttr.setReadable(True)
        eAttr.setKeyable(True)
        # should incoming axes be normalized for computing dot products?
        nAttr = OM.MFnNumericAttribute()
        cls.normalize = nAttr.create(cls.kNormalizeAttrLongName, cls.kNormalizeAttrName, OM.MFnNumericData.kBoolean,
                                     True)
        nAttr.setWritable(True)
        nAttr.setStorable(True)
        nAttr.setReadable(True)
        nAttr.setKeyable(True)
        # axis on the object for computing dot product and angle
        cls.objectAxis = nAttr.create(cls.kObjectAxisAttrLongName, cls.kObjectAxisAttrName, OM.MFnNumericData.k3Double)
        nAttr.setWritable(True)
        nAttr.setStorable(True)
        nAttr.setReadable(True)
        nAttr.setKeyable(True)
        # axis on the reference object for computing dot product and angle
        cls.referenceAxis = nAttr.create(cls.kReferenceAxisAttrLongName, cls.kReferenceAxisAttrName,
                                         OM.MFnNumericData.k3Double)
        nAttr.setWritable(True)
        nAttr.setStorable(True)
        nAttr.setReadable(True)
        nAttr.setKeyable(True)
        # worldMatrix of the object
        mAttr = OM.MFnMatrixAttribute()
        cls.objectMatrix = mAttr.create(cls.kObjectMatrixAttrLongName, cls.kObjectMatrixAttrName,
                                        OM.MFnMatrixAttribute.kDouble)
        mAttr.setWritable(True)
        mAttr.setStorable(True)
        mAttr.setReadable(True)
        mAttr.setKeyable(False)
        mAttr.setHidden(False)
        # worldMatrix of the reference object
        cls.referenceMatrix = mAttr.create(cls.kReferenceMatrixAttrLongName, cls.kReferenceMatrixAttrName,
                                           OM.MFnMatrixAttribute.kDouble)
        mAttr.setWritable(True)
        mAttr.setStorable(True)
        mAttr.setReadable(True)
        mAttr.setKeyable(False)
        mAttr.setHidden(False)
        # output attributes (writable but not storable; shown in channel box)
        # position of the object with respect to the reference
        nAttr = OM.MFnNumericAttribute()
        cls.position = nAttr.create(cls.kPositionAttrLongName, cls.kPositionAttrName, OM.MFnNumericData.k3Double)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # distance between the two objects
        cls.distance = nAttr.create(cls.kDistanceAttrLongName, cls.kDistanceAttrName, OM.MFnNumericData.kDouble)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # rotation of the object with respect to the reference
        cls.rotation = nAttr.create(cls.kRotationAttrLongName, cls.kRotationAttrName, OM.MFnNumericData.k3Double)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # dot product of specified axes on object and reference
        cls.dot = nAttr.create(cls.kDotAttrLongName, cls.kDotAttrName, OM.MFnNumericData.kDouble)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # angle between specified axes on object and reference
        cls.angle = nAttr.create(cls.kAngleAttrLongName, cls.kAngleAttrName, OM.MFnNumericData.kDouble)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # dot product of specified axes on object and direction to reference
        cls.dotToTarget = nAttr.create(cls.kDotToTargetAttrLongName, cls.kDotToTargetAttrName,
                                       OM.MFnNumericData.kDouble)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # angle between specified axes on object and direction to reference
        cls.angleToTarget = nAttr.create(cls.kAngleToTargetAttrLongName, cls.kAngleToTargetAttrName,
                                         OM.MFnNumericData.kDouble)
        nAttr.setWritable(True)
        nAttr.setStorable(False)
        nAttr.setReadable(True)
        nAttr.setChannelBox(True)
        # add the attributes
        cls.addAttribute(cls.objectMatrix)
        cls.addAttribute(cls.referenceMatrix)
        cls.addAttribute(cls.position)
        cls.addAttribute(cls.distance)
        cls.addAttribute(cls.rotateOrder)
        cls.addAttribute(cls.rotation)
        cls.addAttribute(cls.normalize)
        cls.addAttribute(cls.objectAxis)
        cls.addAttribute(cls.referenceAxis)
        cls.addAttribute(cls.dot)
        cls.addAttribute(cls.angle)
        cls.addAttribute(cls.dotToTarget)
        cls.addAttribute(cls.angleToTarget)
        # establish effects on position output
        cls.attributeAffects(cls.objectMatrix, cls.position)
        cls.attributeAffects(cls.referenceMatrix, cls.position)
        # establish effects on distance output
        cls.attributeAffects(cls.objectMatrix, cls.distance)
        cls.attributeAffects(cls.referenceMatrix, cls.distance)
        # establish effects on rotation output
        cls.attributeAffects(cls.objectMatrix, cls.rotation)
        cls.attributeAffects(cls.referenceMatrix, cls.rotation)
        cls.attributeAffects(cls.rotateOrder, cls.rotation)
        # establish effects on dot product output
        cls.attributeAffects(cls.objectMatrix, cls.dot)
        cls.attributeAffects(cls.referenceMatrix, cls.dot)
        cls.attributeAffects(cls.normalize, cls.dot)
        cls.attributeAffects(cls.objectAxis, cls.dot)
        cls.attributeAffects(cls.referenceAxis, cls.dot)
        # establish effects on angle output
        cls.attributeAffects(cls.objectMatrix, cls.angle)
        cls.attributeAffects(cls.referenceMatrix, cls.angle)
        cls.attributeAffects(cls.objectAxis, cls.angle)
        cls.attributeAffects(cls.referenceAxis, cls.angle)
        # establish effects on dot product to target output
        cls.attributeAffects(cls.objectMatrix, cls.dotToTarget)
        cls.attributeAffects(cls.referenceMatrix, cls.dotToTarget)
        cls.attributeAffects(cls.normalize, cls.dotToTarget)
        cls.attributeAffects(cls.objectAxis, cls.dotToTarget)
        cls.attributeAffects(cls.referenceAxis, cls.dotToTarget)
        # establish effects on angle to target output
        cls.attributeAffects(cls.objectMatrix, cls.angleToTarget)
        cls.attributeAffects(cls.referenceMatrix, cls.angleToTarget)
        cls.attributeAffects(cls.objectAxis, cls.angleToTarget)
        cls.attributeAffects(cls.referenceAxis, cls.angleToTarget)
# -----------------------------------------------------------------------------
# Initialize
# -----------------------------------------------------------------------------
def initializePlugin(mobject):
    """Register the AM_ExposeTransform node and command with Maya.

    Maya calls this entry point when the plug-in is loaded.

    Args:
        mobject: MObject handle for this plug-in, supplied by Maya.

    Raises:
        Whatever registration error Maya reports; the error is logged to
        stderr before being re-raised so the load failure is diagnosable.
    """
    plugin = OMMPx.MFnPlugin(mobject, 'Adam Mechtley', kVersionNumber, 'Any')
    # dependency node
    try:
        plugin.registerNode(AM_ExposeTransformNode.kPluginNodeTypeName, AM_ExposeTransformNode.kPluginNodeId,
                            AM_ExposeTransformNode.nodeCreator, AM_ExposeTransformNode.nodeInitializer)
    except Exception:  # narrowed from bare `except:`; KeyboardInterrupt/SystemExit now propagate untouched
        sys.stderr.write('Failed to register node: %s\n' % AM_ExposeTransformNode.kPluginNodeTypeName)
        raise
    # command
    try:
        plugin.registerCommand(AM_ExposeTransformCmd.kPluginCmdName, AM_ExposeTransformCmd.cmdCreator,
                               AM_ExposeTransformCmd.syntaxCreator)
    except Exception:
        sys.stderr.write('Failed to register command: %s\n' % AM_ExposeTransformCmd.kPluginCmdName)
        raise
# -----------------------------------------------------------------------------
# Uninitialize
# -----------------------------------------------------------------------------
def uninitializePlugin(mobject):
    """Deregister the AM_ExposeTransform node and command from Maya.

    Maya calls this entry point when the plug-in is unloaded.

    Args:
        mobject: MObject handle for this plug-in, supplied by Maya.

    Raises:
        Whatever deregistration error Maya reports; the error is logged to
        stderr before being re-raised so the unload failure is diagnosable.
    """
    plugin = OMMPx.MFnPlugin(mobject)
    # dependency node
    try:
        plugin.deregisterNode(AM_ExposeTransformNode.kPluginNodeId)
    except Exception:  # narrowed from bare `except:`; KeyboardInterrupt/SystemExit now propagate untouched
        sys.stderr.write('Failed to unregister node: %s\n' % AM_ExposeTransformNode.kPluginNodeTypeName)
        raise
    # command
    try:
        plugin.deregisterCommand(AM_ExposeTransformCmd.kPluginCmdName)
    except Exception:
        sys.stderr.write('Failed to unregister command: %s\n' % AM_ExposeTransformCmd.kPluginCmdName)
        raise
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/named-explicit-paths/named-explicit-path/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Configuration parameters relating to named explicit
    paths
    """

    # NOTE: auto-generated pyangbind container. The double-underscore slot
    # names below are name-mangled to _config__name etc.; access from outside
    # goes through the `name` / `sid_selection_mode` / `sid_protection_required`
    # properties defined at the bottom of the class.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__name",
        "__sid_selection_mode",
        "__sid_protection_required",
    )

    _yang_name = "config"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        """Build the container, optionally copy-constructing from args[0]."""
        self._path_helper = False

        self._extmethods = False
        # Leaf: name (string) — no default.
        self.__name = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=True,
        )
        # Leaf: sid-selection-mode (enumeration) — defaults to MIXED_MODE.
        self.__sid_selection_mode = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ADJ_SID_ONLY": {}, "MIXED_MODE": {}},
            ),
            default=six.text_type("MIXED_MODE"),
            is_leaf=True,
            yang_name="sid-selection-mode",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="enumeration",
            is_config=True,
        )
        # Leaf: sid-protection-required (boolean) — defaults to false.
        self.__sid_protection_required = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="sid-protection-required",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        # Copy-constructor path: args[0] must expose every pyangbind element;
        # only changed leaves are copied across via the _set_* methods.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        """Return this container's YANG path as a list of path components."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "lsps",
                "constrained-path",
                "named-explicit-paths",
                "named-explicit-path",
                "config",
            ]

    def _get_name(self):
        """
        Getter method for name, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/name (string)

        YANG Description: A string name that uniquely identifies an explicit
        path
        """
        return self.__name

    def _set_name(self, v, load=False):
        """
        Setter method for name, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/name (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_name() directly.

        YANG Description: A string name that uniquely identifies an explicit
        path
        """
        # Unwrap an already-typed value back to its underlying type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="name",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="string",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """name must be of a type compatible with string""",
                    "defined-type": "string",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)""",
                }
            )

        self.__name = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_name(self):
        # Reset the leaf to a freshly-constructed (unchanged) instance.
        self.__name = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=True,
        )

    def _get_sid_selection_mode(self):
        """
        Getter method for sid_selection_mode, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_selection_mode (enumeration)

        YANG Description: The restrictions placed on the SIDs to be selected by the
        calculation method for the explicit path when it is
        instantiated for a SR-TE LSP
        """
        return self.__sid_selection_mode

    def _set_sid_selection_mode(self, v, load=False):
        """
        Setter method for sid_selection_mode, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_selection_mode (enumeration)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_sid_selection_mode is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_sid_selection_mode() directly.

        YANG Description: The restrictions placed on the SIDs to be selected by the
        calculation method for the explicit path when it is
        instantiated for a SR-TE LSP
        """
        # Unwrap an already-typed value back to its underlying type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"ADJ_SID_ONLY": {}, "MIXED_MODE": {}},
                ),
                default=six.text_type("MIXED_MODE"),
                is_leaf=True,
                yang_name="sid-selection-mode",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="enumeration",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """sid_selection_mode must be of a type compatible with enumeration""",
                    "defined-type": "openconfig-network-instance:enumeration",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ADJ_SID_ONLY': {}, 'MIXED_MODE': {}},), default=six.text_type("MIXED_MODE"), is_leaf=True, yang_name="sid-selection-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)""",
                }
            )

        self.__sid_selection_mode = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_sid_selection_mode(self):
        # Reset the leaf to its default (MIXED_MODE).
        self.__sid_selection_mode = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ADJ_SID_ONLY": {}, "MIXED_MODE": {}},
            ),
            default=six.text_type("MIXED_MODE"),
            is_leaf=True,
            yang_name="sid-selection-mode",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="enumeration",
            is_config=True,
        )

    def _get_sid_protection_required(self):
        """
        Getter method for sid_protection_required, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_protection_required (boolean)

        YANG Description: When this value is set to true, only SIDs that are
        protected are to be selected by the calculating method
        when the explicit path is instantiated by a SR-TE LSP.
        """
        return self.__sid_protection_required

    def _set_sid_protection_required(self, v, load=False):
        """
        Setter method for sid_protection_required, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_protection_required (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_sid_protection_required is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_sid_protection_required() directly.

        YANG Description: When this value is set to true, only SIDs that are
        protected are to be selected by the calculating method
        when the explicit path is instantiated by a SR-TE LSP.
        """
        # Unwrap an already-typed value back to its underlying type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="sid-protection-required",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """sid_protection_required must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="sid-protection-required", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
                }
            )

        self.__sid_protection_required = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_sid_protection_required(self):
        # Reset the leaf to its default (false).
        self.__sid_protection_required = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="sid-protection-required",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )

    # Public property facade over the private getter/setter pairs; uses the
    # builtins property explicitly for PY2/PY3 compatibility (see six shim).
    name = __builtin__.property(_get_name, _set_name)
    sid_selection_mode = __builtin__.property(
        _get_sid_selection_mode, _set_sid_selection_mode
    )
    sid_protection_required = __builtin__.property(
        _get_sid_protection_required, _set_sid_protection_required
    )

    # Ordered map of leaf name -> property, consumed by pyangbind machinery.
    _pyangbind_elements = OrderedDict(
        [
            ("name", name),
            ("sid_selection_mode", sid_selection_mode),
            ("sid_protection_required", sid_protection_required),
        ]
    )
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/named-explicit-paths/named-explicit-path/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Configuration parameters relating to named explicit
    paths
    """

    # NOTE: auto-generated pyangbind container (L2 flavor of the module).
    # The double-underscore slot names below are name-mangled to
    # _config__name etc.; external access goes through the properties
    # defined at the bottom of the class.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__name",
        "__sid_selection_mode",
        "__sid_protection_required",
    )

    _yang_name = "config"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        """Build the container, optionally copy-constructing from args[0]."""
        self._path_helper = False

        self._extmethods = False
        # Leaf: name (string) — no default.
        self.__name = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=True,
        )
        # Leaf: sid-selection-mode (enumeration) — defaults to MIXED_MODE.
        self.__sid_selection_mode = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ADJ_SID_ONLY": {}, "MIXED_MODE": {}},
            ),
            default=six.text_type("MIXED_MODE"),
            is_leaf=True,
            yang_name="sid-selection-mode",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="enumeration",
            is_config=True,
        )
        # Leaf: sid-protection-required (boolean) — defaults to false.
        self.__sid_protection_required = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="sid-protection-required",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        # Copy-constructor path: args[0] must expose every pyangbind element;
        # only changed leaves are copied across via the _set_* methods.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        """Return this container's YANG path as a list of path components."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "lsps",
                "constrained-path",
                "named-explicit-paths",
                "named-explicit-path",
                "config",
            ]

    def _get_name(self):
        """
        Getter method for name, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/name (string)

        YANG Description: A string name that uniquely identifies an explicit
        path
        """
        return self.__name

    def _set_name(self, v, load=False):
        """
        Setter method for name, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/name (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_name() directly.

        YANG Description: A string name that uniquely identifies an explicit
        path
        """
        # Unwrap an already-typed value back to its underlying type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="name",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="string",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """name must be of a type compatible with string""",
                    "defined-type": "string",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)""",
                }
            )

        self.__name = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_name(self):
        # Reset the leaf to a freshly-constructed (unchanged) instance.
        self.__name = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=True,
        )

    def _get_sid_selection_mode(self):
        """
        Getter method for sid_selection_mode, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_selection_mode (enumeration)

        YANG Description: The restrictions placed on the SIDs to be selected by the
        calculation method for the explicit path when it is
        instantiated for a SR-TE LSP
        """
        return self.__sid_selection_mode

    def _set_sid_selection_mode(self, v, load=False):
        """
        Setter method for sid_selection_mode, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_selection_mode (enumeration)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_sid_selection_mode is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_sid_selection_mode() directly.

        YANG Description: The restrictions placed on the SIDs to be selected by the
        calculation method for the explicit path when it is
        instantiated for a SR-TE LSP
        """
        # Unwrap an already-typed value back to its underlying type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"ADJ_SID_ONLY": {}, "MIXED_MODE": {}},
                ),
                default=six.text_type("MIXED_MODE"),
                is_leaf=True,
                yang_name="sid-selection-mode",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="enumeration",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """sid_selection_mode must be of a type compatible with enumeration""",
                    "defined-type": "openconfig-network-instance:enumeration",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ADJ_SID_ONLY': {}, 'MIXED_MODE': {}},), default=six.text_type("MIXED_MODE"), is_leaf=True, yang_name="sid-selection-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)""",
                }
            )

        self.__sid_selection_mode = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_sid_selection_mode(self):
        # Reset the leaf to its default (MIXED_MODE).
        self.__sid_selection_mode = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ADJ_SID_ONLY": {}, "MIXED_MODE": {}},
            ),
            default=six.text_type("MIXED_MODE"),
            is_leaf=True,
            yang_name="sid-selection-mode",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="enumeration",
            is_config=True,
        )

    def _get_sid_protection_required(self):
        """
        Getter method for sid_protection_required, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_protection_required (boolean)

        YANG Description: When this value is set to true, only SIDs that are
        protected are to be selected by the calculating method
        when the explicit path is instantiated by a SR-TE LSP.
        """
        return self.__sid_protection_required

    def _set_sid_protection_required(self, v, load=False):
        """
        Setter method for sid_protection_required, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/named_explicit_paths/named_explicit_path/config/sid_protection_required (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_sid_protection_required is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_sid_protection_required() directly.

        YANG Description: When this value is set to true, only SIDs that are
        protected are to be selected by the calculating method
        when the explicit path is instantiated by a SR-TE LSP.
        """
        # Unwrap an already-typed value back to its underlying type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="sid-protection-required",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """sid_protection_required must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="sid-protection-required", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
                }
            )

        self.__sid_protection_required = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_sid_protection_required(self):
        # Reset the leaf to its default (false).
        self.__sid_protection_required = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="sid-protection-required",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )

    # Public property facade over the private getter/setter pairs; uses the
    # builtins property explicitly for PY2/PY3 compatibility (see six shim).
    name = __builtin__.property(_get_name, _set_name)
    sid_selection_mode = __builtin__.property(
        _get_sid_selection_mode, _set_sid_selection_mode
    )
    sid_protection_required = __builtin__.property(
        _get_sid_protection_required, _set_sid_protection_required
    )

    # Ordered map of leaf name -> property, consumed by pyangbind machinery.
    _pyangbind_elements = OrderedDict(
        [
            ("name", name),
            ("sid_selection_mode", sid_selection_mode),
            ("sid_protection_required", sid_protection_required),
        ]
    )
|
|
import json
import unittest
from binascii import b2a_hex
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.utils.six.moves import range
from ..test_data import TestDataMixin
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
if HAS_GDAL:
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType,
GDALException, OGRIndexError, SpatialReference, CoordTransform,
GDAL_VERSION)
@skipUnless(HAS_GDAL, "GDAL is required")
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
# Should throw TypeError on this input
with self.assertRaises(GDALException):
OGRGeomType(23)
with self.assertRaises(GDALException):
OGRGeomType('fooD')
with self.assertRaises(GDALException):
OGRGeomType(9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
# No test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
with self.assertRaises(OGRIndexError):
linestr.__getitem__(len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
with self.assertRaises(OGRIndexError):
mlinestr.__getitem__(len(mlinestr))
def test_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
# self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(GDALException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
with self.assertRaises(OGRIndexError):
mpoly.__getitem__(len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test_srs(self):
    "Testing OGR Geometries with Spatial Reference objects."
    for mp in self.geometries.multipolygons:
        # Creating a geometry w/spatial reference.
        sr = SpatialReference('WGS84')
        mpoly = OGRGeometry(mp.wkt, sr)
        self.assertEqual(sr.wkt, mpoly.srs.wkt)
        # Ensuring that SRS is propagated to clones.
        klone = mpoly.clone()
        self.assertEqual(sr.wkt, klone.srs.wkt)
        # Ensuring all children geometries (polygons and their rings) all
        # return the assigned spatial reference as well.
        for poly in mpoly:
            self.assertEqual(sr.wkt, poly.srs.wkt)
            for ring in poly:
                self.assertEqual(sr.wkt, ring.srs.wkt)
        # Ensuring SRS propagate in topological ops.
        a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
        b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
        diff = a.difference(b)
        union = a.union(b)
        self.assertEqual(sr.wkt, diff.srs.wkt)
        self.assertEqual(sr.srid, union.srs.srid)
        # Instantiating w/an integer SRID also attaches an SRS.
        mpoly = OGRGeometry(mp.wkt, 4326)
        self.assertEqual(4326, mpoly.srid)
        mpoly.srs = SpatialReference(4269)
        self.assertEqual(4269, mpoly.srid)
        self.assertEqual('NAD83', mpoly.srs.name)
        # Incrementing through the multipolygon after the spatial reference
        # has been re-assigned.
        for poly in mpoly:
            self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
            poly.srs = 32140
            for ring in poly:
                # Changing each ring in the polygon.
                self.assertEqual(32140, ring.srs.srid)
                self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
                ring.srs = str(SpatialReference(4326))  # back to WGS84
                self.assertEqual(4326, ring.srs.srid)
                # Using the `srid` property.
                ring.srid = 4322
                self.assertEqual('WGS 72', ring.srs.name)
                self.assertEqual(4322, ring.srid)
        # srs/srid may be assigned their own values, even when srs is None;
        # these self-assignments must be no-ops, not errors.
        mpoly = OGRGeometry(mp.wkt, srs=None)
        mpoly.srs = mpoly.srs
        mpoly.srid = mpoly.srid
def test_srs_transform(self):
    "Testing transform()."
    orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
    trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
    # Using an srid, a SpatialReference object, and a CoordTransform object
    # for transformations.
    t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
    t1.transform(trans.srid)
    t2.transform(SpatialReference('EPSG:2774'))
    ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
    t3.transform(ct)
    # Testing use of the `clone` keyword: the source geometry must be
    # left untouched and a transformed copy returned.
    k1 = orig.clone()
    k2 = k1.transform(trans.srid, clone=True)
    self.assertEqual(k1, orig)
    self.assertNotEqual(k1, k2)
    # Transformed coordinates are only compared to 3 decimal places.
    prec = 3
    for p in (t1, t2, t3, k2):
        self.assertAlmostEqual(trans.x, p.x, prec)
        self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_dim(self):
    "Testing coordinate dimension is the same on transformed geometries."
    ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
    ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
    prec = 3
    ls_orig.transform(ls_trans.srs)
    # Making sure the coordinate dimension is still 2D after transform.
    self.assertEqual(2, ls_orig.coord_dim)
    self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
    self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test_difference(self):
    "Testing difference()."
    # Walk the topology fixtures alongside their expected differences
    # instead of indexing both sequences by position.
    for topology, expected in zip(self.geometries.topology_geoms,
                                  self.geometries.diff_geoms):
        a = OGRGeometry(topology.wkt_a)
        b = OGRGeometry(topology.wkt_b)
        want = OGRGeometry(expected.wkt)
        self.assertEqual(want, a.difference(b))
        self.assertEqual(want, a - b)  # __sub__ is difference operator
        a -= b  # testing __isub__
        self.assertEqual(want, a)
def test_intersection(self):
    "Testing intersects() and intersection()."
    # Pair each topology fixture with its expected intersection geometry.
    for topology, expected in zip(self.geometries.topology_geoms,
                                  self.geometries.intersect_geoms):
        a = OGRGeometry(topology.wkt_a)
        b = OGRGeometry(topology.wkt_b)
        want = OGRGeometry(expected.wkt)
        self.assertTrue(a.intersects(b))
        self.assertEqual(want, a.intersection(b))
        self.assertEqual(want, a & b)  # __and__ is intersection operator
        a &= b  # testing __iand__
        self.assertEqual(want, a)
def test_symdifference(self):
    "Testing sym_difference()."
    # Pair each topology fixture with its expected symmetric difference.
    for topology, expected in zip(self.geometries.topology_geoms,
                                  self.geometries.sdiff_geoms):
        a = OGRGeometry(topology.wkt_a)
        b = OGRGeometry(topology.wkt_b)
        want = OGRGeometry(expected.wkt)
        self.assertEqual(want, a.sym_difference(b))
        self.assertEqual(want, a ^ b)  # __xor__ is symmetric difference operator
        a ^= b  # testing __ixor__
        self.assertEqual(want, a)
def test_union(self):
    "Testing union()."
    # Pair each topology fixture with its expected union geometry.
    for topology, expected in zip(self.geometries.topology_geoms,
                                  self.geometries.union_geoms):
        a = OGRGeometry(topology.wkt_a)
        b = OGRGeometry(topology.wkt_b)
        want = OGRGeometry(expected.wkt)
        self.assertEqual(want, a.union(b))
        self.assertEqual(want, a | b)  # __or__ is union operator
        a |= b  # testing __ior__
        self.assertEqual(want, a)
def test_add(self):
    "Testing GeometryCollection.add()."
    # Can't insert a Point into a MultiPolygon.
    mp = OGRGeometry('MultiPolygon')
    pnt = OGRGeometry('POINT(5 23)')
    with self.assertRaises(GDALException):
        mp.add(pnt)
    # GeometryCollection.add may take an OGRGeometry (if another collection
    # of the same type all child geoms will be added individually) or WKT.
    for mp in self.geometries.multipolygons:
        mpoly = OGRGeometry(mp.wkt)
        mp1 = OGRGeometry('MultiPolygon')
        mp2 = OGRGeometry('MultiPolygon')
        mp3 = OGRGeometry('MultiPolygon')
        for poly in mpoly:
            mp1.add(poly)  # Adding a geometry at a time
            mp2.add(poly.wkt)  # Adding WKT
        mp3.add(mpoly)  # Adding a MultiPolygon's entire contents at once.
        # All three construction styles must yield the same collection.
        for tmp in (mp1, mp2, mp3):
            self.assertEqual(mpoly, tmp)
def test_extent(self):
    "Testing `extent` property."
    # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
    mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
    self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
    # Testing on the 'real world' Polygon: extent must match the min/max
    # of the exterior ring's coordinates.
    poly = OGRGeometry(self.geometries.polygons[3].wkt)
    ring = poly.shell
    x, y = ring.x, ring.y
    xmin, ymin = min(x), min(y)
    xmax, ymax = max(x), max(y)
    self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_25D(self):
    "Testing 2.5D geometries."
    # A WKT point with a Z value becomes a 3-coordinate-dimension geometry.
    pnt_25d = OGRGeometry('POINT(1 2 3)')
    self.assertEqual('Point25D', pnt_25d.geom_type.name)
    self.assertEqual(3.0, pnt_25d.z)
    self.assertEqual(3, pnt_25d.coord_dim)
    # Line strings expose z as a list of Z values, one per vertex.
    ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
    self.assertEqual('LineString25D', ls_25d.geom_type.name)
    self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
    self.assertEqual(3, ls_25d.coord_dim)
def test_pickle(self):
    "Testing pickle support."
    # Round-tripping through pickle must preserve both geometry and SRS.
    g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
    g2 = pickle.loads(pickle.dumps(g1))
    self.assertEqual(g1, g2)
    self.assertEqual(4326, g2.srs.srid)
    self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test_ogrgeometry_transform_workaround(self):
    "Testing coordinate dimensions on geometries after transformation."
    # A bug in GDAL versions prior to 1.7 changes the coordinate
    # dimension of a geometry after it has been transformed.
    # This test ensures that the bug workarounds employed within
    # `OGRGeometry.transform` indeed work.
    wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
    wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
    srid = 4326
    # For both the 2D and 3D MultiLineString, ensure _both_ the dimension
    # of the collection and the component LineString have the expected
    # coordinate dimension after transform.
    geom = OGRGeometry(wkt_2d, srid)
    geom.transform(srid)  # identity transform; dimension must not change
    self.assertEqual(2, geom.coord_dim)
    self.assertEqual(2, geom[0].coord_dim)
    self.assertEqual(wkt_2d, geom.wkt)
    geom = OGRGeometry(wkt_3d, srid)
    geom.transform(srid)
    self.assertEqual(3, geom.coord_dim)
    self.assertEqual(3, geom[0].coord_dim)
    self.assertEqual(wkt_3d, geom.wkt)
# Testing binary predicates, `assertIs` is used to check that bool is returned.
def test_equivalence_regression(self):
    "Testing equivalence methods with non-OGRGeometry instances."
    self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
    # Comparing a geometry against a non-geometry must be unequal, not raise.
    self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
def test_contains(self):
    # contains() returns a bool (checked via assertIs); identical points
    # contain each other, distinct points do not.
    self.assertIs(OGRGeometry('POINT(0 0)').contains(OGRGeometry('POINT(0 0)')), True)
    self.assertIs(OGRGeometry('POINT(0 0)').contains(OGRGeometry('POINT(0 1)')), False)
def test_crosses(self):
    # crosses(): intersecting diagonals cross; parallel segments do not.
    self.assertIs(OGRGeometry('LINESTRING(0 0, 1 1)').crosses(OGRGeometry('LINESTRING(0 1, 1 0)')), True)
    self.assertIs(OGRGeometry('LINESTRING(0 0, 0 1)').crosses(OGRGeometry('LINESTRING(1 0, 1 1)')), False)
def test_disjoint(self):
    # disjoint() is the negation of any spatial interaction.
    self.assertIs(OGRGeometry('LINESTRING(0 0, 1 1)').disjoint(OGRGeometry('LINESTRING(0 1, 1 0)')), False)
    self.assertIs(OGRGeometry('LINESTRING(0 0, 0 1)').disjoint(OGRGeometry('LINESTRING(1 0, 1 1)')), True)
def test_equals(self):
    """equals() is True for spatially identical geometries, else False.

    Bug fix: this test previously called contains(), byte-for-byte
    duplicating test_contains and leaving equals() untested.
    """
    self.assertIs(OGRGeometry('POINT(0 0)').equals(OGRGeometry('POINT(0 0)')), True)
    self.assertIs(OGRGeometry('POINT(0 0)').equals(OGRGeometry('POINT(0 1)')), False)
def test_intersects(self):
    # intersects(): crossing segments intersect; parallel ones do not.
    self.assertIs(OGRGeometry('LINESTRING(0 0, 1 1)').intersects(OGRGeometry('LINESTRING(0 1, 1 0)')), True)
    self.assertIs(OGRGeometry('LINESTRING(0 0, 0 1)').intersects(OGRGeometry('LINESTRING(1 0, 1 1)')), False)
def test_overlaps(self):
    # overlaps(): partially intersecting polygons of equal dimension overlap.
    self.assertIs(
        OGRGeometry('POLYGON ((0 0, 0 2, 2 2, 2 0, 0 0))').overlaps(
            OGRGeometry('POLYGON ((1 1, 1 5, 5 5, 5 1, 1 1))')
        ), True
    )
    self.assertIs(OGRGeometry('POINT(0 0)').overlaps(OGRGeometry('POINT(0 1)')), False)
def test_touches(self):
    # touches(): boundaries meet without interior intersection.
    self.assertIs(
        OGRGeometry('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))').touches(OGRGeometry('LINESTRING(0 2, 2 0)')), True
    )
    self.assertIs(OGRGeometry('POINT(0 0)').touches(OGRGeometry('POINT(0 1)')), False)
def test_within(self):
    # within(): a point inside a polygon is within it.
    self.assertIs(
        OGRGeometry('POINT(0.5 0.5)').within(OGRGeometry('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))')), True
    )
    self.assertIs(OGRGeometry('POINT(0 0)').within(OGRGeometry('POINT(0 1)')), False)
|
|
import logging
import signal
import os
import sys
import time
from datetime import datetime
#project
from utils.pidfile import PidFile
from utils.daemon import Daemon
from config import HealthCheckConfig
from config import getconfigpath
from config import getpiddir
from config import getpidname
from healthcheckreporter import HealthcheckReporter
from healthchecklogging import initializeLogging
# Make files created by the agent group/world-readable (Python 2 octal literal;
# this whole script targets Python 2).
os.umask(022)
# PATH: config and pid file locations are resolved relative to this script.
AGENT_DIR=os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_FILE='config.cfg'
PID_NAME = __file__
PID_DIR = AGENT_DIR
CONFIG_FILE=AGENT_DIR + '/' + DEFAULT_CONFIG_FILE
# CONSTANTS
# NOTE(review): name below is a typo for DEFAULT_LOGGING_LEVEL; kept as-is
# in case external code imports it.
DEFAUTL_LOGGING_LEVEL=logging.INFO
DEFAULT_WAIT_BETWEEN_TASKS=30 # seconds
DEFAULT_WAIT_TIME_BEFORE_KILL=1*60 # 1 minute grace period before forced exit
START_COMMANDS = ['start', 'restart']
# Module-level logger used by the __main__ guard.
log = logging.getLogger(__name__)
def disableLogging():
    """Quiet the chatty `utils` helper package down to WARNING level."""
    utils_logger = logging.getLogger("utils")
    utils_logger.setLevel(logging.WARNING)
# The following silencing calls are deliberately left disabled.
#logging.getLogger("config").setLevel(logging.WARNING)
#disableLogging()
DEFAULT_CHECK_INTERVAL=60  # seconds between health checks
DEFAULT_CHECK_FREQUENCY=1
class HealthcheckAgent(Daemon):
    """Daemon wrapper that runs a HealthcheckReporter once, sends its
    results, and exits.

    `run()` loads the configuration, builds the reporter, executes it and
    ships the report.  SIGTERM/SIGINT trigger a graceful shutdown through
    `_handle_sigterm`, which waits up to DEFAULT_WAIT_TIME_BEFORE_KILL
    seconds for the reporter thread to finish.
    """
    log = logging.getLogger('HealthCheckAgent')

    def __init__(self, pidfile):
        Daemon.__init__(self, pidfile)
        self.run_forever = True   # cleared by the signal handler
        self.start_event=True     # False once a shutdown is already in progress
        self.host=''
        self.config=None                   # HealthCheckConfig, set in run()
        self.healthcheckreporter = None    # HealthcheckReporter, set in run()
        self.check_interval = DEFAULT_CHECK_INTERVAL
        self.check_frequency = DEFAULT_CHECK_FREQUENCY
        self.configfile=AGENT_DIR + '/' + DEFAULT_CONFIG_FILE
        #self.host=get_hostname()

    def _handle_sigterm(self, signum, frame):
        """Handles SIGTERM and SIGINT, which gracefully stops the agent."""
        log = logging.getLogger('HealthCheckAgent._handle_sigterm()')
        if self.start_event:
            log.info("Caught sigterm. Stopping run loop.")
            #log.debug("Parent Process id is: %s" % (super(Daemon, self).pid()))
            self.run_forever = False
            self.start_event = False
            #self.healthcheckreporter.stop()
            # NOTE(review): assumes the reporter exists by the time a signal
            # arrives; a signal delivered before run() builds it would hit
            # None here — confirm against the daemon start sequence.
            if self.healthcheckreporter.isRunning():
                # Poll every 5s until the reporter finishes or the grace
                # period expires.
                t_end = time.time() + DEFAULT_WAIT_TIME_BEFORE_KILL # one minute
                while time.time() < t_end:
                    if self.healthcheckreporter.isRunning():
                        t_left = t_end - time.time()
                        log.debug("Healthcheck Reporter thread is running")
                        log.debug("Waiting.. Time left %d of %d seconds" % (t_left,DEFAULT_WAIT_TIME_BEFORE_KILL) )
                        time.sleep(5) # Sleep for 5 seconds
                        continue
                    else:
                        self.healthcheckreporter.stop()
                        log.debug("Healthcheck Reporter thread stopped")
                        break
                log.debug("Timed out waiting for healthcheck reporter thread to finish")
            else:
                log.debug("Healthcheck Reporter thread stopped")
            log.info("Exiting. Bye bye.")
            raise SystemExit
        else:
            # A previous signal already started the shutdown sequence.
            log.debug("Stop already in progress")

    @classmethod
    def info(cls, verbose=None):
        # Quiet the root logger before reporting status.
        logging.getLogger().setLevel(logging.ERROR)
        return "Info"

    def run(self):
        log = logging.getLogger("HealthCheckAgent.run()")
        """Main loop of the healthcheck"""
        # Gracefully exit on sigterm
        signal.signal(signal.SIGTERM, self._handle_sigterm)
        # Handle Keyboard Interrupt
        signal.signal(signal.SIGINT, self._handle_sigterm)
        initializeLogging()
        config=getconfigpath()
        # Load and validate configuration; only build the reporter when the
        # config file exists and passes validation.
        if config:
            log.info("configuration file %s" % os.path.abspath(config))
            self.config=HealthCheckConfig(config)
            if self.config.isValid():
                log.info("Valid configuration file")
                self.healthcheckreporter=HealthcheckReporter(self.config)
            else:
                log.info("Invalid Configuration file %s" % os.path.abspath(config))
        else:
            log.info("Configuration File is missing")
        if self.healthcheckreporter:
            try:
                # Run the checks, then ship the results; each phase is timed
                # for the logs.
                log.info("HealthCheck started at %s" % str(datetime.now()))
                start_time=time.time()
                self.healthcheckreporter.run()
                total_time=time.time()-start_time
                log.info("HealthCheck finished at %s" % str(datetime.now()))
                log.info("HealthCheck took %s seconds to complete" % total_time)
                log.info("Sending message started %s" % str(datetime.now()))
                start_time=time.time()
                self.healthcheckreporter.send()
                total_time=time.time()-start_time
                log.info("Send message call finished at %s" % str(datetime.now()))
                log.info("Send message took %s seconds to complete" % total_time)
                self.healthcheckreporter.running=False
            except (KeyboardInterrupt, SystemExit):
                self.healthcheckreporter.running=False
            finally:
                # Always clear the running flag so the sigterm handler
                # doesn't keep waiting on a dead reporter.
                self.healthcheckreporter.running=False
        else:
            log.debug("Reporter class not initialized")
        # Explicitly kill the process, because it might be running as a daemon.
        log.info("Exiting. Bye bye.")
        sys.exit(0)
def main(argv):
    """Dispatch the command in ``argv`` and return a shell exit code.

    argv -- full argument vector (argv[0] is the program name).
    Returns 0 on success, 2 when no command is given, 3 for an unknown
    command.

    Bug fixes relative to the original:
      * uses the ``argv`` parameter consistently instead of reaching for
        ``sys.argv`` (behavior is identical for the existing
        ``main(sys.argv[0:])`` caller, and the function is now callable
        programmatically);
      * 'configtest' is listed in COMMANDS_NO_AGENT, so the pre-existing
        ``'configcheck' == command or 'configtest' == command`` branch is
        reachable for both spellings (it was previously rejected as an
        unknown command).
    """
    log = logging.getLogger('healthcheck')
    # Commands that require instantiating the daemon wrapper.
    COMMANDS_AGENT = [
        'start',
        'stop',
        'restart',
        'status'
    ]
    # Commands handled without starting the agent.
    COMMANDS_NO_AGENT = [
        'info',
        'check',
        'configcheck',
        'configtest',
        'emailcheck'
    ]
    COMMANDS = COMMANDS_AGENT + COMMANDS_NO_AGENT
    if len(argv[1:]) < 1:
        sys.stderr.write("Usage: %s %s\n" % (argv[0], "|".join(COMMANDS)))
        return 2
    command = argv[1]
    if command not in COMMANDS:
        sys.stderr.write("Unknown command: %s\n" % command)
        return 3
    if command in COMMANDS_AGENT:
        # Initialize the agent with its pid-file path.
        hcagent = HealthcheckAgent(PidFile(getpidname(), getpiddir()).get_path())
        if command in START_COMMANDS:
            # Placeholder for a pre-start banner/log line.
            pass
        if 'start' == command:
            hcagent.start()
        elif 'stop' == command:
            hcagent.stop()
        elif 'restart' == command:
            hcagent.restart()
        elif 'status' == command:
            hcagent.status()
    elif 'info' == command:
        # NOTE(review): returning a string makes sys.exit(main(...)) print
        # it to stderr and exit with status 1; kept for compatibility.
        return "Health Check Version: 1.0"
    elif 'configcheck' == command or 'configtest' == command:
        CONFIG_FILE = AGENT_DIR + '/' + DEFAULT_CONFIG_FILE
        config = HealthCheckConfig(CONFIG_FILE)
        if config.isValid():
            sys.stdout.write("Configuration file %s is valid \n" % CONFIG_FILE)
        else:
            sys.stdout.write("Configuration file %s is invalid \n" % CONFIG_FILE)
    elif 'emailcheck' == command:
        # Email verification hook not implemented yet.
        pass
    return 0
if __name__ == '__main__':
    # Exit with main()'s return code.  StandardError exists only in
    # Python 2, which this script targets (see the octal umask above).
    try:
        sys.exit(main(sys.argv[0:]))
    except StandardError:
        # Try our best to log the error.
        try:
            log.exception("Uncaught error running the Health Check Agent")
        except Exception:
            pass
        raise
|
|
# -*- coding: utf-8 -*-
import pytest
from marshmallow import (
Schema,
fields,
pre_dump,
post_dump,
pre_load,
post_load,
validates,
validates_schema,
ValidationError,
)
def test_decorated_processors():
    """Exercise every pre/post dump/load decorator variant on one schema.

    Uses the marshmallow 2.x API where dump/load return a result object
    with a ``.data`` attribute.
    """
    class ExampleSchema(Schema):
        """Includes different ways to invoke decorators and set up methods"""
        TAG = 'TAG'
        value = fields.Integer(as_string=True)

        # Implicit default raw, pre dump, static method, return modified item.
        @pre_dump
        def increment_value(self, item):
            item['value'] += 1
            return item

        # Implicit default raw, post dump, class method, modify in place.
        @post_dump
        def add_tag(self, item):
            item['value'] = self.TAG + item['value']

        # Explicitly raw, post dump, instance method, return modified item.
        @post_dump(pass_many=True)
        def add_envelope(self, data, many):
            key = self.get_envelope_key(many)
            return {key: data}

        # Explicitly raw, pre load, instance method, return modified item.
        @pre_load(pass_many=True)
        def remove_envelope(self, data, many):
            key = self.get_envelope_key(many)
            return data[key]

        @staticmethod
        def get_envelope_key(many):
            # Envelope key depends on whether a collection is (de)serialized.
            return 'data' if many else 'datum'

        # Explicitly not raw, pre load, instance method, modify in place.
        @pre_load(pass_many=False)
        def remove_tag(self, item):
            item['value'] = item['value'][len(self.TAG):]

        # Explicit default raw, post load, instance method, modify in place.
        @post_load()
        def decrement_value(self, item):
            item['value'] -= 1

    schema = ExampleSchema()
    # Need to re-create these because the processors will modify in place.
    make_item = lambda: {'value': 3}
    make_items = lambda: [make_item(), {'value': 5}]
    # Dump pipeline: 3 -> +1 -> '4' (as_string) -> 'TAG4' -> enveloped.
    item_dumped = schema.dump(make_item()).data
    assert item_dumped == {'datum': {'value': 'TAG4'}}
    # Load reverses every step, restoring the original item.
    item_loaded = schema.load(item_dumped).data
    assert item_loaded == make_item()
    items_dumped = schema.dump(make_items(), many=True).data
    assert items_dumped == {'data': [{'value': 'TAG4'}, {'value': 'TAG6'}]}
    items_loaded = schema.load(items_dumped, many=True).data
    assert items_loaded == make_items()
class TestPassOriginal:
    """Tests for the ``pass_original`` option of post_load/post_dump, which
    hands the processor the untouched input alongside the processed data."""

    def test_pass_original_single_no_mutation(self):
        class MySchema(Schema):
            foo = fields.Field()

            @post_load(pass_original=True)
            def post_load(self, data, input_data):
                # Copy so the processed data is not mutated; pull a field
                # that only exists on the raw input.
                ret = data.copy()
                ret['_post_load'] = input_data['sentinel']
                return ret

            @post_dump(pass_original=True)
            def post_dump(self, data, obj):
                ret = data.copy()
                ret['_post_dump'] = obj['sentinel']
                return ret

        schema = MySchema()
        datum = {'foo': 42, 'sentinel': 24}
        item_loaded = schema.load(datum).data
        assert item_loaded['foo'] == 42
        assert item_loaded['_post_load'] == 24
        item_dumped = schema.dump(datum).data
        assert item_dumped['foo'] == 42
        assert item_dumped['_post_dump'] == 24

    def test_pass_original_single_with_mutation(self):
        class MySchema(Schema):
            foo = fields.Field()

            @post_load(pass_original=True)
            def post_load(self, data, input_data):
                # Mutating in place (and returning None) must also work.
                data['_post_load'] = input_data['post_load']

        schema = MySchema()
        item_loaded = schema.load({'foo': 42, 'post_load': 24}).data
        assert item_loaded['foo'] == 42
        assert item_loaded['_post_load'] == 24

    def test_pass_original_many(self):
        class MySchema(Schema):
            foo = fields.Field()

            @post_load(pass_many=True, pass_original=True)
            def post_load(self, data, many, original):
                # With pass_many=True the processor sees the whole
                # collection (or the single item when many=False).
                if many:
                    ret = []
                    for item, orig_item in zip(data, original):
                        item['_post_load'] = orig_item['sentinel']
                        ret.append(item)
                else:
                    ret = data.copy()
                    ret['_post_load'] = original['sentinel']
                return ret

            @post_dump(pass_many=True, pass_original=True)
            def post_dump(self, data, many, original):
                if many:
                    ret = []
                    for item, orig_item in zip(data, original):
                        item['_post_dump'] = orig_item['sentinel']
                        ret.append(item)
                else:
                    ret = data.copy()
                    ret['_post_dump'] = original['sentinel']
                return ret

        schema = MySchema()
        data = [{'foo': 42, 'sentinel': 24}, {'foo': 424, 'sentinel': 242}]
        items_loaded = schema.load(data, many=True).data
        assert items_loaded == [
            {'foo': 42, '_post_load': 24},
            {'foo': 424, '_post_load': 242},
        ]
        test_values = [e['_post_load'] for e in items_loaded]
        assert test_values == [24, 242]
        items_dumped = schema.dump(data, many=True).data
        assert items_dumped == [
            {'foo': 42, '_post_dump': 24},
            {'foo': 424, '_post_dump': 242},
        ]
        # Also check load/dump of single item
        datum = {'foo': 42, 'sentinel': 24}
        item_loaded = schema.load(datum, many=False).data
        assert item_loaded == {'foo': 42, '_post_load': 24}
        item_dumped = schema.dump(datum, many=False).data
        assert item_dumped == {'foo': 42, '_post_dump': 24}
def test_decorated_processor_inheritance():
    """Subclasses inherit decorated processors, may override them by name,
    and disable an inherited one by shadowing it with ``None``."""
    class ParentSchema(Schema):
        @post_dump
        def inherited(self, item):
            item['inherited'] = 'inherited'
            return item

        @post_dump
        def overridden(self, item):
            item['overridden'] = 'base'
            return item

        @post_dump
        def deleted(self, item):
            item['deleted'] = 'retained'
            return item

    class ChildSchema(ParentSchema):
        @post_dump
        def overridden(self, item):
            item['overridden'] = 'overridden'
            return item

        # Shadowing with None removes the inherited processor entirely.
        deleted = None

    parent_dumped = ParentSchema().dump({}).data
    assert parent_dumped == {
        'inherited': 'inherited',
        'overridden': 'base',
        'deleted': 'retained'
    }
    child_dumped = ChildSchema().dump({}).data
    assert child_dumped == {
        'inherited': 'inherited',
        'overridden': 'overridden'
    }
# https://github.com/marshmallow-code/marshmallow/issues/229#issuecomment-138949436
def test_pre_dump_is_invoked_before_implicit_field_generation():
    """A @pre_dump hook may add keys that Meta.fields then picks up as
    implicitly generated fields (regression for marshmallow issue #229)."""
    class Foo(Schema):
        field = fields.Integer()

        @pre_dump
        def hook(s, data):
            # Key injected here must survive into the dumped output.
            data['generated_field'] = 7

        class Meta:
            # Removing generated_field from here drops it from the output
            fields = ('field', 'generated_field')

    assert Foo().dump({"field": 5}).data == {'field': 5, 'generated_field': 7}
class ValidatesSchema(Schema):
    """Shared fixture schema: `foo` only validates when it equals 42."""
    foo = fields.Int()

    @validates('foo')
    def validate_foo(self, value):
        if value != 42:
            raise ValidationError('The answer to life the universe and everything.')
class TestValidatesDecorator:
    """Tests for the @validates('<field>') field-validator decorator."""

    def test_validates_decorator(self):
        schema = ValidatesSchema()
        # Failing value -> error recorded under the field name.
        errors = schema.validate({'foo': 41})
        assert 'foo' in errors
        assert errors['foo'][0] == 'The answer to life the universe and everything.'
        errors = schema.validate({'foo': 42})
        assert errors == {}
        # With many=True errors are keyed by item index.
        errors = schema.validate([{'foo': 42}, {'foo': 43}], many=True)
        assert 'foo' in errors[1]
        assert len(errors[1]['foo']) == 1
        assert errors[1]['foo'][0] == 'The answer to life the universe and everything.'
        errors = schema.validate([{'foo': 42}, {'foo': 42}], many=True)
        assert errors == {}
        # Missing field -> validator is simply not invoked.
        errors = schema.validate({})
        assert errors == {}

    def test_field_not_present(self):
        # @validates on a non-existent field raises at validation time.
        class BadSchema(ValidatesSchema):
            @validates('bar')
            def validate_bar(self, value):
                raise ValidationError('Never raised.')

        schema = BadSchema()
        with pytest.raises(ValueError) as excinfo:
            schema.validate({'foo': 42})
        assert '"bar" field does not exist.' in str(excinfo)

    def test_precedence(self):
        # Field-level `validate=` callables run before @validates methods;
        # a field-level failure short-circuits the decorated validator.
        class Schema2(ValidatesSchema):
            foo = fields.Int(validate=lambda n: n != 42)
            bar = fields.Int(validate=lambda n: n == 1)

            @validates('bar')
            def validate_bar(self, value):
                if value != 2:
                    raise ValidationError('Must be 2')

        schema = Schema2()
        errors = schema.validate({'foo': 42})
        assert 'foo' in errors
        assert len(errors['foo']) == 1
        assert 'Invalid value.' in errors['foo'][0]
        errors = schema.validate({'bar': 3})
        assert 'bar' in errors
        assert len(errors['bar']) == 1
        assert 'Invalid value.' in errors['bar'][0]
        # Passes the field-level validator, then fails the decorated one.
        errors = schema.validate({'bar': 1})
        assert 'bar' in errors
        assert len(errors['bar']) == 1
        assert errors['bar'][0] == 'Must be 2'
class TestValidatesSchemaDecorator:
    """Tests for @validates_schema (whole-schema validators), including the
    pass_many and pass_original options."""

    def test_decorated_validators(self):
        class MySchema(Schema):
            foo = fields.Int()
            bar = fields.Int()

            @validates_schema
            def validate_schema(self, data):
                if data['foo'] <= 3:
                    raise ValidationError('Must be greater than 3')

            @validates_schema(pass_many=True)
            def validate_raw(self, data, many):
                # With pass_many=True the validator sees the whole list.
                if many:
                    if len(data) < 2:
                        raise ValidationError('Must provide at least 2 items')

            @validates_schema
            def validate_bar(self, data):
                # A second argument to ValidationError routes the error to
                # that field instead of the '_schema' bucket.
                if 'bar' in data and data['bar'] < 0:
                    raise ValidationError('bar must not be negative', 'bar')

        schema = MySchema()
        errors = schema.validate({'foo': 3})
        assert '_schema' in errors
        assert errors['_schema'][0] == 'Must be greater than 3'
        errors = schema.validate([{'foo': 4}], many=True)
        assert '_schema' in errors[0]
        assert len(errors[0]['_schema']) == 1
        assert errors[0]['_schema'][0] == 'Must provide at least 2 items'
        errors = schema.validate({'foo': 4, 'bar': -1})
        assert 'bar' in errors
        assert len(errors['bar']) == 1
        assert errors['bar'][0] == 'bar must not be negative'

    def test_passing_original_data(self):
        class MySchema(Schema):
            foo = fields.Int()
            bar = fields.Int()

            @validates_schema(pass_original=True)
            def validate_original(self, data, original_data):
                # `original_data` is the raw input, before field coercion.
                if isinstance(original_data, dict) and isinstance(original_data['foo'], str):
                    raise ValidationError('foo cannot be a string')

            # See https://github.com/marshmallow-code/marshmallow/issues/127
            @validates_schema(pass_many=True, pass_original=True)
            def check_unknown_fields(self, data, original_data, many):
                def check(datum):
                    for key, val in datum.items():
                        if key not in self.fields:
                            raise ValidationError({'code': 'invalid_field'})
                if many:
                    for each in original_data:
                        check(each)
                else:
                    check(original_data)

        schema = MySchema()
        errors = schema.validate({'foo': 4, 'baz': 42})
        assert '_schema' in errors
        assert len(errors['_schema']) == 1
        assert errors['_schema'][0] == {'code': 'invalid_field'}
        errors = schema.validate({'foo': '4'})
        assert '_schema' in errors
        assert len(errors['_schema']) == 1
        assert errors['_schema'][0] == 'foo cannot be a string'
        schema = MySchema()
        # With many=True the schema-level errors are keyed by item index.
        errors = schema.validate([{'foo': 4, 'baz': 42}], many=True)
        assert 0 in errors
        assert '_schema' in errors[0]
        assert len(errors[0]['_schema']) == 1
        assert errors[0]['_schema'][0] == {'code': 'invalid_field'}

    # https://github.com/marshmallow-code/marshmallow/issues/273
    def test_allow_arbitrary_field_names_in_error(self):
        class MySchema(Schema):
            foo = fields.Int()
            bar = fields.Int()

            @validates_schema(pass_original=True)
            def strict_fields(self, data, original_data):
                # Errors may be attached to names not declared on the schema.
                for key in original_data:
                    if key not in self.fields:
                        raise ValidationError('Unknown field name', key)

        schema = MySchema()
        errors = schema.validate({'foo': 2, 'baz': 42})
        assert 'baz' in errors
        assert len(errors['baz']) == 1
        assert errors['baz'][0] == 'Unknown field name'
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Locations used by the install/uninstall helpers (Python 2 script).
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None  # populated by OptionParser in main()
# Session-bus environment required to run xwalkctl as the 'app' user.
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # Do not need handle timeout in this short script, let tool do it
    # Runs *cmd* through a shell, echoing every output line while also
    # collecting them; returns (exit_code, [lines]).
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() is None while the child is alive; stop only once the
        # process has exited and the pipe has drained.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap xwalkctl invocations so they run as the 'app' user with the
    session-bus environment exported; other commands pass through."""
    if "xwalkctl" not in cmd:
        return cmd
    return "su - app -c '%s;%s'" % (XW_ENV, cmd)
def getPKGID(pkg_name=None):
    # Resolve the installed package id for *pkg_name* by parsing the
    # `xwalkctl` listing on the device; returns None when the listing
    # fails or the package is not installed.
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('xwalkctl'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('xwalkctl'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_app_id = None
    for line in output:
        pkg_infos = line.split()
        if len(pkg_infos) == 1:
            # Header/separator lines carry a single token; skip them.
            continue
        name = pkg_infos[1]
        if pkg_name == name:
            test_app_id = pkg_infos[0]
            print test_app_id
            break
    return test_app_id
def doRemoteCMD(cmd=None):
    """Execute *cmd* on the target device (sdb shell in SDB mode, ssh
    otherwise) and return doCMD's (exit_code, output_lines)."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the device (sdb push or scp).

    Returns True on success and False on failure.

    Bug fix: the original returned True when the copy command FAILED
    (non-zero exit status) and False when it succeeded, inverting the
    flag both callers test with ``if not doRemoteCopy(...)``.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush the device's filesystem buffers after the transfer.
    doRemoteCMD("sync")
    # Success is a zero exit status from the copy command.
    return return_code == 0
def uninstPKGs():
    # Uninstall every .xpk bundled next to this script and remove the
    # pushed content directories; returns True only if every step worked.
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            # Media fixtures are plain files, not packages.
            continue
        for file in files:
            if file.endswith(".xpk"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "xwalkctl -u %s" % pkg_id)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Clean up the content pushed during installation.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    (return_code, output) = doRemoteCMD(
        "rm -rf %s/Others" % SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    # Push and install every .xpk next to this script, then copy the media
    # fixtures; returns True only if every step worked.
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "xwalkctl -i %s/%s" % (SRC_DIR, file))
                # The pushed .xpk is no longer needed once installed.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Media fixtures live under <SRC_DIR>/Others on the device.
    if not doRemoteCopy("%s/mediasrc" % SCRIPT_DIR, "%s/Others" % SRC_DIR):
        action_status = False
    return action_status
def main():
    # Parse -m/-s/-i/-u options, auto-detect an sdb device in SDB mode,
    # then install or uninstall the bundled packages.
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        # Python 2 except syntax; the whole script targets Python 2.
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Pick the first attached device listed by `sdb devices`.
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action (including plain -i) is install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    # main() exits non-zero itself on failure; reaching here means success.
    main()
    sys.exit(0)
|
|
# Copyright 2017-present Kensho Technologies, LLC.
from collections import OrderedDict
from datetime import date, datetime
from decimal import Decimal
from hashlib import sha256
from itertools import chain
from typing import Any, FrozenSet, Iterable
# C-based module confuses pylint, which is why we disable the check below.
from ciso8601 import parse_datetime # pylint: disable=no-name-in-module
from graphql import (
DirectiveLocation,
GraphQLArgument,
GraphQLBoolean,
GraphQLDirective,
GraphQLField,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
lexicographic_sort_schema,
print_schema,
)
from graphql.type.directives import specified_directives
import six
from .typedefs import ( # noqa
ClassToFieldTypeOverridesType,
GraphQLSchemaFieldType,
TypeEquivalenceHintsType,
)
# Constraints:
# - 'op_name' can only contain characters [A-Za-z_];
# - cannot be used at or within vertex fields marked @fold;
# - strings in 'value' can be encoded as '%tag_name' if referring to a tag named 'tag_name',
# or as '$parameter_name' if referring to a parameter 'parameter_name' which will be provided
# to the query at execution time.
FilterDirective = GraphQLDirective(
name="filter",
args=OrderedDict(
[
(
"op_name",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLString),
description="Name of the filter operation to perform.",
),
),
(
"value",
GraphQLArgument(
type_=GraphQLList(GraphQLNonNull(GraphQLString)),
description="List of string operands for the operator.",
),
),
]
),
is_repeatable=True,
locations=[
DirectiveLocation.FIELD,
DirectiveLocation.INLINE_FRAGMENT,
],
)
# Constraints:
# - 'tag_name' can only contain characters [A-Za-z_];
# - 'tag_name' has to be distinct for each @output directive;
# - can only be applied to property fields;
# - cannot be applied to fields within a scope marked @fold.
TagDirective = GraphQLDirective(
name="tag",
args=OrderedDict(
[
(
"tag_name",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLString),
description="Name to apply to the given property field.",
),
),
]
),
locations=[
DirectiveLocation.FIELD,
],
)
# Constraints:
# - 'out_name' can only contain characters [A-Za-z_];
# - 'out_name' has to be distinct for each @output directive;
# - can only be applied to property fields.
OutputDirective = GraphQLDirective(
name="output",
args=OrderedDict(
[
(
"out_name",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLString),
description=(
"What to designate the output field generated from this property field."
),
),
),
]
),
locations=[
DirectiveLocation.FIELD,
],
)
# Gremlin queries are designed as pipelines, and do not capture the full Cartesian product of
# all possible traversals that would satisfy the query. For example, consider an example graph
# where vertices A and B are each connected with vertices X and Y via an edge of type E:
#
# A --E-> X, A --E-> Y
# B --E-> X, B --E-> Y
#
# If our query starts at vertices A and B, and traverses the outbound edge E,
# Gremlin will output two possible traversals: one ending in X, and one ending with Y.
# However, which predecessor vertex these traversals will have is undefined:
# one path will be one of {(A, X), (B, X)} and the other will be one of {(A, Y), (B, Y)}.
# A Cartesian product result (which is what OrientDB MATCH returns) would return all four
# traversals: {(A, X), (B, X), (A, Y), (B, Y)}.
#
# The @output_source directive is a mitigation strategy that allows users
# to specify *which* set of results they want fully covered. Namely,
# OutputSource on a given location will ensure that all possible values
# at that location are represented in at least one row of the returned result set.
#
# Constraints:
# - can exist at most once, and only on a vertex field;
# - if it exists, has to be on the last vertex visited by the query;
# - may not exist at or within a vertex marked @optional or @fold.
OutputSourceDirective = GraphQLDirective(
name="output_source",
locations=[
DirectiveLocation.FIELD,
],
)
# Constraints:
# - can only be applied to vertex fields, except the root vertex of the query;
# - may not exist at the same vertex field as @recurse, @fold, or @output_source;
# - when filtering is applied on or within an @optional vertex field, evaluation is sequential:
# the @optional is resolved first, and if a satisfactory edge exists, it is taken;
# then, filtering is applied and eliminates results that don't match from the result set.
OptionalDirective = GraphQLDirective(
name="optional",
locations=[
DirectiveLocation.FIELD,
],
)
# Consider the following query:
# {
# Vertex_A {
# name @output(out_name: "vertex_a_name")
# out_Vertex_B {
# name @output(out_name: "vertex_b_name")
# }
# }
# }
# The query will return one row (with keys "vertex_a_name" and "vertex_b_name")
# per possible traversal starting at a Vertex_A and going outbound toward the Vertex_B.
#
# Suppose you instead wanted one row per Vertex_A, containing Vertex_A's name
# and a list of Vertex_B names containing all the names of Vertex_B elements
# connected to the Vertex_A named by that row. This new query effectively "folds"
# the "vertex_b_name" outputs for each "vertex_a_name" into a list, similarly to
# the SQL operation GROUP BY but grouping according to graph structure rather than value.
# In other words, if two distinct Vertex_A vertices happen to be named the same,
# we'd like to receive two rows from our query -- one corresponding to each Vertex_A object.
# This is what the @fold decorator allows -- the query we should use is:
# {
# Vertex_A {
# name @output(out_name: "vertex_a_name")
# out_Vertex_B @fold {
# name @output(out_name: "vertex_b_name_list")
# }
# }
# }
#
# IMPORTANT: Normally, out_Vertex_B in the above query also filters the result set
# such that Vertex_A objects with no corresponding Vertex_B objects are
# not returned as results. When @fold is applied to out_Vertex_B, however,
# this filtering is not applied, and "vertex_b_name_list" will return
# an empty list for Vertex_A objects that don't have out_Vertex_B data.
#
# Constraints:
# - can only be applied to vertex fields, except the root vertex of the query;
# - may not exist at the same vertex field as @recurse, @optional, @output_source, or @filter;
# - traversals and filtering within a vertex field marked @fold are prohibited;
# - @tag or @fold may not be used within a scope marked @fold.
FoldDirective = GraphQLDirective(
name="fold",
locations=[
DirectiveLocation.FIELD,
],
)
# Constraints:
# - may not be applied to the root vertex of the query (since it requires an edge to recurse on);
# - may not exist at or within a vertex marked @optional or @fold;
# - when not applied to vertex fields of union type, the vertex property type must
# either be an interface type implemented by the type of the current scope, or must be the exact
# same type as the type of the current scope;
# - inline fragments and filters within the @recurse block do not affect the recursion depth,
# but simply eliminate some of its outputs;
# - it must always be the case that depth >= 1, where depth = 1 produces the current vertex
# and its immediate neighbors along the specified edge.
RecurseDirective = GraphQLDirective(
name="recurse",
args=OrderedDict(
[
(
"depth",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLInt),
description=(
"Recurse up to this many times on this edge. A depth of 1 produces "
"the current vertex and its immediate neighbors along the given edge."
),
),
),
]
),
locations=[
DirectiveLocation.FIELD,
],
)
# TODO(selene): comments for the macro directives
MacroEdgeDirective = GraphQLDirective(
name="macro_edge",
locations=[
# Used to mark edges that are defined via macros in the schema.
DirectiveLocation.FIELD_DEFINITION,
],
)
MacroEdgeDefinitionDirective = GraphQLDirective(
name="macro_edge_definition",
args=OrderedDict(
[
(
"name",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLString),
description="Name of the macro edge.",
),
),
]
),
locations=[
DirectiveLocation.FIELD,
],
)
MacroEdgeTargetDirective = GraphQLDirective(
name="macro_edge_target",
locations=[
DirectiveLocation.FIELD,
DirectiveLocation.INLINE_FRAGMENT,
],
)
# TODO(selene): comment for the stitch directive
StitchDirective = GraphQLDirective(
name="stitch",
args=OrderedDict(
[
(
"source_field",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLString),
description="",
),
),
(
"sink_field",
GraphQLArgument(
type_=GraphQLNonNull(GraphQLString),
description="",
),
),
]
),
locations=[DirectiveLocation.FIELD_DEFINITION],
)
OUTBOUND_EDGE_FIELD_PREFIX = "out_"
INBOUND_EDGE_FIELD_PREFIX = "in_"
VERTEX_FIELD_PREFIXES = frozenset({OUTBOUND_EDGE_FIELD_PREFIX, INBOUND_EDGE_FIELD_PREFIX})
def is_vertex_field_name(field_name: str) -> bool:
    """Return True if the field's name indicates it is a non-root vertex field."""
    # A vertex field is a field whose type is a vertex type -- i.e. an edge.
    # Such fields are exactly the ones carrying an "out_" or "in_" prefix;
    # str.startswith accepts a tuple of candidate prefixes, checked in one call.
    return field_name.startswith((OUTBOUND_EDGE_FIELD_PREFIX, INBOUND_EDGE_FIELD_PREFIX))
def _unused_function(*args: Any, **kwargs: Any) -> None:
"""Must not be called. Placeholder for functions that are required but aren't used."""
raise NotImplementedError(
"The function you tried to call is not implemented, args / kwargs: "
"{} {}".format(args, kwargs)
)
def _serialize_date(value: Any) -> str:
"""Serialize a Date object to its proper ISO-8601 representation."""
# Python datetime.datetime is a subclass of datetime.date, but in this case, the two are not
# interchangeable. Rather than using isinstance, we will therefore check for exact type
# equality.
if type(value) != date:
raise ValueError(
"Expected argument to be a python date object. "
"Got {} of type {} instead.".format(value, type(value))
)
return value.isoformat()
def _parse_date_value(value: Any) -> date:
"""Deserialize a Date object from its proper ISO-8601 representation."""
if type(value) == date:
# We prefer exact type equality instead of isinstance() because datetime objects
# are subclasses of date but are not interchangeable for dates for our purposes.
return value
elif isinstance(value, str):
# ciso8601 only supports parsing into datetime objects, not date objects.
# This is not a problem in itself: "YYYY-MM-DD" strings will get parsed into datetimes
# with hour/minute/second/microsecond set to 0, and tzinfo=None.
# We don't want our parsing to implicitly lose precision, so before we convert the parsed
# datetime into a date value, we just assert that these fields are set as expected.
dt = parse_datetime(value) # This will raise ValueError in case of bad ISO 8601 formatting.
if (
dt.hour != 0
or dt.minute != 0
or dt.second != 0
or dt.microsecond != 0
or dt.tzinfo is not None
):
raise ValueError(
f"Expected an ISO-8601 date string in 'YYYY-MM-DD' format, but got a datetime "
f"string with a non-empty time component. This is not supported, since converting "
f"it to a date would result in an implicit loss of precision. Received value "
f"{repr(value)}, parsed as {dt}."
)
return dt.date()
else:
raise ValueError(
f"Expected a date object or its ISO-8601 'YYYY-MM-DD' string representation. "
f"Got {value} of type {type(value)} instead."
)
def _serialize_datetime(value: Any) -> str:
"""Serialize a DateTime object to its proper ISO-8601 representation."""
if isinstance(value, datetime) and value.tzinfo is None:
return value.isoformat()
else:
raise ValueError(
f"Expected a timezone-naive datetime object. Got {value} of type {type(value)} instead."
)
def _parse_datetime_value(value: Any) -> datetime:
"""Deserialize a DateTime object from a date/datetime or a ISO-8601 string representation."""
if isinstance(value, datetime) and value.tzinfo is None:
return value
elif isinstance(value, str):
dt = parse_datetime(value) # This will raise ValueError in case of bad ISO 8601 formatting.
if dt.tzinfo is not None:
raise ValueError(
f"Expected a timezone-naive datetime value, but got a timezone-aware datetime "
f"string. This is not supported, since discarding the timezone component would "
f"result in an implicit loss of precision. Received value {repr(value)}, "
f"parsed as {dt}."
)
return dt
elif type(value) == date:
# The date type is a supertype of datetime. We check for exact type equality
# rather than using isinstance(), to avoid having this branch get hit
# by timezone-aware datetimes (i.e. ones that fail the value.tzinfo is None check above).
#
# This is a widening conversion (there's no loss of precision) so we allow it to be implicit
# since use ciso8601 parsing logic for parsing datetimes, and ciso8601 successfully parses
# datetimes that only have data down to day precision.
return datetime(value.year, value.month, value.day)
else:
raise ValueError(
f"Expected a timezone-naive datetime or an ISO-8601 string representation parseable "
f"by the ciso8601 library. Got {value} of type {type(value)} instead."
)
GraphQLDate = GraphQLScalarType(
    name="Date",
    description=(
        # NOTE: the description is assembled via implicit string concatenation,
        # so each fragment boundary needs an explicit space. The first fragment
        # previously ended at "objects." with no trailing space, producing
        # "objects.Values" in the rendered description.
        "The `Date` scalar type represents day-accuracy date objects. "
        "Values are serialized following the ISO-8601 datetime format specification, "
        'for example "2017-03-21". Serialization and parsing support is guaranteed for the format '
        "described here, with the year, month and day fields included and separated by dashes as "
        "in the example. Implementations are allowed to support additional serialization formats, "
        "if they so choose."
        # GraphQL compiler's implementation of GraphQL-based querying uses the ciso8601 library
        # for date and datetime parsing, so it additionally supports the subset of the ISO-8601
        # standard supported by that library.
    ),
    serialize=_serialize_date,
    parse_value=_parse_date_value,
    parse_literal=_unused_function,  # We don't yet support parsing Date objects in literals.
)
GraphQLDateTime = GraphQLScalarType(
name="DateTime",
description=(
"The `DateTime` scalar type represents timezone-naive timestamps with up to microsecond "
"accuracy. Values are serialized following the ISO-8601 datetime format specification, "
'for example "2017-03-21T12:34:56.012345" or "2017-03-21T12:34:56". Serialization and '
"parsing support is guaranteed for the format described here, with all fields down to "
"and including seconds required to be included, and fractional seconds optional, as in "
"the example. Implementations are allowed to support additional serialization formats, "
"if they so choose."
# GraphQL compiler's implementation of GraphQL-based querying uses the ciso8601 library
# for date and datetime parsing, so it additionally supports the subset of the ISO-8601
# standard supported by that library.
),
serialize=_serialize_datetime,
parse_value=_parse_datetime_value,
parse_literal=_unused_function, # We don't yet support parsing DateTime objects in literals.
)
GraphQLDecimal = GraphQLScalarType(
name="Decimal",
description=(
"The `Decimal` scalar type is an arbitrary-precision decimal number object "
"useful for representing values that should never be rounded, such as "
"currency amounts. Values are allowed to be transported as either a native Decimal "
"type, if the underlying transport allows that, or serialized as strings in "
'decimal format, without thousands separators and using a "." as the '
'decimal separator: for example, "12345678.012345".'
),
serialize=str,
parse_value=Decimal,
parse_literal=_unused_function, # We don't yet support parsing Decimal objects in literals.
)
CUSTOM_SCALAR_TYPES: FrozenSet[GraphQLScalarType] = frozenset(
{
GraphQLDecimal,
GraphQLDate,
GraphQLDateTime,
}
)
SUPPORTED_SCALAR_TYPES: FrozenSet[GraphQLScalarType] = frozenset(
{
GraphQLInt,
GraphQLString,
GraphQLBoolean,
GraphQLFloat,
GraphQLID,
}
).union(CUSTOM_SCALAR_TYPES)
DIRECTIVES = (
FilterDirective,
TagDirective,
OutputDirective,
OutputSourceDirective,
OptionalDirective,
RecurseDirective,
FoldDirective,
MacroEdgeDirective,
StitchDirective,
)
TYPENAME_META_FIELD_NAME = "__typename" # This meta field is built-in.
COUNT_META_FIELD_NAME = "_x_count"
ALL_SUPPORTED_META_FIELDS = frozenset(
(
TYPENAME_META_FIELD_NAME,
COUNT_META_FIELD_NAME,
)
)
EXTENDED_META_FIELD_DEFINITIONS = OrderedDict(((COUNT_META_FIELD_NAME, GraphQLField(GraphQLInt)),))
def is_meta_field(field_name: str) -> bool:
    """Return True if the field is considered a meta field in the schema, and False otherwise."""
    # Meta fields are "__typename" and "_x_count"; see ALL_SUPPORTED_META_FIELDS above.
    return field_name in ALL_SUPPORTED_META_FIELDS
def insert_meta_fields_into_existing_schema(graphql_schema: GraphQLSchema) -> None:
    """Add compiler-specific meta-fields into all interfaces and types of the specified schema.

    It is preferable to use the EXTENDED_META_FIELD_DEFINITIONS constant above to directly inject
    the meta-fields during the initial process of building the schema, as that approach
    is more robust. This function does its best to not mutate unexpected definitions, but
    may break unexpectedly as the GraphQL standard is extended and the underlying
    GraphQL library is updated.

    Use this function at your own risk. Don't say you haven't been warned.

    Properties added include:
        - "_x_count", which allows filtering folds based on the number of elements they capture.

    Args:
        graphql_schema: GraphQLSchema object describing the schema that is going to be used with
                        the compiler. N.B.: MUTATED IN-PLACE in this method.

    Raises:
        AssertionError: if the schema has no query type, or if a type already defines a field
                        with the same name as one of the meta-fields being added.
    """
    query_type = graphql_schema.query_type
    if query_type is None:
        raise AssertionError(
            f"Unexpectedly received a GraphQL schema with no defined query type. It is impossible "
            f"to insert GraphQL compiler's meta fields into such a schema, since the schema cannot "
            f"be used for querying with GraphQL compiler. Received schema type map: "
            f"{graphql_schema.type_map}"
        )
    root_type_name = query_type.name
    # The module already targets Python 3 (f-strings above), so the plain
    # dict ".items()" iteration replaces the legacy six.iteritems() calls.
    for type_name, type_obj in graphql_schema.type_map.items():
        if type_name.startswith("__") or type_name == root_type_name:
            # Ignore the types that are built into GraphQL itself, as well as the root query type.
            continue
        if not isinstance(type_obj, (GraphQLObjectType, GraphQLInterfaceType)):
            # Ignore definitions that are not interfaces or types.
            continue
        for meta_field_name, meta_field in EXTENDED_META_FIELD_DEFINITIONS.items():
            if meta_field_name in type_obj.fields:
                raise AssertionError(
                    "Unexpectedly encountered an existing field named {} while "
                    "attempting to add a meta-field of the same name. Make sure "
                    "you are not attempting to add meta-fields twice.".format(meta_field_name)
                )
            type_obj.fields[meta_field_name] = meta_field
def check_for_nondefault_directive_names(directives: Iterable[GraphQLDirective]) -> None:
    """Check if any user-created directives are present."""
    # Compiler-supported directives plus the default directives GraphQL defines
    # are the only ones we accept.
    known_directive_names = {directive.name for directive in DIRECTIVES}
    known_directive_names.update(directive.name for directive in specified_directives)
    unexpected_names = {directive.name for directive in directives} - known_directive_names
    if unexpected_names:
        raise AssertionError("Unsupported directives found: {}".format(unexpected_names))
def compute_schema_fingerprint(schema: GraphQLSchema) -> str:
    """Compute a fingerprint compactly representing the data in the given schema.

    The fingerprint is not sensitive to things like type or field order. This function is
    guaranteed to be robust enough that if two GraphQLSchema have the same fingerprint, then
    they also represent the same schema.

    Because of internal implementation changes, different versions of this library *may* produce
    different fingerprints for the same schema. Cross-version fingerprint stability is an
    *explicit non-goal*: changing a schema's fingerprint is not considered a breaking change.

    The fingerprint is computed on a best-effort basis and has some known issues at the moment.
    Please see the discussion in the pull request below for more details.
    https://github.com/kensho-technologies/graphql-compiler/pull/737

    Args:
        schema: the schema for which to compute a fingerprint.

    Returns:
        a hexadecimal string fingerprint compactly representing the data in the schema.
    """
    # Sorting first makes the printed text -- and therefore the hash -- order-insensitive.
    canonical_text = print_schema(lexicographic_sort_schema(schema))
    return sha256(canonical_text.encode("utf-8")).hexdigest()
|
|
# -*- coding: utf-8 -*-
__version__ = "1.1.0"
import os
class NetflixReporter(object):
    """Creates Netflix compliant reports of Episodes/Sequences"""
    # Header row of the ";"-separated CSV that report() writes.
    csv_header = (
        "Episode;Shot Name;VFX Shot Status;Shot Methodologies;Scope of Work;Vendors;"
        "VFX Turnover to Vendor Date;VFX Next Studio Review Date;VFX Final Delivery Date;VFX Final Version;"
        "Shot Cost;Currency;Report Date;Report Note"
    )
    # Per-shot row template; the placeholders are filled in report().
    csv_format = (
        "{episode_number};{shot.code};{status};{shot_methodologies};{scope_of_work};{vendors};"
        "{vfx_turnover_to_vendor_date};{vfx_next_studio_review_date};{vfx_final_delivery_date};"
        "{vfx_final_version};{shot_cost};{currency};{report_date};{report_note}"
    )
    # Maps internal Stalker status codes to the Netflix status vocabulary.
    status_lut = {
        "WFD": "Waiting To Start",
        "RTS": "Waiting To Start",
        "WIP": "In Progress",
        "PREV": "Pending Netflix Review",
        "HREV": "In Progress",
        "DREV": "In Progress",
        "CMPL": "Approved",
        "STOP": "Omit",
        "OH": "On Hold",
    }
    # strftime format used for every date field in the report.
    date_time_format = "%Y-%m-%d"
    def __init__(self):
        # Stateless: all reporting works off the arguments passed to report().
        pass
def map_status_code(self, status_code):
"""Maps the given status to Netflix statuses
:param status_code: A Stalker Status instance or Status.name or Status.code
:return:
"""
from stalker import Status
if isinstance(status_code, Status):
status_code = status_code.code
return self.status_lut[status_code]
@classmethod
def get_shot_status(cls, shot):
"""calculates the shot status
:param shot:
:return:
"""
# The problem here is that, because of the Plate task, all the shots seem to be WIP at the beginning
# also we can not use the Comp or Cleanup task status, because they may be WFD but the Camera or Lighting
# task could be CMPL
# so we need to calculate the shot status from scratch
from stalker.db.session import DBSession
with DBSession.no_autoflush:
wfd = shot.status_list["WFD"]
rts = shot.status_list["RTS"]
wip = shot.status_list["WIP"]
cmpl = shot.status_list["CMPL"]
parent_statuses_lut = [wfd, rts, wip, cmpl]
# +--------- WFD
# |+-------- RTS
# ||+------- WIP
# |||+------ PREV
# ||||+----- HREV
# |||||+---- DREV
# ||||||+--- OH
# |||||||+-- STOP
# ||||||||+- CMPL
# |||||||||
# 0b000000000
binary_status_codes = {
"WFD": 256,
"RTS": 128,
"WIP": 64,
"PREV": 32,
"HREV": 16,
"DREV": 8,
"OH": 4,
"STOP": 2,
"CMPL": 1,
}
# use Python
# logger.debug('using pure Python to query children statuses')
binary_status = 0
children_statuses = []
for child in shot.children:
# skip Plate task
if child.type and child.type.name == "Plate":
continue
# consider every status only once
if child.status not in children_statuses:
children_statuses.append(child.status)
binary_status += binary_status_codes[child.status.code]
#
# I know that the following list seems cryptic but the it shows the
# final status index in parent_statuses_lut[] list.
#
# So by using the cumulative statuses of children we got an index from
# the following table, and use the found element (integer) as the index
# for the parent_statuses_lut[] list, and we find the desired status
#
# We are doing it in this way for a couple of reasons:
#
# 1. We shouldn't hold the statuses in the following list,
# 2. Using a dictionary is another alternative, where the keys are
# the cumulative binary status codes, but at the end the result of
# this cumulative thing is a number between 0-511 so no need to
# use a dictionary with integer keys
#
children_to_parent_statuses_lut = [
0,
3,
3,
3,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0,
2,
0,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
status_index = children_to_parent_statuses_lut[binary_status]
status = parent_statuses_lut[status_index]
# logger.debug('binary statuses value : %s' % binary_status)
# logger.debug('setting status to : %s' % status.code)
return status
@classmethod
def generate_shot_methodologies(cls, shot):
"""Generates Netflix complaint shot methodologies field value by looking at the task related information
:param shot: A Stalker Shot instance.
:return: Returns a list of string containing the shot methodologies
"""
shot_methodologies = []
child_tasks = shot.children
child_task_type_names = []
for child_task in child_tasks:
if child_task.type:
child_task_type_names.append(child_task.type.name.lower())
# Comp -> "2D Comp"
if "comp" in child_task_type_names:
shot_methodologies.append("2D Comp")
# Cleanup -> "2D Paint"
if "cleanup" in child_task_type_names:
shot_methodologies.append("2D Paint")
# Lighting.dependency to Layout -> "3D Set Extension"
if "lighting" in child_task_type_names:
from stalker import Task
# get the lighting task first
lighting_tasks = filter(lambda x: x.name == "Lighting", child_tasks)
for lighting_task in lighting_tasks:
assert isinstance(lighting_task, Task)
deps = lighting_task.depends
for dep in deps:
assert isinstance(dep, Task)
if dep.type:
dep_type_name = dep.type.name
if dep_type_name == "Layout":
shot_methodologies.append("3D Set Extension")
break
# Animation -> "3D Animated Object"
# Animation.dependency -> Character.Rig -> "3D Character"
if "animation" in child_task_type_names:
shot_methodologies.append("3D Animated Object")
# also check if there are any dependencies to a character rig
animation_tasks = filter(
lambda x: x.type and x.type.name.lower().startswith("anim"), child_tasks
)
for animation_task in animation_tasks:
for dep in animation_task.depends:
if dep.type and dep.type.name.lower() == "rig":
# check if this is a rig for a character
parent_asset = dep.parent
if (
parent_asset.type
and parent_asset.type.name.lower().starts_widht("char")
):
shot_methodologies.append("3D Character")
break
# MattePaint -> "2D DMP"
if "matte" in child_task_type_names or "mattepaint" in child_task_type_names:
shot_methodologies.append("2D DMP")
# FX -> "Dynamic Sim"
if "fx" in child_task_type_names:
shot_methodologies.append("Dynamic Sim")
return shot_methodologies
    def report(
        self,
        seq,
        csv_output_path,
        vfx_turnover_to_vendor_date,
        vfx_next_studio_review_date,
        vendors,
        hourly_cost,
        currency,
    ):
        """Generate the Netflix CSV report for the given episode/sequence.

        Walks every "Scene" task under ``seq``, then every Shot under each
        scene's "Shots" container task, and writes one ``csv_format`` row per
        shot (plus the ``csv_header`` line) to ``csv_output_path``.

        :param seq: The Sequence (episode) to generate the report of.
        :param csv_output_path: The output path of the resultant CSV file.
        :param vfx_turnover_to_vendor_date: The date that the picture lock has been received.
        :param vfx_next_studio_review_date: The date that Netflix can review the CMPL shots.
        :param list vendors: A list of vendor names.
        :param hourly_cost: The hourly cost for the budget field.
        :param currency: The currency of the hourly cost.
        """
        import datetime
        import pytz
        from stalker import Task, Shot, Version, Type
        from stalker.db.session import DBSession
        from anima.utils import do_db_setup
        do_db_setup()
        utc_now = datetime.datetime.now(pytz.utc)
        ep = seq
        data = [self.csv_header]
        # All Scene-typed tasks directly under the episode, in name order.
        scene_type = Type.query.filter(Type.name == "Scene").first()
        scenes = (
            Task.query.filter(Task.type == scene_type)
            .filter(Task.parent == ep)
            .order_by(Task.name)
            .all()
        )
        for scene in scenes:
            # Shots live under a container task literally named "Shots".
            shots_task = (
                Task.query.filter(Task.name == "Shots")
                .filter(Task.parent == scene)
                .first()
            )
            for shot in (
                Shot.query.filter(Shot.parent == shots_task).order_by(Shot.code).all()
            ):
                # Prefer the "Comp" child task; fall back to "Cleanup" if missing.
                comp_or_cleanup_task = (
                    Task.query.filter(Task.parent == shot)
                    .filter(Task.name == "Comp")
                    .first()
                )
                if not comp_or_cleanup_task:
                    comp_or_cleanup_task = (
                        Task.query.filter(Task.parent == shot)
                        .filter(Task.name == "Cleanup")
                        .first()
                    )
                if not comp_or_cleanup_task:
                    # no comp or cleanup task, something wrong
                    print("No Comp or CleanUp task in: %s" % shot.name)
                    continue
                # Only completed tasks get a final-version string (e.g. "v003").
                vfx_final_version = ""
                if (
                    comp_or_cleanup_task
                    and comp_or_cleanup_task.status
                    and comp_or_cleanup_task.status.code == "CMPL"
                ):
                    version = Version.query.filter(
                        Version.task == comp_or_cleanup_task
                    ).first()
                    if version:
                        latest_version = version.latest_version
                        vfx_final_version = "v%03i" % latest_version.version_number
                # {shot_cost};{currency};{report_date};{report_note}
                # Shot cost = sum of child bid timings (in seconds) * hourly rate.
                total_bid_seconds = 0
                for child in shot.children:
                    if child.schedule_model == "duration":
                        # skip ``duration`` based tasks
                        continue
                    total_bid_seconds += shot.to_seconds(
                        child.bid_timing, child.bid_unit, child.schedule_model
                    )
                shot.update_schedule_info()
                rendered_data = self.csv_format.format(
                    # assumes episode names carry a fixed 2-char prefix -- TODO confirm
                    episode_number=ep.name[2:],
                    episode=ep,
                    scene=scene,
                    # assumes scene names carry a fixed 4-char prefix -- TODO confirm
                    scene_number=scene.name[4:],
                    shot=shot,
                    task=comp_or_cleanup_task,
                    # PREV maps directly to the Netflix status; everything else
                    # goes through the recalculated overall shot status.
                    status=self.map_status_code(
                        self.get_shot_status(shot).code
                        if comp_or_cleanup_task.status.code != "PREV"
                        else "PREV"
                    ),
                    shot_methodologies=", ".join(
                        self.generate_shot_methodologies(shot)
                    ),
                    scope_of_work=shot.description,
                    vendors=", ".join(vendors),
                    vfx_turnover_to_vendor_date=vfx_turnover_to_vendor_date.strftime(
                        self.date_time_format
                    ),
                    # The review date only applies to shots Netflix can review.
                    vfx_next_studio_review_date=vfx_next_studio_review_date.strftime(
                        self.date_time_format
                    )
                    if comp_or_cleanup_task.status.code in ["CMPL", "PREV"]
                    else "",
                    vfx_final_delivery_date=shot.end.strftime(self.date_time_format),
                    vfx_final_version=vfx_final_version,
                    shot_cost="%0.2f" % (total_bid_seconds / 3600 * hourly_cost),
                    currency=currency,
                    report_date=utc_now.strftime(self.date_time_format),
                    report_note="",
                )
                data.append(rendered_data)
        # we may have updated the schedule info
        DBSession.commit()
        # make dirs
        os.makedirs(os.path.dirname(csv_output_path), exist_ok=True)
        with open(csv_output_path, "w") as f:
            f.write("\n".join(data))
class NetflixReview(object):
    """Generates data for Netflix review process.

    Generally it is used to generate CSVs suitable to upload to Netflix review process.
    """

    def __init__(self):
        # Full paths of the rendered outputs that become rows in the CSV.
        self.outputs = []

    @classmethod
    def get_version_from_output(cls, output_path):
        """Returns the related Stalker Version from the given output path

        :param str output_path: Path of a rendered output file; its base name
            (without extension) is matched against Version.full_path.
        :return: The first matching Version (ordered by full_path), or None.
        """
        import os
        basename = os.path.basename(output_path)
        version_name = basename.split(".")[0]
        from anima.utils import do_db_setup
        do_db_setup()
        from stalker import Version
        return (
            Version.query.filter(Version.full_path.contains(version_name))
            .order_by(Version.full_path)
            .first()
        )

    def generate_csv(
        self, output_path="", vendor="", submission_note="", submitting_for=""
    ):
        """outputs a CSV suitable to upload to Netflix review process

        :param output_path: The output path.
        :param vendor: The vendor name.
        :param submission_note: The submission note.
        :param submitting_for: "FINAL" or "WIP". When empty, the value is derived
            per shot: "FINAL" if the shot status is CMPL else "WIP". If this
            argument is not empty then the value will be used directly.
        """
        from anima.utils import do_db_setup
        do_db_setup()
        import os
        data = [
            "Version Name,Link,Scope Of Work,Vendor,Submitting For,Submission Note",
        ]
        for output in self.outputs:
            version = self.get_version_from_output(output)
            if not version:
                # No Version matches this output; skip the row entirely.
                continue
            shot = version.task.parent
            # BUGFIX: derive the value per shot into a local instead of
            # overwriting the ``submitting_for`` argument on the first
            # iteration, which made every later shot reuse the first
            # shot's value.
            if submitting_for == "":
                # NOTE(review): "CMPL" looks like a Stalker status *code*;
                # comparing it against status.name here is suspicious --
                # confirm against the status definitions.
                row_submitting_for = "FINAL" if shot.status.name == "CMPL" else "WIP"
            else:
                row_submitting_for = submitting_for
            version_data = [
                os.path.basename(output),  # Version Name
                shot.name,  # Link (the related shot)
                '"%s"' % shot.description,  # Scope Of Work (quoted; may contain commas)
                vendor,  # Vendor
                row_submitting_for,  # Submitting For
                submission_note,  # Submission Note
            ]
            data.append(",".join(version_data))
        with open(output_path, "w+") as f:
            f.write("\n".join(data))
|
|
""" Non-stationary correlation models for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
import numpy as np
from scipy.special import gamma, kv
from scipy.stats import expon, norm
from sklearn.cluster import KMeans
from .stationary import l1_cross_differences
MACHINE_EPSILON = np.finfo(np.double).eps
class LocalLengthScalesCorrelation(object):
    """ Non-stationary correlation model based on local smoothness estimates.

    This non-stationary correlation model learns internally point estimates of
    local smoothness using a second-level Gaussian Process. For this, it
    selects a subset of the training data and learns length-scales at this
    specific points. These length scales are generalized using the second-level
    Gaussian Process. Furthermore, global (isotropic or anisotropic) length
    scales are learned for both the top-level GP and the length-scale GP.

    The correlation model is based on the family of (stationary) Matern
    kernels. The parameter nu of the Matern kernels (governing the smoothness
    of the GP prior) can either be set or learned jointly with the remaining
    parameters.

    Parameters
    ----------
    isotropic : bool, default=True
        Whether the global length-scales of the top-level GP are isotropic or
        anisotropic

    nu: float, default=1.5
        The parameter nu of the Matern kernels (governing the smoothness
        of the GP prior). If None, nu is learned along with the other
        hyperparameters.

    l_isotropic : bool, default=True
        Whether the global length-scales of the length-scale GP are isotropic
        or anisotropic

    l_samples: int, default=10
        How many datapoints from the training data are selected as support
        points for learning the length-scale GP

    prior_b: float, default=inf
        The variance of the log-normal prior distribution on the length scales.
        If set to infinity, the distribution is assumed to be uniform.

    X_ : array_like, shape=(l_samples, n_features), optional
        Pre-selected support points for the length-scale GP. If None, support
        points are selected in ``fit`` via k-means (or random sampling).

    .. seealso::

    "Nonstationary Gaussian Process Regression using Point Estimates of Local
    Smoothness", Christian Plagemann, Kristian Kersting, and Wolfram Burgard,
    ECML 2008
    """

    def __init__(self, isotropic=True, nu=1.5, l_isotropic=True, l_samples=10,
                 prior_b=np.inf, X_=None):
        self.isotropic = isotropic
        self.nu = nu
        self.l_isotropic = l_isotropic
        self.l_samples = l_samples
        self.prior_b = prior_b
        self.X_ = X_
        if self.X_ is not None:
            # Externally supplied support points must match l_samples.
            assert self.X_.shape[0] == self.l_samples

    def fit(self, X, nugget=10. * MACHINE_EPSILON):
        """ Fits the correlation model for training data X

        Parameters
        ----------
        X : array_like, shape=(n_samples, n_features)
            An array of training datapoints at which observations were made,
            i.e., where the outputs y are known

        nugget : double or ndarray, optional
            The Gaussian Process nugget parameter
            The nugget is added to the diagonal of the assumed training
            covariance; in this way it acts as a Tikhonov regularization in
            the problem. In the special case of the squared exponential
            correlation function, the nugget mathematically represents the
            variance of the input values. Default assumes a nugget close to
            machine precision for the sake of robustness
            (nugget = 10. * MACHINE_EPSILON).
        """
        self.X = X
        self.nugget = nugget
        self.n_samples = X.shape[0]
        self.n_dims = X.shape[1]

        # Determine how many entries in theta belong to the different
        # categories (used later for parsing theta)
        self.theta_gp_size = 1 if self.isotropic else self.n_dims
        self.theta_l_size = 1 if self.l_isotropic else self.n_dims
        self.nu_size = 1 if not self.nu else 0
        self.theta_size = self.theta_gp_size + self.theta_l_size \
            + self.l_samples + self.nu_size

        # Calculate array with shape (n_eval, n_features) giving the
        # componentwise distances between locations x and x' at which the
        # correlation model should be evaluated.
        self.D, self.ij = l1_cross_differences(self.X)

        if self.X_ is None:
            # Select subset of X for which length scales are optimized.
            # Generalization of length scales to other datapoints is achieved
            # by means of a separate Gaussian Process (gp_l)
            if self.X.shape[0] >= self.l_samples:
                kmeans = KMeans(n_clusters=self.l_samples)
                self.X_ = kmeans.fit(self.X).cluster_centers_
            else:  # Fallback to select centers using sampling with replacement
                self.X_ = self.X[np.random.choice(np.arange(self.X.shape[0]),
                                                  self.l_samples)]
        return self

    def __call__(self, theta, X=None):
        """ Compute correlation for given correlation parameter(s) theta.

        Parameters
        ----------
        theta : array_like
            An array giving the autocorrelation parameter(s).

        X : array_like, shape(n_eval, n_features)
            An array containing the n_eval query points whose correlation with
            the training datapoints shall be computed. If None, autocorrelation
            of the training datapoints is computed instead.

        Returns
        -------
        r : array_like, shape=(n_eval, n_samples) if X != None
                              (n_samples, n_samples) if X == None
            An array containing the values of the correlation model.
        """
        # Parse theta into its components
        theta_gp, theta_l, length_scales, nu = self._parse_theta(theta)

        # Train length-scale Gaussian Process
        from skgp.estimators import GaussianProcess
        self.gp_l = \
            GaussianProcess(corr="matern_1.5",
                            theta0=theta_l).fit(self.X_,
                                                np.log10(length_scales))
        l_train = 10**self.gp_l.predict(self.X)

        # Prepare distances and length scale information for any pair of
        # datapoints, whose correlation shall be computed
        if X is not None:
            # Get pairwise componentwise L1-differences to the input training
            # set
            d = X[:, np.newaxis, :] - self.X[np.newaxis, :, :]
            d = d.reshape((-1, X.shape[1]))
            # Predict length scales for query datapoints
            l_query = 10**self.gp_l.predict(X)
            l = np.transpose([np.tile(l_train, len(l_query)),
                              np.repeat(l_query, len(l_train))])
        else:
            # No external datapoints given; auto-correlation of training set
            # is used instead
            d = self.D
            l = l_train[self.ij]

        # Compute general Matern kernel
        if d.ndim > 1 and theta_gp.size == d.ndim:
            activation = np.sum(theta_gp.reshape(1, d.ndim) * d ** 2, axis=1)
        else:
            activation = theta_gp[0] * np.sum(d ** 2, axis=1)
        tmp = 0.5*(l**2).sum(1)
        tmp2 = np.maximum(2*np.sqrt(nu * activation / tmp), 1e-5)
        r = np.sqrt(l[:, 0]) * np.sqrt(l[:, 1]) / (gamma(nu) * 2**(nu - 1))
        r /= np.sqrt(tmp)
        r *= tmp2**nu * kv(nu, tmp2)

        # Convert correlations to 2d matrix
        if X is not None:
            return r.reshape(-1, self.n_samples)
        else:  # exploit symmetry of auto-correlation
            R = np.eye(self.n_samples) * (1. + self.nugget)
            R[self.ij[:, 0], self.ij[:, 1]] = r
            R[self.ij[:, 1], self.ij[:, 0]] = r
            return R

    def log_prior(self, theta):
        """ Returns the (log) prior probability of parameters theta.

        The prior is assumed to be uniform over the parameter space except for
        the length-scales dimensions. These are assumed to be log-normal
        distributed with mean 0 and variance self.prior_b. If
        self.prior_b is np.inf, the log length-scales are assumed to be
        uniformly distributed as well.

        NOTE: The returned quantity is an improper prior as its integral over
              the parameter space is not equal to 1.

        Parameters
        ----------
        theta : array_like
            An array giving the autocorrelation parameter(s).

        Returns
        -------
        log_p : float
            The (log) prior probability of parameters theta. An improper
            probability.
        """
        if self.prior_b == np.inf:
            return 0.0
        _, _, length_scales, _ = self._parse_theta(theta)
        squared_dist = (np.log10(length_scales)**2).sum()
        return -squared_dist / self.prior_b

    def _parse_theta(self, theta):
        """ Parse parameter vector theta into its components.

        Parameters
        ----------
        theta : array_like
            An array containing all hyperparameters.

        Returns
        -------
        theta_gp : array_like
            An array containing the hyperparameters of the main GP.
        theta_l : array_like
            An array containing the hyperparameters of the length-scale GP.
        length_scales : array_like
            An array containing the length-scales for the length-scale GP.
        nu : float
            The parameter nu controlling the smoothness of the Matern kernel.
        """
        # BUGFIX: use the builtin ``float`` instead of the ``np.float`` alias,
        # which was deprecated in NumPy 1.20 and removed in NumPy 1.24.
        theta = np.asarray(theta, dtype=float)

        assert (theta.size == self.theta_size), \
            "theta does not have the expected size (expected: %d, " \
            "actual size %d). Expected: %d entries for main GP, " \
            "%d entries for length-scale GP, %d entries containing the "\
            "length scales, and %d entries for nu." \
            % (self.theta_size, theta.size, self.theta_gp_size,
               self.theta_l_size, self.l_samples, self.nu_size)

        # Split theta in its components
        theta_gp = theta[:self.theta_gp_size]
        theta_l = \
            theta[self.theta_gp_size:][:self.theta_l_size]
        length_scales = \
            theta[self.theta_gp_size+self.theta_l_size:][:self.l_samples]
        nu = self.nu if self.nu else theta[-1]

        return theta_gp, theta_l, length_scales, nu

    @classmethod
    def create(cls, dims, isotropic=True, theta0=1e-1,
               thetaL=None, thetaU=None,
               l_isotropic=True, theta_l_0=1e-1,
               theta_l_L=None, theta_l_U=None,
               l_samples=20, l_0=1.0, l_L=None, l_U=None,
               nu_0=1.5, nu_L=None, nu_U=None, prior_b=np.inf,
               *args, **kwargs):
        """ Factory method for creating non-stationary correlation models.

        ..note:: In addition to returning an instance of
                 NonStationaryCorrelation, the specification of the search
                 space for the hyperparameters theta of the Gaussian process
                 is returned. This includes the start point of the search
                 (theta0) as well as the lower and upper boundaries thetaL and
                 thetaU for the values of theta.
        """
        theta0 = [theta0] * (1 if isotropic else dims)
        thetaL = [thetaL] * (1 if isotropic else dims)
        thetaU = [thetaU] * (1 if isotropic else dims)

        theta0 += [theta_l_0] * (1 if l_isotropic else dims)
        thetaL += [theta_l_L] * (1 if l_isotropic else dims)
        thetaU += [theta_l_U] * (1 if l_isotropic else dims)

        theta0 += [l_0] * l_samples
        thetaL += [l_L] * l_samples
        thetaU += [l_U] * l_samples

        # nu is learned only when an explicit search range was given.
        if nu_L is not None:
            theta0 += [nu_0]
            thetaL += [nu_L]
            thetaU += [nu_U]

        corr = cls(isotropic=isotropic, nu=None if nu_L else nu_0,
                   l_isotropic=l_isotropic, l_samples=l_samples,
                   prior_b=prior_b)
        return corr, theta0, thetaL, thetaU
class ManifoldCorrelation(object):
    """ Non-stationary correlation model based on manifold learning.

    This non-stationary correlation model consists internally of two parts:
    a mapping from the actual data space onto a manifold and a stationary
    correlation model on this manifold. The mapping is realized by a neural
    network whose architecture can be specified externally. The parameters of
    this network are learned along with the length scales of the Gaussian
    process, typically such that the marginal likelihood or the posterior
    probability of the GP are maximized. Any common stationary correlation
    model can then be used on top of this manifold.

    Parameters
    ----------
    base_corr: string or instance of StationaryCorrelation, optional
        The top-level, stationary autocorrelation function returning
        the autocorrelation between two points M(x) and M(x') on the manifold.
        Built-in correlation models are::

            'absolute_exponential', 'squared_exponential',
            'generalized_exponential', 'cubic', 'linear'

    architecture: sequence of tuples
        Defines the structure of the internal neural network architecture
        mapping the data from the original data space onto a manifold. Note
        that different data dimensions can be processed by different networks
        and that the networks can have different number of layers. For
        instance, the architecture ((1, 2),(2, 4, 5)) would map a 3-dimensional
        input space onto a 7-dimensional manifold. For this, the first input
        dimension would be processed by the network (1, 2) with 1 inputs,
        2 outputs, and no hidden layer yielding the first two manifold
        dimensions. The other two input dimensions would be processed by a
        network (2, 4, 5) with 2 inputs, 4 hidden units, and 5 outputs
        yielding the remaining five manifold dimensions.

    theta_nn_size: int
        The total number of neural-network parameters (weights and biases)
        implied by ``architecture``; see ``determine_network_layout``.

    isotropic : bool, default=True
        Whether the global length-scales of the GP are isotropic or anisotropic

    prior_nn_scale: float, default=inf
        The standard deviation of the Gaussian prior distribution on the
        network parameters. If set to infinity, the distribution is assumed to
        be uniform.

    prior_gp_scale: float, default=inf
        The scale parameter of the exponential prior distribution on the
        length-scales. If set to infinity, the distribution is assumed to be
        uniform.

    transfer_fct: str, default="tanh"
        The transfer function used in the hidden and output units. Supported
        are "tanh" and the rectified linear unit ("relu"). Defaults is "tanh"

    .. seealso::

    "Manifold Gaussian Process for Regression",
    Roberto Calandra, Jan Peters, Carl Edward Rasmussen, Marc Peter Deisenroth,
    http://arxiv.org/abs/1402.5876
    """

    def __init__(self, base_corr, architecture, theta_nn_size,
                 isotropic=True, prior_nn_scale=np.inf, prior_gp_scale=np.inf,
                 transfer_fct="tanh"):
        self.architecture = architecture
        self.n_inputs = sum([subnet[0] for subnet in architecture])
        self.n_outputs = sum([subnet[-1] for subnet in architecture])
        self.theta_nn_size = theta_nn_size
        self.isotropic = isotropic
        self.prior_nn_scale = prior_nn_scale
        self.prior_gp_scale = prior_gp_scale
        self.transfer_fct = transfer_fct

        self.theta_gp_size = 1 if self.isotropic else self.n_outputs
        self.theta_size = self.theta_gp_size + self.theta_nn_size

        self.base_corr = base_corr
        if not callable(self.base_corr):
            from skgp.correlation_models import CORRELATION_TYPES
            if self.base_corr in CORRELATION_TYPES:
                self.base_corr = CORRELATION_TYPES[self.base_corr]()
            else:
                # BUGFIX: the original referenced the nonexistent attribute
                # ``self._correlation_types``, which raised AttributeError
                # instead of the intended ValueError.
                raise ValueError("base_corr should be one of %s or callable, "
                                 "%s was given."
                                 % (CORRELATION_TYPES.keys(),
                                    self.base_corr))

    def fit(self, X, nugget=10. * MACHINE_EPSILON):
        """ Fits the correlation model for training data X

        Parameters
        ----------
        X : array_like, shape=(n_samples, n_features)
            An array of training datapoints at which observations were made,
            i.e., where the outputs y are known

        nugget : double or ndarray, optional
            The Gaussian Process nugget parameter
            The nugget is added to the diagonal of the assumed training
            covariance; in this way it acts as a Tikhonov regularization in
            the problem. In the special case of the squared exponential
            correlation function, the nugget mathematically represents the
            variance of the input values. Default assumes a nugget close to
            machine precision for the sake of robustness
            (nugget = 10. * MACHINE_EPSILON).
        """
        assert X.shape[1] == self.n_inputs
        self.X = X
        self.nugget = nugget
        self.n_samples = X.shape[0]
        return self

    def __call__(self, theta, X=None):
        """ Compute correlation for given correlation parameter(s) theta.

        Parameters
        ----------
        theta : array_like
            An array giving the autocorrelation parameter(s).

        X : array_like, shape(n_eval, n_features)
            An array containing the n_eval query points whose correlation with
            the training datapoints shall be computed. If None, autocorrelation
            of the training datapoints is computed instead.

        Returns
        -------
        r : array_like, shape=(n_eval, n_samples) if X != None
                              (n_samples, n_samples) if X == None
            An array containing the values of the correlation model.
        """
        # Parse theta into its components
        theta_gp, theta_nn = self._parse_theta(theta)

        # Map training data onto manifold.  Perturb exact zeros slightly so
        # the network does not start in a degenerate configuration.
        if np.any(theta_nn == 0):
            theta_nn[np.where(theta_nn == 0)] \
                += np.random.random((theta_nn == 0).sum()) * 2e-5 - 1e-5
        X_train_nn = self._project_manifold(self.X, theta_nn)

        self.base_corr.fit(X_train_nn, nugget=self.nugget)

        if X is not None:
            X_test_nn = self._project_manifold(X, theta_nn)
            return self.base_corr(theta_gp, X_test_nn)
        else:
            return self.base_corr(theta_gp)

    def _project_manifold(self, X, theta_nn):
        """ Map datapoints X onto the manifold using network weights theta_nn. """
        # Lazily fetch transfer function (to keep object picklable)
        if self.transfer_fct == "tanh":
            transfer_fct = np.tanh
        elif self.transfer_fct == "sin":
            transfer_fct = np.sin
        elif self.transfer_fct == "relu":
            transfer_fct = lambda x: np.maximum(0, x)
        elif hasattr(self.transfer_fct, "__call__"):
            transfer_fct = self.transfer_fct
        else:
            # BUGFIX: an unknown transfer function name previously left
            # ``transfer_fct`` unbound, raising UnboundLocalError below.
            raise ValueError("Unsupported transfer function: %r"
                             % (self.transfer_fct,))

        y = []
        for subnet in self.architecture:
            y.append(X[:, :subnet[0]])
            for layer in range(len(subnet) - 1):
                W = theta_nn[:subnet[layer]*subnet[layer+1]]
                W = W.reshape((subnet[layer], subnet[layer+1]))
                b = theta_nn[subnet[layer]*subnet[layer+1]:
                             (subnet[layer]+1)*subnet[layer+1]]
                a = y[-1].dot(W) + b
                y[-1] = transfer_fct(a)

                # chop off weights of this layer
                theta_nn = theta_nn[(subnet[layer]+1)*subnet[layer+1]:]

            X = X[:, subnet[0]:]  # chop off used input dimensions

        return np.hstack(y)

    def log_prior(self, theta):
        """ Returns the (log) prior probability of parameters theta.

        The prior on the network weights is Gaussian with scale
        ``prior_nn_scale``; the prior on the GP length-scales is exponential
        with scale ``prior_gp_scale``. An infinite scale means a uniform
        (non-informative) prior for that component.

        NOTE: The returned quantity is an improper prior as its integral over
              the parameter space is not equal to 1.

        Parameters
        ----------
        theta : array_like
            An array giving the autocorrelation parameter(s).

        Returns
        -------
        log_p : float
            The (log) prior probability of parameters theta. An improper
            probability.
        """
        theta_gp, theta_nn = self._parse_theta(theta)
        if self.prior_nn_scale == np.inf:
            prior_nn = 0.0
        else:
            prior_nn = norm.logpdf(theta_nn, scale=self.prior_nn_scale).sum()

        if self.prior_gp_scale == np.inf:
            prior_gp = 0.0
        else:
            prior_gp = expon.logpdf(theta_gp, scale=self.prior_gp_scale).sum()

        return prior_nn + prior_gp

    def _parse_theta(self, theta):
        """ Parse parameter vector theta into its components.

        Parameters
        ----------
        theta : array_like
            An array containing all hyperparameters.

        Returns
        -------
        theta_gp : array_like
            An array containing the hyperparameters of the main GP.
        theta_nn : array_like
            An array containing the hyperparameters of the manifold model
            (log10-transformed).
        """
        # BUGFIX: use the builtin ``float`` instead of the ``np.float`` alias,
        # which was deprecated in NumPy 1.20 and removed in NumPy 1.24.
        theta = np.asarray(theta, dtype=float)

        assert (theta.size == self.theta_size), \
            "theta does not have the expected size (expected: %d, " \
            "actual size %d). Expected: %d entries for main GP and " \
            "%d entries for length-scale GP." \
            % (self.theta_size, theta.size, self.theta_gp_size,
               self.theta_nn_size)

        # Split theta in its components
        theta_gp = theta[:self.theta_gp_size]
        theta_nn = theta[self.theta_gp_size:]

        return theta_gp, np.log10(theta_nn)

    @classmethod
    def create(cls, base_corr, architecture, isotropic=True,
               theta0=1e-1, thetaL=None, thetaU=None,
               max_nn_weight=5, prior_nn_scale=np.inf, prior_gp_scale=np.inf,
               transfer_fct="tanh", *args, **kwargs):
        """ Factory method for creating manifold correlation models.

        ..note:: In addition to returning an instance of
                 ManifoldCorrelation, the specification of the search
                 space for the hyperparameters theta of the Gaussian process
                 is returned. This includes the start point of the search
                 (theta0) as well as the lower and upper boundaries thetaL and
                 thetaU for the values of theta.
        """
        assert "prior_b" not in kwargs

        n_outputs, theta_nn_size = cls.determine_network_layout(architecture)

        theta0 = [theta0] * (1 if isotropic else n_outputs)
        thetaL = [thetaL] * (1 if isotropic else n_outputs)
        thetaU = [thetaU] * (1 if isotropic else n_outputs)

        theta0 += \
            list(10**np.random.uniform(-max_nn_weight, max_nn_weight,
                                       theta_nn_size))
        thetaL += [10**-max_nn_weight] * theta_nn_size
        thetaU += [10**max_nn_weight] * theta_nn_size

        corr = cls(base_corr, architecture, theta_nn_size=theta_nn_size,
                   isotropic=isotropic, prior_nn_scale=prior_nn_scale,
                   prior_gp_scale=prior_gp_scale, transfer_fct=transfer_fct)

        return corr, theta0, thetaL, thetaU

    @staticmethod
    def determine_network_layout(architecture):
        """ Determine number of outputs and params of given architecture."""
        n_outputs = 0
        n_params = 0
        for subnet in architecture:
            for layer in range(len(subnet) - 1):
                # Each layer has one weight per (in, out) pair plus one bias
                # per output unit.
                n_params += (subnet[layer] + 1) * subnet[layer+1]
            n_outputs += subnet[-1]
        return n_outputs, n_params
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse
from neutron.tests.tempest.common import service_client
class IdentityV3ClientJSON(service_client.ServiceClient):
api_version = "v3"
def create_user(self, user_name, password=None, project_id=None,
                email=None, domain_id='default', **kwargs):
    """Create a user and return the new user resource."""
    user = {
        'project_id': project_id,
        'default_project_id': kwargs.get('default_project_id'),
        'description': kwargs.get('description', None),
        'domain_id': domain_id,
        'email': email,
        'enabled': kwargs.get('enabled', True),
        'name': user_name,
        'password': password
    }
    payload = json.dumps({'user': user})
    resp, raw = self.post('users', payload)
    self.expected_success(201, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['user'])
def update_user(self, user_id, name, **kwargs):
    """Update a user; unspecified fields keep their current values."""
    current = self.get_user(user_id)
    if 'default_project_id' in current.keys():
        default_project_id = kwargs.get('default_project_id',
                                        current['default_project_id'])
    else:
        default_project_id = kwargs.get('default_project_id')
    user = {
        'name': name,
        'email': kwargs.get('email', current['email']),
        'enabled': kwargs.get('enabled', current['enabled']),
        'project_id': kwargs.get('project_id', current['project_id']),
        'default_project_id': default_project_id,
        'id': user_id,
        'domain_id': kwargs.get('domain_id', current['domain_id']),
        'description': kwargs.get('description', current['description'])
    }
    payload = json.dumps({'user': user})
    resp, raw = self.patch('users/%s' % user_id, payload)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['user'])
def update_user_password(self, user_id, password, original_password):
    """Change a user's password; the current password is required."""
    payload = json.dumps({'user': {
        'password': password,
        'original_password': original_password
    }})
    resp, _ = self.post('users/%s/password' % user_id, payload)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp)
def list_user_projects(self, user_id):
    """List the projects on which the user has role assignments."""
    resp, raw = self.get('users/%s/projects' % user_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['projects'])
def get_users(self, params=None):
    """List users, optionally filtered by query parameters."""
    url = 'users'
    if params:
        url = '%s?%s' % (url, parse.urlencode(params))
    resp, raw = self.get(url)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['users'])
def get_user(self, user_id):
    """Retrieve a single user by id."""
    resp, raw = self.get("users/%s" % user_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['user'])
def delete_user(self, user_id):
    """Delete a user by id."""
    resp, raw = self.delete("users/%s" % user_id)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def create_project(self, name, **kwargs):
    """Create a project and return the new project resource."""
    project = {
        'description': kwargs.get('description', None),
        'domain_id': kwargs.get('domain_id', 'default'),
        'enabled': kwargs.get('enabled', True),
        'name': name
    }
    payload = json.dumps({'project': project})
    resp, raw = self.post('projects', payload)
    self.expected_success(201, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['project'])
def list_projects(self, params=None):
    """List projects, optionally filtered by query parameters."""
    url = "projects"
    if params:
        url = '%s?%s' % (url, parse.urlencode(params))
    resp, raw = self.get(url)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['projects'])
def update_project(self, project_id, **kwargs):
    """Update a project; unspecified fields keep their current values."""
    current = self.get_project(project_id)
    project = {
        'id': project_id,
        'name': kwargs.get('name', current['name']),
        'description': kwargs.get('description', current['description']),
        'enabled': kwargs.get('enabled', current['enabled']),
        'domain_id': kwargs.get('domain_id', current['domain_id']),
    }
    payload = json.dumps({'project': project})
    resp, raw = self.patch('projects/%s' % project_id, payload)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['project'])
def get_project(self, project_id):
    """Retrieve a single project by id."""
    resp, raw = self.get("projects/%s" % project_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['project'])
def delete_project(self, project_id):
    """Delete a project by id."""
    resp, raw = self.delete('projects/%s' % str(project_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def create_role(self, name):
    """Create a role with the given name."""
    payload = json.dumps({'role': {'name': name}})
    resp, raw = self.post('roles', payload)
    self.expected_success(201, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['role'])
def get_role(self, role_id):
    """Retrieve a single role by id."""
    resp, raw = self.get('roles/%s' % str(role_id))
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['role'])
def list_roles(self):
    """List all roles."""
    resp, raw = self.get("roles")
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['roles'])
def update_role(self, name, role_id):
    """Update an existing role's name."""
    payload = json.dumps({'role': {'name': name}})
    resp, raw = self.patch('roles/%s' % str(role_id), payload)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['role'])
def delete_role(self, role_id):
    """Delete a role by id."""
    resp, raw = self.delete('roles/%s' % str(role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def assign_user_role(self, project_id, user_id, role_id):
    """Grant a role to a user on a project."""
    url = 'projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)
    resp, raw = self.put(url, None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def create_domain(self, name, **kwargs):
    """Create a domain and return the new domain resource."""
    domain = {
        'description': kwargs.get('description', None),
        'enabled': kwargs.get('enabled', True),
        'name': name
    }
    payload = json.dumps({'domain': domain})
    resp, raw = self.post('domains', payload)
    self.expected_success(201, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['domain'])
def delete_domain(self, domain_id):
    """Delete a domain by id."""
    resp, raw = self.delete('domains/%s' % str(domain_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def list_domains(self):
    """List all domains."""
    resp, raw = self.get('domains')
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['domains'])
def update_domain(self, domain_id, **kwargs):
    """Update a domain; unspecified fields keep their current values."""
    current = self.get_domain(domain_id)
    domain = {
        'description': kwargs.get('description', current['description']),
        'enabled': kwargs.get('enabled', current['enabled']),
        'name': kwargs.get('name', current['name'])
    }
    payload = json.dumps({'domain': domain})
    resp, raw = self.patch('domains/%s' % domain_id, payload)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['domain'])
def get_domain(self, domain_id):
    """Retrieve a single domain by id."""
    resp, raw = self.get('domains/%s' % domain_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['domain'])
def get_token(self, resp_token):
    """Validate a token and return its details."""
    resp, raw = self.get("auth/tokens",
                         headers={'X-Subject-Token': resp_token})
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['token'])
def delete_token(self, resp_token):
    """Revoke (delete) a token."""
    resp, raw = self.delete("auth/tokens",
                            headers={'X-Subject-Token': resp_token})
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def create_group(self, name, **kwargs):
    """Create a group and return the new group resource."""
    group = {
        'description': kwargs.get('description', None),
        'domain_id': kwargs.get('domain_id', 'default'),
        'project_id': kwargs.get('project_id', None),
        'name': name
    }
    payload = json.dumps({'group': group})
    resp, raw = self.post('groups', payload)
    self.expected_success(201, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['group'])
def get_group(self, group_id):
    """Retrieve a single group by id."""
    resp, raw = self.get('groups/%s' % group_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['group'])
def list_groups(self):
    """List all groups."""
    resp, raw = self.get('groups')
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['groups'])
def update_group(self, group_id, **kwargs):
    """Update a group; unspecified fields keep their current values."""
    current = self.get_group(group_id)
    group = {
        'name': kwargs.get('name', current['name']),
        'description': kwargs.get('description', current['description'])
    }
    payload = json.dumps({'group': group})
    resp, raw = self.patch('groups/%s' % group_id, payload)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBody(resp, parsed['group'])
def delete_group(self, group_id):
    """Delete a group by id."""
    resp, raw = self.delete('groups/%s' % str(group_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def add_group_user(self, group_id, user_id):
    """Add a user to a group."""
    url = 'groups/%s/users/%s' % (group_id, user_id)
    resp, raw = self.put(url, None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def list_group_users(self, group_id):
    """List the users that belong to a group."""
    resp, raw = self.get('groups/%s/users' % group_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['users'])
def list_user_groups(self, user_id):
    """List the groups a user belongs to."""
    resp, raw = self.get('users/%s/groups' % user_id)
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['groups'])
def delete_group_user(self, group_id, user_id):
    """Remove a user from a group."""
    resp, raw = self.delete('groups/%s/users/%s' % (group_id, user_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def assign_user_role_on_project(self, project_id, user_id, role_id):
    """Grant a role to a user on a project."""
    url = 'projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)
    resp, raw = self.put(url, None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def assign_user_role_on_domain(self, domain_id, user_id, role_id):
    """Grant a role to a user on a domain."""
    url = 'domains/%s/users/%s/roles/%s' % (domain_id, user_id, role_id)
    resp, raw = self.put(url, None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def list_user_roles_on_project(self, project_id, user_id):
    """List the roles a user holds on a project."""
    resp, raw = self.get('projects/%s/users/%s/roles' %
                         (project_id, user_id))
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['roles'])
def list_user_roles_on_domain(self, domain_id, user_id):
    """List the roles a user holds on a domain."""
    resp, raw = self.get('domains/%s/users/%s/roles' %
                         (domain_id, user_id))
    self.expected_success(200, resp.status)
    parsed = json.loads(raw)
    return service_client.ResponseBodyList(resp, parsed['roles'])
def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
    """Revoke a role from a user on a project."""
    resp, raw = self.delete('projects/%s/users/%s/roles/%s' %
                            (project_id, user_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
    """Remove a role from a user on a domain."""
    url = 'domains/%s/users/%s/roles/%s' % (domain_id, user_id, role_id)
    resp, body = self.delete(url)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def assign_group_role_on_project(self, project_id, group_id, role_id):
    """Add a role to a group on a project."""
    resp, body = self.put('projects/%s/groups/%s/roles/%s' %
                          (project_id, group_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def assign_group_role_on_domain(self, domain_id, group_id, role_id):
    """Add a role to a group on a domain."""
    resp, body = self.put('domains/%s/groups/%s/roles/%s' %
                          (domain_id, group_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def list_group_roles_on_project(self, project_id, group_id):
    """List roles of a group on a project."""
    resp, body = self.get('projects/%s/groups/%s/roles' %
                          (project_id, group_id))
    self.expected_success(200, resp.status)
    body = json.loads(body)
    return service_client.ResponseBodyList(resp, body['roles'])
def list_group_roles_on_domain(self, domain_id, group_id):
    """List roles of a group on a domain."""
    resp, body = self.get('domains/%s/groups/%s/roles' %
                          (domain_id, group_id))
    self.expected_success(200, resp.status)
    body = json.loads(body)
    return service_client.ResponseBodyList(resp, body['roles'])
def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
    """Delete role of a group on a project."""
    resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
                             (project_id, group_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
    """Delete role of a group on a domain."""
    resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
                             (domain_id, group_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def create_trust(self, trustor_user_id, trustee_user_id, project_id,
                 role_names, impersonation, expires_at):
    """Create an OS-TRUST trust delegating roles between two users."""
    trust = {
        'trustor_user_id': trustor_user_id,
        'trustee_user_id': trustee_user_id,
        'project_id': project_id,
        'impersonation': impersonation,
        'roles': [{'name': name} for name in role_names],
        'expires_at': expires_at,
    }
    resp, body = self.post('OS-TRUST/trusts', json.dumps({'trust': trust}))
    self.expected_success(201, resp.status)
    return service_client.ResponseBody(resp, json.loads(body)['trust'])
def delete_trust(self, trust_id):
    """Delete the given trust."""
    resp, body = self.delete("OS-TRUST/trusts/%s" % trust_id)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def get_trusts(self, trustor_user_id=None, trustee_user_id=None):
    """GET trusts, optionally filtered by trustor or trustee user id.

    If both filters are supplied only the trustor filter is applied,
    matching the original precedence.
    """
    url = "OS-TRUST/trusts"
    if trustor_user_id:
        url += "?trustor_user_id=%s" % trustor_user_id
    elif trustee_user_id:
        url += "?trustee_user_id=%s" % trustee_user_id
    resp, body = self.get(url)
    self.expected_success(200, resp.status)
    trusts = json.loads(body)['trusts']
    return service_client.ResponseBodyList(resp, trusts)
def get_trust(self, trust_id):
    """GET a single trust by id."""
    resp, body = self.get("OS-TRUST/trusts/%s" % trust_id)
    self.expected_success(200, resp.status)
    trust = json.loads(body)['trust']
    return service_client.ResponseBody(resp, trust)
def get_trust_roles(self, trust_id):
    """GET all roles delegated by a trust."""
    resp, body = self.get("OS-TRUST/trusts/%s/roles" % trust_id)
    self.expected_success(200, resp.status)
    roles = json.loads(body)['roles']
    return service_client.ResponseBodyList(resp, roles)
def get_trust_role(self, trust_id, role_id):
    """GET one role delegated by a trust."""
    url = "OS-TRUST/trusts/%s/roles/%s" % (trust_id, role_id)
    resp, body = self.get(url)
    self.expected_success(200, resp.status)
    role = json.loads(body)['role']
    return service_client.ResponseBody(resp, role)
def check_trust_role(self, trust_id, role_id):
    """HEAD check that a role is delegated by a trust."""
    url = "OS-TRUST/trusts/%s/roles/%s" % (trust_id, role_id)
    resp, body = self.head(url)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, body)
|
|
# polling_location/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import PollingLocation, PollingLocationManager
from .controllers import filter_polling_locations_structured_json_for_local_duplicates, \
import_and_save_all_polling_locations_data, polling_locations_import_from_structured_json
from admin_tools.views import redirect_to_sign_in_page
from ballot.models import BallotReturnedListManager
from config.base import get_environment_variable
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.db.models import Q
from django.shortcuts import render
from exception.models import handle_record_found_more_than_one_exception
from voter.models import voter_has_authority
from wevote_functions.functions import convert_state_code_to_state_text, convert_to_float, convert_to_int, \
positive_value_exists, process_request_from_master
import wevote_functions.admin
from django.http import HttpResponse
import json
# Environment-driven configuration for syncing from the master server.
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
POLLING_LOCATIONS_SYNC_URL = get_environment_variable("POLLING_LOCATIONS_SYNC_URL")  # pollingLocationsSyncOut
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
logger = wevote_functions.admin.get_logger(__name__)
# These are states for which we have polling location data
# (commented entries are territories/aggregates with no data yet)
STATE_LIST_IMPORT = {
    'AK': 'Alaska',
    'AL': 'Alabama',
    'AR': 'Arkansas',
    # 'AS': 'American Samoa',
    'AZ': 'Arizona',
    'CA': 'California',
    'CO': 'Colorado',
    'CT': 'Connecticut',
    'DC': 'District of Columbia',
    'DE': 'Delaware',
    'FL': 'Florida',
    'GA': 'Georgia',
    # 'GU': 'Guam',
    'HI': 'Hawaii',
    'IA': 'Iowa',
    'ID': 'Idaho',
    'IL': 'Illinois',
    'IN': 'Indiana',
    'KS': 'Kansas',
    'KY': 'Kentucky',
    'LA': 'Louisiana',
    'MA': 'Massachusetts',
    'MD': 'Maryland',
    'ME': 'Maine',
    'MI': 'Michigan',
    'MN': 'Minnesota',
    'MO': 'Missouri',
    # 'MP': 'Northern Mariana Islands',
    'MS': 'Mississippi',
    'MT': 'Montana',
    # 'NA': 'National',
    'NC': 'North Carolina',
    'ND': 'North Dakota',
    'NE': 'Nebraska',
    'NH': 'New Hampshire',
    'NJ': 'New Jersey',
    'NM': 'New Mexico',
    'NV': 'Nevada',
    'NY': 'New York',
    'OH': 'Ohio',
    'OK': 'Oklahoma',
    'OR': 'Oregon',
    'PA': 'Pennsylvania',
    # 'PR': 'Puerto Rico',
    'RI': 'Rhode Island',
    'SC': 'South Carolina',
    'SD': 'South Dakota',
    'TN': 'Tennessee',
    'TX': 'Texas',
    'UT': 'Utah',
    'VA': 'Virginia',
    # 'VI': 'Virgin Islands',
    'VT': 'Vermont',
    'WA': 'Washington',
    'WI': 'Wisconsin',
    'WV': 'West Virginia',
    'WY': 'Wyoming'
}
# Module-level progress string; written by the import view and polled by the
# status view below (single-process assumption -- TODO confirm deployment model).
polling_locations_import_status_string = ""
# This page does not need to be protected.
def polling_locations_sync_out_view(request):  # pollingLocationsSyncOut
    """Serve the polling-location table as JSON so another We Vote server can
    sync from this one. Optional ``?state=`` narrows the export to one state.
    Any failure (or an empty result) yields the error payload below."""
    state = request.GET.get('state', '')
    try:
        location_query = PollingLocation.objects.using('readonly').all()
        if positive_value_exists(state):
            location_query = location_query.filter(state__iexact=state)
        location_values = location_query.values(
            'we_vote_id', 'city', 'directions_text', 'latitude', 'longitude',
            'line1', 'line2', 'location_name', 'polling_hours_text',
            'polling_location_id', 'state', 'use_for_bulk_retrieve',
            'polling_location_deleted', 'zip_long')
        if location_values:
            return HttpResponse(json.dumps(list(location_values)),
                                content_type='application/json')
    except Exception:
        # Best-effort endpoint: fall through to the error payload.
        pass
    json_data = {
        'success': False,
        'status': 'POLLING_LOCATION_LIST_MISSING'
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
@login_required
def polling_locations_import_from_master_server_view(request):
    """
    Pull polling locations from the master server configured in
    WeVoteServer/config/environment_variables.json, de-duplicate them against
    local data, and import the remainder. Admin-only.
    :param request: may carry 'google_civic_election_id' and 'state_code' GET params
    :return: redirect to the sync dashboard
    """
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'admin'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    # Guard against a server syncing from itself.
    if WE_VOTE_SERVER_ROOT_URL in POLLING_LOCATIONS_SYNC_URL:
        messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
                                                      "this is the Master We Vote Server.")
        return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
    # Progress string polled by polling_locations_import_from_master_server_status_view.
    global polling_locations_import_status_string
    status = ""
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    # results = polling_locations_import_from_master_server(request, state_code)
    import_results, structured_json = process_request_from_master(
        request, "Loading Polling Locations from We Vote Master servers",
        POLLING_LOCATIONS_SYNC_URL, {
            "key": WE_VOTE_API_KEY,  # This comes from an environment variable
            "state": state_code,
        }
    )
    duplicates_removed = 0
    json_retrieved = False
    saved = 0
    updated = 0
    not_processed = 0
    if import_results['success']:
        status += import_results['status']
        json_retrieved = True
        polling_locations_import_status_string = "Checking for duplicate Polling locations. "
        results = filter_polling_locations_structured_json_for_local_duplicates(structured_json)
        filtered_structured_json = results['structured_json']
        duplicates_removed = results['duplicates_removed']
        polling_locations_import_status_string = "Importing Polling locations."
        import_results = polling_locations_import_from_structured_json(filtered_structured_json)
        saved = import_results['saved']
        updated = import_results['updated']
        not_processed = import_results['not_processed']
    else:
        polling_locations_import_status_string = "Not able to retrieve filtered_structured_json from Master Server. "
        status += polling_locations_import_status_string + import_results['status']
    if not json_retrieved:
        messages.add_message(request, messages.ERROR, status)
    else:
        messages.add_message(request, messages.INFO, 'Polling Locations import completed. '
                                                     'Saved: {saved}, Updated: {updated}, '
                                                     'Duplicates skipped: '
                                                     '{duplicates_removed}, '
                                                     'Not processed: {not_processed}'
                                                     ''.format(saved=saved,
                                                               updated=updated,
                                                               duplicates_removed=duplicates_removed,
                                                               not_processed=not_processed))
    return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
                                str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def polling_locations_import_from_master_server_status_view(request):
    """Report the current import progress string as JSON (polled by the admin UI)."""
    global polling_locations_import_status_string
    if 'polling_locations_import_status_string' not in globals():
        polling_locations_import_status_string = ""
    payload = {'text': polling_locations_import_status_string}
    return HttpResponse(json.dumps(payload), content_type='application/json')
@login_required
def import_polling_locations_process_view(request):
    """
    Import the polling location data for one state from the VIP xml files
    (http://data.votinginfoproject.org). Admin-only; a state_code GET
    parameter is required.
    :param request:
    :return: redirect to the polling location list
    """
    authority_required = {'admin'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    state_code = request.GET.get('state_code', '')
    # state_code = 'mo' # State code for Missouri
    list_url = reverse('polling_location:polling_location_list',
                       args=()) + "?state_code={var}".format(var=state_code)
    if not positive_value_exists(state_code):
        messages.add_message(request, messages.INFO,
                             'State code required to run import_polling_locations_process.')
        return HttpResponseRedirect(list_url)
    results = import_and_save_all_polling_locations_data(state_code.lower())
    messages.add_message(request, messages.INFO,
                         'Polling locations retrieved from file. '
                         '({saved} added, {updated} updated, {not_processed} not_processed)'.format(
                             saved=results['saved'],
                             updated=results['updated'],
                             not_processed=results['not_processed'],))
    return HttpResponseRedirect(list_url)
@login_required
def polling_location_edit_process_view(request):
    """
    Process the new or edit polling_location forms: create or update the
    record, geocode it when lat/long is missing, then push the coordinates
    into any related ballot_returned rows.
    :param request: POSTed form fields (polling_location_id of 0/blank means "create new")
    :return: redirect to the summary page (if a we_vote_id exists) or the list page
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    status = ""
    google_civic_election_id = request.POST.get('google_civic_election_id', 0)
    state_code = request.POST.get('state_code', "")
    polling_location_id = convert_to_int(request.POST['polling_location_id'])
    location_name = request.POST.get('location_name', "")
    line1 = request.POST.get('line1', "")
    line2 = request.POST.get('line2', "")
    city = request.POST.get('city', "")
    zip_long_raw = request.POST.get('zip_long', "")
    zip_long = zip_long_raw.strip()
    latitude = convert_to_float(request.POST.get('latitude', 0))
    longitude = convert_to_float(request.POST.get('longitude', 0))
    # Checkbox fields: any non-empty POST value counts as "checked" (see
    # positive_value_exists below).
    use_for_bulk_retrieve = request.POST.get('use_for_bulk_retrieve', False)
    polling_location_deleted = request.POST.get('polling_location_deleted', False)
    # Check to see if this polling_location is already being used anywhere
    polling_location_on_stage_found = False
    polling_location_on_stage = PollingLocation()
    polling_location_manager = PollingLocationManager()
    polling_location_we_vote_id = ""
    try:
        polling_location_query = PollingLocation.objects.filter(id=polling_location_id)
        if len(polling_location_query):
            polling_location_on_stage = polling_location_query[0]
            polling_location_on_stage_found = True
    except Exception as e:
        # Lookup failure simply means we fall through to "create new".
        pass
    try:
        if not polling_location_on_stage_found:
            # Create new
            polling_location_on_stage = PollingLocation.objects.create(
                state=state_code,
                zip_long=zip_long,
            )
        # Apply the submitted fields to either the fetched or the new record.
        polling_location_on_stage.location_name = location_name
        polling_location_on_stage.state = state_code
        polling_location_on_stage.line1 = line1
        polling_location_on_stage.line2 = line2
        polling_location_on_stage.city = city
        polling_location_on_stage.zip_long = zip_long
        polling_location_on_stage.latitude = latitude
        polling_location_on_stage.longitude = longitude
        polling_location_on_stage.use_for_bulk_retrieve = positive_value_exists(use_for_bulk_retrieve)
        polling_location_on_stage.polling_location_deleted = positive_value_exists(polling_location_deleted)
        polling_location_on_stage.save()
        polling_location_id = polling_location_on_stage.id
        polling_location_we_vote_id = polling_location_on_stage.we_vote_id
        # Geocode when the form left zip or coordinates blank.
        if not zip_long or not latitude or not longitude:
            lat_long_results = polling_location_manager.populate_latitude_and_longitude_for_polling_location(
                polling_location_on_stage)
            status += lat_long_results['status']
            latitude = lat_long_results['latitude']
            longitude = lat_long_results['longitude']
        if polling_location_on_stage_found:
            # Update
            messages.add_message(request, messages.INFO, 'Polling location updated. ' + status)
        else:
            # Create new
            messages.add_message(request, messages.INFO, 'Polling location created. ' + status)
    except Exception as e:
        messages.add_message(request, messages.ERROR, 'Could not save polling_location. ' + status)
    # Now update ballot returned with lat/long
    try:
        if latitude and longitude:
            ballot_returned_list_manager = BallotReturnedListManager()
            results = ballot_returned_list_manager.retrieve_ballot_returned_list(
                google_civic_election_id, polling_location_we_vote_id)
            if results['ballot_returned_list_found']:
                ballot_returned_list = results['ballot_returned_list']
                for one_ballot_returned in ballot_returned_list:
                    one_ballot_returned.latitude = latitude
                    one_ballot_returned.longitude = longitude
                    one_ballot_returned.save()
    except Exception as e:
        messages.add_message(request, messages.ERROR, 'Could not update ballot_returned. ' + status)
    url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
                    "&state_code=" + str(state_code)
    if positive_value_exists(polling_location_we_vote_id):
        return HttpResponseRedirect(reverse('polling_location:polling_location_summary_by_we_vote_id',
                                            args=(polling_location_we_vote_id,)) + url_variables)
    else:
        return HttpResponseRedirect(reverse('polling_location:polling_location_list', args=()) + url_variables)
@login_required
def polling_location_edit_view(request, polling_location_local_id=0, polling_location_we_vote_id=""):
    """Render the new/edit form for a polling location, looked up either by
    local id or by we_vote_id; an empty form is shown when neither matches."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)
    state_code = request.GET.get('state_code', "")
    messages_on_stage = get_messages(request)
    polling_location_local_id = convert_to_int(polling_location_local_id)
    polling_location_on_stage = PollingLocation()
    polling_location_on_stage_found = False
    try:
        if positive_value_exists(polling_location_local_id):
            polling_location_on_stage = PollingLocation.objects.get(id=polling_location_local_id)
            polling_location_on_stage_found = True
        elif positive_value_exists(polling_location_we_vote_id):
            polling_location_on_stage = PollingLocation.objects.get(we_vote_id=polling_location_we_vote_id)
            polling_location_on_stage_found = True
    except PollingLocation.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except PollingLocation.DoesNotExist:
        # Not found is fine -- the template renders an empty "create" form.
        pass
    template_values = {
        'google_civic_election_id': google_civic_election_id,
        'messages_on_stage': messages_on_stage,
        'state_code': state_code,
    }
    if polling_location_on_stage_found:
        template_values['polling_location'] = polling_location_on_stage
        template_values['polling_location_id'] = polling_location_on_stage.id
    else:
        template_values['polling_location_id'] = 0
    return render(request, 'polling_location/polling_location_edit.html', template_values)
@login_required
def polling_location_list_view(request):
    """Admin list page for polling locations, with optional filters for
    state, bulk-retrieve flag, and a free-text search. Also reports how many
    matching locations are missing lat/long (when a state is selected)."""
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'partner_organization', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    limit = convert_to_int(request.GET.get('limit', 100))
    show_bulk_retrieve = request.GET.get('show_bulk_retrieve', 0)
    state_code = request.GET.get('state_code', '')
    polling_location_search = request.GET.get('polling_location_search', '')
    # Two parallel querysets: one only for the total count, one for the page
    # of rows actually rendered (sliced by `limit` below).
    polling_location_count_query = PollingLocation.objects.all()
    polling_location_count_query = polling_location_count_query.filter(polling_location_deleted=False)
    polling_location_without_latitude_count = 0
    polling_location_query = PollingLocation.objects.all()
    polling_location_query = polling_location_query.filter(polling_location_deleted=False)
    if positive_value_exists(show_bulk_retrieve):
        polling_location_count_query = polling_location_count_query.filter(use_for_bulk_retrieve=True)
        polling_location_query = polling_location_query.filter(use_for_bulk_retrieve=True)
    if positive_value_exists(state_code):
        polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
        polling_location_query = polling_location_query.filter(state__iexact=state_code)
        # Count locations in this state that still lack coordinates.
        polling_location_without_latitude_count_query = PollingLocation.objects.all()
        polling_location_without_latitude_count_query = \
            polling_location_without_latitude_count_query.filter(state__iexact=state_code)
        polling_location_without_latitude_count_query = \
            polling_location_without_latitude_count_query.filter(polling_location_deleted=False)
        if positive_value_exists(show_bulk_retrieve):
            polling_location_without_latitude_count_query = \
                polling_location_without_latitude_count_query.filter(use_for_bulk_retrieve=True)
        polling_location_without_latitude_count_query = \
            polling_location_without_latitude_count_query.filter(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
        polling_location_without_latitude_count = polling_location_without_latitude_count_query.count()
    if positive_value_exists(polling_location_search):
        # AND across search words; each word may match any of the fields (OR).
        search_words = polling_location_search.split()
        for one_word in search_words:
            filters = []
            new_filter = Q(we_vote_id__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(location_name__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(directions_text__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(city__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(zip_long__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(line1__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(line2__icontains=one_word)
            filters.append(new_filter)
            # Add the first query
            if len(filters):
                final_filters = filters.pop()
                # ...and "OR" the remaining items in the list
                for item in filters:
                    final_filters |= item
                polling_location_count_query = polling_location_count_query.filter(final_filters)
                polling_location_query = polling_location_query.filter(final_filters)
    polling_location_count = polling_location_count_query.count()
    info_message = '{polling_location_count} polling locations found.'.format(
        polling_location_count=polling_location_count)
    if positive_value_exists(polling_location_without_latitude_count):
        info_message += ' {polling_location_without_latitude_count} polling locations without lat/long.'.format(
            polling_location_without_latitude_count=polling_location_without_latitude_count)
    messages.add_message(request, messages.INFO, info_message)
    polling_location_list = polling_location_query.order_by('location_name')[:limit]
    state_list = STATE_LIST_IMPORT
    sorted_state_list = sorted(state_list.items())
    messages_on_stage = get_messages(request)
    template_values = {
        'messages_on_stage': messages_on_stage,
        'google_civic_election_id': google_civic_election_id,
        'polling_location_list': polling_location_list,
        'polling_location_count': polling_location_count,
        'polling_location_search': polling_location_search,
        'show_bulk_retrieve': show_bulk_retrieve,
        'state_code': state_code,
        'state_name': convert_state_code_to_state_text(state_code),
        'state_list': sorted_state_list,
    }
    return render(request, 'polling_location/polling_location_list.html', template_values)
@login_required
def polling_locations_add_latitude_and_longitude_view(request):
    """
    Find polling location entries that don't have latitude/longitude (up to a limit), and update them
    via the geocoder. Pass refresh_all to re-geocode entries that already have coordinates.
    :param request: requires 'state_code'; optional 'limit', 'refresh_all', 'google_civic_election_id'
    :return: redirect to the polling location list (or summary page, though
             polling_location_we_vote_id is never set on this path)
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    status = ""
    # Bug fix: GET parameters arrive as strings, and a string `limit` made the
    # queryset slice below raise TypeError whenever ?limit= was supplied.
    limit = convert_to_int(request.GET.get('limit', 1000))
    state_code = request.GET.get('state_code', "")
    refresh_all = request.GET.get('refresh_all', "")
    google_civic_election_id = request.GET.get('google_civic_election_id', "")
    if not positive_value_exists(state_code):
        messages.add_message(request, messages.ERROR, 'State code required.')
        return HttpResponseRedirect(reverse('polling_location:polling_location_list', args=()) +
                                    "?google_civic_election_id=" + str(google_civic_election_id) +
                                    "&state_code=" + str(state_code))
    polling_location_manager = PollingLocationManager()
    polling_location_we_vote_id = ""
    polling_location_list = []
    polling_locations_saved = 0
    polling_locations_not_saved = 0
    try:
        # Find all polling locations with an empty latitude (with limit)
        polling_location_query = PollingLocation.objects.all()
        if positive_value_exists(refresh_all):
            # Do not restrict to entries without lat/long
            pass
        else:
            polling_location_query = polling_location_query.filter(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
        polling_location_query = polling_location_query.filter(state__iexact=state_code)
        polling_location_query = polling_location_query.order_by('location_name')[:limit]
        polling_location_list = list(polling_location_query)
    except Exception as e:
        messages.add_message(request, messages.ERROR, 'No polling locations found that need lat/long: ' + str(e))
    # Geocode each location individually; one failure must not stop the batch.
    for polling_location_on_stage in polling_location_list:
        try:
            lat_long_results = polling_location_manager.populate_latitude_and_longitude_for_polling_location(
                polling_location_on_stage)
            status += lat_long_results['status']
            if lat_long_results['success']:
                polling_locations_saved += 1
            else:
                polling_locations_not_saved += 1
        except Exception as e:
            polling_locations_not_saved += 1
    messages.add_message(request, messages.INFO, 'Polling locations saved: ' + str(polling_locations_saved) +
                                                 ", not saved: " + str(polling_locations_not_saved))
    url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
                    "&state_code=" + str(state_code)
    if positive_value_exists(polling_location_we_vote_id):
        return HttpResponseRedirect(reverse('polling_location:polling_location_summary_by_we_vote_id',
                                            args=(polling_location_we_vote_id,)) + url_variables)
    else:
        return HttpResponseRedirect(reverse('polling_location:polling_location_list', args=()) + url_variables)
@login_required
def polling_location_summary_view(request, polling_location_local_id):
    """Display the read-only summary page for one polling location (by local id)."""
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'partner_organization', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    messages_on_stage = get_messages(request)
    polling_location_local_id = convert_to_int(polling_location_local_id)
    polling_location_on_stage = PollingLocation()
    try:
        polling_location_on_stage = PollingLocation.objects.get(id=polling_location_local_id)
    except PollingLocation.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except PollingLocation.DoesNotExist:
        # No matching record -- the template renders an empty placeholder.
        pass
    template_values = {
        'google_civic_election_id': google_civic_election_id,
        'messages_on_stage': messages_on_stage,
        'polling_location': polling_location_on_stage,
    }
    return render(request, 'polling_location/polling_location_summary.html', template_values)
@login_required
def polling_location_summary_by_we_vote_id_view(request, polling_location_we_vote_id):
    """Display the read-only summary page for one polling location (by we_vote_id)."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    messages_on_stage = get_messages(request)
    polling_location_on_stage = PollingLocation()
    try:
        polling_location_on_stage = PollingLocation.objects.get(we_vote_id=polling_location_we_vote_id)
    except PollingLocation.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except PollingLocation.DoesNotExist:
        # No matching record -- the template renders an empty placeholder.
        pass
    template_values = {
        'google_civic_election_id': google_civic_election_id,
        'messages_on_stage': messages_on_stage,
        'polling_location': polling_location_on_stage,
    }
    return render(request, 'polling_location/polling_location_summary.html', template_values)
|
|
"""
To run this test, type this in command line <kolibri manage test -- kolibri.content>
"""
import datetime
import os
import shutil
import tempfile
from django.test import TestCase
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import connections
from django.test.utils import override_settings
from kolibri.content import models as content
from django.conf import settings
from ..constants import content_kinds
from ..content_db_router import set_active_content_database, using_content_database
from ..errors import ContentModelUsedOutsideDBContext
from rest_framework.test import APITestCase
from kolibri.auth.models import DeviceOwner, Facility, FacilityUser
from kolibri.logger.models import ContentSummaryLog
# Throwaway directories so the tests never touch real content storage/databases
# (wired in via override_settings on each TestCase below).
CONTENT_STORAGE_DIR_TEMP = tempfile.mkdtemp()
CONTENT_DATABASE_DIR_TEMP = tempfile.mkdtemp()
@override_settings(
CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
CONTENT_DATABASE_DIR=CONTENT_DATABASE_DIR_TEMP,
)
class ContentNodeTestCase(TestCase):
"""
Testcase for content metadata methods
"""
fixtures = ['content_test.json']
multi_db = True
the_channel_id = 'content_test'
connections.databases[the_channel_id] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
def setUp(self):
# create DeviceOwner to pass the setup_wizard middleware check
DeviceOwner.objects.create(username='test-device-owner', password=123)
# set the active content database for the duration of the test
set_active_content_database(self.the_channel_id)
# Create a temporary directory
self.test_dir = tempfile.mkdtemp()
# Create files in the temporary directory
self.temp_f_1 = open(os.path.join(self.test_dir, 'test_1.pdf'), 'wb')
self.temp_f_2 = open(os.path.join(self.test_dir, 'test_2.mp4'), 'wb')
# Write something to it
self.temp_f_1.write(('The owls are not what they seem').encode('utf-8'))
self.temp_f_2.write(('The owl are not what they seem').encode('utf-8'))
# Reopen the file and check if what we read back is the same
self.temp_f_1 = open(os.path.join(self.test_dir, 'test_1.pdf'))
self.temp_f_2 = open(os.path.join(self.test_dir, 'test_2.mp4'))
self.assertEqual(self.temp_f_1.read(), 'The owls are not what they seem')
self.assertEqual(self.temp_f_2.read(), 'The owl are not what they seem')
"""Tests for content API methods"""
def test_get_prerequisites_for(self):
"""
test the directional characteristic of prerequisite relationship
"""
c1 = content.ContentNode.objects.get(title="c1")
root = content.ContentNode.objects.get(title="root")
# if root is the prerequisite of c1
expected_output = content.ContentNode.objects.filter(title__in=["root"])
actual_output = content.ContentNode.objects.filter(prerequisite_for=c1)
self.assertEqual(set(expected_output), set(actual_output))
# then c1 should not be the prerequisite of root
unexpected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = content.ContentNode.objects.filter(prerequisite_for=root)
self.assertNotEqual(set(actual_output), set(unexpected_output))
def test_get_has_prerequisites(self):
"""
test the directional characteristic of prerequisite relationship
"""
c1 = content.ContentNode.objects.get(title="c1")
root = content.ContentNode.objects.get(title="root")
# if root is the prerequisite of c1
expected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = content.ContentNode.objects.filter(has_prerequisite=root)
self.assertEqual(set(expected_output), set(actual_output))
# then c1 should not be the prerequisite of root
unexpected_output = content.ContentNode.objects.filter(title__in=["root"])
actual_output = content.ContentNode.objects.filter(has_prerequisite=c1)
self.assertNotEqual(set(actual_output), set(unexpected_output))
def test_get_all_related(self):
"""
test the nondirectional characteristic of related relationship
"""
c1 = content.ContentNode.objects.get(title="c1")
c2 = content.ContentNode.objects.get(title="c2")
# if c1 is related to c2
expected_output = content.ContentNode.objects.filter(title__in=["c2"])
actual_output = content.ContentNode.objects.filter(related=c1)
self.assertEqual(set(expected_output), set(actual_output))
# then c2 should be related to c1
expected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = content.ContentNode.objects.filter(related=c2)
self.assertEqual(set(expected_output), set(actual_output))
def test_descendants_of_kind(self):
    """Filtering a node's descendants by kind returns only matching nodes."""
    root = content.ContentNode.objects.get(title="root")
    topic_descendants = root.get_descendants(include_self=False).filter(kind=content_kinds.TOPIC)
    # In the fixture, c2 is the only TOPIC-kind descendant of root.
    self.assertEqual(
        set(content.ContentNode.objects.filter(title__in=["c2"])),
        set(topic_descendants))
def test_get_top_level_topics(self):
    """Top-level topics are the TOPIC-kind children of the tree root."""
    root = content.ContentNode.objects.get(title="root")
    expected = content.ContentNode.objects.filter(parent=root, kind=content_kinds.TOPIC)
    top_level = (content.ContentNode.objects.get(parent__isnull=True)
                 .get_children()
                 .filter(kind=content_kinds.TOPIC))
    self.assertEqual(set(expected), set(top_level))
def test_all_str(self):
    """__str__ of File, ContentTag, Language and ChannelMetadata models."""
    cases = [
        # (model instance, expected str())
        (content.File.objects.get(id="725257a0570044acbd59f8cf6a68b2bf"), '.mp4'),
        (content.ContentTag.objects.get(tag_name="tag_2"), 'tag_2'),
        (content.Language.objects.get(lang_code="en"), 'en'),
        (content.ChannelMetadata.objects.get(name="testing"), 'testing'),
    ]
    for instance, expected in cases:
        self.assertEqual(str(instance), expected)
def tearDown(self):
    """Clean up files/folders created during the test.

    Deactivates the content database and removes the temporary content
    directories. Missing directories/attributes are tolerated, since a test
    that failed early may not have created them.
    """
    # Set the active content database to None now that the test is over.
    set_active_content_database(None)
    try:
        shutil.rmtree(settings.CONTENT_COPY_DIR)
        shutil.rmtree(self.test_dir)
    except (OSError, AttributeError):
        # OSError: directory already gone; AttributeError: self.test_dir was
        # never set. A bare `except:` previously hid *all* errors here.
        pass
    super(ContentNodeTestCase, self).tearDown()
@override_settings(
    CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
    CONTENT_DATABASE_DIR=CONTENT_DATABASE_DIR_TEMP,
)
class DatabaseRoutingTests(TestCase):
    """Tests that ORM queries are routed to the per-channel content database."""

    # Let this TestCase touch databases other than "default".
    multi_db = True
    the_channel_id = 'content_test'
    # Register an in-memory sqlite database under the channel id so that the
    # content router can resolve it. NOTE(review): this mutates the global
    # Django connection registry at class-definition time.
    connections.databases[the_channel_id] = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }

    def test_accessing_node_without_active_db_throws_exception(self):
        # With no active content database, ORM access must fail loudly.
        set_active_content_database(None)
        with self.assertRaises(ContentModelUsedOutsideDBContext):
            list(content.ContentNode.objects.all())

    def test_accessing_data_within_context_manager_works(self):
        # The context manager form activates the channel database.
        with using_content_database(self.the_channel_id):
            list(content.ContentNode.objects.all())

    def test_accessing_data_within_decorated_function_works(self):
        # The same object also works as a function decorator.
        @using_content_database(self.the_channel_id)
        def my_func():
            return list(content.ContentNode.objects.all())
        my_func()

    def test_accessing_nonexistent_db_raises_error(self):
        with self.assertRaises(KeyError):
            with using_content_database("nonexistent_db"):
                list(content.ContentNode.objects.all())

    def test_database_on_disk_works_too(self):
        the_other_channel_id = 'content_test_2'
        filename = os.path.join(settings.CONTENT_DATABASE_DIR, the_other_channel_id + '.sqlite3')
        connections.databases[the_other_channel_id] = {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': filename,
        }
        call_command('migrate', database=the_other_channel_id)
        # Deliberately drop the registration: the context manager below must
        # discover the migrated database file on disk by itself.
        del connections.databases[the_other_channel_id]
        with using_content_database(the_other_channel_id):
            list(content.ContentNode.objects.all())

    def test_empty_database_on_disk_throws_error(self):
        yet_another_channel_id = 'content_test_3'
        filename = os.path.join(settings.CONTENT_DATABASE_DIR, yet_another_channel_id + '.sqlite3')
        open(filename, 'a').close()  # touch the file to create an empty DB
        # An empty (unmigrated) database file must not be accepted.
        with self.assertRaises(KeyError):
            with using_content_database(yet_another_channel_id):
                list(content.ContentNode.objects.all())
        del connections.databases[yet_another_channel_id]
@override_settings(
    CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
    CONTENT_DATABASE_DIR=CONTENT_DATABASE_DIR_TEMP,
)
class ContentNodeAPITestCase(APITestCase):
    """
    Testcase for content API methods
    """
    # All expected counts/titles below come from the content_test.json fixture.
    fixtures = ['content_test.json']
    multi_db = True
    the_channel_id = '15137d33c49f489ebe08893bfa6b5414'
    # Register an in-memory sqlite database for this channel id so the content
    # router can resolve it (mutates the global connection registry).
    connections.databases[the_channel_id] = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        # set the active content database for the duration of the test
        set_active_content_database(self.the_channel_id)

    def _reverse_channel_url(self, pattern_name, extra_kwargs={}):
        """Helper method to reverse a URL using the current channel ID"""
        # NOTE(review): mutable default argument; safe only because the dict
        # is read, never mutated, in this method.
        kwargs = {"channel_id": self.the_channel_id}
        kwargs.update(extra_kwargs)
        return reverse(pattern_name, kwargs=kwargs)

    def test_prerequisite_for_filter(self):
        # "prerequisite_for" should return the prerequisites *of* the node.
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"prerequisite_for": c1_id})
        self.assertEqual(response.data[0]['title'], 'root')

    def test_has_prerequisite_filter(self):
        # "has_prerequisite" should return nodes that require the given node.
        root_id = content.ContentNode.objects.get(title="root").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"has_prerequisite": root_id})
        self.assertEqual(response.data[0]['title'], 'c1')

    def test_related_filter(self):
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"related": c1_id})
        self.assertEqual(response.data[0]['title'], 'c2')

    def test_contentnode_list(self):
        response = self.client.get(self._reverse_channel_url("contentnode-list"))
        self.assertEqual(len(response.data), 6)

    def test_contentnode_retrieve(self):
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': c1_id}))
        self.assertEqual(response.data['pk'], c1_id.__str__())

    def test_contentnode_field_filtering(self):
        # The "fields" query param restricts the serialized fields.
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': c1_id}), data={"fields": "title,description"})
        self.assertEqual(response.data['title'], "c1")
        self.assertEqual(response.data['description'], "balbla2")
        self.assertTrue("pk" not in response.data)

    def test_contentnode_recommendations(self):
        root_id = content.ContentNode.objects.get(title="root").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"recommendations_for": root_id})
        self.assertEqual(len(response.data), 4)

    def test_channelmetadata_list(self):
        # The channel endpoint serves from the ChannelMetadataCache table,
        # so the cache entry is created from the fixture's metadata first.
        data = content.ChannelMetadata.objects.values()[0]
        content.ChannelMetadataCache.objects.create(**data)
        response = self.client.get(reverse("channel-list", kwargs={}))
        self.assertEqual(response.data[0]['name'], 'testing')

    def test_channelmetadata_retrieve(self):
        data = content.ChannelMetadata.objects.values()[0]
        content.ChannelMetadataCache.objects.create(**data)
        response = self.client.get(reverse("channel-detail", kwargs={'pk': data["id"]}))
        self.assertEqual(response.data['name'], 'testing')

    def test_channelmetadata_recommendations(self):
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"recommendations": ""})
        self.assertEqual(len(response.data), 4)

    def test_file_list(self):
        response = self.client.get(self._reverse_channel_url("file-list"))
        self.assertEqual(len(response.data), 5)

    def test_file_retrieve(self):
        response = self.client.get(self._reverse_channel_url("file-detail", {'pk': "9f9438fe6b0d42dd8e913d7d04cfb2b1"}))
        self.assertEqual(response.data['preset'], 'high_res_video')

    def test_contentnode_progress(self):
        # set up data for testing progress_fraction field on content node endpoint
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="learner", facility=facility)
        user.set_password("pass")
        user.save()
        root = content.ContentNode.objects.get(title="root")
        c1 = content.ContentNode.objects.get(title="c1")
        c2 = content.ContentNode.objects.get(title="c2")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        c2c3 = content.ContentNode.objects.get(title="c2c3")
        # Record partial progress on two leaf nodes for this user.
        for node, progress in [(c2c1, 0.7), (c2c3, 0.5)]:
            ContentSummaryLog.objects.create(
                user=user,
                content_id=node.content_id,
                progress=progress,
                channel_id=self.the_channel_id,
                start_timestamp=datetime.datetime.now()
            )

        def assert_progress(node, progress):
            # Fetch the node detail and compare its aggregated progress.
            response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': node.id}))
            self.assertEqual(response.data["progress_fraction"], progress)

        # check that there is no progress when not logged in
        assert_progress(root, 0)
        assert_progress(c1, 0)
        assert_progress(c2, 0)
        assert_progress(c2c1, 0)
        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="learner", password="pass", facility=facility)
        # presumably: parents average their descendants' progress — root 0.3,
        # c2 0.4 follow from the fixture tree; TODO confirm aggregation rule.
        assert_progress(root, 0.3)
        assert_progress(c1, 0)
        assert_progress(c2, 0.4)
        assert_progress(c2c1, 0.7)

    def tearDown(self):
        """
        clean up files/folders created during the test
        """
        # set the active content database to None now that the test is over
        set_active_content_database(None)
        super(ContentNodeAPITestCase, self).tearDown()
|
|
# Copyright 2022 The CLU Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function for creating and logging TF/JAX variable overviews."""
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
import dataclasses
import flax
import numpy as np
import tensorflow as tf
# TODO(b/200953513): Migrate away from logging imports (on module level)
# to logging the actual usage. See b/200953513.
# A tf.Module or a plain list of tf.Variables.
ModuleOrVariables = Union[tf.Module, List[tf.Variable]]
# Accepted parameter containers: a tf.Module, a flat name->array dict, or a
# (possibly nested) mapping of mappings (e.g. a Flax params tree).
ParamsContainer = Union[tf.Module, Dict[str, np.ndarray],
                        Mapping[str, Mapping[str, Any]]]
@dataclasses.dataclass
class ParamRow:
    """One table row describing a variable: its name, shape and element count."""
    name: str
    # Shapes can have any rank, so this is a variable-length tuple. The
    # previous annotation `Tuple[int]` incorrectly declared a 1-tuple.
    shape: Tuple[int, ...]
    size: int


@dataclasses.dataclass
class ParamRowWithStats(ParamRow):
    """`ParamRow` extended with the variable's mean and standard deviation."""
    mean: float
    std: float
def flatten_dict(input_dict: Dict[str, Any],
                 *,
                 prefix: str = "",
                 delimiter: str = "/") -> Dict[str, Any]:
    """Recursively flattens a nested dictionary into a single-level dict.

    Nested keys are joined with `delimiter`; `prefix` (if given) is prepended
    to every key produced at this level.
    """
    flat = {}
    for key, value in input_dict.items():
        full_key = delimiter.join((prefix, key)) if prefix else key
        if not isinstance(value, (dict, flax.core.FrozenDict)):
            flat[full_key] = value
        else:
            # Recurse into nested mappings, carrying the accumulated prefix.
            flat.update(flatten_dict(value, prefix=full_key, delimiter=delimiter))
    return flat
def count_parameters(params: ParamsContainer) -> int:
    """Returns the total number of parameter elements in the container."""
    if isinstance(params, tf.Module):
        values = params.trainable_variables  # pytype: disable=attribute-error
    else:
        values = flatten_dict(params).values()
    return sum(np.prod(v.shape) for v in values)
def get_params(module: tf.Module) -> Tuple[List[str], List[np.ndarray]]:
    """Returns names and values of a module's trainable variables, sorted by name."""
    assert isinstance(module, tf.Module), module
    ordered = sorted(module.trainable_variables, key=lambda var: var.name)
    names = [var.name for var in ordered]
    values = [var.numpy() for var in ordered]
    return names, values
def get_parameter_rows(
    params: ParamsContainer,
    *,
    include_stats: bool = False,
) -> List[Union[ParamRow, ParamRowWithStats]]:
    """Returns information about parameters as a list of row dataclasses.

    Args:
      params: Dictionary with parameters as NumPy arrays (possibly nested), or
        a `tf.Module`, in which case its `trainable_variables` are used.
      include_stats: If True, also compute mean and std per variable. This can
        be considerably more compute intensive and may transfer a lot of
        memory to the host (with `tf.Module`).

    Returns:
      A list of `ParamRow` (or `ParamRowWithStats` when `include_stats`),
      sorted by parameter name.
    """
    if isinstance(params, tf.Module):
        names, values = get_params(params)
    else:
        assert isinstance(params, (dict, flax.core.FrozenDict))
        if not params:
            names, values = [], []
        else:
            sorted_items = sorted(flatten_dict(params).items())
            names = [key for key, _ in sorted_items]
            values = [value for _, value in sorted_items]

    rows = []
    for name, value in zip(names, values):
        size = int(np.prod(value.shape))
        if include_stats:
            rows.append(ParamRowWithStats(
                name=name,
                shape=value.shape,
                size=size,
                mean=float(value.mean()),
                std=float(value.std()),
            ))
        else:
            rows.append(ParamRow(name=name, shape=value.shape, size=size))
    return rows
def _default_table_value_formatter(value):
"""Formats ints with "," between thousands and floats to 3 digits."""
if isinstance(value, bool):
return str(value)
elif isinstance(value, int):
return "{:,}".format(value)
elif isinstance(value, float):
return "{:.3}".format(value)
else:
return str(value)
def make_table(
rows: List[Any],
*,
column_names: Optional[Sequence[str]] = None,
value_formatter: Callable[[Any], str] = _default_table_value_formatter,
max_lines: Optional[int] = None,
) -> str:
"""Renders a list of rows to a table.
Args:
rows: List of dataclass instances of a single type (e.g. `ParamRow`).
column_names: List of columns that that should be included in the output. If
not provided, then the columns are taken from keys of the first row.
value_formatter: Callable used to format cell values.
max_lines: Don't render a table longer than this.
Returns:
A string representation of the table in the form:
+---------+---------+
| Col1 | Col2 |
+---------+---------+
| value11 | value12 |
| value21 | value22 |
+---------+---------+
"""
if any(not dataclasses.is_dataclass(row) for row in rows):
raise ValueError("Expected `rows` to be list of dataclasses")
if len(set(map(type, rows))) > 1:
raise ValueError("Expected elements of `rows` be of same type.")
class Column:
def __init__(self, name, values):
self.name = name.capitalize()
self.values = values
self.width = max(len(v) for v in values + [name])
if column_names is None:
if not rows:
return "(empty table)"
column_names = [field.name for field in dataclasses.fields(rows[0])]
columns = [
Column(name, [value_formatter(getattr(row, name))
for row in rows])
for name in column_names
]
var_line_format = "|" + "".join(f" {{: <{c.width}s}} |" for c in columns)
sep_line_format = var_line_format.replace(" ", "-").replace("|", "+")
header = var_line_format.replace(">", "<").format(*[c.name for c in columns])
separator = sep_line_format.format(*["" for c in columns])
lines = [separator, header, separator]
for i in range(len(rows)):
if max_lines and len(lines) >= max_lines - 3:
lines.append("[...]")
break
lines.append(var_line_format.format(*[c.values[i] for c in columns]))
lines.append(separator)
return "\n".join(lines)
def get_parameter_overview(params: ParamsContainer,
                           *,
                           include_stats: bool = True,
                           max_lines: Optional[int] = None) -> str:
    """Renders a table of variable names, shapes and sizes, plus a total line.

    Args:
      params: Dictionary with parameters as NumPy arrays (possibly nested), or
        a `tf.Module`, in which case its `trainable_variables` are used.
      include_stats: If True, add columns with mean and std for each variable.
      max_lines: If not `None`, the maximum number of variables to include.

    Returns:
      A string like:

      +----------------+---------------+------------+
      | Name           | Shape         | Size       |
      +----------------+---------------+------------+
      | FC_1/weights:0 | (63612, 1024) | 65,138,688 |
      | FC_1/biases:0  | (1024,)       | 1,024      |
      +----------------+---------------+------------+
      Total: 65,139,712
    """
    row_type = ParamRowWithStats if include_stats else ParamRow
    # Pass `column_names` explicitly so empty tables can still be rendered.
    header_names = [field.name for field in dataclasses.fields(row_type)]
    rows = get_parameter_rows(params, include_stats=include_stats)
    rendered = make_table(rows, max_lines=max_lines, column_names=header_names)
    return rendered + f"\nTotal: {count_parameters(params):,}"
def log_parameter_overview(params: ParamsContainer,
                           *,
                           include_stats: bool = True,
                           max_lines: Optional[int] = None,
                           msg: Optional[str] = None):
    """Writes a table of variable names and shapes to the INFO log.

    See `get_parameter_overview` for details.

    Args:
      params: Dictionary with parameters as NumPy arrays (possibly nested), or
        a `tf.Module`, in which case its `trainable_variables` are used.
      include_stats: If True, add columns with mean and std for each variable.
      max_lines: If not `None`, the maximum number of variables to include.
      msg: Optional message logged before the overview.
    """
    overview = get_parameter_overview(
        params, include_stats=include_stats, max_lines=max_lines)
    lines = ([msg] if msg else []) + overview.split("\n")
    # A single log entry has limited size, so emit the table in chunks.
    chunk_size = 80
    for start in range(0, len(lines), chunk_size):
        logging.info("\n%s", "\n".join(lines[start:start + chunk_size]))
|
|
# -*- coding: utf-8 -*-
import six
import unittest
import requests
import api_toolkit
from .helpers import use_cassette as use_pw_cassette
from passaporte_web.main import PassaporteWeb, ServiceAccount, Account, Identity
from passaporte_web.tests.helpers import TEST_USER, APP_CREDENTIALS
from passaporte_web.tests.service_account import CanGetServiceAccount
__all__ = ['PassaporteWebTest', 'UsersTest', 'AccountsTest', 'ApplicationsTest']
class PassaporteWebTest(unittest.TestCase):
    """Tests for the top-level PassaporteWeb client object."""

    def setUp(self):
        with use_pw_cassette('application/collections_options'):
            self.app = PassaporteWeb(**APP_CREDENTIALS)

    def test_instance_has_accounts_users_and_applications(self):
        # Each entry point must exist and be an api_toolkit Collection.
        for attr in ('accounts', 'users', 'applications'):
            self.assertTrue(hasattr(self.app, attr))
            self.assertTrue(isinstance(getattr(self.app, attr), api_toolkit.Collection))
class UsersTest(unittest.TestCase):
    """Tests for user registration, retrieval and authentication.

    All HTTP traffic is replayed from pre-recorded cassettes. The same
    registration payload was previously copy-pasted into eight tests; it is
    now built by `_user_data` and the common failure pattern lives in
    `_assert_registration_fails`.
    """

    def setUp(self):
        with use_pw_cassette('application/collections_options'):
            self.app = PassaporteWeb(**APP_CREDENTIALS)

    def _user_data(self, remove=(), **overrides):
        """Return a copy of the default registration payload.

        Keys listed in `remove` are dropped; `overrides` are merged in last.
        """
        data = {
            'first_name': 'Myfc ID',
            'last_name': 'Clients',
            'email': TEST_USER['email'],
            'password': TEST_USER['password'],
            'password2': TEST_USER['password'],
            'tos': True,
        }
        data.update(overrides)
        for key in remove:
            data.pop(key, None)
        return data

    def _assert_registration_fails(self, cassette, remove=(), **overrides):
        """Assert that user creation with the given payload raises HTTPError."""
        user_data = self._user_data(remove=remove, **overrides)
        with use_pw_cassette(cassette):
            self.assertRaises(requests.HTTPError, self.app.users.create, **user_data)

    def test_application_users_are_not_iterable(self):
        app_users = self.app.users.all()
        self.assertRaises(ValueError, six.next, app_users)

    def test_application_users_can_be_created(self):
        with use_pw_cassette('user/registration_success'):
            user = self.app.users.create(**self._user_data())
            self.assertTrue(isinstance(user, Identity))

    def test_application_users_must_be_created_with_correct_data(self):
        # Missing the password confirmation field.
        self._assert_registration_fails('user/registration_failure_one_password',
                                        remove=('password2',))

    def test_application_must_have_permissions_to_create_users(self):
        self._assert_registration_fails('user/registration_failure_app_without_permissions')

    def test_user_email_must_be_unique(self):
        self._assert_registration_fails('user/registration_failure_duplicated_email')

    def test_user_cpf_must_be_unique(self):
        self._assert_registration_fails('user/registration_failure_duplicated_cpf',
                                        cpf='11111111111')

    def test_user_cpf_must_be_valid(self):
        self._assert_registration_fails('user/registration_failure_invalid_cpf',
                                        cpf='1111111111111111111')

    def test_user_must_agree_with_tos(self):
        self._assert_registration_fails('user/registration_failure_missing_tos',
                                        remove=('tos',))

    def test_user_passwords_must_match(self):
        self._assert_registration_fails('user/registration_failure_password_mismatch',
                                        password2='will not match')

    def test_get_user_by_email(self):
        with use_pw_cassette('user/get_by_email'):
            user = self.app.users.get(email=TEST_USER['email'])
            self.assertTrue(isinstance(user, Identity))
            self.assertEqual(user.email, TEST_USER['email'])
            self.assertEqual(len(list(user.accounts.from_seed())), 5)

    def test_get_user_by_email_fails_when_email_is_not_registered(self):
        with use_pw_cassette('user/get_by_unknown_email'):
            self.assertRaises(requests.HTTPError, self.app.users.get, email='notregistered@test.example')

    def test_get_user_by_uuid(self):
        with use_pw_cassette('user/get_by_uuid'):
            user = self.app.users.get(uuid=TEST_USER['uuid'])
            self.assertTrue(isinstance(user, Identity))
            self.assertEqual(user.email, TEST_USER['email'])
            self.assertEqual(len(list(user.accounts.from_seed())), 4)

    def test_get_user_including_expired_accounts(self):
        with use_pw_cassette('user/get_user_including_expired_accounts'):
            user = self.app.users.get(email=TEST_USER['email'], include_expired_accounts=True)
            self.assertTrue(isinstance(user, Identity))
            self.assertEqual(user.email, TEST_USER['email'])
            self.assertEqual(len(list(user.accounts.from_seed())), 8)

    def test_get_user_including_accounts_from_other_services(self):
        with use_pw_cassette('user/get_user_including_other_services'):
            user = self.app.users.get(email=TEST_USER['email'], include_other_services=True)
            self.assertTrue(isinstance(user, Identity))
            self.assertEqual(user.email, TEST_USER['email'])
            self.assertEqual(len(list(user.accounts.from_seed())), 6)
            # ServiceAccounts belong to this service; plain Accounts come
            # from other services.
            service_accounts = [item for item in user.accounts.from_seed() if isinstance(item, ServiceAccount)]
            external_accounts = [item for item in user.accounts.from_seed() if isinstance(item, Account)]
            self.assertEqual(len(external_accounts), 1)
            self.assertEqual(len(service_accounts), 5)

    def test_get_user_by_uuid_fails_when_uuid_is_not_registered(self):
        with use_pw_cassette('user/get_by_unknown_uuid'):
            self.assertRaises(requests.HTTPError, self.app.users.get, uuid='001')

    def test_application_must_have_permission_to_get_user(self):
        with use_pw_cassette('user/get_without_permission'):
            self.assertRaises(requests.HTTPError, self.app.users.get, uuid=TEST_USER['uuid'])

    def test_get_by_unknown_parameter_raises_TypeError(self):
        self.assertRaises(TypeError, self.app.users.get, first_name='Myfc ID')

    def test_authenticate_user_with_email_and_password(self):
        with use_pw_cassette('user/authenticate_with_email_and_password'):
            user = self.app.users.authenticate(email=TEST_USER['email'], password=TEST_USER['password'])
            self.assertTrue(isinstance(user, Identity))
            self.assertEqual(user.uuid, TEST_USER['uuid'])

    def test_authenticate_user_with_email_and_wrong_password(self):
        with use_pw_cassette('user/authenticate_with_email_and_wrong_password'):
            self.assertRaises(
                requests.HTTPError,
                self.app.users.authenticate, email=TEST_USER['email'], password='wrong password'
            )

    def test_authenticate_user_with_id_token(self):
        with use_pw_cassette('user/authenticate_with_id_token'):
            user = self.app.users.authenticate(id_token=TEST_USER['id_token'])
            self.assertTrue(isinstance(user, Identity))
            self.assertEqual(user.uuid, TEST_USER['uuid'])

    def test_authenticate_user_with_invalid_id_token(self):
        with use_pw_cassette('user/authenticate_with_invalid_id_token'):
            self.assertRaises(requests.HTTPError, self.app.users.authenticate, id_token='invalid_id_token')

    def test_user_credentials_are_replaced_by_app_credentials_in_session(self):
        with use_pw_cassette('user/authenticate_with_email_and_password'):
            user = self.app.users.authenticate(email=TEST_USER['email'], password=TEST_USER['password'])
            self.assertEqual(user._session.auth, (
                APP_CREDENTIALS['token'], APP_CREDENTIALS['secret']
            ))
class CanLoadServiceAccounts(unittest.TestCase):
    """Mixin with tests for listing an application's service accounts.

    Subclasses must set `self.app` in their own setUp.
    """

    def test_load_accounts(self):
        # Expired accounts are also loaded
        with use_pw_cassette('application/account_list'):
            app_accounts = list(self.app.accounts.all())
            self.assertEqual(len(app_accounts), 26)
            for account in app_accounts:
                self.assertTrue(isinstance(account, ServiceAccount))

    def test_load_for_user_without_accounts(self):
        with use_pw_cassette('application/empty_account_list'):
            self.assertEqual(len(list(self.app.accounts.all())), 0)

    def test_load_using_invalid_credentials(self):
        with use_pw_cassette('accounts/using_invalid_credentials'):
            self.app.accounts._session.auth = ('invalid', 'credentials')
            self.assertRaises(requests.HTTPError, six.next, self.app.accounts.all())

    def test_load_for_application_without_permissions(self):
        with use_pw_cassette('accounts/application_without_permissions'):
            self.assertRaises(requests.HTTPError, six.next, self.app.accounts.all())
class AccountsTest(CanGetServiceAccount, CanLoadServiceAccounts):
    """Tests for the application's `accounts` collection."""

    def setUp(self):
        with use_pw_cassette('application/collections_options'):
            self.app = PassaporteWeb(**APP_CREDENTIALS)
        self.collection = self.app.accounts

    def test_application_accounts_are_a_collection(self):
        self.assertTrue(isinstance(self.app.accounts, api_toolkit.Collection))

    def test_application_accounts_cannot_be_deleted(self):
        with use_pw_cassette('application/account_list'):
            account = six.next(self.app.accounts.all())
            account.load_options()
            self.assertRaises(ValueError, account.delete)

    def test_application_accounts_can_be_updated(self):
        with use_pw_cassette('application/account_list'):
            account = six.next(self.app.accounts.all())
            account.load_options()
        # Saving with unchanged data should round-trip successfully.
        with use_pw_cassette('accounts/update_with_same_data'):
            account.save()

    def test_application_accounts_cannot_be_created(self):
        with use_pw_cassette('application/account_list'):
            self.assertRaises(
                ValueError, self.app.accounts.create,
                name='Test Account',
                plan_slug='unittest',
                expiration=None,
            )
class ApplicationsTest(unittest.TestCase):
    """Tests for the application's `applications` collection (read-only)."""

    def setUp(self):
        with use_pw_cassette('application/collections_options'):
            self.app = PassaporteWeb(**APP_CREDENTIALS)
        self.collection = self.app.applications

    def test_applications_are_a_collection(self):
        self.assertTrue(isinstance(self.app.applications, api_toolkit.Collection))

    def test_applications_cannot_be_deleted(self):
        with use_pw_cassette('application/applications_list'):
            application = six.next(self.app.applications.all())
            application.load_options()
            self.assertRaises(ValueError, application.delete)

    def test_applications_cannot_be_updated(self):
        with use_pw_cassette('application/application_list'):
            application = six.next(self.app.applications.all())
            application.load_options()
            self.assertRaises(ValueError, application.save)

    def test_applications_cannot_be_created(self):
        with use_pw_cassette('application/application_list'):
            self.assertRaises(
                ValueError, self.app.applications.create,
                name='Test App',
                slug='unittest',
            )
|
|
from hearthbreaker.cards.minions.neutral import (
BloodfenRaptor,
IronbeakOwl,
NoviceEngineer,
StonetuskBoar,
WarGolem,
MogushanWarden,
FaerieDragon,
KoboldGeomancer,
ElvenArcher,
ArgentSquire,
SilvermoonGuardian,
TwilightDrake,
MagmaRager,
DireWolfAlpha,
WorgenInfiltrator,
Archmage,
DalaranMage,
Malygos,
AzureDrake,
OgreMagi,
Spellbreaker,
BloodmageThalnos,
LootHoarder,
LeperGnome,
IronforgeRifleman,
GnomishInventor,
GoldshireFootman,
FrostwolfGrunt,
IronfurGrizzly,
LordOfTheArena,
MurlocRaider,
ManaAddict,
OasisSnapjaw,
RecklessRocketeer,
RiverCrocolisk,
SenjinShieldmasta,
ScarletCrusader,
Shieldbearer,
SilverbackPatriarch,
JunglePanther,
RavenholdtAssassin,
StormpikeCommando,
StormwindKnight,
StranglethornTiger,
Sunwalker,
ThrallmarFarseer,
WindfuryHarpy,
YoungDragonhawk,
Wolfrider,
BootyBayBodyguard,
BoulderfistOgre,
ChillwindYeti,
CoreHound,
VoodooDoctor,
EarthenRingFarseer,
ArcaneGolem,
PriestessOfElune,
DarkscaleHealer,
ArgentCommander,
BluegillWarrior,
Wisp,
Nightblade,
ShatteredSunCleric,
TheBlackKnight,
AbusiveSergeant,
DarkIronDwarf,
Abomination,
AmaniBerserker,
SilverHandKnight,
FenCreeper,
VentureCoMercenary,
StormwindChampion,
Deathwing,
Alexstrasza,
EmperorCobra,
CrazedAlchemist,
AcidicSwampOoze,
AncientBrewmaster,
YouthfulBrewmaster,
BaronGeddon,
AngryChicken,
RagingWorgen,
TaurenWarrior,
SpitefulSmith,
BloodKnight,
FrostwolfWarlord,
RaidLeader,
DragonlingMechanic,
MurlocTidehunter,
RazorfenHunter,
KnifeJuggler,
CairneBloodhoof,
HarvestGolem,
TheBeast,
SylvanasWindrunner,
StampedingKodo,
FrostElemental,
Demolisher,
Doomsayer,
Gruul,
Hogger,
ImpMaster,
InjuredBlademaster,
MasterSwordsmith,
NatPagle,
Nozdormu,
RagnarosTheFirelord,
ColdlightOracle,
ColdlightSeer,
GrimscaleOracle,
MurlocWarleader,
AncientWatcher,
BigGameHunter,
BloodsailCorsair,
BloodsailRaider,
CaptainGreenskin,
HungryCrab,
MadBomber,
ManaWraith,
MindControlTech,
MurlocTidecaller,
Onyxia,
SouthseaCaptain,
SouthseaDeckhand,
YoungPriestess,
AcolyteOfPain,
CultMaster,
Secretkeeper,
VioletTeacher,
GadgetzanAuctioneer,
IllidanStormrage,
Lightwarden,
FlesheatingGhoul,
QuestingAdventurer,
GurubashiBerserker,
AncientMage,
DefenderOfArgus,
SunfuryProtector,
HarrisonJones,
KingMukla,
LeeroyJenkins,
SeaGiant,
MoltenGiant,
MountainGiant,
DreadCorsair,
CaptainsParrot,
TinkmasterOverspark,
AlarmoBot,
EliteTaurenChieftain,
MillhouseManastorm,
PintSizedSummoner,
OldMurkEye,
Ysera,
GelbinMekkatorque,
LorewalkerCho,
WildPyromancer,
FacelessManipulator,
NerubianEgg,
Maexxna,
HauntedCreeper,
NerubarWeblord,
UnstableGhoul,
Loatheb,
StoneskinGargoyle,
SludgeBelcher,
BaronRivendare,
DancingSwords,
Deathlord,
SpectralKnight,
Undertaker,
WailingSoul,
ZombieChow,
Feugen,
Stalagg,
MadScientist,
EchoingOoze,
ShadeOfNaxxramas,
KelThuzad,
PilotedShredder,
PilotedSkyGolem,
SneedsOldShredder,
AntiqueHealbot,
AnnoyoTron,
ArcaneNullifierX21,
Blingtron3000,
BombLobber,
BurlyRockjawTrogg,
Mechwarper,
Frog,
ClockworkGiant,
ClockworkGnome,
BoomBot,
DoctorBoom,
TargetDummy,
ExplosiveSheep,
Puddlestomper,
MicroMachine,
MechanicalYeti,
SpiderTank,
GilblinStalker,
ShipsCannon,
OgreBrute,
MogorTheOgre,
Toshley,
ForceTankMAX,
FelReaver,
MadderBomber,
Gazlowe,
MiniMage,
SaltyDog,
GnomereganInfantry,
FlyingMachine,
LostTallstrider,
HemetNesingwary,
Illuminator,
MekgineerThermaplugg,
StonesplinterTrogg,
TroggzorTheEarthinator,
Hobgoblin,
Cogmaster,
GoblinSapper,
TinkertownTechnician,
Junkbot,
Jeeves,
Recombobulator,
LilExorcist,
EnhanceoMechano,
FoeReaper4000,
KezanMystic,
MimironsHead,
GnomishExperimenter,
HungryDragon,
GrimPatron,
BlackwingTechnician,
EmperorThaurissan,
MajordomoExecutus,
VolcanicDrake,
BlackwingCorruptor,
DrakonidCrusher,
DragonEgg,
Chromaggus,
DragonkinSorcerer,
RendBlackhand,
Nefarian,
TournamentMedic,
)
from hearthbreaker.cards.minions.druid import (
KeeperOfTheGrove,
DruidOfTheClaw,
AncientOfLore,
AncientOfWar,
IronbarkProtector,
Cenarius,
AnodizedRoboCub,
MechBearCat,
DruidOfTheFang,
Malorne,
GroveTender,
DruidOfTheFlame,
VolcanicLumberer,
)
from hearthbreaker.cards.minions.hunter import (
TimberWolf,
SavannahHighmane,
Houndmaster,
KingKrush,
StarvingBuzzard,
TundraRhino,
ScavengingHyena,
Webspinner,
Hound,
Huffer,
Misha,
Leokk,
Snake,
MetaltoothLeaper,
KingOfBeasts,
Gahzrilla,
SteamwheedleSniper,
CoreRager,
Acidmaw,
)
from hearthbreaker.cards.minions.mage import (
ManaWyrm,
SorcerersApprentice,
KirinTorMage,
EtherealArcanist,
WaterElemental,
ArchmageAntonidas,
Snowchugger,
GoblinBlastmage,
SootSpewer,
WeeSpellstopper,
FlameLeviathan,
Flamewaker
)
from hearthbreaker.cards.minions.paladin import (
AldorPeacekeeper,
ArgentProtector,
GuardianOfKings,
TirionFordring,
CobaltGuardian,
SilverHandRecruit,
ShieldedMinibot,
Quartermaster,
ScarletPurifier,
BolvarFordragon,
DragonConsort,
)
from hearthbreaker.cards.minions.priest import (
AuchenaiSoulpriest,
CabalShadowPriest,
Lightspawn,
Lightwell,
NorthshireCleric,
ProphetVelen,
TempleEnforcer,
DarkCultist,
Shrinkmeister,
UpgradedRepairBot,
Shadowbomber,
Shadowboxer,
Voljin,
TwilightWhelp,
)
from hearthbreaker.cards.minions.rogue import (
AnubarAmbusher,
DefiasRingleader,
EdwinVanCleef,
Kidnapper,
MasterOfDisguise,
PatientAssassin,
SI7Agent,
OneeyedCheat,
IronSensei,
OgreNinja,
TradePrinceGallywix,
GoblinAutoBarber,
DarkIronSkulker,
Anubarak,
)
from hearthbreaker.cards.minions.shaman import (
AlAkirTheWindlord,
DustDevil,
EarthElemental,
FireElemental,
FlametongueTotem,
ManaTideTotem,
UnboundElemental,
Windspeaker,
HealingTotem,
SearingTotem,
StoneclawTotem,
WrathOfAirTotem,
SpiritWolf,
VitalityTotem,
SiltfinSpiritwalker,
WhirlingZapomatic,
DunemaulShaman,
Neptulon,
FireguardDestroyer,
)
from hearthbreaker.cards.minions.warlock import (
FlameImp,
PitLord,
Voidwalker,
DreadInfernal,
Felguard,
Doomguard,
Succubus,
SummoningPortal,
BloodImp,
LordJaraxxus,
VoidTerror,
Voidcaller,
AnimaGolem,
WorthlessImp,
FelCannon,
MalGanis,
FloatingWatcher,
MistressOfPain,
ImpGangBoss,
)
from hearthbreaker.cards.minions.warrior import (
ArathiWeaponsmith,
Armorsmith,
CruelTaskmaster,
FrothingBerserker,
GrommashHellscream,
KorkronElite,
WarsongCommander,
Warbot,
Shieldmaiden,
SiegeEngine,
IronJuggernaut,
ScrewjankClunker,
AxeFlinger,
AlexstraszasChampion,
)
|
|
# -*- coding: utf-8 -*-
import logging
import os
import shutil
import subprocess
import zipfile
from enum import Enum
from functools import wraps
from typing import Type
import requests_pkcs12
from django.db import models
import dj_database_url
import requests
from django.conf import settings
from django.core.cache import cache
from notifications.signals import notify
from django.contrib.auth.models import User
from eventkit_cloud.utils import auth_requests
logger = logging.getLogger(__name__)
def get_id(user: User):
    """Return the external identifier for *user*.

    Prefers the OAuth identification when the user has an associated
    ``oauth`` record; otherwise falls back to the Django username.
    """
    if not hasattr(user, "oauth"):
        return user.username
    return user.oauth.identification
def get_model_by_params(model_class: models.Model, **kwargs):
    """Fetch a single *model_class* row matching the given field lookups.

    ORM exceptions (no match / multiple matches) propagate to the caller.
    """
    return model_class.objects.get(**kwargs)
def get_cached_model(model: Type[models.Model], prop: str, value: str) -> Type[models.Model]:
    """Look up ``model`` by ``prop == value``, memoized in the Django cache for 360s.

    The query is wrapped in a callable so ``cache.get_or_set`` only hits the
    database on a cache miss; the original evaluated ``get_model_by_params``
    eagerly on every call, which defeated the cache entirely.
    """
    return cache.get_or_set(
        f"{model.__name__}-{prop}-{value}",
        lambda: get_model_by_params(model, **{prop: value}),
        360,
    )
def get_query_cache_key(*args):
    """Build a deterministic cache key by normalizing and '-'-joining *args*.

    Objects exposing ``__name__`` (functions, classes) contribute that name;
    everything else contributes its ``str()`` form.
    """
    from eventkit_cloud.tasks.helpers import normalize_name

    return "-".join(
        normalize_name(str(getattr(arg, "__name__", arg))) for arg in args
    )
def download_file(url, download_dir=None):
    """Stream *url* to disk and return the local file path, or None on failure.

    The file is written under ``download_dir`` (default:
    ``settings.EXPORT_STAGING_ROOT``) using the URL's basename.
    """
    download_dir = download_dir or settings.EXPORT_STAGING_ROOT
    file_location = os.path.join(download_dir, os.path.basename(url))
    # Use the response as a context manager so the connection is released
    # even on error, and copy in 64 KiB chunks instead of iterating the
    # response directly (which yields tiny 128-byte chunks).
    with requests.get(url, stream=True) as r:
        if r.status_code != 200:
            logger.error("Failed to download file, STATUS_CODE: {0}".format(r.status_code))
            return None
        with open(file_location, "wb") as f:
            for chunk in r.iter_content(chunk_size=64 * 1024):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
    return file_location
def extract_zip(zipfile_path, extract_dir=None):
    """Extract *zipfile_path* into *extract_dir* and return that directory.

    Defaults to ``settings.EXPORT_STAGING_ROOT``.  The archive handle is
    managed with a context manager so it is closed even if extraction
    raises (the original leaked the handle on error).
    """
    extract_dir = extract_dir or settings.EXPORT_STAGING_ROOT
    logger.info("Extracting {0} to {1}...".format(zipfile_path, extract_dir))
    with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
        zip_ref.extractall(extract_dir)
    logger.info("Finished Extracting.")
    return extract_dir
def get_vector_file(directory):
    """Return the path of the first vector file found in *directory*.

    Recognised extensions are ``.shp``, ``.geojson`` and ``.gpkg``.
    Returns ``None`` implicitly when no such file is present.
    """
    vector_suffixes = (".shp", ".geojson", ".gpkg")
    for entry in os.listdir(directory):
        if entry.endswith(vector_suffixes):
            logger.info("Found: {0}".format(entry))
            return os.path.join(directory, entry)
def load_land_vectors(db_conn=None, url=None):
    """Download the land-polygon dataset and load it into PostGIS via ogr2ogr.

    :param db_conn: optional database URL string; when omitted the
        ``feature_data`` entry of ``settings.DATABASES`` is used.
    :param url: optional dataset URL; defaults to ``settings.LAND_DATA_URL``.
    """
    if not url:
        url = settings.LAND_DATA_URL
    if db_conn:
        database = dj_database_url.config(default=db_conn)
    else:
        database = settings.DATABASES["feature_data"]
    logger.info("Downloading land data: {0}".format(url))
    download_filename = download_file(url)
    logger.info("Finished downloading land data: {0}".format(url))
    file_dir = None
    # Zipped downloads are extracted in place; the vector file found inside
    # the extracted directory is what ogr2ogr actually loads.
    if os.path.splitext(download_filename)[1] == ".zip":
        extract_zip(download_filename)
        file_dir = os.path.splitext(download_filename)[0]
        file_name = get_vector_file(file_dir)
    else:
        file_name = download_filename
    # NOTE(review): the command is interpolated into a shell string and run
    # with shell=True.  Only "$" is escaped in the password — and "\$" in a
    # non-raw Python string is just backslash-dollar (it also triggers an
    # invalid-escape DeprecationWarning on modern Pythons).  Passwords
    # containing quotes or backticks would break or inject; confirm these
    # settings values are trusted.
    cmd = (
        'ogr2ogr -s_srs EPSG:3857 -t_srs EPSG:4326 -f "PostgreSQL" '
        'PG:"host={host} user={user} password={password} dbname={name} port={port}" '
        "{file} land_polygons".format(
            host=database["HOST"],
            user=database["USER"],
            password=database["PASSWORD"].replace("$", "\$"),
            name=database["NAME"],
            port=database["PORT"],
            file=file_name,
        )
    )
    logger.info("Loading land data...")
    exit_code = subprocess.call(cmd, shell=True)
    if exit_code:
        logger.error("There was an error importing the land data.")
    # Best-effort cleanup of the staging artifacts; a missing file is ignored.
    if file_dir:
        shutil.rmtree(file_dir)
    os.remove(download_filename)
    try:
        os.remove(file_name)
    except OSError:
        pass
    finally:
        logger.info("Finished loading land data.")
def handle_auth(func):
    """
    Decorator for requests methods that supplies username and password as HTTPBasicAuth header.
    Checks first for credentials environment variable, then URL, and finally kwarg parameters.
    :param func: A requests method that returns an instance of requests.models.Response
    :return: result of requests function call
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        cert_path, cert_pass = auth_requests.get_cert_info(kwargs)
        kwargs["cert_path"] = cert_path
        kwargs["cert_pass"] = cert_pass
        # A provider slug doubles as the credential lookup variable.
        cred_var = kwargs.pop("cred_var", None) or kwargs.pop("slug", None)
        url = kwargs.get("url")
        cred = auth_requests.get_cred(cred_var=cred_var, url=url, params=kwargs.get("params", None))
        if cred:
            kwargs["username"] = cred[0]
            kwargs["password"] = cred[1]
        # Redact secrets: the original logged every kwarg verbatim, which
        # leaked passwords and cert passphrases into the debug log.
        secret_keys = {"password", "cert_pass"}
        logger.debug(
            "requests.%s('%s', %s)",
            func.__name__,
            url,
            ", ".join(
                "%s=%s" % (k, "****" if k in secret_keys else v)
                for k, v in kwargs.items()
            ),
        )
        # The original wrapped this call in ``try/except Exception: raise``,
        # a no-op; exceptions now propagate naturally.
        return func(*args, **kwargs)

    return wrapper
@handle_auth
def get_or_update_session(session=None, max_retries=3, headers=None, cookie=None, **auth_info):
    """Return a configured ``requests.Session``, creating one if needed.

    ``auth_info`` is populated by the ``handle_auth`` decorator (username,
    password, cert_path, cert_pass).  Basic auth, retry adapters, an optional
    PKCS#12 client-certificate adapter, a cookie, SSL verification and extra
    headers are applied as available.
    """
    username = auth_info.get("username")
    password = auth_info.get("password")
    cert_path = auth_info.get("cert_path")
    cert_pass = auth_info.get("cert_pass")
    ssl_verify = getattr(settings, "SSL_VERIFICATION", True)
    if not session:
        session = requests.Session()
    if username and password:
        # NOTE(review): this logs the plaintext password at debug level.
        logger.debug(f"setting {username} and {password} for session")
        session.auth = (username, password)
    adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    if cert_path and cert_pass:
        try:
            # Re-mount https with a client-certificate adapter when PKCS#12
            # credentials are available (overrides the plain adapter above).
            adapter = requests_pkcs12.Pkcs12Adapter(
                pkcs12_filename=cert_path,
                pkcs12_password=cert_pass,
                max_retries=max_retries,
            )
            session.mount("https://", adapter)
        except FileNotFoundError:
            logger.error("No cert found at path {}".format(cert_path))
    if cookie:
        session.cookies.set(**cookie)
    logger.debug("Using %s for SSL verification.", str(ssl_verify))
    session.verify = ssl_verify
    if headers:
        session.headers.update(headers)
    return session
class NotificationLevel(Enum):
    """Severity levels attached to user notifications."""

    SUCCESS = "success"
    INFO = "info"
    WARNING = "warning"
    # NOTE(review): every other value is lower-case; "ERROR" breaks the
    # pattern.  Persisted notifications may rely on this exact string, so it
    # is flagged rather than changed — confirm before normalizing.
    ERROR = "ERROR"
class NotificationVerb(Enum):
    """Verbs describing the event a notification reports (run lifecycle
    and group-membership changes)."""

    RUN_STARTED = "run_started"
    RUN_COMPLETED = "run_completed"
    RUN_FAILED = "run_failed"
    RUN_DELETED = "run_deleted"
    RUN_CANCELED = "run_canceled"
    REMOVED_FROM_GROUP = "removed_from_group"
    ADDED_TO_GROUP = "added_to_group"
    SET_AS_GROUP_ADMIN = "set_as_group_admin"
    REMOVED_AS_GROUP_ADMIN = "removed_as_group_admin"
    RUN_EXPIRING = "run_expiring"
def sendnotification(actor, recipient, verb, action_object, target, level, description):
    """Best-effort wrapper around ``notify.send``.

    Delivery failures are deliberately swallowed (logged at debug level) so
    that notification problems never break the calling workflow.
    """
    try:
        notify.send(
            actor,
            recipient=recipient,
            verb=verb,
            action_object=action_object,
            target=target,
            level=level,
            description=description,
        )
    except Exception as err:
        logger.debug("notify send error ignored: %s" % err)
|
|
import uuid
import pytest
from app.models.organisation import Organisation
from app.models.service import Service
from app.models.user import User
from tests import organisation_json, service_json
from tests.conftest import ORGANISATION_ID
# Fixture folder ids: INV_* folders are invisible to the test user (no
# permissions), VIS_* folders are visible.  Fixed UUID strings so tests can
# assert on parent/child relationships.
INV_PARENT_FOLDER_ID = '7e979e79-d970-43a5-ac69-b625a8d147b0'
INV_CHILD_1_FOLDER_ID = '92ee1ee0-e4ee-4dcc-b1a7-a5da9ebcfa2b'
VIS_PARENT_FOLDER_ID = 'bbbb222b-2b22-2b22-222b-b222b22b2222'
INV_CHILD_2_FOLDER_ID = 'fafe723f-1d39-4a10-865f-e551e03d8886'
def _get_all_folders(active_user_with_permissions):
    """Return the raw template-folder fixture: a mix of folders visible and
    invisible to *active_user_with_permissions*, nested up to three levels.

    Folders without fixed module-level ids get throwaway uuid4 ids, so only
    the INV_*/VIS_* folders can be referenced by id in assertions.
    """
    return [
        {
            'name': "Invisible folder",
            'id': str(uuid.uuid4()),
            'parent_id': None,
            'users_with_permission': []
        },
        {
            'name': "Parent 1 - invisible",
            'id': INV_PARENT_FOLDER_ID,
            'parent_id': None,
            'users_with_permission': []
        },
        {
            'name': "1's Visible child",
            'id': str(uuid.uuid4()),
            'parent_id': INV_PARENT_FOLDER_ID,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': "1's Invisible child",
            'id': INV_CHILD_1_FOLDER_ID,
            'parent_id': INV_PARENT_FOLDER_ID,
            'users_with_permission': []
        },
        {
            'name': "1's Visible grandchild",
            'id': str(uuid.uuid4()),
            'parent_id': INV_CHILD_1_FOLDER_ID,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': "Parent 2 - visible",
            'id': VIS_PARENT_FOLDER_ID,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': "2's Visible child",
            'id': str(uuid.uuid4()),
            'parent_id': VIS_PARENT_FOLDER_ID,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': "2's Invisible child",
            'id': INV_CHILD_2_FOLDER_ID,
            'parent_id': VIS_PARENT_FOLDER_ID,
            'users_with_permission': []
        },
        {
            'name': "2's Visible grandchild",
            'id': str(uuid.uuid4()),
            'parent_id': INV_CHILD_2_FOLDER_ID,
            'users_with_permission': [active_user_with_permissions['id']],
        },
    ]
def test_get_user_template_folders_only_returns_folders_visible_to_user(
    notify_admin,
    mock_get_template_folders,
    service_one,
    active_user_with_permissions,
    mocker
):
    """Invisible ancestors are collapsed into list-valued breadcrumb names on
    their visible descendants, and purely invisible folders are dropped."""
    mock_get_template_folders.return_value = _get_all_folders(active_user_with_permissions)
    service = Service(service_one)
    result = service.get_user_template_folders(User(active_user_with_permissions))
    assert result == [
        {
            'name': ["Parent 1 - invisible", "1's Visible child"],
            'id': mocker.ANY,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': ["Parent 1 - invisible", ["1's Invisible child", "1's Visible grandchild"]],
            'id': mocker.ANY,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': "2's Visible child",
            'id': mocker.ANY,
            'parent_id': VIS_PARENT_FOLDER_ID,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': ["2's Invisible child", "2's Visible grandchild"],
            'id': mocker.ANY,
            'parent_id': VIS_PARENT_FOLDER_ID,
            'users_with_permission': [active_user_with_permissions['id']],
        },
        {
            'name': "Parent 2 - visible",
            'id': VIS_PARENT_FOLDER_ID,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']],
        },
    ]
def test_get_template_folders_shows_user_folders_when_user_id_passed_in(
    notify_admin,
    mock_get_template_folders,
    service_one,
    active_user_with_permissions,
    mocker
):
    """With a user passed in, only top-level folders visible to that user are
    returned (children are not flattened into the top level here)."""
    mock_get_template_folders.return_value = _get_all_folders(active_user_with_permissions)
    service = Service(service_one)
    result = service.get_template_folders(user=User(active_user_with_permissions))
    assert result == [
        {
            'name': ["Parent 1 - invisible", "1's Visible child"],
            'id': mocker.ANY,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']]
        },
        {
            'name': ["Parent 1 - invisible", ["1's Invisible child", "1's Visible grandchild"]],
            'id': mocker.ANY,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']]
        },
        {
            'name': "Parent 2 - visible",
            'id': VIS_PARENT_FOLDER_ID,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']]
        },
    ]
def test_get_template_folders_shows_all_folders_when_user_id_not_passed_in(
    mock_get_template_folders,
    service_one,
    active_user_with_permissions,
    mocker
):
    """Without a user, all top-level folders are returned regardless of
    per-user permissions.

    NOTE(review): unlike its siblings this test does not request the
    ``notify_admin`` fixture — confirm whether that is intentional.
    """
    mock_get_template_folders.return_value = _get_all_folders(active_user_with_permissions)
    service = Service(service_one)
    result = service.get_template_folders()
    assert result == [
        {
            'name': "Invisible folder",
            'id': mocker.ANY,
            'parent_id': None,
            'users_with_permission': []
        },
        {
            'name': "Parent 1 - invisible",
            'id': INV_PARENT_FOLDER_ID,
            'parent_id': None,
            'users_with_permission': []
        },
        {
            'name': "Parent 2 - visible",
            'id': VIS_PARENT_FOLDER_ID,
            'parent_id': None,
            'users_with_permission': [active_user_with_permissions['id']],
        }
    ]
def test_organisation_type_when_services_organisation_has_no_org_type(mocker, service_one):
    """Falls back to the service's own type ('central') when the linked
    organisation has no ``organisation_type``.

    NOTE(review): this test writes ``service._dict['organisation_id']`` while
    the sibling below writes ``'organisation'`` — confirm which key the
    Service model actually reads.
    """
    service = Service(service_one)
    service._dict['organisation_id'] = ORGANISATION_ID
    org = organisation_json(organisation_type=None)
    mocker.patch('app.organisations_client.get_organisation', return_value=org)
    assert not org['organisation_type']
    assert service.organisation_type == 'central'
def test_organisation_type_when_service_and_its_org_both_have_an_org_type(mocker, service_one):
    """The organisation's type wins over the service's own type."""
    # service_one has an organisation_type of 'central'
    service = Service(service_one)
    service._dict['organisation'] = ORGANISATION_ID
    org = organisation_json(organisation_type='local')
    mocker.patch('app.organisations_client.get_organisation', return_value=org)
    assert service.organisation_type == 'local'
def test_organisation_name_comes_from_cache(mocker, service_one):
    """A cached (JSON-encoded) name short-circuits the organisations API."""
    mock_redis_get = mocker.patch(
        'app.extensions.RedisClient.get',
        return_value=b'"Borchester Council"',
    )
    mock_get_organisation = mocker.patch('app.organisations_client.get_organisation')
    service = Service(service_one)
    service._dict['organisation'] = ORGANISATION_ID
    assert service.organisation_name == 'Borchester Council'
    mock_redis_get.assert_called_once_with(f'organisation-{ORGANISATION_ID}-name')
    assert mock_get_organisation.called is False
def test_organisation_name_goes_into_cache(mocker, service_one):
    """On a cache miss the name is fetched and cached for a week (604800s)."""
    mocker.patch(
        'app.extensions.RedisClient.get',
        return_value=None,
    )
    mock_redis_set = mocker.patch(
        'app.extensions.RedisClient.set',
    )
    mocker.patch(
        'app.organisations_client.get_organisation',
        return_value=organisation_json(),
    )
    service = Service(service_one)
    service._dict['organisation'] = ORGANISATION_ID
    assert service.organisation_name == 'Test Organisation'
    mock_redis_set.assert_called_once_with(
        f'organisation-{ORGANISATION_ID}-name',
        '"Test Organisation"',
        ex=604800,
    )
def test_service_without_organisation_doesnt_need_org_api(mocker, service_one):
    """A service with no organisation touches neither the cache nor the API,
    but still exposes an (empty) Organisation object."""
    mock_redis_get = mocker.patch('app.extensions.RedisClient.get')
    mock_get_organisation = mocker.patch('app.organisations_client.get_organisation')
    service = Service(service_one)
    service._dict['organisation'] = None
    assert service.organisation_id is None
    assert service.organisation_name is None
    assert isinstance(service.organisation, Organisation)
    assert mock_redis_get.called is False
    assert mock_get_organisation.called is False
def test_bad_permission_raises(service_one):
    """Unknown permission names raise KeyError with a descriptive message."""
    with pytest.raises(KeyError) as e:
        Service(service_one).has_permission('foo')
    assert str(e.value) == "'foo is not a service permission'"
@pytest.mark.parametrize("purchase_order_number,expected_result", [
    [None, None],
    ["PO1234", [None, None, None, "PO1234"]]
])
def test_service_billing_details(purchase_order_number, expected_result):
    """billing_details is None without a PO number, otherwise a 4-item list
    with the PO number in the last slot."""
    service = Service(service_json(purchase_order_number=purchase_order_number))
    service._dict['purchase_order_number'] = purchase_order_number
    assert service.billing_details == expected_result
|
|
#! /usr/bin/env python
"""
signature.py
Written by Geremy Condra
Released on 18 March 2010
Licensed under MIT License
This module provides a basic interface to OpenSSL's EVP
signature functions.
All functions in this module will raise a SignatureError
in the event of a malfunction.
The goal of cryptographic signatures is to provide some
degree of assurance that the data you are processing is
both coming from the person you think is sending it and
is what they sent.
Note that this does not encrypt data in the sense that
it does not provide secrecy for it, while evpy.cipher
and evpy.envelope provide secrecy but no other security
properties.
Usage:
>>> from evpy import signature
>>> data = b"abcdefg"
>>> public_key = "test/keys/public1.pem"
>>> private_key = "test/keys/private1.pem"
>>> s = signature.sign(data, private_key)
>>> signature.verify(data, s, public_key)
True
"""
import ctypes
import evp
class SignatureError(evp.SSLError):
    """Raised when any EVP signing or verification operation fails."""
    pass
def sign(data, keyfile=None, key=None):
    """Signs the given data, raising SignatureError on failure.

    Exactly one of keyfile, key should be given; if key is not
    defined, then the key will be read from the given file.

    Usage:
        >>> from evpy import signature
        >>> f = open("test/short.txt", "rb")
        >>> data = f.read()
        >>> public_key = "test/keys/public1.pem"
        >>> private_key = "test/keys/private1.pem"
        >>> s = signature.sign(data, private_key)
        >>> signature.verify(data, s, public_key)
        True
    """
    # add the digests
    evp.OpenSSL_add_all_digests()
    # build the context
    ctx = evp.EVP_MD_CTX_create()
    if not ctx:
        raise SignatureError("Could not create context")
    # get the signing key (exactly one source must be given)
    if key and not keyfile:
        skey = _build_skey_from_string(key)
    elif keyfile and not key:
        skey = _build_skey_from_file(keyfile)
    else:
        raise SignatureError("Exactly one of key, keyfile must be specified")
    # build the hash object
    evp_hash = _build_hash()
    if not evp.EVP_DigestInit(ctx, evp_hash):
        _cleanup(skey, ctx)
        raise SignatureError("Could not initialize signature")
    # update the digest with the message
    if not evp.EVP_DigestUpdate(ctx, data, len(data)):
        _cleanup(skey, ctx)
        raise SignatureError("Could not update signature")
    # finalize: EVP_SignFinal writes the actual signature length back into
    # output_buflen, which may be shorter than EVP_PKEY_size's upper bound.
    output_buflen = ctypes.c_int(evp.EVP_PKEY_size(skey))
    output = ctypes.create_string_buffer(output_buflen.value)
    if not evp.EVP_SignFinal(ctx, output, ctypes.byref(output_buflen), skey):
        _cleanup(skey, ctx)
        raise SignatureError("Could not finalize signature")
    # cleanup
    _cleanup(skey, ctx)
    # ctypes.string_at requires a Python int for its size argument; the
    # original passed the c_int object itself, which fails with TypeError
    # on platforms where c_int is not the same type as c_ssize_t.
    return ctypes.string_at(output, output_buflen.value)
def verify(data, sig, keyfile=None, key=None):
    """Verifies the given signature, returning a boolean.

    Exactly one of keyfile, key should be specified.

    This function raises SignatureError on error.

    Usage:
        >>> from evpy import signature
        >>> f = open("test/short.txt", "rb")
        >>> data = f.read()
        >>> public_key = "test/keys/public1.pem"
        >>> private_key = "test/keys/private1.pem"
        >>> s = signature.sign(data, private_key)
        >>> signature.verify(data, s, public_key)
        True
    """
    # add the digests
    evp.OpenSSL_add_all_digests()
    # build the context
    ctx = evp.EVP_MD_CTX_create()
    if not ctx:
        raise SignatureError("Could not create context")
    # get the verification (public) key from exactly one source
    if key and not keyfile:
        vkey = _build_vkey_from_string(key)
    elif keyfile and not key:
        vkey = _build_vkey_from_file(keyfile)
    else:
        raise SignatureError("Exactly one of key, keyfile must be specified")
    # build the hash object
    evp_hash = _build_hash()
    if not evp.EVP_DigestInit(ctx, evp_hash):
        _cleanup(vkey, ctx)
        raise SignatureError("Could not initialize verifier")
    # update the digest with the message
    if not evp.EVP_DigestUpdate(ctx, data, len(data)):
        _cleanup(vkey, ctx)
        raise SignatureError("Could not update verifier")
    # finalize: EVP_VerifyFinal returns 1 for a good signature, 0 for a bad
    # one, and a negative value on other errors.
    retcode = evp.EVP_VerifyFinal(ctx, sig, len(sig), vkey)
    # cleanup
    _cleanup(vkey, ctx)
    # and go home
    if retcode == 1:
        return True
    elif retcode == 0:
        return False
    else:
        raise SignatureError("Error verifying signature")
def _cleanup(key, ctx):
    """Free the EVP key and release then destroy the digest context."""
    evp.EVP_PKEY_free(key)
    evp.EVP_MD_CTX_cleanup(ctx)
    evp.EVP_MD_CTX_destroy(ctx)
def _string_to_bio(s):
    """Wrap *s* in a read-only OpenSSL memory BIO.

    NOTE(review): appears unused within this module — the *_from_string
    helpers call BIO_new_mem_buf directly; confirm external callers before
    removing.
    """
    return evp.BIO_new_mem_buf(s, len(s))
def _build_skey_from_file(keyfile):
    """Read a PEM private key from *keyfile*, returning an EVP signing key.

    Raises SignatureError if the file cannot be opened or parsed.
    """
    fp = evp.fopen(keyfile, "r")
    if not fp:
        raise SignatureError("Could not open keyfile")
    # get the signing key
    skey = evp.PEM_read_PrivateKey(fp, None, None, None)
    if not skey:
        evp.fclose(fp)
        raise SignatureError("Could not read signing key")
    # close the file
    evp.fclose(fp)
    return skey
def _build_skey_from_string(key):
    """Parse a PEM-encoded private key from *key* (bytes) into an EVP key."""
    buf = ctypes.create_string_buffer(key)
    # len(buf.value) stops at the first NUL byte — acceptable for PEM text,
    # which is ASCII.
    bio = evp.BIO_new_mem_buf(buf, len(buf.value))
    # NOTE(review): OpenSSL's PEM_read_bio_PrivateKey takes four arguments
    # (bio, x, cb, u); five are passed here while the PUBKEY variant below
    # passes four — confirm against the evp wrapper's declared signature.
    skey = evp.PEM_read_bio_PrivateKey(bio, None, None, None, None)
    if not skey:
        raise SignatureError("Could not construct signing key from the given string")
    evp.BIO_free(bio)
    return skey
def _build_vkey_from_file(keyfile):
    """Read a PEM public key from *keyfile*, returning an EVP verify key.

    Raises SignatureError if the file cannot be opened or parsed.
    """
    fp = evp.fopen(keyfile, "r")
    if not fp:
        raise SignatureError("Could not open keyfile")
    # get the verification key
    vkey = evp.PEM_read_PUBKEY(fp, None, None, None)
    if not vkey:
        evp.fclose(fp)
        raise SignatureError("Could not read verification key")
    # close the file
    evp.fclose(fp)
    return vkey
def _build_vkey_from_string(key):
    """Parse a PEM-encoded public key from *key* (bytes) into an EVP key.

    Raises SignatureError if the key cannot be parsed.
    """
    buf = ctypes.create_string_buffer(key)
    bio = evp.BIO_new_mem_buf(buf, len(buf.value))
    try:
        vkey = evp.PEM_read_bio_PUBKEY(bio, None, None, None)
    finally:
        # The original never freed the BIO (leaked on every call, including
        # the error path); the sibling _build_skey_from_string frees it.
        # PEM_read_bio_PUBKEY copies the data, so the BIO is safe to free.
        evp.BIO_free(bio)
    if not vkey:
        raise SignatureError("Could not construct verification key from the given string")
    return vkey
def _build_hash():
    """Return the EVP digest object for SHA-512, the scheme's fixed hash."""
    evp_hash = evp.EVP_get_digestbyname("sha512")
    if not evp_hash:
        raise SignatureError("Could not create hash object")
    return evp_hash
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.preprocessing import image as image_preproc
from tensorflow.python.keras.preprocessing import image_dataset
from tensorflow.python.platform import test
try:
import PIL # pylint:disable=g-import-not-at-top
except ImportError:
PIL = None
class ImageDatasetFromDirectoryTest(keras_parameterized.TestCase):
  """Tests for `image_dataset_from_directory`."""

  def _get_images(self, count=16, color_mode='rgb'):
    """Generate `count` random 24x24 PIL images in the requested color mode."""
    width = height = 24
    imgs = []
    for _ in range(count):
      if color_mode == 'grayscale':
        img = np.random.randint(0, 256, size=(height, width, 1))
      elif color_mode == 'rgba':
        img = np.random.randint(0, 256, size=(height, width, 4))
      else:
        img = np.random.randint(0, 256, size=(height, width, 3))
      img = image_preproc.array_to_img(img)
      imgs.append(img)
    return imgs

  def _prepare_directory(self,
                         num_classes=2,
                         grayscale=False,
                         nested_dirs=False,
                         color_mode='rgb',
                         count=16):
    """Create a temp directory tree of class subfolders populated with
    `count` images distributed round-robin across the subfolders.

    NOTE(review): the `grayscale` parameter is unused — callers pass
    `color_mode` instead; kept for signature compatibility.
    """
    # Get a unique temp directory
    temp_dir = os.path.join(self.get_temp_dir(), str(np.random.randint(1e6)))
    os.mkdir(temp_dir)
    self.addCleanup(shutil.rmtree, temp_dir)

    # Generate paths to class subdirectories
    paths = []
    for class_index in range(num_classes):
      class_directory = 'class_%s' % (class_index,)
      if nested_dirs:
        class_paths = [
            class_directory, os.path.join(class_directory, 'subfolder_1'),
            os.path.join(class_directory, 'subfolder_2'), os.path.join(
                class_directory, 'subfolder_1', 'sub-subfolder')
        ]
      else:
        class_paths = [class_directory]
      for path in class_paths:
        os.mkdir(os.path.join(temp_dir, path))
      paths += class_paths

    # Save images to the paths.  Use the loop index to distribute images
    # round-robin: the original indexed with `count % len(paths)`, which is
    # constant for the whole loop and dumped every image into a single
    # directory, so no class dir beyond one ever received images.
    i = 0
    for img in self._get_images(color_mode=color_mode, count=count):
      path = paths[i % len(paths)]
      if color_mode == 'rgb':
        ext = 'jpg'
      else:
        ext = 'png'
      filename = os.path.join(path, 'image_%s.%s' % (i, ext))
      img.save(os.path.join(temp_dir, filename))
      i += 1
    return temp_dir

  def test_image_dataset_from_directory_binary(self):
    """int/binary/categorical label modes yield correctly shaped batches."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=2)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='int')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8,))
    self.assertEqual(batch[1].dtype.name, 'int32')

    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='binary')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8, 1))
    self.assertEqual(batch[1].dtype.name, 'float32')

    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='categorical')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8, 2))
    self.assertEqual(batch[1].dtype.name, 'float32')

  def test_sample_count(self):
    """Every file on disk appears exactly once across all batches."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=4, count=15)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None)
    sample_count = 0
    for batch in dataset:
      sample_count += batch.shape[0]
    self.assertEqual(sample_count, 15)

  def test_image_dataset_from_directory_multiclass(self):
    """Multi-class directories work with no labels, int and categorical."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=4, count=15)

    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None)
    batch = next(iter(dataset))
    self.assertEqual(batch.shape, (8, 18, 18, 3))

    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None)
    sample_count = 0
    # Consume via an explicit secondary iterator while looping the dataset,
    # exercising concurrent iteration over the same dataset object.
    iterator = iter(dataset)
    for batch in dataset:
      sample_count += next(iterator).shape[0]
    self.assertEqual(sample_count, 15)

    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='int')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8,))
    self.assertEqual(batch[1].dtype.name, 'int32')

    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='categorical')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8, 4))
    self.assertEqual(batch[1].dtype.name, 'float32')

  def test_image_dataset_from_directory_color_modes(self):
    """rgba and grayscale color modes produce 4- and 1-channel batches."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=4, color_mode='rgba')
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), color_mode='rgba')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 4))
    self.assertEqual(batch[0].dtype.name, 'float32')

    directory = self._prepare_directory(num_classes=4, color_mode='grayscale')
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), color_mode='grayscale')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 1))
    self.assertEqual(batch[0].dtype.name, 'float32')

  def test_image_dataset_from_directory_validation_split(self):
    """A 0.2 split of 10 images yields 8 training and 2 validation samples."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=2, count=10)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=10, image_size=(18, 18),
        validation_split=0.2, subset='training')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=10, image_size=(18, 18),
        validation_split=0.2, subset='validation')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (2, 18, 18, 3))

  def test_image_dataset_from_directory_manual_labels(self):
    """Explicit `labels` are used verbatim (shuffle off to keep order)."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=2, count=2)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18),
        labels=[0, 1], shuffle=False)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertAllClose(batch[1], [0, 1])

  def test_image_dataset_from_directory_follow_links(self):
    """Nested subdirectories are traversed when follow_links is set."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=2, count=25,
                                        nested_dirs=True)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None,
        follow_links=True)
    sample_count = 0
    for batch in dataset:
      sample_count += batch.shape[0]
    self.assertEqual(sample_count, 25)

  def test_image_dataset_from_directory_errors(self):
    """Invalid arguments raise ValueError with the documented messages."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    directory = self._prepare_directory(num_classes=3, count=5)

    with self.assertRaisesRegex(ValueError, '`labels` argument should be'):
      _ = image_dataset.image_dataset_from_directory(
          directory, labels=None)

    with self.assertRaisesRegex(ValueError, '`label_mode` argument must be'):
      _ = image_dataset.image_dataset_from_directory(
          directory, label_mode='other')

    with self.assertRaisesRegex(ValueError, '`color_mode` must be one of'):
      _ = image_dataset.image_dataset_from_directory(
          directory, color_mode='other')

    with self.assertRaisesRegex(
        ValueError, 'only pass `class_names` if the labels are inferred'):
      _ = image_dataset.image_dataset_from_directory(
          directory, labels=[0, 0, 1, 1, 1],
          class_names=['class_0', 'class_1', 'class_2'])

    with self.assertRaisesRegex(
        ValueError,
        'Expected the lengths of `labels` to match the number of files'):
      _ = image_dataset.image_dataset_from_directory(
          directory, labels=[0, 0, 1, 1])

    with self.assertRaisesRegex(
        ValueError, '`class_names` passed did not match'):
      _ = image_dataset.image_dataset_from_directory(
          directory, class_names=['class_0', 'class_2'])

    with self.assertRaisesRegex(ValueError, 'there must exactly 2 classes'):
      _ = image_dataset.image_dataset_from_directory(
          directory, label_mode='binary')

    with self.assertRaisesRegex(ValueError,
                                '`validation_split` must be between 0 and 1'):
      _ = image_dataset.image_dataset_from_directory(
          directory, validation_split=2)

    with self.assertRaisesRegex(ValueError,
                                '`subset` must be either "training" or'):
      _ = image_dataset.image_dataset_from_directory(
          directory, validation_split=0.2, subset='other')
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import flask
from werkzeug import datastructures
from sahara import context
from sahara import exceptions as ex
from sahara.openstack.common import log as logging
from sahara.openstack.common import wsgi
LOG = logging.getLogger(__name__)
class Rest(flask.Blueprint):
    """Blueprint subclass that wires REST verbs to tenant-scoped handlers.

    Each verb helper registers a rule under ``/<tenant_id>`` (plus ``.json``
    and ``.xml`` suffixed variants) and wraps the view function in request
    context setup and Sahara error translation.
    """

    def get(self, rule, status_code=200):
        return self._mroute('GET', rule, status_code)

    def post(self, rule, status_code=202):
        return self._mroute('POST', rule, status_code)

    def post_file(self, rule, status_code=202):
        return self._mroute('POST', rule, status_code, file_upload=True)

    def put(self, rule, status_code=202):
        return self._mroute('PUT', rule, status_code)

    def put_file(self, rule, status_code=202):
        return self._mroute('PUT', rule, status_code, file_upload=True)

    def delete(self, rule, status_code=204):
        return self._mroute('DELETE', rule, status_code)

    def _mroute(self, methods, rule, status_code=None, **kw):
        # Accept a single verb or a list of verbs.  ``isinstance`` replaces
        # the original ``type(methods) is str`` comparison (idiomatic, and
        # robust to str subclasses).
        if isinstance(methods, str):
            methods = [methods]
        return self.route(rule, methods=methods, status_code=status_code, **kw)

    def route(self, rule, **options):
        """Register *rule* (tenant-prefixed) with ctx setup and error mapping.

        Recognised extra options: ``status_code`` (forced response status)
        and ``file_upload`` (affects response-type initialization).
        """
        status = options.pop('status_code', None)
        file_upload = options.pop('file_upload', False)

        def decorator(func):
            endpoint = options.pop('endpoint', func.__name__)

            def handler(**kwargs):
                context.set_ctx(None)
                LOG.debug("Rest.route.decorator.handler, kwargs=%s", kwargs)
                _init_resp_type(file_upload)

                # update status code
                if status:
                    flask.request.status_code = status

                # The tenant id comes from the URL; the rest of the identity
                # is taken from the auth middleware headers.
                kwargs.pop("tenant_id")
                ctx = context.Context(
                    flask.request.headers['X-User-Id'],
                    flask.request.headers['X-Tenant-Id'],
                    flask.request.headers['X-Auth-Token'],
                    flask.request.headers['X-Service-Catalog'],
                    flask.request.headers['X-User-Name'],
                    flask.request.headers['X-Tenant-Name'],
                    flask.request.headers['X-Roles'].split(','))
                context.set_ctx(ctx)

                if flask.request.method in ['POST', 'PUT']:
                    kwargs['data'] = request_data()

                try:
                    return func(**kwargs)
                except ex.SaharaException as e:
                    return bad_request(e)
                except Exception as e:
                    return internal_error(500, 'Internal Server Error', e)

            f_rule = "/<tenant_id>" + rule
            self.add_url_rule(f_rule, endpoint, handler, **options)
            self.add_url_rule(f_rule + '.json', endpoint, handler, **options)
            self.add_url_rule(f_rule + '.xml', endpoint, handler, **options)

            return func

        return decorator
# Pre-built Accept values for the two supported response content types.
RT_JSON = datastructures.MIMEAccept([("application/json", 1)])
RT_XML = datastructures.MIMEAccept([("application/xml", 1)])
def _init_resp_type(file_upload):
    """Extracts response content type."""
    # get content type from Accept header
    resp_type = flask.request.accept_mimetypes
    # url /foo.xml
    if flask.request.path.endswith('.xml'):
        resp_type = RT_XML
    # url /foo.json
    if flask.request.path.endswith('.json'):
        resp_type = RT_JSON
    # An extension on the URL overrides the Accept header.
    flask.request.resp_type = resp_type
    # set file upload flag
    flask.request.file_upload = file_upload
def render(res=None, resp_type=None, status=None, **kwargs):
    """Serialize *res* (merged with **kwargs) into a flask.Response.

    Status code and content type default to the values stashed on
    flask.request by Rest.route / _init_resp_type, then fall back to
    200 / JSON.
    """
    if not res:
        res = {}
    if type(res) is dict:
        res.update(kwargs)
    elif kwargs:
        # can't merge kwargs into the non-dict res
        abort_and_log(500, "Non-dict and non-empty kwargs passed to render")
    status_code = getattr(flask.request, 'status_code', None)
    if status:
        status_code = status
    if not status_code:
        status_code = 200
    if not resp_type:
        resp_type = getattr(flask.request, 'resp_type', RT_JSON)
    if not resp_type:
        resp_type = RT_JSON
    serializer = None
    if "application/json" in resp_type:
        resp_type = RT_JSON
        serializer = wsgi.JSONDictSerializer()
    elif "application/xml" in resp_type:
        resp_type = RT_XML
        serializer = wsgi.XMLDictSerializer()
    else:
        abort_and_log(400, "Content type '%s' isn't supported" % resp_type)
    body = serializer.serialize(res)
    resp_type = str(resp_type)
    return flask.Response(response=body, status=status_code,
                          mimetype=resp_type)
def request_data():
    """Deserialize the current request body and return its 'body' payload.

    File uploads are returned as raw bytes; parsed data is cached on the
    request object so repeated calls do not re-parse.
    """
    if hasattr(flask.request, 'parsed_data'):
        return flask.request.parsed_data
    # NOTE(review): content_length may be None for chunked requests; this
    # Python 2 comparison treats that as empty — confirm acceptable.
    if not flask.request.content_length > 0:
        LOG.debug("Empty body provided in request")
        return dict()
    if flask.request.file_upload:
        return flask.request.data
    deserializer = None
    content_type = flask.request.mimetype
    if not content_type or content_type in RT_JSON:
        deserializer = wsgi.JSONDeserializer()
    elif content_type in RT_XML:
        abort_and_log(400, "XML requests are not supported yet")
        # deserializer = XMLDeserializer()
    else:
        abort_and_log(400, "Content type '%s' isn't supported" % content_type)
    # parsed request data to avoid unwanted re-parsings
    parsed_data = deserializer.deserialize(flask.request.data)['body']
    flask.request.parsed_data = parsed_data
    return flask.request.parsed_data
def get_request_args():
    """Return the query-string arguments of the current request."""
    return flask.request.args
def abort_and_log(status_code, descr, exc=None):
    """Log the failure (with traceback if *exc* is given) and abort."""
    LOG.error("Request aborted with status code %s and message '%s'",
              status_code, descr)
    if exc is not None:
        LOG.error(traceback.format_exc())
    flask.abort(status_code, description=descr)
def render_error_message(error_code, error_message, error_name):
    """Render the standard error payload, using *error_code* as HTTP status."""
    resp = render({
        "error_code": error_code,
        "error_message": error_message,
        "error_name": error_name
    })
    resp.status_code = error_code
    return resp
def internal_error(status_code, descr, exc=None):
    """Log an unexpected failure and render a 5xx error response."""
    LOG.error("Request aborted with status code %s and message '%s'",
              status_code, descr)
    if exc is not None:
        LOG.error(traceback.format_exc())
    # 501 gets its own error name; everything else reports as a generic
    # internal server error.
    name = "NOT_IMPLEMENTED_ERROR" if status_code == 501 else "INTERNAL_SERVER_ERROR"
    return render_error_message(status_code, descr, name)
def bad_request(error):
    """Render a 400 response from a SaharaException-style *error*."""
    error_code = 400
    LOG.debug("Validation Error occurred: "
              "error_code=%s, error_message=%s, error_name=%s",
              error_code, error.message, error.code)
    return render_error_message(error_code, error.message, error.code)
def not_found(error):
    """Render a 404 response from a SaharaException-style *error*."""
    code = 404
    LOG.debug("Not Found exception occurred: "
              "error_code=%s, error_message=%s, error_name=%s",
              code, error.message, error.code)
    return render_error_message(code, error.message, error.code)
|
|
## Central Flask application script. Includes all routes for the REST API,
## error handlers, and business logic. Authenticates route calls via simple HTTP
## username:password auth. Imports SQLAlchemy ORM models in order to interact with
## PostgreSQL datastore.
##
## TODO: Decompose business logic and import via functions
#!flask/bin/python
import os, types, json
from datetime import datetime
from random import randint
from modules.make_error_response import make_error_response
from flask import Flask
from flask import jsonify, request, abort, url_for
from flask.ext.httpauth import HTTPBasicAuth
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.cors import CORS
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS']) ## load environment settings
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['CORS_HEADERS'] = 'Content-Type'
auth = HTTPBasicAuth()
## CORS (with credentials) is enabled for the /api/* routes only.
cors = CORS(app, resources = r'/api/*', supports_credentials = True)
db = SQLAlchemy(app)
### Basic HTTP AUTH ###
@auth.get_password
def get_password(username):
    ## Only the configured master user can authenticate; returning None
    ## for any other username makes flask-httpauth reject the request.
    if username != app.config['MASTER_USERNAME']:
        return None
    return app.config['MASTER_PASSWORD']
### HTTP GET ROUTES ###
@app.route('/api/moonlets', methods=['GET'])
@auth.login_required
def get_moonlets():
    ## Return every moonlet as JSON.
    ## NOTE(review): responds 201 to a plain GET; 200 is conventional —
    ## confirm clients before changing.
    from models import Moonlet
    try:
        results = Moonlet.query.all() # query database via sqlalchemy
        results = [ item.serialize() for item in results ] # use class method to serialize each item
        return jsonify({ 'moonlets': results }), 201 # return as json
    except Exception as error:
        print error
        abort(500)
@app.route('/api/moonlets/<int:moonlet_id>', methods=['GET'])
@auth.login_required
def get_moonlet(moonlet_id):
    ## Return one moonlet by primary key, or 404 if it does not exist.
    from models import Moonlet
    try:
        result = Moonlet.query.filter_by(id = moonlet_id).first() # query for moonlet
        if result is None: return make_error_response('User or Moonlet Not Found', 404)
        result = result.serialize()
        return jsonify({ 'moonlet': result }), 201
    except Exception as error:
        print error
        abort(500)
@app.route('/api/moonlets/sale', methods=['GET'])
@auth.login_required
def get_sales():
    ## Return all moonlets currently flagged on_sale.
    from models import Moonlet
    try:
        results = Moonlet.query.filter(Moonlet.on_sale == True).all()
        ## NOTE(review): query.all() returns [] rather than None, so this
        ## 404 branch never fires — confirm whether empty should 404.
        if results is None: return make_error_response('User or Moonlet Not Found', 404) # returns None if unfound
        results = [ item.serialize() for item in results ] # use class method to serialize each item
        return jsonify({ 'moonlets': results }), 201 # return as json
    except Exception as error:
        print error
        abort(500)
@app.route('/api/moonlets/limited', methods=['GET'])
@auth.login_required
def get_limited():
    ## Return all moonlets flagged as limited editions.
    from models import Moonlet
    try:
        results = Moonlet.query.filter(Moonlet.limited == True).all()
        ## NOTE(review): query.all() returns [] rather than None, so this
        ## 404 branch never fires — confirm whether empty should 404.
        if results is None: return make_error_response('User or Moonlet Not Found', 404) # returns None if unfound
        results = [ item.serialize() for item in results ] # use class method to serialize each item
        return jsonify({ 'moonlets': results }), 201 # return as json
    except Exception as error:
        print error
        abort(500)
@app.route('/api/users', methods=['GET'])
@auth.login_required
def get_users():
    ## Return every user as JSON.
    from models import User
    try:
        results = User.query.all() # query database via sqlalchemy
        results = [ item.serialize() for item in results ] # use class method to serialize each item
        return jsonify({ 'users': results }), 201 # return as json
    except Exception as error:
        print error
        abort(500)
@app.route('/api/users/<string:username>', methods=['GET'])
@auth.login_required
def get_user(username):
    ## Return one user by username, or 404 if absent.
    from models import User
    try:
        result = User.query.filter_by(username = username).first()
        if result is None: return make_error_response('User or Moonlet Not Found', 404) # returns None if unfound
        result = result.serialize() # use class method to serialize each item
        return jsonify({ 'user': result }), 201 # return as json
    except Exception as error:
        print error
        abort(500)
### HTTP PUT ROUTES ###
@app.route('/api/moonlets/<int:moonlet_id>', methods=['PUT'])
@auth.login_required
def update_moonlet(moonlet_id):
    ## Partial update of one moonlet from request.json['update']: each
    ## recognized field is type-checked (Python 2 'types' module) before
    ## assignment; a single commit persists all changes.
    from models import Moonlet
    if not request.json or not 'update' in request.json:
        abort(400)
    update = request.json['update']
    try:
        moonlet = Moonlet.query.filter_by(id = moonlet_id).first()
        if moonlet is None: return make_error_response('User or Moonlet Not Found', 404)
        # check the update object for the fields that need updating, then type-check and assign
        if 'img_src' in update:
            new_src = update['img_src']
            if not isinstance(new_src, types.UnicodeType):
                return make_error_response('Update.img_source must be a string!', 400)
            moonlet.img_src = new_src
        if 'color' in update:
            new_color = update['color']
            if not isinstance(new_color, types.UnicodeType):
                return make_error_response('Update.color must be a string!', 400)
            moonlet.color = new_color
        if 'price' in update:
            new_price = update['price']
            if not isinstance(new_price, types.IntType):
                return make_error_response('Update.price must be an int!', 400)
            moonlet.price = new_price
        if 'discount' in update:
            new_discount = update['discount']
            if not isinstance(new_discount, types.IntType):
                return make_error_response('Update.discount must be an int!', 400)
            moonlet.discount = new_discount
        if 'inventory' in update:
            new_inv = update['inventory']
            if not isinstance(new_inv, types.IntType):
                return make_error_response('Update.inventory must be an int!', 400)
            moonlet.inventory = new_inv
        if 'description' in update:
            new_desc = update['description']
            if not isinstance(new_desc, types.UnicodeType):
                return make_error_response('Update.description must be a string!', 400)
            moonlet.description = new_desc
        if 'classification' in update:
            new_class = update['classification']
            if not isinstance(new_class, types.UnicodeType):
                return make_error_response('Update.classification must be a string!', 400)
            moonlet.classification = new_class
        if 'limited' in update:
            new_limited = update['limited']
            if not isinstance(new_limited, types.BooleanType):
                return make_error_response('Update.limited must be a bool!', 400)
            moonlet.limited = new_limited
        if 'sale' in update:
            new_sale = update['sale']
            if not isinstance(new_sale, types.BooleanType):
                return make_error_response('Update.featured must be a bool!', 400)
            moonlet.on_sale = new_sale
        db.session.commit()
        ## NOTE(review): close() after commit mirrors the other routes —
        ## presumably a model helper that releases the session; confirm.
        moonlet.close()
        return(jsonify({ 'message': 'Moonlet updated!'})), 201
    except Exception as error:
        print error
        abort(500)
# currently only updates a user's email
@app.route('/api/users/<string:username>', methods=['PUT'])
@auth.login_required
def update_user(username):
    ## Update a user's email after a minimal sanity check (exactly one '@').
    if not request.json or not 'email' in request.json:
        abort(400)
    from models import User
    newEmail = str(request.json['email'])
    simpleEmailAuth = newEmail.split('@')
    if len(simpleEmailAuth) != 2: return make_error_response('Invalid email address!', 404)
    try:
        user = User.query.filter_by(username = username).first()
        if user is None: return make_error_response('User or Moonlet Not Found', 404)
        user.email = newEmail
        db.session.commit()
        user.close()
        return(jsonify({ 'message': 'User email updated'})), 201
    except Exception as error:
        print error
        abort(500)
# update a user's cart
@app.route('/api/users/cart/<string:username>', methods=['PUT'])
@auth.login_required
def update_user_cart(username):
    ## Replace the user's stored cart wholesale with request.json['cart'].
    from models import User
    if not request.json or not 'cart' in request.json:
        abort (400)
    cart = request.json['cart']
    try:
        user = User.query.filter_by(username = username).first()
        if user is None: return make_error_response('User or Moonlet Not Found', 404)
        user.cart = { 'current': cart }
        db.session.commit()
        user.close()
        return jsonify({ 'message': 'Cart updated!' }), 201
    except Exception as error:
        print error
        abort(500)
# updates a user's refunds
@app.route('/api/users/refund/<string:username>', methods=['PUT'])
@auth.login_required
def update_user_refund(username):
    ## Refund one prior purchase: remove it from the user's transaction
    ## history, credit the price back to their balance, return stock to
    ## each affected moonlet, and append a 'refund' transaction that
    ## shares the original transaction id.
    if not request.json or not 'transaction' in request.json:
        abort(400)
    from models import User, Moonlet
    # set up a new transaction refund
    now = str(datetime.utcnow())
    transactionID = int(request.json['transaction'])
    newTransaction = {
        'timestamp': now,
        'transaction': 'refund',
        'moonlets': [],
        'price': 0,
        'id': transactionID
    }
    try:
        user = User.query.filter_by(username = username).first()
        if user is None: return make_error_response('User or Moonlet Not Found', 404) # returns None if unfound
        ## pull out fields to be modified
        temp = user.serialize()
        currentTransactions = temp['transactions']['history']
        currentBalance = temp['balance']
        currentMoonlets = temp['moonlets']['inventory']
        ## new variables to represent clean state
        newHistory = [] # new history array to be updated to user store after refund item removed for history
        newMoonlets = [] # new moonlets array to be updated to remove a moonlet if refund = 0
        updatedMoonlets = [] # holds moonlets that have been altered via this refund
        newBalance = 0
        refundAmount = 0
        refundTransaction = None
        # find the transaction to be refunded; a prior refund with the same
        # id falls through to newHistory so it cannot be refunded twice
        for x in currentTransactions:
            currentID = int(x['id'])
            if currentID == transactionID and x['transaction'] != 'refund':
                refundTransaction = x
            else:
                newHistory.append(x) # add other transactions to the new history array
        # if empty, transaction not found, if more than 1 transaction, transaction already refunded
        if refundTransaction is None: return make_error_response('Transaction not found!', 404)
        # calculate cost of transaction and finish construction of transaction object
        for y in refundTransaction['moonlets']:
            item = str(y['item'])
            identity = int(item)
            price = int(y['price'])
            amount = int(y['amount'])
            newTransaction['moonlets'].append(y)
            refundAmount += price * amount # amount of item * price of item
            ## remove current moonlet from user's moonlet inventory
            ## NOTE(review): removing from currentMoonlets while iterating
            ## it can skip entries — confirm intended.
            for i in currentMoonlets:
                if item in i:
                    ## capture new inventory amount if refund does not take to 0
                    if (i[item] - amount) > 0:
                        a = i[item] - amount
                        updatedMoonlets.append({ item: a })
                    ## remove from current regardless
                    currentMoonlets.remove(i)
            ## Reflect new refund in moonlet inventory
            moonlet = Moonlet.query.filter_by(id = identity).first()
            tempInventory = moonlet.inventory
            tempInventory = tempInventory + amount
            moonlet.inventory = tempInventory
        ## Finish transaction object and assign clean states
        newMoonlets = currentMoonlets + updatedMoonlets
        newTransaction['price'] = refundAmount
        newBalance = currentBalance + refundAmount # update user's balance entry after refund
        newHistory.append(newTransaction) # update user's transaction entries
        # Update user's database entry with new values
        user.moonlets = { 'inventory': newMoonlets }
        user.transactions = { 'history': newHistory }
        user.balance = newBalance
        db.session.commit()
        # return the transaction created by the refund to be used by the view
        return jsonify({ 'message': 'Refund Made!', 'transaction': newTransaction }), 201
    except Exception as error:
        print error
        abort(500)
# Updates a user's purchases - compares request transaction to user's stored cart
# TODO: Add a check against PUT request and database information
@app.route('/api/users/purchase/<string:username>', methods=['PUT'])
@auth.login_required
def update_user_purchase(username):
    ## Convert the user's stored cart into a 'purchase' transaction:
    ## debit their balance, add the items to their inventory, decrement
    ## each moonlet's stock, clear the cart, and record the transaction.
    ## NOTE(review): there is no funds check — the balance can go
    ## negative; confirm intended.
    from models import User, Moonlet
    now = str(datetime.utcnow())
    transactionID = randint(100000, 999999) + randint(999999, 99999999)
    newTransaction = {
        'timestamp': now,
        'transaction': 'purchase',
        'id': transactionID,
        'moonlets': [],
        'price': 0
    }
    try:
        user = User.query.filter_by(username = username).first()
        if user is None: return make_error_response('User or Moonlet Not Found', 404) # returns None if unfound
        # pull out information to be modified
        temp = user.serialize()
        currentCart = temp['cart']['current']
        currentTransactions = temp['transactions']['history']
        currentMoonlets = temp['moonlets']['inventory']
        currentBalance = temp['balance']
        ## new variables to represent clean state
        newTransactions = []
        newMoonlets = []
        updatedMoonlets = [] # holds moonlets that have been altered via this purchase
        newBalance = 0
        transactionCost = 0 # cost of this transaction
        # calculate cost of transaction and finish construction of transaction object
        for c in currentCart:
            item = str(c['item'])
            identity = int(item)
            price = int(c['price'])
            amount = int(c['amount'])
            newTransaction['moonlets'].append(c)
            transactionCost += price * amount # amount of item * price of item
            found = False
            ## add current moonlet to user's moonlet inventory
            for x in currentMoonlets:
                if item in x:
                    a = x[item] + amount
                    updatedMoonlets.append({ item: a })
                    currentMoonlets.remove(x)
                    found = True
                    break
            if found == False: updatedMoonlets.append({ item: amount })
            ## Reflect new purchase in each moonlet's inventory
            moonlet = Moonlet.query.filter_by(id = identity).first()
            tempInventory = moonlet.inventory
            tempInventory = tempInventory - amount
            moonlet.inventory = tempInventory
        ## Finish new transaction and assign clean states
        ## transfer past transactions and new moonlets
        for y in currentTransactions: newTransactions.append(y)
        newMoonlets = currentMoonlets + updatedMoonlets
        newTransaction['price'] = transactionCost
        newBalance = currentBalance - transactionCost # update user's balance entry after transaction
        newTransactions.append(newTransaction) # update user's transaction entries
        ## Update user's database entry with new values
        user.moonlets = { 'inventory': newMoonlets }
        user.transactions = { 'history': newTransactions }
        user.balance = newBalance
        user.cart = { 'current': [] }
        db.session.commit()
        return jsonify({ 'message': 'Purchase Made!', 'transaction': newTransaction }), 201
    except Exception as error:
        print error
        abort(500)
### HTTP POST ROUTES ###
@app.route('/api/moonlets', methods=['POST'])
@auth.login_required
def create_moonlet():
if not request.json or not 'name' in request.json:
abort(400)
from models import Moonlet
newName = request.json['name']
try:
moonlet = Moonlet.query.filter_by(display_name = newName).first()
if moonlet is not None: return make_error_response('Moonlet already exists!', 400)
newMoonlet = Moonlet( # create a new table item out of the posted json or defaults
name = newName,
idNum = randint(10000000, 99999999),
desc = request.json.get('description', 'A newly discovered moonlet!'),
classif = request.json.get('classification', 'AA-Zeus'),
color = request.json.get('color', 'Grey'),
inv = request.json.get('inventory', 100),
price = request.json.get('price', 1000),
disc = request.json.get('discount', 10),
sale = request.json.get('on_sale', False),
ltd = request.json.get('limited', False),
src = request.json.get('img_src', '/assets/moonlets/generic.png')
)
db.session.add(newMoonlet)
db.session.commit()
moonlet.close()
return jsonify({ 'message': 'New moonlet saved to database!' }), 201
except Exception as error:
print error
abort(500)
@app.route('/api/users', methods=['POST'])
@auth.login_required
def create_user():
    ## Create a new user from posted JSON; 'username' is required and must
    ## be unique. Returns the serialized new user on success.
    if not request.json or not 'username' in request.json:
        abort(400)
    from models import User
    username = request.json['username']
    try:
        user = User.query.filter_by(username = username).first()
        if user is not None: return make_error_response('User already exists', 400)
        user = User(
            usr = username,
            email = request.json.get('email', ''),
            platform = request.json.get('platform', ''),
            name = request.json.get('name', 'J. Doe'),
            balance = request.json.get('balance', 10000),
            moonlets = { 'inventory': [] },
            transactions = { 'history': [] },
            cart = { 'current': [] }
        )
        ## serialize new user to return with confirmation
        userJSON = user.serialize()
        db.session.add(user)
        db.session.commit()
        user.close()
        ## NOTE(review): 'messsage' key typo is client-visible — fix only in
        ## tandem with API consumers.
        return jsonify({ 'messsage': 'New user saved to database!', 'user': userJSON }), 201
    except Exception as error:
        print error
        abort(500)
#### HTTP DELETE ROUTES ###
@app.route('/api/moonlets/<int:moonlet_id>', methods=['DELETE'])
@auth.login_required
def delete_moonlet(moonlet_id):
    ## Delete a moonlet by primary key; 404 if it does not exist.
    from models import Moonlet
    try:
        moonlet = Moonlet.query.filter_by(id = moonlet_id).first()
        if moonlet is None: return make_error_response('User or Moonlet Not Found', 404)
        moonlet.close() # internal session closure to remove conflict
        db.session.delete(moonlet)
        db.session.commit()
        return jsonify({ 'messsage': 'Moonlet successfully deleted!'}), 201
    except Exception as error:
        print error
        abort(500)
@app.route('/api/users/<string:username>', methods=['DELETE'])
@auth.login_required
def delete_user(username):
    ## Delete a user by username; 404 if it does not exist.
    from models import User
    try:
        user = User.query.filter_by(username = username).first()
        if user is None: return make_error_response('User or Moonlet Not Found', 404)
        user.close() # internal session closure to remove conflict
        db.session.delete(user)
        db.session.commit()
        return jsonify({ 'messsage': 'User successfully deleted!'}), 201
    except Exception as error:
        print error
        abort(500)
### ERROR HANDLERS ###
## Uniform JSON error payloads for the common HTTP failure modes.
@app.errorhandler(405)
def not_allowed(error):
    return make_error_response('Request Not Allowed', 405)
@app.errorhandler(404)
def not_found(error):
    return make_error_response('Not Found', 404)
@app.errorhandler(400)
def bad_request(error):
    return make_error_response('Bad Request', 400)
@app.errorhandler(500)
def internal_error(error):
    return make_error_response('Internal Error', 500)
@auth.error_handler
def unauthorized():
    ## NOTE(review): failed auth reports 403 rather than the conventional
    ## 401 — confirm clients expect this.
    return make_error_response('Unauthorized Access', 403)
if __name__ == '__main__':
    ## Development server entry point (threaded request handling).
    app.run(threaded = True)
|
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import linalg, sparse
from . import check_random_state, deprecated
from .fixes import np_version
from .fixes import logsumexp as scipy_logsumexp
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
@deprecated("sklearn.utils.extmath.norm was deprecated in version 0.19 "
            "and will be removed in 0.21. Use scipy.linalg.norm instead.")
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). More precise than sqrt(squared_norm(x)).

    .. deprecated:: 0.19
       Use :func:`scipy.linalg.norm` instead.
    """
    return linalg.norm(x)
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Faster than norm(x) ** 2.

    Parameters
    ----------
    x : array_like

    Returns
    -------
    float
        The Euclidean norm when x is a vector, the Frobenius norm when x
        is a matrix (2-d array).
    """
    flat = np.ravel(x, order='K')
    # np.dot on integer input can silently overflow; steer callers toward
    # float data instead.
    if np.issubdtype(flat.dtype, np.integer):
        warnings.warn('Array type is integer, np.dot may overflow. '
                      'Data should be float type to avoid this issue',
                      UserWarning)
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices and does not create an X.shape-sized temporary.

    Performs no input validation.

    Parameters
    ----------
    X : array_like
        The input array
    squared : bool, optional (default = False)
        If True, return squared norms.

    Returns
    -------
    array_like
        The row-wise (squared) Euclidean norm of X.
    """
    if sparse.issparse(X):
        # csr_row_norms only understands CSR; convert other sparse formats.
        if not isinstance(X, sparse.csr_matrix):
            X = sparse.csr_matrix(X)
        norms = csr_row_norms(X)
    else:
        # Row-wise dot products without materializing an X-sized X * X.
        norms = np.einsum('ij,ij->i', X, X)
    if squared:
        return norms
    np.sqrt(norms, out=norms)  # take the square root in place
    return norms
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust: it returns
    -Inf if det(A) is non positive or is not defined.

    Parameters
    ----------
    A : array_like
        The matrix
    """
    sign, logdet = np.linalg.slogdet(A)
    # A non-positive sign means det(A) <= 0 (or undefined) — report -Inf.
    return logdet if sign > 0 else -np.inf
def _impose_f_order(X):
    """Return (X as a Fortran-ordered array, whether a transpose was taken)."""
    # important to access flags instead of calling np.isfortran,
    # this catches corner cases.
    if X.flags.c_contiguous:
        # Transposing a C-contiguous array yields an F-contiguous view;
        # the True flag records that the transpose was taken.
        return check_array(X.T, copy=False, order='F'), True
    else:
        return check_array(X, copy=False, order='F'), False
@deprecated("sklearn.utils.extmath.fast_dot was deprecated in version 0.19 "
            "and will be removed in 0.21. Use the equivalent np.dot instead.")
def fast_dot(a, b, out=None):
    """Deprecated alias for :func:`numpy.dot`, kept for backward compat."""
    return np.dot(a, b, out)
def density(w, **kwargs):
    """Compute density of a sparse vector.

    Parameters
    ----------
    w : array_like
        The sparse vector

    Returns
    -------
    float
        The density of w, between 0 and 1
    """
    if w is None:
        return 0
    if hasattr(w, "toarray"):
        # Sparse input: fraction of stored (non-zero) entries.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    # Dense input: count explicit non-zeros.
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.

    Parameters
    ----------
    a : array or sparse matrix
    b : array or sparse matrix
    dense_output : boolean, default False
        When False, either ``a`` or ``b`` being sparse will yield sparse
        output. When True, output will always be an array.

    Returns
    -------
    dot_product : array or sparse matrix
        sparse if ``a`` or ``b`` is sparse and ``dense_output=False``.
    """
    if not (sparse.issparse(a) or sparse.issparse(b)):
        # Both dense: defer straight to BLAS via np.dot.
        return np.dot(a, b)
    product = a * b  # scipy sparse '*' is matrix multiplication
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter,
                            power_iteration_normalizer='auto',
                            random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A : 2D array
        The input data matrix
    size : integer
        Size of the return array
    n_iter : integer
        Number of power iterations used to stabilize the result
    power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter` <= 2 and switches to LU otherwise.

        .. versionadded:: 0.18

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    Returns
    -------
    Q : 2D array
        A (size x size) projection matrix, the range of which
        approximates well the range of the input matrix A.

    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061

    An implementation of a randomized algorithm for principal component
    analysis
    A. Szlam et al. 2014
    """
    random_state = check_random_state(random_state)
    # Generating normal random vectors with shape: (A.shape[1], size)
    Q = random_state.normal(size=(A.shape[1], size))
    if A.dtype.kind == 'f':
        # Ensure f32 is preserved as f32
        Q = Q.astype(A.dtype, copy=False)
    # Deal with "auto" mode
    if power_iteration_normalizer == 'auto':
        if n_iter <= 2:
            power_iteration_normalizer = 'none'
        else:
            power_iteration_normalizer = 'LU'
    # Perform power iterations with Q to further 'imprint' the top
    # singular vectors of A in Q
    for i in range(n_iter):
        # NOTE(review): an unrecognized power_iteration_normalizer value
        # silently skips normalization for the iteration — confirm intended.
        if power_iteration_normalizer == 'none':
            Q = safe_sparse_dot(A, Q)
            Q = safe_sparse_dot(A.T, Q)
        elif power_iteration_normalizer == 'LU':
            Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
            Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
        elif power_iteration_normalizer == 'QR':
            Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
            Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
    # Sample the range of A using by linear projection of Q
    # Extract an orthonormal basis
    Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
                   power_iteration_normalizer='auto', transpose='auto',
                   flip_sign=True, random_state=0):
    """Computes a truncated randomized SVD

    Parameters
    ----------
    M : ndarray or sparse matrix
        Matrix to decompose
    n_components : int
        Number of singular values and vectors to extract.
    n_oversamples : int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples. Smaller
        number can improve speed but can negatively impact the quality of
        approximation of singular vectors and singular values.
    n_iter : int or 'auto' (default is 'auto')
        Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)) `n_iter` in which case is set to 7.
        This improves precision with few components.

        .. versionchanged:: 0.18

    power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter` <= 2 and switches to LU otherwise.

        .. versionadded:: 0.18

    transpose : True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case.

        .. versionchanged:: 0.18

    flip_sign : boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    Returns
    -------
    U, s, V : ndarrays
        Truncated factors: U has shape (n_samples, n_components), s has
        shape (n_components,) and V has shape (n_components, n_features).

    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components. In order to
    obtain further speed up, `n_iter` can be set <=2 (at the cost of
    loss of precision).

    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061

    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert

    * An implementation of a randomized algorithm for principal component
      analysis
      A. Szlam et al. 2014
    """
    if isinstance(M, (sparse.lil_matrix, sparse.dok_matrix)):
        warnings.warn("Calculating SVD of a {} is expensive. "
                      "csr_matrix is more efficient.".format(
                          type(M).__name__),
                      sparse.SparseEfficiencyWarning)
    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if n_iter == 'auto':
        # Checks if the number of iterations is explicitly specified
        # Adjust n_iter. 7 was found a good compromise for PCA. See #5299
        n_iter = 7 if n_components < .1 * min(M.shape) else 4
    if transpose == 'auto':
        transpose = n_samples < n_features
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    Q = randomized_range_finder(M, n_random, n_iter,
                                power_iteration_normalizer, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    U = np.dot(Q, Uhat)
    if flip_sign:
        if not transpose:
            U, V = svd_flip(U, V)
        else:
            # In case of transpose u_based_decision=false
            # to actually flip based on u and not v.
            U, V = svd_flip(U, V, u_based_decision=False)
    if transpose:
        # transpose back the results according to the input convention
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
@deprecated("sklearn.utils.extmath.logsumexp was deprecated in version 0.19 "
            "and will be removed in 0.21. Use scipy.misc.logsumexp instead.")
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    .. deprecated:: 0.19
       Thin backward-compatibility wrapper; delegates directly to
       ``scipy.misc.logsumexp`` (imported at module level as
       ``scipy_logsumexp``).

    Parameters
    ----------
    arr : array-like
        Input values, assumed to be in the log domain.

    axis : int, optional (default=0)
        Axis along which the log-sum-exp is computed.

    Returns
    -------
    out : ndarray or scalar
        ``log(sum(exp(arr)))`` computed along ``axis``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.458...
    >>> logsumexp(a)  # doctest: +SKIP
    9.458...
    """
    # Pure delegation; kept only for the deprecation cycle.
    return scipy_logsumexp(arr, axis)
def weighted_mode(a, w, axis=0):
    """Return the weighted modal (most common) value in `a`.

    If several values share the highest weighted count, only the first
    (smallest) one is returned, together with its weighted bin-count.
    This extends the algorithm of ``scipy.stats.mode`` with per-sample
    weights.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value.
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.
        ``None`` flattens both inputs first.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([4.]), array([3.]))
    >>> weights = [1, 3, 0.5, 1.5, 1, 2]  # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([2.]), array([3.5]))

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
        if a.shape != w.shape:
            # Broadcast a scalar/incompatible weight to the data shape.
            w = np.full(a.shape, w, dtype=w.dtype)

    candidates = np.unique(np.ravel(a))  # every distinct value, sorted
    out_shape = list(a.shape)
    out_shape[axis] = 1
    best_vals = np.zeros(out_shape)
    best_scores = np.zeros(out_shape)
    for candidate in candidates:
        # Weighted count of `candidate` along `axis`.
        contrib = np.zeros(a.shape)
        mask = (a == candidate)
        contrib[mask] = w[mask]
        score = np.expand_dims(np.sum(contrib, axis), axis)
        # Strict `>` keeps the earlier (smaller) value on ties.
        best_vals = np.where(score > best_scores, candidate, best_vals)
        best_scores = np.maximum(score, best_scores)
    return best_vals, best_scores
@deprecated("sklearn.utils.extmath.pinvh was deprecated in version 0.19 "
            "and will be removed in 0.21. Use scipy.linalg.pinvh instead.")
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    .. deprecated:: 0.19
       Thin backward-compatibility wrapper; delegates directly to
       ``scipy.linalg.pinvh``.

    Parameters
    ----------
    a : array-like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for small eigenvalues, forwarded to scipy.
    lower : bool, optional (default=True)
        Whether the relevant triangle of ``a`` is the lower one.

    Returns
    -------
    ndarray
        The pseudo-inverse of ``a``.
    """
    return linalg.pinvh(a, cond, rcond, lower)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray, optional
        Preallocated array of shape (M, len(arrays)) to place the
        product in; a new array is allocated when omitted.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays. The dtype is taken from the first input.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(arr) for arr in arrays]
    dtype = arrays[0].dtype

    # Enumerate every coordinate combination: one row per product element,
    # one column per input array.
    dims = (len(arr) for arr in arrays)
    index_grid = np.indices(dims).reshape(len(arrays), -1).T

    if out is None:
        out = np.empty_like(index_grid, dtype=dtype)
    for col, arr in enumerate(arrays):
        out[:, col] = arr[index_grid[:, col]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.

    Parameters
    ----------
    u : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
        so one can compute `np.dot(u * s, v)`. Modified in place.
    v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
        so one can compute `np.dot(u * s, v)`. Modified in place.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base the
        decision on is generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    # Fix: use the builtin `range` instead of `xrange` — consistent with
    # _deterministic_vector_sign_flip in this module and compatible with
    # Python 3 (identical behavior on Python 2, where range returns a list).
    if u_based_decision:
        # columns of u, rows of v
        max_abs_cols = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[max_abs_cols, range(u.shape[1])])
        u *= signs
        v *= signs[:, np.newaxis]
    else:
        # rows of v, columns of u
        max_abs_rows = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[range(v.shape[0]), max_abs_rows])
        u *= signs
        v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.

    This implementation is numerically stable because it splits positive and
    negative values::

        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0

    For the ordinary logistic function, use ``scipy.special.expit``.

    Parameters
    ----------
    X : array-like, shape (M, N) or (M, )
        Argument to the logistic function.
    out : array-like, shape (M, N) or (M, ), optional
        Preallocated output array.

    Returns
    -------
    out : array, shape (M, N) or (M, )
        Log of the logistic function evaluated at every point in X.

    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    was_1d = X.ndim == 1
    X = check_array(np.atleast_2d(X), dtype=np.float64)
    n_samples, n_features = X.shape

    if out is None:
        out = np.empty_like(X)
    # The stable piecewise evaluation lives in the compiled helper.
    _log_logistic_sigmoid(n_samples, n_features, X, out)

    # Restore the 1-D shape when the caller passed a vector.
    return np.squeeze(out) if was_1d else out
def softmax(X, copy=True):
    """Calculate the softmax function row-wise.

    Computes ``np.exp(X) / np.sum(np.exp(X), axis=1)`` in a numerically
    safe way: the row maximum is subtracted before exponentiation so that
    large values do not overflow.

    Parameters
    ----------
    X : array-like of floats, shape (M, N)
        Argument to the softmax function.
    copy : bool, optional
        If True (default) work on a copy; if False, X is overwritten
        in place and returned.

    Returns
    -------
    out : array, shape (M, N)
        Softmax function evaluated at every point in X.
    """
    if copy:
        X = np.copy(X)
    # Shift each row by its maximum to avoid overflow in exp.
    X -= np.max(X, axis=1).reshape((-1, 1))
    np.exp(X, X)  # in-place exponentiation
    X /= np.sum(X, axis=1).reshape((-1, 1))
    return X
def safe_min(X):
    """Return the minimum value of a dense or a CSR/CSC matrix.

    Adapted from http://stackoverflow.com/q/13426580

    For a sparse matrix only the stored entries are scanned; if any entry
    is implicit (not stored), zero participates in the minimum as well.

    Parameters
    ----------
    X : array_like
        The input array or sparse matrix.

    Returns
    -------
    Float
        The min value of X.
    """
    if not sparse.issparse(X):
        return X.min()
    if len(X.data) == 0:
        # No stored entries at all: the matrix is all (implicit) zeros.
        return 0
    stored_min = X.data.min()
    if X.getnnz() == X.size:
        return stored_min
    # Some entries are implicit zeros, so zero competes for the minimum.
    return min(stored_min, 0)
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value` by shifting the whole matrix.

    Parameters
    ----------
    X : array_like
        The matrix to make non-negative.
    min_value : float
        The threshold value.

    Returns
    -------
    array_like
        The thresholded array (the input is returned unchanged when it
        already satisfies the threshold).

    Raises
    ------
    ValueError
        When X is sparse and would need shifting (adding a constant to
        every entry would destroy sparsity).
    """
    current_min = safe_min(X)
    if current_min < min_value:
        if sparse.issparse(X):
            raise ValueError("Cannot make the data matrix"
                             " nonnegative because it is sparse."
                             " Adding a value to every entry would"
                             " make it no longer sparse.")
        # Uniform shift so the new minimum equals min_value.
        X = X + (min_value - current_min)
    return X
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
necessary for the calculation of the variance. last_n_samples_seen is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : array-like, shape (n_features,)
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : array, shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = np.nansum(X, axis=0)
new_sample_count = np.sum(~np.isnan(X), axis=0)
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = np.nanvar(X, axis=0) * new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
with np.errstate(divide='ignore', invalid='ignore'):
last_over_new_count = last_sample_count / new_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance + new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
zeros = last_sample_count == 0
updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
    """Use high precision for cumsum and check that final value matches sum.

    The cumulative sum is computed in float64; afterwards its last element
    is compared against an independently computed total, and a
    RuntimeWarning is emitted when they disagree beyond the tolerances.

    Parameters
    ----------
    arr : array-like
        To be cumulatively summed as flat.
    axis : int, optional
        Axis along which the cumulative sum is computed.
        The default (None) is to compute the cumsum over the flattened array.
    rtol : float
        Relative tolerance, see ``np.allclose``.
    atol : float
        Absolute tolerance, see ``np.allclose``.
    """
    if np_version < (1, 9):
        # np.sum is as unstable as cumsum on numpy < 1.9, so the
        # consistency check below would be meaningless — skip it.
        return np.cumsum(arr, axis=axis, dtype=np.float64)
    result = np.cumsum(arr, axis=axis, dtype=np.float64)
    total = np.sum(arr, axis=axis, dtype=np.float64)
    last = result.take(-1, axis=axis)
    if not np.all(np.isclose(last, total, rtol=rtol, atol=atol,
                             equal_nan=True)):
        warnings.warn('cumsum was found to be unstable: '
                      'its last element does not correspond to sum',
                      RuntimeWarning)
    return result
|
|
"""weighted_graph.py: This file is part of the feyncop/feyngen package.
Implements the WeightedGraph class. """
# See also: http://people.physik.hu-berlin.de/~borinsky/
__author__ = "Michael Borinsky"
__email__ = "borinsky@physik.hu-berlin.de"
__copyright__ = "Copyright (C) 2014 Michael Borinsky"
__license__ = "MIT License"
__version__ = "1.0"
# Copyright (c) 2014 Michael Borinsky
# This program is distributed under the MIT License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from math import *
import copy, itertools
from stuff import *
from graph import Graph
class WeightedGraph(Graph):
    """This class extends the basic utilities in the Graph class by the tools
    to handle QED and Yang-Mills graphs.

    Each edge carries an integer weight encoding its particle type:
    1 = fermion ('f'), 2 = boson ('A'), 3 = ghost ('c') — see get_edge_str
    and get_edges_coloring. Boson edges are treated as unoriented; fermion
    and ghost edges are oriented (tail, head).

    NOTE: this class uses Python-2-only syntax (tuple-parameter lambdas,
    the print statement) and will not compile on Python 3.
    """

    def __init__( self, edges, edge_weights, symmetry_factor=0 ):
        """Initializes the WeightedGraph class. Edges, edge_weights and
        symmetry_factor can be provided.

        Parameters
        ----------
        edges : sequence of (v1, v2) pairs
            Edge list of the graph.
        edge_weights : sequence of int
            One particle-type weight per edge; must match len(edges).
        symmetry_factor : int, optional
            Symmetry factor of the graph (default 0).
        """
        # NOTE(review): a bare `raise` outside an except block does not
        # re-raise anything — it fails with TypeError (py2) / RuntimeError
        # (py3). A ValueError with a message would be clearer.
        if len(edges) != len(edge_weights):
            raise
        super(WeightedGraph, self).__init__( edges, symmetry_factor )
        self.edge_weights = edge_weights

    def get_edge_str( self, e ):
        """Return a readable string of the edges of the graph."""
        v1,v2 = self.edges[e]
        w = self.edge_weights[e]
        # Weight -> particle character: 1='f', 2='A', 3='c'.
        wDict = [ '0', 'f', 'A', 'c' ]
        return "[%d,%d,%c]" % (v1,v2,wDict[w])

    def get_edges_tuple( self ):
        """Get a unique tuple to identify the graph. (Unique only for every labeling)."""
        # Boson edges (weight 2) are unoriented: sort each to a canonical
        # orientation before the overall sort.
        return tuple( sorted( ( tuple( sorted(edge) if w==2 else edge ), w) for edge,w in zip(self.edges,self.edge_weights) ) )

    def graph_from_sub_edges( self, sub_edges ):
        """Create a new graph from a sub set of its edges."""
        sub_graph = super(WeightedGraph, self).graph_from_sub_edges( sub_edges )
        # Carry the weights of the retained edges over, in sorted edge order.
        sub_graph.edge_weights = tuple( self.edge_weights[e] for e in sorted(sub_edges) )
        return sub_graph

    def sub_edges_by_weight( self, weight ):
        """Returns all subedges with a certain weight."""
        return frozenset( e for e,w in enumerate(self.edge_weights) if w == weight )

    @property
    def residue_type( self ):
        """Returns the residue type of the graph."""
        def dir_e(e, v):
            # Orientation sign of edge e as seen from vertex v; unoriented
            # boson edges (weight 2) always count as +1, an outgoing
            # oriented edge (v is the tail) counts as -1.
            if self.edge_weights[e] == 2: return 1
            if v == self.edges[e][0]: return -1
            else: return 1

        # Signed weights of all legs attached to external vertices.
        ext_types = [ dir_e(e,v) * self.edge_weights[e] for v in self.external_vtcs_set for e in self.adj_edges( v, self.edges_set ) ]
        return tuple(sorted(ext_types))

    def get_vtx_type( self, v ):
        """Returns the type of the vertex v in the same format as
        residue_type."""
        def dir1(e, v):
            # Same sign convention as residue_type (outgoing = -1).
            if self.edge_weights[e] == 2: return 1
            if v == self.edges[e][0]: return -1
            else: return 1
        def dir2(e, v):
            # Opposite sign convention; used for the second endpoint of
            # self-loops at v.
            if self.edge_weights[e] == 2: return 1
            if v == self.edges[e][0]: return 1
            else: return -1

        adj_types = [ dir1(e,v)*self.edge_weights[e] for e in self.adj_edges( v, self.edges_set ) ]
        # A self-loop contributes a second time, with the opposite sign.
        adj_types += [ dir2(e,v)*self.edge_weights[e] for e in self.edges_set if self.edges[e] == (v,v) ]
        return tuple(sorted(adj_types))

    def get_vtcs_coloring( self ):
        """Helper function: Calculate the vertex coloring in a format suitable
        for the canonical labeling calculation."""
        # All vertices with different numbers of selfloops of different type
        # are colored in another way.
        dictWeights = { edge : self.edge_weights[e] for e,edge in enumerate(self.edges) }
        edge_degree_counter = self.edge_degree_counter(self.edges_set)
        # Per internal vertex: (self-loop multiplicity, weight of that
        # self-loop); weight defaults to 2 when there is no self-loop.
        selfloop_degree_list = [ (edge_degree_counter[(v,v)],dictWeights[(v,v)] if edge_degree_counter[(v,v)] else 2) for v in self.internal_vtcs_set ]
        # Sorting is important for the v even for all similar mul!
        selfloop_multiplicity_list = sorted( (mul,v) for v, mul in zip(self.internal_vtcs_set, selfloop_degree_list) )
        ( ( max_selfloop_multiplicity, _), _ ) = selfloop_multiplicity_list[-1] if selfloop_multiplicity_list else ((0,2), 0)
        # One color class per (multiplicity, weight) combination.
        # NOTE: Python 2 tuple-parameter unpacking in the lambda below.
        self_loop_list = [ frozenset( vtx for mul, vtx in filter( lambda ((mul, we), vtx) : mul == i and we == w, selfloop_multiplicity_list ) ) for i in range( max_selfloop_multiplicity+1 ) for w in (1,2,3) ]
        # External vertices all have the same color still.
        return self_loop_list + [ self.external_vtcs_set ]

    def get_edges_coloring( self, edges_set ):
        """Helper function: Calculate the edge coloring in a format suitable
        for the canonical labeling calculation."""
        # Fermions, bosons and ghosts need different color classes.
        fermion_edges_set = self.sub_edges_by_weight(1) & edges_set
        boson_edges_set = self.sub_edges_by_weight(2) & edges_set
        ghost_edges_set = self.sub_edges_by_weight(3) & edges_set

        # Self-loops of oriented types are dropped from the oriented sets.
        fermion_edges = frozenset( self.edges[i] for i in fermion_edges_set if not self.is_selfloop(self.edges[i]) )
        ghost_edges = frozenset( self.edges[i] for i in ghost_edges_set if not self.is_selfloop(self.edges[i]) )
        boson_edges = frozenset( self.edges[i] for i in boson_edges_set )

        # Fermions and ghosts need orientation. Bosons not!
        # For higher performance some special cases of boson-fermion-ghost
        # edge combinations are included.
        normalize = lambda edge : (max(edge),min(edge))
        # NOTE: Python 2 only — tuple-parameter unpacking in the lambda.
        flip = lambda (x,y) : (y,x)
        # Anti-parallel pairs of fermion/ghost edges form 2-cycles that get
        # their own color class, stored in canonical orientation.
        fermion_loops = frozenset( normalize(edge) for edge in fermion_edges if flip(edge) in fermion_edges )
        ghost_loops = frozenset( normalize(edge) for edge in ghost_edges if flip(edge) in ghost_edges )

        reduced_fermion_edges = fermion_edges - fermion_loops - frozenset( flip(edge) for edge in fermion_loops )
        reduced_ghost_edges = ghost_edges - ghost_loops - frozenset( flip(edge) for edge in ghost_loops )

        # Fermion/ghost edges that are also paralleled by a boson edge.
        boson_fermion_loops = frozenset( edge for edge in reduced_fermion_edges if flip(edge) in boson_edges or edge in boson_edges )
        boson_ghost_loops = frozenset( edge for edge in reduced_ghost_edges if flip(edge) in boson_edges or edge in boson_edges )

        reduced_boson_edges = boson_edges - boson_fermion_loops - frozenset( flip(edge) for edge in boson_fermion_loops ) - boson_ghost_loops - frozenset( flip(edge) for edge in boson_ghost_loops )

        # Unoriented boson edges enter in both orientations.
        dbl_boson_edges = reduced_boson_edges | frozenset( flip(edge) for edge in reduced_boson_edges )

        # Sanity check: after the reductions no edge may remain both as a
        # boson and as an oriented fermion/ghost edge.
        if len(dbl_boson_edges&reduced_fermion_edges) != 0 or \
            len(dbl_boson_edges&reduced_ghost_edges) != 0:
            # NOTE(review): Python 2 print statement; the bare `raise`
            # fails without a descriptive exception.
            print dbl_boson_edges, reduced_fermion_edges
            raise

        # Calculate the boson coloring as in the Graph class.
        boson_coloring = super( WeightedGraph, self).get_edges_coloring( boson_edges_set )

        return [ dbl_boson_edges | reduced_fermion_edges | reduced_ghost_edges,
                fermion_loops, boson_fermion_loops, ghost_loops, boson_ghost_loops,
                reduced_ghost_edges - boson_ghost_loops ] + boson_coloring[1:]

    def get_trivial_symmetry_factor( self ):
        """Calculates the trivial factor in the symmetry factor. Only
        considers edge multiplicity and self loops."""
        grpSize = 1
        # Only boson (weight 2) edges contribute to this factor.
        boson_edges = self.sub_edges_by_weight(2)
        edge_degree_counter = self.edge_degree_counter(boson_edges)
        # k parallel (non-self-loop) edges contribute k!.
        for mul_edge_deg in ( m for edge, m in edge_degree_counter.iteritems() if not self.is_selfloop(edge) ):
            grpSize*= factorial(mul_edge_deg)
        # k self-loops contribute the double factorial of 2k.
        for selfloop_deg in ( m for edge, m in edge_degree_counter.iteritems() if self.is_selfloop(edge) ):
            grpSize*= double_factorial(2*selfloop_deg)
        return grpSize

    def permute_external_edges( self ):
        """Generate all possible graphs with fixed external legs from the
        graph provided that the graph is non-leg-fixed."""
        class FixedGraph( type(self) ):
            # Variant whose canonical-labeling vertex coloring puts every
            # external vertex into its own color class, i.e. fixes the legs.
            def get_vtcs_coloring( self ):
                vtcs_coloring = super(FixedGraph, self).get_vtcs_coloring()
                vtcs_coloring = [ c - self.external_vtcs_set for c in vtcs_coloring]
                vtcs_coloring.extend( frozenset([v]) for v in sorted(self.external_vtcs_set) )
                return vtcs_coloring

        # Split the external vertices by the type/orientation of their leg.
        extern_boson_vtcs = \
            frozenset( v for e in self.sub_edges_by_weight(2) for v in self.edges[e] ) \
            & self.external_vtcs_set
        extern_in_fermion_vtcs = \
            frozenset( self.edges[e][0] for e in self.sub_edges_by_weight(1) ) \
            & self.external_vtcs_set
        extern_out_fermion_vtcs = \
            frozenset( self.edges[e][1] for e in self.sub_edges_by_weight(1) ) \
            & self.external_vtcs_set
        extern_in_ghost_vtcs = \
            frozenset( self.edges[e][0] for e in self.sub_edges_by_weight(3) ) \
            & self.external_vtcs_set
        extern_out_ghost_vtcs = \
            frozenset( self.edges[e][1] for e in self.sub_edges_by_weight(3) ) \
            & self.external_vtcs_set

        extern_vtcs_list = list(extern_boson_vtcs) + \
            list(extern_in_fermion_vtcs) + \
            list(extern_out_fermion_vtcs) + \
            list(extern_in_ghost_vtcs) + \
            list(extern_out_ghost_vtcs)
        # NOTE(review): another bare `raise` — fails without a descriptive
        # exception if the type split does not cover all external vertices.
        if frozenset(extern_vtcs_list) != self.external_vtcs_set:
            raise

        vtcs_list = list(self.internal_vtcs_set) + \
            extern_vtcs_list

        # Permute the external legs within each type class independently;
        # each combination yields one relabeled, leg-fixed copy.
        for perm0 in itertools.permutations( extern_boson_vtcs ):
            for perm1 in itertools.permutations( extern_in_fermion_vtcs ):
                for perm2 in itertools.permutations( extern_out_fermion_vtcs ):
                    for perm3 in itertools.permutations( extern_in_ghost_vtcs ):
                        for perm4 in itertools.permutations( extern_out_ghost_vtcs ):
                            new_vtcs_list = tuple(self.internal_vtcs_set) + \
                                perm0 + perm1 + perm2 + perm3 + perm4

                            m = dict( zip( vtcs_list, new_vtcs_list ) )

                            # NOTE: Python 2 tuple-parameter unpacking.
                            def relabel_edge( (v1,v2) ):
                                return (m[v1], m[v2])

                            yield FixedGraph(
                                [ relabel_edge(edge) for edge in self.edges ], self.edge_weights, 0 )

    @property
    def clean_graph( self ):
        """Orders the edge- and weight list of the graph in a transparent manner."""
        # False sorts before True, so internal edges come before external.
        ext_sorter = ( e in self.external_edges_set for e,edge in enumerate(self.edges) )
        norm = lambda (edge) : (max(edge),min(edge))
        # Boson edges are unoriented: store them in canonical orientation.
        edges = [ norm(edge) if w == 2 else edge for w,edge in zip(self.edge_weights, self.edges) ]
        xwe_list = list(sorted(zip(ext_sorter, self.edge_weights, edges)))
        edges = [ edge for x,w,edge in xwe_list ]
        weights = [ w for x,w,edge in xwe_list ]
        g = copy.copy(self)
        g.edges = tuple(edges)
        g.edge_weights= tuple(weights)
        g.prepare_graph()
        return g
|
|
'''HokuyoLX class code'''
import socket
import logging
import time
import numpy as np
from codecs import encode, decode
from .exceptions import HokuyoException, HokuyoStatusException
from .exceptions import HokuyoChecksumMismatch
from .statuses import activation_statuses, laser_states, tsync_statuses
class HokuyoLX(object):
'''Class for working with Hokuyo laser rangefinders, specifically
with the following models: UST-10LX, UST-20LX, UST-30LX'''
addr = ('192.168.0.10', 10940) #: IP address and port of the scanner
dmin = 20 #: Minimum measurable distance (in millimeters)
dmax = 30000 #: Maximum measurable distance (in millimeters)
ares = 1440 #: Angular resolution (number of partitions in 360 degrees)
amin = 0 #: Minimum step number of the scanning area
amax = 1080 #: Maximum step number of the scanning area
aforw = 540 #: Step number of the front direction
scan_freq = 40 #: Scanning frequency in Hz
model = 'UST-10LX' #: Sensor model
tzero = 0 #: Sensor start time
tn = 0 #: Sensor timestamp overflow counter
convert_time = True #: To convert timestamps to UNIX time or not?
_sock = None #: TCP connection socket to the sensor
_logger = None #: Logger instance for performing logging operations
    def __init__(self, activate=True, info=True, tsync=True, addr=None,
                 buf=512, timeout=5, time_tolerance=300, logger=None,
                 convert_time=True):
        '''Creates new object for communications with the sensor.

        Connects to the sensor immediately; optionally synchronizes time,
        refreshes the sensor information and switches the laser on.

        Parameters
        ----------
        activate : bool, optional
            Switch sensor to the measurement state? (the default is True)
        info : bool, optional
            Update sensor information? (the default is True)
        tsync : bool, optional
            Perform time synchronization? (the default is True)
        addr : tuple, optional
            IP address and port of the sensor (the default is
            `('192.168.0.10', 10940)`)
        buf : int, optional
            Buffer size for receiving messages from the sensor
            (the default is 512)
        timeout : int, optional
            Timeout limit for connection with the sensor in seconds
            (the default is 5)
        time_tolerance : int, optional
            Time tolerance before attempting time synchronization in
            milliseconds (the default is 300)
        logger : `logging.Logger` instance, optional
            Logger instance, if none is provided new instance is created
        convert_time : bool
            Convert timestamps to UNIX time?
        '''
        super(HokuyoLX, self).__init__()
        if addr is not None:
            self.addr = addr
        self.buf = buf
        self.timeout = timeout
        self._logger = logging.getLogger('hokuyo') if logger is None else logger
        self.time_tolerance = time_tolerance
        self.convert_time = convert_time
        # Connect first: time sync, info update and activation all need
        # the open socket.
        self._connect_to_laser(False)
        if tsync:
            self.time_sync()
        if info:
            self.update_info()
        if activate:
            self.activate()
#Low-level data converting and checking
@staticmethod
def _check_sum(msg, cc=None):
'''Checks the checkusum inside the given message or if `cc` is provided
checks it for the given message. Returns message without checksum byte
'''
if cc is None:
cmsg, cc = msg[:-1], msg[-1:]
else:
cmsg = msg
conv_msg = cmsg if isinstance(cmsg, bytes) else encode(cmsg, 'ascii')
conv_sum = decode(cc, 'ascii') if isinstance(msg, bytes) else cc
calc_sum = chr((sum(bytearray(conv_msg)) & 0x3f) + 0x30)
if calc_sum != conv_sum:
raise HokuyoChecksumMismatch(
'For message %s sum mismatch: %s vs %s' %
(decode(conv_msg, 'ascii'), calc_sum, cc))
return cmsg
@staticmethod
def _convert2int(chars):
'''Converts given byte chars to integer using 6 bit encoding'''
return sum([(ord(char) - 0x30) << (6*(len(chars) - i - 1))
for i, char in enumerate(chars)])
    def _convert2ts(self, chars, convert=None):
        '''Converts sensor timestamp in the form of chars to
        the UNIX timestamp. If resulting timestamp differs from local timestamp
        to more than `self.time_tolerance` performs time synchronization or
        detects sensor timestamp overflow and adjusts to it.

        When `convert` (or, if None, `self.convert_time`) is falsy the raw
        sensor tick count is returned instead.
        '''
        ts = self._convert2int(self._check_sum(chars))
        if not (self.convert_time if convert is None else convert):
            return ts
        # NOTE(review): uses the module-level `logging` here instead of
        # `self._logger` as everywhere else in this class — likely
        # unintended; a custom logger passed to __init__ is bypassed.
        logging.debug('Sensor timestamp: %d', ts)
        # Sensor ticks wrap at 2**24 ms; `_tn` counts completed wraps.
        # NOTE(review): the class attribute is declared as `tn` above, but
        # `_tn` is used here — presumably time_sync() initializes `_tn`;
        # confirm, otherwise this raises AttributeError.
        t = self.tzero + ts + self._tn*(1 << 24)
        logging.debug('Converted timestamp: %d (t0: %d, tn: %d)',
                      t, self.tzero, self._tn)
        dt = int(time.time()*1000) - t
        logging.debug('Delta t with local time: %d', dt)
        if abs(dt) > self.time_tolerance:
            diff = (1 << 24) - self.time_tolerance
            if dt > diff and self.tzero != 0:
                # Large positive drift close to the wrap period: treat it
                # as a 24-bit counter overflow rather than clock skew.
                self._logger.warning('Timestamp overflow detected, '
                                     '%d -- %d' % (dt, diff))
                self._tn += 1
            else:
                self._logger.warning(
                    'Time difference %d is too big. Resyncing...', dt)
                self.time_sync()
            # Recompute with the adjusted wrap counter / resynced clock.
            return self._convert2ts(chars)
        return t
#: Low level connection methods
def _connect_to_laser(self, close=True):
'''Connects to the sensor using parameters stored inside object'''
if close:
self.close()
self._logger.info('Connecting to the laser')
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(self.timeout)
try:
self._sock.connect(self.addr)
except socket.timeout:
raise HokuyoException('Failed to connect to the sensor')
def _send_cmd(self, cmd, params='', string=''):
'''Sends given command to the sensor'''
if not (len(cmd) == 2 or (cmd[0] == '%' and len(cmd) == 3)):
raise HokuyoException(
'Command must be two chars string '
'or three chars starting with %%, got %d chars' % len(cmd))
self._logger.debug(
'Sending command to the sensor; '
'cmd: %s, params: %s, string: %s', cmd, params, string)
req = cmd + params
if string:
req += ';' + string
if self._sock is None:
raise HokuyoException('Not connected to the laser')
n = self._sock.send(encode(req, 'ascii') + b'\n')
if len(req) + 1 != n:
raise HokuyoException('Failed to send all data to the sensor')
return req
    def _recv(self, header=None):
        '''Receive one message from the sensor, optionally validating
        its header line.

        Reads from the socket until the blank-line terminator (two
        consecutive newlines) arrives. If `header` is given, messages
        whose first line differs are discarded and reading continues
        with the next message.

        Returns
        -------
        list of str
            The message split into lines, terminator stripped.

        Raises
        ------
        HokuyoException
            If there is no open connection or the socket times out.
        '''
        self._logger.debug('Recieving data from sensor')
        if self._sock is None:
            raise HokuyoException('Not connected to the laser')
        try:
            while True:
                data = b''
                # Accumulate chunks until the b'\n\n' terminator appears.
                while not data.endswith(b'\n\n'):
                    data += self._sock.recv(self.buf)
                self._logger.debug('Recieved data: %s' % data)
                split_data = decode(data[:-2], 'ascii').split('\n')
                if header is not None and split_data[0] != header:
                    # Stale or foreign message: drop it and keep reading.
                    self._logger.warning(
                        'Discarded data due header mismatch: %s' % data)
                    continue
                break
        except socket.timeout:
            raise HokuyoException('Connection timeout')
        return split_data
def _send_req(self, cmd, params='', string=''):
'''Sends given command to the sensor and awaits response to it.'''
self._logger.debug(
'Performing request; cmd: %s, params: %s, string: %s',
cmd, params, string)
header = self._send_cmd(cmd, params, string)
resp = self._recv(header)
if resp.pop(0) != header:
raise HokuyoException('Response header mismatch')
status_str = resp.pop(0)
status = self._check_sum(status_str)
self._logger.debug('Got response with status %s', status)
return status, resp
#Processing and filtering scan data
def get_angles(self, start=None, end=None, grouping=0):
'''Returns array of angles for given `start`, `end` and `grouping`
parameters and according to the sensor parameters stored inside object.
Parameters
----------
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
Returns
-------
ndarray
List of angles in radians
Examples
--------
>>> laser.get_angles()
array([-1.17809725, -1.17591558, -1.17373392, ..., 1.17373392,
1.17591558, 1.17809725])
'''
start = self.amin if start is None else start
end = self.amax if end is None else end
grouping = 1 if grouping == 0 else grouping
num = self.amax - self.amin + 1
space = np.linspace(self.amin, self.amax, num) - self.aforw
angles = 2*np.pi*space/self.ares
# TODO remake grouping
return angles[start:end+1:grouping]
def _process_scan_data(self, data, with_intensity):
'''Converts raw scan data into ndarray with neccecary shape'''
raw_data = ''.join([self._check_sum(block) for block in data])
if len(raw_data) % 3 != 0:
raise HokuyoException('Wrong length of scan data')
scan = np.array([
self._convert2int(raw_data[3*i:3*i+3])
for i in range(len(raw_data)//3)], np.uint32)
if with_intensity:
return scan.reshape((len(scan)//2, 2))
return scan
def _filter(self, scan, start=None, end=None, grouping=0,
dmin=None, dmax=None, imin=None, imax=None):
'''Filters scan measured for given parameters and filters it for
given `dmin`, `dmax`, `imin` and `imax`. Note that `imin` and `imax`
should be only used for scans with intensities'''
angles = self.get_angles(start, end, grouping)
if scan.ndim == 1:
tpl = (angles, scan)
elif scan.ndim == 2:
tpl = (angles, scan[:, 0], scan[:, 1])
else:
raise HokuyoException('Unexpected scan dimensions')
data = np.vstack(tpl).T
dmin = self.dmin if dmin is None else dmin
dmax = self.dmax if dmax is None else dmax
data = data[(data[:, 1] >= dmin) & (data[:, 1] <= dmax)]
if imin is not None:
data = data[data[:, 2] >= imin]
if imax is not None:
data = data[data[:, 2] <= imax]
return data
#Control of sensor state
    def _force_standby(self):
        '''Forces standby state, if it unable to do it throws an exception.

        Based on the examples in this class, state 0 is standby, 3 is
        single-scan and 5 is sleep; state 2 appears to be the time-sync
        mode (left via tsync_exit) — TODO confirm against `laser_states`.
        '''
        state, description = self.laser_state()
        if state in (3, 4, 5):
            # Measurement-type or sleep state: request standby.
            self.standby()
        elif state == 2:
            # Time-synchronization mode: leave it first.
            self.tsync_exit()
        elif state != 0:
            raise HokuyoException('Unexpected laser state: %s' % description)
    def activate(self):
        '''Switches the sensor to the measurement state and starts
        the measurement process by lighting (activating) the laser.
        Valid in the standby state.

        Returns
        -------
        code : int
            Command status code
        description : str
            Command status description

        Examples
        --------
        >>> laser.laser_state()
        (0, 'Standby state')
        >>> status, description = laser.activate()
        >>> status
        0
        >>> description
        'Normal. The sensor is in measurement state and the laser was lighted.'
        >>> laser.laser_state()
        (3, 'Single scan state')
        '''
        self._logger.info('Activating sensor')
        # 'BM' is the activation command; its status codes are mapped to
        # descriptions by the module-level `activation_statuses` table.
        status, _ = self._send_req('BM')
        if status not in activation_statuses:
            raise HokuyoStatusException(status)
        return int(status), activation_statuses[status]
    def standby(self):
        '''Stops the current measurement process and switches the sensor to the
        standby state. Valid in the measurement state or in the measurement and
        scan response state.

        Raises
        ------
        HokuyoStatusException
            If the sensor does not acknowledge the command with status '00'.

        Examples
        --------
        >>> laser.laser_state()
        (3, 'Single scan state')
        >>> laser.standby()
        >>> laser.laser_state()
        (0, 'Standby state')
        '''
        self._logger.info('Switching sensor to the standby state')
        # 'QT' (quit) stops measurement; '00' is the only success status.
        status, _ = self._send_req('QT')
        if status != '00':
            raise HokuyoStatusException(status)
def sleep(self):
'''Switches the sensor to the sleep state. When the sensor receives
the sleep command, it stops the current measurement process,
switches to the sleep state, turns off (deactivates) the laser and
stops the motor. Valid in the standby state or in the
measurement state.
Examples
--------
>>> laser.laser_state()
(0, 'Standby state')
>>> laser.sleep()
>>> laser.laser_state()
(5, 'Sleep state')
'''
self._logger.info('Switching sensor to the sleep state')
self._force_standby()
status, _ = self._send_req('%SL')
if status != '00':
raise HokuyoStatusException(status)
#Single measurments
def _single_measurment(self, with_intensity, start, end, grouping):
'''Generic function for taking single measurment.
Valid only in the measurment state.'''
start = self.amin if start is None else start
end = self.amax if end is None else end
params = '%0.4d%0.4d%0.2d' % (start, end, grouping)
cmd = 'GE' if with_intensity else 'GD'
status, data = self._send_req(cmd, params)
if status != '00':
raise HokuyoStatusException(status)
timestamp = self._convert2ts(data.pop(0))
scan = self._process_scan_data(data, with_intensity)
return timestamp, scan
def get_dist(self, start=None, end=None, grouping=0):
'''Measure distances for the given parameters
Parameters
----------
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
Returns
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured distances
'''
return self._single_measurment(False, start, end, grouping)
def get_intens(self, start=None, end=None, grouping=0):
'''Measure distances and intensities for the given parameters
Parameters
----------
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
Returns
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured distances and intensities
'''
return self._single_measurment(True, start, end, grouping)
def get_filtered_dist(self, start=None, end=None, grouping=0,
dmin=None, dmax=None):
'''Measure distances for the given parameters and perform basic
filtering. Returns array with angles and distances.
Parameters
----------
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
dmin : int, optional
Minimal distance for filtering (the default is None,
which implies `self.dmin`)
dmax : int, optional
Maximum distance for filtering (the default is None,
which implies `self.dmax`)
Returns
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured distances and angles
'''
ts, scan = self.get_dist(start, end, grouping)
return ts, self._filter(scan, start, end, grouping, dmin, dmax)
def get_filtered_intens(self, start=None, end=None, grouping=0,
dmin=None, dmax=None, imin=None, imax=None):
'''Measure distances and intensities for the given parameters and
perform basic filtering. Returns array with angles, distances and
intensities.
Parameters
----------
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
dmin : int, optional
Minimum distance for filtering (the default is None,
which implies `self.dmin`)
dmax : int, optional
Maximum distance for filtering (the default is None,
which implies `self.dmax`)
imin : int, optional
Minimum intensity for filtering (the default is None,
which disables minimum intensity filter)
imax : int, optional
Maximum distance for filtering (the default is None,
which disables maximum intensity filter)
Returns
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured angles, distances and intensities
'''
ts, scan = self.get_intens(start, end, grouping)
return ts, self._filter(scan, start, end, grouping,
dmin, dmax, imin, imax)
#Continous measurments
    def _iter_meas(self, with_intensity, scans, start, end, grouping, skips):
        '''Generic generator for continuous measurement ('MD'/'ME' requests).

        Parameters
        ----------
        with_intensity : bool
            If True use the 'ME' command (distances and intensities),
            otherwise 'MD' (distances only).
        scans : int
            Number of scans to take; 0 means an infinite stream until the
            laser is switched to the standby state.
        start, end : int or None
            Step range; None falls back to `self.amin` / `self.amax`.
        grouping : int
            Number of grouped steps (0 is regarded as 1).
        skips : int
            Number of scans to skip between yielded scans.

        Yields
        ------
        (scan, timestamp, pending) tuples, where `pending` is the number of
        scans the sensor still has to deliver for this request.
        '''
        self._logger.info('Initializing continous measurment')
        start = self.amin if start is None else start
        end = self.amax if end is None else end
        params = '%0.4d%0.4d%0.2d%0.1d%0.2d' % (start, end, grouping,
                                                skips, scans)
        cmd = 'ME' if with_intensity else 'MD'
        status, _ = self._send_req(cmd, params)
        if status != '00':
            raise HokuyoStatusException(status)
        self._logger.info('Starting scan response cycle')
        while True:
            data = self._recv()
            self._logger.debug('Recieved data in the scan response cycle: %s' %
                               data)
            header = data.pop(0)
            # TODO add string part check for header
            # The response header echoes the command plus its parameters,
            # except the two-digit scan count at the end, which the sensor
            # replaces with the number of scans remaining.
            req = cmd + params[:-2]
            if not header.startswith(req):
                raise HokuyoException('Header mismatch in the scan '
                                      'response message')
            pending = int(header[len(req):len(req) + 2])
            status = self._check_sum(data.pop(0))
            if status == '0M':
                # Transient condition: drop this frame and wait for the next.
                self._logger.warning('Unstable scanner condition')
                continue
            elif status != '99':
                # '99' is the normal status for continuous-mode data frames.
                raise HokuyoStatusException(status)
            timestamp = self._convert2ts(data.pop(0))
            scan = self._process_scan_data(data, with_intensity)
            self._logger.info('Got new scan, yielding...')
            yield (scan, timestamp, pending)
            if pending == 0 and scans != 0:
                # Finite request fully served; an infinite request (scans==0)
                # only ends when the laser is switched to standby.
                self._logger.info('Last scan recieved, exiting generator')
                break
def iter_dist(self, scans=0, start=None, end=None, grouping=0, skips=0):
'''Generator for taking continous measurment of distances. If `scan` is
equal to 0 infinite number of scans will be taken until laser is
switched to the standby state.
Parameters
----------
scans : int, optional
Number of scans to perform (the default is 0, which means infinite
number of scans)
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
skips : int, optional
Number of scans to skip (the default is 0, 0 means all scans
will be yielded, 1 - every second, 2 - every third, etc.)
Yields
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured distances
'''
return self._iter_meas(False, scans, start, end, grouping, skips)
def iter_intens(self, scans=0, start=None, end=None, grouping=0, skips=0):
'''Generator for taking continous measurment of distances and
intensities. If `scan` is equal to 0 infinite number of scans will be
taken until laser is switched to the standby state.
Parameters
----------
scans : int, optional
Number of scans to perform (the default is 0, which means infinite
number of scans)
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
skips : int, optional
Number of scans to skip (the default is 0, 0 means all scans
will be yielded, 1 - every second, 2 - every third, etc.)
Yields
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured distances and intensities
'''
return self._iter_meas(True, scans, start, end, grouping, skips)
def iter_filtered_dist(self, scans=0, start=None, end=None, grouping=0,
skips=0, dmin=None, dmax=None):
'''Generator for taking continous measurment of distances with
additional filtering. If `scan` is equal to 0 infinite number of scans
will be taken until laser is switched to the standby state.
Parameters
----------
with_intensity : bool
Measure with intensities or only distances
scans : int, optional
Number of scans to perform (the default is 0, which means infinite
number of scans)
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
skips : int, optional
Number of scans to skip (the default is 0, 0 means all scans
will be yielded, 1 - every second, 2 - every third, etc.)
dmin : int, optional
Minimal distance for filtering (the default is None,
which implies `self.dmin`)
dmax : int, optional
Maximum distance for filtering (the default is None,
which implies `self.dmax`)
Yields
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured angles and distances
'''
gen = self.iter_dist(scans, start, end, grouping, skips)
for scan, timestamp, pending in gen:
scan = self._filter(scan, start, end, grouping, dmin, dmax)
yield (scan, timestamp, pending)
def iter_filtered_intens(self, scans=0, start=None, end=None, grouping=0,
skips=0, dmin=None, dmax=None,
imin=None, imax=None):
'''Generator for taking continous measurment of distances and
intensities with additional filtering. If `scan` is equal to 0 infinite
number of scans will be taken until laser is switched to the standby
state.
Parameters
----------
with_intensity : bool
Measure with intensities or only distances
scans : int, optional
Number of scans to perform (the default is 0, which means infinite
number of scans)
start : int, optional
Position of the starting step (the default is None,
which implies `self.amin`)
end : int, optional
Position of the ending step (the default is None,
which implies `self.amax`)
grouping : int, optional
Number of grouped steps (the default is 0, which regarded as 1)
skips : int, optional
Number of scans to skip (the default is 0, 0 means all scans
will be yielded, 1 - every second, 2 - every third, etc.)
dmin : int, optional
Minimal distance for filtering (the default is None,
which implies `self.dmin`)
dmax : int, optional
Maximum distance for filtering (the default is None,
which implies `self.dmax`)
imin : int, optional
Minimum intensity for filtering (the default is None,
which disables minimum intensity filter)
imax : int, optional
Maximum distance for filtering (the default is None,
which disables maximum intensity filter)
Yields
-------
timestamp : int
Timestamp of the measurment
scan : ndarray
Array with measured angles, distances and intensities
'''
gen = self.iter_intens(scans, start, end, grouping, skips)
for scan, timestamp, pending in gen:
scan = self._filter(scan, start, end, grouping,
dmin, dmax, imin, imax)
yield (scan, timestamp, pending)
#Time synchronization methods
def _tsync_cmd(self, code):
''' Sends time synchronization command with the given code
Parameters
----------
code : int
Time synchronization command, one of: 0, 1, 2
Returns
-------
status : str
Status code of the executed command
description : str
Status description of the executed command
'''
status, data = self._send_req('TM', str(code))
if status not in tsync_statuses:
raise HokuyoStatusException(status)
if data:
return status, tsync_statuses[status], data[0]
else:
return status, tsync_statuses[status]
def tsync_enter(self):
'''Transition from standby state to time synchronization state.'''
self._logger.info('Entering time sync mode')
return self._tsync_cmd(0)
def tsync_get(self):
'''Get time value for time synchronization'''
resp = self._tsync_cmd(1)
if resp[0] != '00':
raise HokuyoException(
'Failed to get sensor time: %s (%s)' %
(resp[1], resp[0]))
return self._convert2ts(resp[2], False)
def tsync_exit(self):
'''Transition from time synchronization state to standby state.'''
self._logger.info('Exiting time sync mode')
return self._tsync_cmd(2)
def time_sync(self, N=10, dt=0.1):
'''Performs time synchronization by doing `tsync_get`requests each `dt`
seconds N times. After that it finds mean time shift, saving it into
`self.tzero`. This value also can be interpreted as the time when
the sensor was turned in.
Parameters
----------
N : int, optional
Number of times to request time from the sensor (the default is 10)
dt : float, optional
Time between time requests (the default is 0.1)
'''
self._logger.info('Starting time synchronization.')
self._force_standby()
code, description = self.tsync_enter()
if code != '00':
self._logger.info(
'Failed to enter time sync mode: %s (%s)' %
(description, code))
self._logger.info('Collecting timestamps...')
tzero_list = []
for _ in range(N):
tzero_list.append(time.time()*1000 - self.tsync_get())
time.sleep(dt)
self.tzero = int(np.mean(np.rint(np.array(tzero_list))))
self._tn = 0
self._logger.info('Time sync done, t0: %d ms' % self.tzero)
code, description = self.tsync_exit()
if code != '00':
self._logger.info(
'Failed to exit time sync mode: %s (%s)' %
(description, code))
#Sensor information
def _process_info_line(self, line):
'''Processes one line in response on info request and returns processed
key and value from with line
Parameters
----------
line : str
Line of format '<key>:<value>;<checksum>'
Returns
-------
key : str
Information key
value : str, int
Imformation value (converted to int if doable)
'''
key, value = self._check_sum(line[:-2], line[-1:]).split(':')
return key, int(value) if value.isdigit() else value
def _get_info(self, cmd):
'''Generic method for recieving and decoding sensor information,
accepts the following commands: II, VV and PP'''
status, data = self._send_req(cmd)
if status != '00':
raise HokuyoStatusException(status)
return dict(self._process_info_line(line) for line in data if line)
def sensor_state(self):
'''Obtains status information of the sensor.
This command is valid during any sensor state.'''
self._logger.info('Retrieving sensor state')
return self._get_info('II')
def version(self):
'''Obtains manufacturing (version) information of the sensor.
This command is valid during any sensor state.'''
self._logger.info('Retrieving manufacturing information of the sensor')
return self._get_info('VV')
def sensor_parameters(self):
'''Obtains sensor internal parameters information.
This command is valid during any sensor state except
the time synchronization state.'''
self._logger.info('Retrieving sensor internal parameters')
return self._get_info('PP')
def laser_state(self):
'''Return the current sensor state. It is valid during any sensor state.
Returns
-------
int
Sensor state code
str
Sensor state description
'''
status, data = self._send_req('%ST')
if status != '00':
raise HokuyoStatusException(status)
state = self._check_sum(data[0])
if state not in laser_states:
raise HokuyoException('Unknown laser state code: %s' % state)
return int(state), laser_states[state]
def update_info(self):
'''Updates sensor information stored in the object attributes using
`sensor_parameters` method.'''
self._logger.info('Updating sensor information')
params = self.sensor_parameters()
for key in ['dmin', 'dmax', 'ares', 'amin', 'amax', ]:
if key.upper() in params:
self.__dict__[key] = params[key.upper()]
sfreq = params['SCAN']
self.scan_freq = sfreq//60 if sfreq % 60 == 0 else sfreq/60
self.aforw = params['AFRT']
self.model = params['MODL']
#Service methods
def reset(self):
'''This command forces the sensor to switch to the standby state
and performs the following tasks:
1. Turns off (deactivates) the laser.
2. Sets the motor rotational speed (scanning speed) to the default
initialization value.
3. Sets the serial transmission speed (bit rate) to the default
initialization value.
4. Sets the internal sensor timer to zero.
5. Sets the measurement sensitivity to the default (normal) value.
However, when the sensor is in the abnormal condition state,
the `reset` command is not received.
'''
self._logger.info('Performing sensor reset')
status, _ = self._send_req('RS')
if status != '00':
raise HokuyoStatusException(status)
self._logger.info('Finished reset')
def partial_reset(self):
'''This command forces the sensor to switch to the standby state
and performs the following tasks:
1. Turns off (deactivates) the laser.
2. Sets the internal sensor timer to zero.
3. Sets the measurement sensitivity to the default (normal) value.
This is similar to the `reset` command, except the motor rotational
(scanning) speed and the serial transmission speed are not changed.
When the sensor is in the abnormal condition state, the `partial_reset`
command is not received.
'''
self._logger.info('Performing partial sensor reset')
status, _ = self._send_req('RT')
if status != '00':
raise HokuyoStatusException(status)
self._logger.info('Finished partial reset')
def reboot(self):
'''This command reboots the sensor and performs the following tasks:
1. Waits for 1 second, during this time the host system disconnects
from the sensor.
2. The sensor stops all communications.
3. Turns off (deactivates) the laser.
4. Sets the motor rotational speed (scanning speed) to the default
initialization value.
5. Sets the serial transmission speed (bit rate) to the default
initialization value.
6. Sets the internal sensor timer to zero.
7. Sets the measurement sensitivity to the default (normal) value.
8. Initializes other internal parameters, and waits until the
scanning speed is stable.
9. Switches to standby state.
It is the only state transition command that can be received
during abnormal condition state
'''
self._logger.info('Reboot: sending first reboot command')
status, _ = self._send_req('RB')
if status != '01':
raise HokuyoException('Reboot failed on first step '
'recieved status %s not 01' % status)
self._logger.info('Reboot: done first step, sending '
'second reboot command')
status, _ = self._send_req('RB')
if status != '00':
raise HokuyoException('Reboot failed on second step '
'recieved status %s not 00' % status)
self._logger.info('Reboot: second step successful')
def close(self):
'''Disconnects from the sensor closing TCP socket'''
if self._sock is None:
self._logger.info('Close: socket already closed')
return
self._logger.info('Close: closing connection to sensor')
self._sock.close()
self._sock = None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.