gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
from nose.tools import * # noqa: F403
import datetime as datetime
import pytest
from django.test import RequestFactory
from django.db.models import Q
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError, PermissionDenied
from django.contrib.admin.sites import AdminSite
from django.forms.models import model_to_dict
from django.http import QueryDict
from tests.base import AdminTestCase
from osf_tests.factories import SubjectFactory, UserFactory, RegistrationFactory, PreprintFactory
from osf.models import Subject, OSFUser, Collection
from osf.models.provider import rules_to_subjects
from admin.base.utils import get_subject_rules, change_embargo_date
from osf.admin import OSFUserAdmin
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
pytestmark = pytest.mark.django_db
class TestSubjectRules(AdminTestCase):
    """Tests for ``get_subject_rules`` and ``rules_to_subjects``.

    ``setUp`` builds a three-level subject taxonomy; the numeric comments
    mirror the creation order of the factories:

        parent_one -> child_one_1 -> {grandchild_one_1, grandchild_one_2}
        parent_one -> child_one_2
        parent_two -> {child_two_1, child_two_2}
    """
    def setUp(self):
        super(TestSubjectRules, self).setUp()
        self.parent_one = SubjectFactory()  # 0
        self.parent_two = SubjectFactory()  # 1
        self.child_one_1 = SubjectFactory(parent=self.parent_one)  # 2
        self.child_one_2 = SubjectFactory(parent=self.parent_one)  # 3
        self.grandchild_one_1 = SubjectFactory(parent=self.child_one_1)  # 4
        self.grandchild_one_2 = SubjectFactory(parent=self.child_one_1)  # 5
        self.child_two_1 = SubjectFactory(parent=self.parent_two)  # 6
        self.child_two_2 = SubjectFactory(parent=self.parent_two)  # 7

    def test_error_when_child_called_without_parent(self):
        """Selecting a child without its parent raises AttributeError."""
        subjects_selected = [self.child_one_1]
        with self.assertRaises(AttributeError):
            get_subject_rules(subjects_selected)

    def test_just_toplevel_subject(self):
        """A single parent maps to one rule with the descendants flag off."""
        subjects_selected = [self.parent_one]
        rules_returned = get_subject_rules(subjects_selected)
        rules_ideal = [[[self.parent_one._id], False]]
        assert rules_returned == rules_ideal

    def test_two_toplevel_subjects(self):
        """Two unrelated parents map to two independent rules."""
        subjects_selected = [
            self.parent_one,
            self.parent_two
        ]
        rules_returned = get_subject_rules(subjects_selected)
        rules_ideal = [
            [[self.parent_one._id], False],
            [[self.parent_two._id], False]
        ]
        assert rules_returned == rules_ideal

    def test_one_child(self):
        """A parent plus one child yields a single parent->child path rule."""
        subjects_selected = [
            self.parent_one,
            self.child_one_1
        ]
        rules_returned = get_subject_rules(subjects_selected)
        rules_ideal = [[[self.parent_one._id, self.child_one_1._id], False]]
        assert rules_returned == rules_ideal

    def test_one_child_all_grandchildren(self):
        """Selecting every grandchild collapses to the child rule with the
        include-all-descendants flag set."""
        subjects_selected = [
            self.parent_one,
            self.child_one_1,
            self.grandchild_one_1,
            self.grandchild_one_2,
        ]
        rules_returned = get_subject_rules(subjects_selected)
        rules_ideal = [[[self.parent_one._id, self.child_one_1._id], True]]
        assert rules_returned == rules_ideal

    def test_all_children_all_grandchildren(self):
        """Selecting the full subtree collapses to a single parent rule."""
        subjects_selected = [
            self.parent_one,
            self.child_one_1,
            self.grandchild_one_1,
            self.grandchild_one_2,
            self.child_one_2
        ]
        rules_returned = get_subject_rules(subjects_selected)
        rules_ideal = [[[self.parent_one._id], True]]
        assert rules_returned == rules_ideal

    def test_one_child_with_one_grandchild(self):
        """A partial subtree keeps the explicit three-level path."""
        subjects_selected = [
            self.parent_one,
            self.child_one_1,
            self.grandchild_one_1
        ]
        rules_returned = get_subject_rules(subjects_selected)
        rules_ideal = [
            [[self.parent_one._id, self.child_one_1._id, self.grandchild_one_1._id], False]
        ]
        assert rules_returned == rules_ideal

    def test_rules_to_subjects(self):
        """rules_to_subjects expands a rule back into the matching queryset."""
        rules = [
            [[self.parent_one._id, self.child_one_1._id], False]
        ]
        subject_queryset_ideal = Subject.objects.filter(Q(id=self.parent_one.id) | Q(id=self.child_one_1.id))
        returned_subjects = rules_to_subjects(rules)
        assert list(subject_queryset_ideal) == list(returned_subjects)
class TestNodeChanges(AdminTestCase):
    """Tests for ``change_embargo_date`` on a public registration."""
    def setUp(self):
        super(TestNodeChanges, self).setUp()
        self.registration = RegistrationFactory(is_public=True)
        self.user = UserFactory()
        self.user.is_staff = True
        self.user.groups.add(Group.objects.get(name='osf_admin'))
        self.user.save()
        # Candidate embargo end dates relative to the registration date:
        # two valid ones, one past the allowed window (~5 years), and one
        # in the past.
        self.date_valid = self.registration.registered_date + datetime.timedelta(days=365)
        self.date_valid2 = self.registration.registered_date + datetime.timedelta(days=375)
        self.date_too_late = self.registration.registered_date + datetime.timedelta(days=1825)
        self.date_too_soon = self.registration.registered_date + datetime.timedelta(days=-1)

    def test_change_embargo_date(self):
        """End-to-end checks: create, update, validate, and authorise."""
        assert_false(self.registration.embargo)
        assert_true(self.registration.is_public)
        # Note: Date comparisons accept a difference up to a day because embargoes start at midnight
        # Create an embargo from a registration with none
        change_embargo_date(self.registration, self.user, self.date_valid)
        assert_almost_equal(self.registration.embargo.end_date, self.date_valid, delta=datetime.timedelta(days=1))
        # Make sure once embargo is set, registration is made private
        self.registration.reload()
        assert_false(self.registration.is_public)
        # Update an embargo end date
        change_embargo_date(self.registration, self.user, self.date_valid2)
        assert_almost_equal(self.registration.embargo.end_date, self.date_valid2, delta=datetime.timedelta(days=1))
        # Test invalid dates
        with assert_raises(ValidationError):
            change_embargo_date(self.registration, self.user, self.date_too_late)
        with assert_raises(ValidationError):
            change_embargo_date(self.registration, self.user, self.date_too_soon)
        # Test that checks user has permission
        with assert_raises(PermissionDenied):
            change_embargo_date(self.registration, UserFactory(), self.date_valid)
        # End date must be unchanged after the denied attempt.
        assert_almost_equal(self.registration.embargo.end_date, self.date_valid2, delta=datetime.timedelta(days=1))
        # Add a test to check privatizing
site = AdminSite()
class TestGroupCollectionsPreprints:
    """Checks that OSFUserAdmin hides — but preserves — implicit group
    memberships (bookmark-collection and preprint groups)."""
    # NOTE(review): a pytest mark applied to a fixture (as on `user` here)
    # has no effect — presumably it was intended for the tests; confirm.
    @pytest.mark.enable_bookmark_creation
    @pytest.fixture()
    def user(self):
        return UserFactory()

    @pytest.fixture()
    def admin_url(self, user):
        # Django admin change page for the user.
        return '/admin/osf/osfuser/{}/change/'.format(user.id)

    @pytest.fixture()
    def preprint(self, user):
        return PreprintFactory(creator=user)

    @pytest.fixture()
    def get_request(self, admin_url, user):
        request = RequestFactory().get(admin_url)
        request.user = user
        return request

    @pytest.fixture()
    def post_request(self, admin_url, user):
        request = RequestFactory().post(admin_url)
        request.user = user
        return request

    @pytest.fixture()
    def osf_user_admin(self):
        return OSFUserAdmin(OSFUser, site)

    @pytest.mark.enable_bookmark_creation
    def test_admin_app_formfield_collections(self, preprint, user, get_request, osf_user_admin):
        """ Testing OSFUserAdmin.formfield_many_to_many.
        This should not return any bookmark collections or preprint groups, even if the user is a member.
        """
        formfield = (osf_user_admin.formfield_for_manytomany(OSFUser.groups.field, request=get_request))
        queryset = formfield.queryset
        collections_group = Collection.objects.filter(creator=user, is_bookmark_collection=True)[0].get_group('admin')
        assert(collections_group not in queryset)
        assert(preprint.get_group('admin') not in queryset)

    @pytest.mark.enable_bookmark_creation
    def test_admin_app_save_related_collections(self, post_request, osf_user_admin, user, preprint):
        """ Testing OSFUserAdmin.save_related
        This should maintain the bookmark collections and preprint groups the user is a member of
        even though they aren't explicitly returned by the form.
        """
        form = osf_user_admin.get_form(request=post_request, obj=user)
        data_dict = model_to_dict(user)
        post_form = form(data_dict, instance=user)
        # post_form.errors.keys() generates a list of fields causing JSON Related errors
        # which are preventing the form from being valid (which is required for the form to be saved).
        # By setting the field to '{}', this makes the form valid and resolves JSON errors.
        for field in post_form.errors.keys():
            if field == 'groups':
                data_dict['groups'] = []
            else:
                data_dict[field] = '{}'
        post_form = form(data_dict, instance=user)
        assert(post_form.is_valid())
        post_form.save(commit=False)
        qdict = QueryDict('', mutable=True)
        qdict.update(data_dict)
        post_request.POST = qdict
        osf_user_admin.save_related(request=post_request, form=post_form, formsets=[], change=True)
        collections_group = Collection.objects.filter(creator=user, is_bookmark_collection=True)[0].get_group('admin')
        assert(collections_group in user.groups.all())
        assert(preprint.get_group('admin') in user.groups.all())
|
|
from __future__ import division, print_function, absolute_import
from decimal import Decimal
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_almost_equal, assert_array_equal, assert_array_almost_equal, \
assert_raises, assert_
import scipy.signal as signal
from scipy.signal import correlate, convolve, convolve2d, fftconvolve, \
hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, tf2zpk
from numpy import array, arange
import numpy as np
class _TestConvolve(TestCase):
    """1-D/2-D convolution behaviour shared by old- and new-style tests.

    ``TestConvolve`` subclasses this and overrides the cases whose expected
    behaviour changed (notably 'valid' mode with a larger second operand).
    """
    def test_basic(self):
        # Full convolution of two small integer sequences.
        a = [3,4,5,6,5,4]
        b = [1,2,3]
        c = convolve(a,b)
        assert_array_equal(c,array([3,10,22,28,32,32,23,12]))

    def test_complex(self):
        # Complex inputs are convolved directly.
        x = array([1+1j, 2+1j, 3+1j])
        y = array([1+1j, 2+1j])
        z = convolve(x, y)
        assert_array_equal(z, array([2j, 2+6j, 5+8j, 5+5j]))

    def test_zero_rank(self):
        # 0-d (scalar) inputs degenerate to plain multiplication.
        a = 1289
        b = 4567
        c = convolve(a,b)
        assert_equal(c,a*b)

    def test_single_element(self):
        a = array([4967])
        b = array([3920])
        c = convolve(a,b)
        assert_equal(c,a*b)

    def test_2d_arrays(self):
        # N-d convolve accepts 2-D list inputs.
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        c = convolve(a,b)
        d = array([[2 ,7 ,16,17,12],
                   [10,30,62,58,38],
                   [12,31,58,49,30]])
        assert_array_equal(c,d)

    def test_valid_mode(self):
        # Old behaviour: 'valid' worked even when b is longer than a.
        a = [1,2,3,6,5,3]
        b = [2,3,4,5,3,4,2,2,1]
        c = convolve(a,b,'valid')
        assert_array_equal(c,array([70,78,73,65]))
class TestConvolve(_TestConvolve):
    """New-style convolve: overrides the cases whose behaviour changed."""
    def test_valid_mode(self):
        # 'valid' mode if b.size > a.size does not make sense with the new
        # behavior
        a = [1,2,3,6,5,3]
        b = [2,3,4,5,3,4,2,2,1]
        def _test():
            convolve(a,b,'valid')
        self.assertRaises(ValueError, _test)

    def test_same_mode(self):
        # 'same' returns output the size of the first input.
        a = [1,2,3,3,1,2]
        b = [1,4,3,4,5,6,7,4,3,2,1,1,3]
        c = convolve(a,b,'same')
        d = array([57,61,63,57,45,36])
        assert_array_equal(c,d)
class _TestConvolve2d(TestCase):
    """2-D convolution tests shared by old- and new-style variants."""
    def test_2d_arrays(self):
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        d = array([[2 ,7 ,16,17,12],
                   [10,30,62,58,38],
                   [12,31,58,49,30]])
        e = convolve2d(a, b)
        assert_array_equal(e, d)

    def test_valid_mode(self):
        # Old behaviour: 'valid' allowed the first input to be the smaller one.
        e = [[2,3,4,5,6,7,8], [4,5,6,7,8,9,10]]
        f = [[1,2,3], [3,4,5]]
        g = convolve2d(e, f, 'valid')
        h = array([[62,80,98,116,134]])
        assert_array_equal(g, h)

    def test_fillvalue(self):
        # 'fill' boundary pads with a constant outside the edges.
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        fillval = 1
        c = convolve2d(a,b,'full','fill',fillval)
        d = array([[24,26,31,34,32],
                   [28,40,62,64,52],
                   [32,46,67,62,48]])
        assert_array_equal(c, d)

    def test_wrap_boundary(self):
        # 'wrap' boundary treats the input as periodic.
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        c = convolve2d(a,b,'full','wrap')
        d = array([[80,80,74,80,80],
                   [68,68,62,68,68],
                   [80,80,74,80,80]])
        assert_array_equal(c,d)

    def test_sym_boundary(self):
        # 'symm' boundary reflects the input at the edges.
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        c = convolve2d(a,b,'full','symm')
        d = array([[34,30,44, 62, 66],
                   [52,48,62, 80, 84],
                   [82,78,92,110,114]])
        assert_array_equal(c,d)
class TestConvolve2d(_TestConvolve2d):
    """New-style convolve2d: overrides and extends the shared cases."""
    def test_same_mode(self):
        e = [[1,2,3],[3,4,5]]
        f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
        g = convolve2d(e,f,'same')
        h = array([[22,28,34],
                   [80,98,116]])
        assert_array_equal(g,h)

    def test_valid_mode2(self):
        # Test when in2.size > in1.size: must raise under the new behaviour.
        e = [[1,2,3],[3,4,5]]
        f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
        def _test():
            convolve2d(e,f,'valid')
        self.assertRaises(ValueError, _test)

    def test_consistency_convolve_funcs(self):
        # Compare np.convolve, signal.convolve, signal.convolve2d
        a = np.arange(5)
        b = np.array([3.2, 1.4, 3])
        for mode in ['full', 'valid', 'same']:
            assert_almost_equal(np.convolve(a, b, mode=mode),
                                signal.convolve(a, b, mode=mode))
            assert_almost_equal(np.squeeze(signal.convolve2d([a], [b],
                                                             mode=mode)),
                                signal.convolve(a, b, mode=mode))
class TestFFTConvolve(TestCase):
    """Tests for FFT-based convolution (fftconvolve)."""
    def test_real(self):
        x = array([1,2,3])
        assert_array_almost_equal(signal.fftconvolve(x,x), [1,4,10,12,9.])

    def test_complex(self):
        x = array([1+1j,2+2j,3+3j])
        assert_array_almost_equal(signal.fftconvolve(x,x),
                                  [0+2.0j, 0+8j, 0+20j, 0+24j, 0+18j])

    def test_2d_real_same(self):
        a = array([[1,2,3],[4,5,6]])
        assert_array_almost_equal(signal.fftconvolve(a,a),
                                  array([[1,4,10,12,9],
                                         [8,26,56,54,36],
                                         [16,40,73,60,36]]))

    def test_2d_complex_same(self):
        a = array([[1+2j,3+4j,5+6j],[2+1j,4+3j,6+5j]])
        c = fftconvolve(a,a)
        d = array([[-3+4j,-10+20j,-21+56j,-18+76j,-11+60j],
                   [10j,44j,118j,156j,122j],
                   [3+4j,10+20j,21+56j,18+76j,11+60j]])
        assert_array_almost_equal(c,d)

    def test_real_same_mode(self):
        # 'same' output takes the size of the *first* input.
        a = array([1,2,3])
        b = array([3,3,5,6,8,7,9,0,1])
        c = fftconvolve(a,b,'same')
        d = array([ 35., 41., 47.])
        assert_array_almost_equal(c,d)

    def test_real_same_mode2(self):
        a = array([3,3,5,6,8,7,9,0,1])
        b = array([1,2,3])
        c = fftconvolve(a,b,'same')
        d = array([9.,20.,25.,35.,41.,47.,39.,28.,2.])
        assert_array_almost_equal(c,d)

    def test_real_valid_mode(self):
        # 'valid' with a shorter first input must raise.
        a = array([3,2,1])
        b = array([3,3,5,6,8,7,9,0,1])
        def _test():
            fftconvolve(a,b,'valid')
        self.assertRaises(ValueError, _test)

    def test_real_valid_mode2(self):
        a = array([3,3,5,6,8,7,9,0,1])
        b = array([3,2,1])
        c = fftconvolve(a,b,'valid')
        d = array([24.,31.,41.,43.,49.,25.,12.])
        assert_array_almost_equal(c,d)

    def test_empty(self):
        # Regression test for #1745: crashes with 0-length input.
        assert_(fftconvolve([], []).size == 0)
        assert_(fftconvolve([5, 6], []).size == 0)
        assert_(fftconvolve([], [7]).size == 0)

    def test_zero_rank(self):
        a = array(4967)
        b = array(3920)
        c = fftconvolve(a,b)
        assert_equal(c,a*b)

    def test_single_element(self):
        a = array([4967])
        b = array([3920])
        c = fftconvolve(a,b)
        assert_equal(c,a*b)

    def test_random_data(self):
        # Fixed seed keeps the comparison with np.convolve reproducible.
        np.random.seed(1234)
        a = np.random.rand(1233) + 1j*np.random.rand(1233)
        b = np.random.rand(1321) + 1j*np.random.rand(1321)
        c = fftconvolve(a, b, 'full')
        d = np.convolve(a, b, 'full')
        assert_(np.allclose(c, d, rtol=1e-10))
class TestMedFilt(TestCase):
    """Tests for signal.medfilt / signal.medfilt2d."""
    def test_basic(self):
        """medfilt and medfilt2d agree and match the reference output."""
        f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
             [50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
             [50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
             [50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
             [50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
             [70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
             [64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
             [ 3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
             [ 7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
             [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]
        d = signal.medfilt(f, [7, 3])
        # ``np.float`` was removed from numpy; the builtin ``float`` dtype is
        # the direct replacement.
        e = signal.medfilt2d(np.array(f, float), [7, 3])
        assert_array_equal(d, [[ 0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
                               [ 0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
                               [50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
                               [50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
                               [50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
                               [33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
                               [32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
                               [ 7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
                               [ 0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
                               [ 0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])
        assert_array_equal(d, e)

    def test_none(self):
        """Ticket #1124. Ensure this does not segfault."""
        try:
            signal.medfilt(None)
        except Exception:
            # Any Python-level exception is acceptable here — the test only
            # guards against a hard crash.  (A bare ``except:`` would also
            # swallow SystemExit/KeyboardInterrupt, so catch Exception.)
            pass
        # Expand on this test to avoid a regression with possible contiguous
        # numpy arrays that have odd strides. The stride value below gets
        # us into wrong memory if used (but it does not need to be used)
        dummy = np.arange(10, dtype=np.float64)
        a = dummy[5:6]
        a.strides = 16
        assert_(signal.medfilt(a, 1) == 5.)
class TestWiener(TestCase):
    """Smoke test for signal.wiener on a small 2-D input."""
    def test_basic(self):
        data = array([[5, 6, 4, 3],
                      [3, 5, 6, 2],
                      [2, 3, 5, 6],
                      [1, 6, 9, 7]], 'd')
        expected = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
                          [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
                          [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
                          [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
        assert_array_almost_equal(signal.wiener(data), expected, decimal=6)
class TestCSpline1DEval(TestCase):
    """Check that cubic-spline evaluation reproduces the knot values."""
    def test_basic(self):
        knots = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        xs = arange(len(knots))
        spacing = xs[1] - xs[0]
        coeffs = signal.cspline1d(knots)
        # Evaluate on a 10x finer grid than the knots.
        fine_x = arange(len(knots) * 10.0) / 10.0
        interpolated = signal.cspline1d_eval(coeffs, fine_x, dx=spacing, x0=xs[0])
        # The interpolated curve must pass through the original knot points.
        assert_array_almost_equal(interpolated[::10], knots, decimal=5)
class TestOrderFilt(TestCase):
    """Basic order_filter check: rank-1 filter with a 3-point domain."""
    def test_basic(self):
        result = signal.order_filter([1, 2, 3], [1, 0, 1], 1)
        assert_array_equal(result, [2, 3, 2])
class _TestLinearFilter(TestCase):
    """dtype-parameterised tests for ``lfilter``.

    Concrete subclasses set ``dt`` to the numpy dtype (or object type) under
    test; every input/expected array is cast to that dtype.
    """
    # dtype under test; set by each concrete subclass.
    dt = None

    def test_rank1(self):
        x = np.linspace(0, 5, 6).astype(self.dt)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, -0.5]).astype(self.dt)
        # Test simple IIR
        y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)
        assert_array_almost_equal(lfilter(b, a, x), y_r)
        # Test simple FIR
        b = np.array([1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
        assert_array_almost_equal(lfilter(b, a, x), y_r)
        # Test IIR with initial conditions
        b = np.array([1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        zi = np.array([1]).astype(self.dt)
        y_r = np.array([1, 1, 3, 5, 7, 9.]).astype(self.dt)
        zf_r = np.array([5]).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)
        # Longer FIR with two initial-condition values.
        b = np.array([1, 1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        zi = np.array([1, 1]).astype(self.dt)
        y_r = np.array([1, 2, 3, 6, 9, 12.]).astype(self.dt)
        zf_r = np.array([9, 5]).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)

    def test_rank2(self):
        # Filtering a 2-D input along each axis in turn.
        shape = (4, 3)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        x = x.astype(self.dt)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)
        y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6 ,4 ,2]],
                           dtype=self.dt)
        y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
                            [18, -16, 18]], dtype=self.dt)
        y = lfilter(b, a, x, axis = 0)
        assert_array_almost_equal(y_r2_a0, y)
        y = lfilter(b, a, x, axis = 1)
        assert_array_almost_equal(y_r2_a1, y)

    def test_rank2_init_cond_a1(self):
        # Test initial condition handling along axis 1
        shape = (4, 3)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        x = x.astype(self.dt)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)
        y_r2_a0_1 = np.array([[1, 1, 1], [7, -5, 7], [13, -11, 13],
                              [19, -17, 19]], dtype=self.dt)
        zf_r = np.array([-5, -17, -29, -41])[:, np.newaxis].astype(self.dt)
        y, zf = lfilter(b, a, x, axis = 1, zi = np.ones((4, 1)))
        assert_array_almost_equal(y_r2_a0_1, y)
        assert_array_almost_equal(zf, zf_r)

    def test_rank2_init_cond_a0(self):
        # Test initial condition handling along axis 0
        shape = (4, 3)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        x = x.astype(self.dt)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)
        y_r2_a0_0 = np.array([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5 ,3 ,1]],
                             dtype=self.dt)
        zf_r = np.array([[-23, -23, -23]], dtype=self.dt)
        y, zf = lfilter(b, a, x, axis = 0, zi = np.ones((1, 3)))
        assert_array_almost_equal(y_r2_a0_0, y)
        assert_array_almost_equal(zf, zf_r)

    def test_rank3(self):
        shape = (4, 3, 2)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)
        # Test last axis
        y = lfilter(b, a, x)
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))

    def test_empty_zi(self):
        # Regression test for #880: empty array for zi crashes.
        a = np.ones(1).astype(self.dt)
        b = np.ones(1).astype(self.dt)
        x = np.arange(5).astype(self.dt)
        zi = np.ones(0).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, x)
        self.assertTrue(zf.dtype == self.dt)
        self.assertTrue(zf.size == 0)
# Concrete dtype instantiations of the shared _TestLinearFilter suite.
class TestLinearFilterFloat32(_TestLinearFilter):
    dt = np.float32

class TestLinearFilterFloat64(_TestLinearFilter):
    dt = np.float64

class TestLinearFilterFloatExtended(_TestLinearFilter):
    dt = np.longdouble

class TestLinearFilterComplex64(_TestLinearFilter):
    dt = np.complex64

class TestLinearFilterComplex128(_TestLinearFilter):
    dt = np.complex128

# NOTE(review): the class name below looks garbled ("Complexxxi...28");
# presumably TestLinearFilterComplexExtended was intended — confirm before
# renaming, in case anything selects tests by name.
class TestLinearFilterComplexxxiExtended28(_TestLinearFilter):
    dt = np.longcomplex

class TestLinearFilterDecimal(_TestLinearFilter):
    dt = np.dtype(Decimal)

class TestLinearFilterObject(_TestLinearFilter):
    dt = np.object_
def test_lfilter_bad_object():
    """lfilter raises TypeError for object arrays with non-numeric items.

    Regression test for ticket #1452.
    """
    bad_argument_sets = [
        ([1.0], [1.0], [1.0, None, 2.0]),   # None in the input signal
        ([1.0], [None], [1.0, 2.0, 3.0]),   # None in the denominator
        ([None], [1.0], [1.0, 2.0, 3.0]),   # None in the numerator
    ]
    for b, a, x in bad_argument_sets:
        assert_raises(TypeError, lfilter, b, a, x)
class _TestCorrelateReal(TestCase):
    """dtype-parameterised correlation tests for real-valued inputs.

    Concrete subclasses are generated by ``_get_testcorrelate_class`` and
    set ``dt`` to the dtype under test.
    """
    # dtype under test; set by generated subclasses.
    dt = None

    def _setup_rank1(self):
        # a.size should be greated than b.size for the tests
        a = np.linspace(0, 3, 4).astype(self.dt)
        b = np.linspace(1, 2, 2).astype(self.dt)
        y_r = np.array([0, 2, 5, 8, 3]).astype(self.dt)
        return a, b, y_r

    def test_rank1_valid(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'valid')
        # 'valid' output is the interior slice of the full correlation.
        assert_array_almost_equal(y, y_r[1:4])
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_same(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'same')
        assert_array_almost_equal(y, y_r[:-1])
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_full(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'full')
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    def _setup_rank3(self):
        # Fortran-ordered 3-D inputs plus the precomputed full correlation.
        a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(self.dt)
        b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(self.dt)
        y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.,],
                      [ 46., 432., 1062., 1840., 2672., 1698., 864., 266.,],
                      [ 134., 736., 1662., 2768., 3920., 2418., 1168., 314.,],
                      [ 260., 952., 1932., 3056., 4208., 2580., 1240., 332.,] ,
                      [ 202., 664., 1290., 1984., 2688., 1590., 712., 150.,] ,
                      [ 114., 344., 642., 960., 1280., 726., 296., 38.,]],
                     [[ 23., 400., 1035., 1832., 2696., 1737., 904., 293.,],
                      [ 134., 920., 2166., 3680., 5280., 3306., 1640., 474.,],
                      [ 325., 1544., 3369., 5512., 7720., 4683., 2192., 535.,],
                      [ 571., 1964., 3891., 6064., 8272., 4989., 2324., 565.,],
                      [ 434., 1360., 2586., 3920., 5264., 3054., 1312., 230.,],
                      [ 241., 700., 1281., 1888., 2496., 1383., 532., 39.,]],
                     [[ 22., 214., 528., 916., 1332., 846., 430., 132.,],
                      [ 86., 484., 1098., 1832., 2600., 1602., 772., 206.,],
                      [ 188., 802., 1698., 2732., 3788., 2256., 1018., 218.,],
                      [ 308., 1006., 1950., 2996., 4052., 2400., 1078., 230.,],
                      [ 230., 692., 1290., 1928., 2568., 1458., 596., 78.,],
                      [ 126., 354., 636., 924., 1212., 654., 234., 0.,]]],
                    dtype=self.dt)
        return a, b, y_r

    def test_rank3_valid(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, "valid")
        assert_array_almost_equal(y, y_r[1:2,2:4,3:5])
        self.assertTrue(y.dtype == self.dt)

    def test_rank3_same(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, "same")
        assert_array_almost_equal(y, y_r[0:-1,1:-1,1:-2])
        self.assertTrue(y.dtype == self.dt)

    def test_rank3_all(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)
def _get_testcorrelate_class(datatype, base):
class TestCorrelateX(base):
dt = datatype
TestCorrelateX.__name__ = "TestCorrelate%s" % datatype.__name__.title()
return TestCorrelateX
# Instantiate real-valued correlate tests for each supported dtype.
# Fixes: ``np.int`` was removed from numpy (it was only an alias of the
# builtin ``int``), and the list accidentally contained ``np.ulonglong``
# twice where the signed ``np.longlong`` was clearly intended.
for datatype in [np.ubyte, np.byte, np.ushort, np.short, np.uint, int,
                 np.longlong, np.ulonglong, np.float32, np.float64,
                 np.longdouble, Decimal]:
    cls = _get_testcorrelate_class(datatype, _TestCorrelateReal)
    globals()[cls.__name__] = cls
class _TestCorrelateComplex(TestCase):
    """dtype-parameterised correlation tests for complex-valued inputs.

    The reference result is built from four real correlations of the real
    and imaginary parts.
    """
    # The numpy data type to use.
    dt = None
    # The decimal precision to be used for comparing results.
    # This value will be passed as the 'decimal' keyword argument of
    # assert_array_almost_equal().
    decimal = None

    def _setup_rank1(self, mode):
        # Fixed seed keeps the random inputs reproducible.
        np.random.seed(9)
        a = np.random.randn(10).astype(self.dt)
        a += 1j * np.random.randn(10).astype(self.dt)
        b = np.random.randn(8).astype(self.dt)
        b += 1j * np.random.randn(8).astype(self.dt)
        # Reference: complex correlation decomposed into real correlations
        # (conjugation of b gives the sign pattern below).
        y_r = (correlate(a.real, b.real, mode=mode) +
               correlate(a.imag, b.imag, mode=mode)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
                     correlate(a.imag, b.real, mode=mode))
        return a, b, y_r

    def test_rank1_valid(self):
        a, b, y_r = self._setup_rank1('valid')
        y = correlate(a, b, 'valid')
        assert_array_almost_equal(y, y_r, decimal=self.decimal)
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_same(self):
        a, b, y_r = self._setup_rank1('same')
        y = correlate(a, b, 'same')
        assert_array_almost_equal(y, y_r, decimal=self.decimal)
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_full(self):
        a, b, y_r = self._setup_rank1('full')
        y = correlate(a, b, 'full')
        assert_array_almost_equal(y, y_r, decimal=self.decimal)
        self.assertTrue(y.dtype == self.dt)

    def test_rank3(self):
        a = np.random.randn(10, 8, 6).astype(self.dt)
        a += 1j * np.random.randn(10, 8, 6).astype(self.dt)
        b = np.random.randn(8, 6, 4).astype(self.dt)
        b += 1j * np.random.randn(8, 6, 4).astype(self.dt)
        y_r = (correlate(a.real, b.real)
               + correlate(a.imag, b.imag)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
        y = correlate(a, b, 'full')
        # One fewer decimal than rank-1: more accumulation error in 3-D.
        assert_array_almost_equal(y, y_r, decimal=self.decimal-1)
        self.assertTrue(y.dtype == self.dt)
class TestCorrelate2d(TestCase):
    """Cross-check the various correlate implementations against numpy."""
    def test_consistency_correlate_funcs(self):
        # np.correlate, signal.correlate and signal.correlate2d must agree
        # for every boundary mode.
        x = np.arange(5)
        y = np.array([3.2, 1.4, 3])
        for mode in ('full', 'valid', 'same'):
            reference = np.correlate(x, y, mode=mode)
            assert_almost_equal(reference, signal.correlate(x, y, mode=mode))
            flattened_2d = np.squeeze(signal.correlate2d([x], [y], mode=mode))
            assert_almost_equal(flattened_2d, signal.correlate(x, y, mode=mode))
# Create three classes, one for each complex data type. The actual class
# name will be TestCorrelateComplex###, where ### is the number of bits.
for datatype in [np.csingle, np.cdouble, np.clongdouble]:
    cls = _get_testcorrelate_class(datatype, _TestCorrelateComplex)
    # Allow roughly 2/3 of the dtype's decimal precision when comparing.
    cls.decimal = int(2 * np.finfo(datatype).precision / 3)
    globals()[cls.__name__] = cls
class TestLFilterZI(TestCase):
    """Check lfilter_zi against a hand-computed steady state."""
    def test_basic(self):
        denominator = np.array([1.0, -1.0, 0.5])
        numerator = np.array([1.0, 0.0, 2.0])
        # Steady-state initial conditions for this (b, a) pair.
        expected_zi = np.array([5.0, -1.0])
        computed_zi = lfilter_zi(numerator, denominator)
        assert_array_almost_equal(computed_zi, expected_zi)
class TestFiltFilt(TestCase):
    """Tests for zero-phase forward-backward filtering (filtfilt)."""
    def test_basic(self):
        # Identical b and a make the filter an identity.
        out = signal.filtfilt([1, 2, 3], [1, 2, 3], np.arange(12))
        assert_equal(out, arange(12))

    def test_sine(self):
        """A Butterworth lowpass run through filtfilt removes the high tone."""
        rate = 2000
        t = np.linspace(0, 1.0, rate + 1)
        # A signal with low frequency and a high frequency.
        xlow = np.sin(5 * 2 * np.pi * t)
        xhigh = np.sin(250 * 2 * np.pi * t)
        x = xlow + xhigh
        b, a = butter(8, 0.125)
        z, p, k = tf2zpk(b, a)
        # r is the magnitude of the largest pole.
        r = np.abs(p).max()
        eps = 1e-5
        # n estimates the number of steps for the
        # transient to decay by a factor of eps.
        n = int(np.ceil(np.log(eps) / np.log(r)))
        # High order lowpass filter...
        y = filtfilt(b, a, x, padlen=n)
        # Result should be just xlow.
        err = np.abs(y - xlow).max()
        assert_(err < 1e-4)
        # A 2D case.
        x2d = np.vstack([xlow, xlow + xhigh])
        y2d = filtfilt(b, a, x2d, padlen=n, axis=1)
        assert_equal(y2d.shape, x2d.shape)
        err = np.abs(y2d - xlow).max()
        assert_(err < 1e-4)
        # Use the previous result to check the use of the axis keyword.
        # (Regression test for ticket #1620)
        y2dt = filtfilt(b, a, x2d.T, padlen=n, axis=0)
        assert_equal(y2d, y2dt.T)

    def test_axis(self):
        # Test the 'axis' keyword on a 3D array.
        x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
        b, a = butter(3, 0.125)
        y0 = filtfilt(b, a, x, padlen=0, axis=0)
        y1 = filtfilt(b, a, np.swapaxes(x, 0, 1), padlen=0, axis=1)
        assert_array_equal(y0, np.swapaxes(y1, 0, 1))
        y2 = filtfilt(b, a, np.swapaxes(x, 0, 2), padlen=0, axis=2)
        assert_array_equal(y0, np.swapaxes(y2, 0, 2))
class TestDecimate:
def test_basic(self):
x = np.arange(6)
assert_array_equal(signal.decimate(x, 2, n=1).round(), x[::2])
def test_shape(self):
# Regression test for ticket #1480.
z = np.zeros((10, 10))
d0 = signal.decimate(z, 2, axis=0)
assert_equal(d0.shape, (5, 10))
d1 = signal.decimate(z, 2, axis=1)
assert_equal(d1.shape, (10, 5))
class TestHilbert(object):
    """Tests for the analytic-signal helper ``hilbert``."""

    def test_bad_args(self):
        """hilbert rejects complex input and non-positive N."""
        x = np.array([1.0+0.0j])
        assert_raises(ValueError, hilbert, x)
        x = np.arange(8.0)
        assert_raises(ValueError, hilbert, x, N=0)

    def test_hilbert_theoretical(self):
        """Analytic signals of pure sines/cosines match theory."""
        # test cases by Ariel Rokem
        decimal = 14
        pi = np.pi
        t = np.arange(0, 2*pi, pi/256)
        a0 = np.sin(t)
        a1 = np.cos(t)
        a2 = np.sin(2*t)
        a3 = np.cos(2*t)
        a = np.vstack([a0,a1,a2,a3])
        h = hilbert(a)
        h_abs = np.abs(h)
        h_angle = np.angle(h)
        h_real = np.real(h)
        #The real part should be equal to the original signals:
        assert_almost_equal(h_real, a, decimal)
        #The absolute value should be one everywhere, for this input:
        assert_almost_equal(h_abs, np.ones(a.shape), decimal)
        #For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
        #the first 256 bins:
        assert_almost_equal(h_angle[0,:256], np.arange(-pi/2,pi/2,pi/256),
                            decimal)
        #For the 'slow' cosine - the phase should go from 0 to pi in the
        #same interval:
        assert_almost_equal(h_angle[1,:256], np.arange(0,pi,pi/256), decimal)
        #The 'fast' sine should make this phase transition in half the time:
        assert_almost_equal(h_angle[2,:128], np.arange(-pi/2,pi/2,pi/128),
                            decimal)
        #Ditto for the 'fast' cosine:
        assert_almost_equal(h_angle[3,:128], np.arange(0,pi,pi/128), decimal)
        #The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia
        assert_almost_equal(h[1].imag, a0, decimal)

    def test_hilbert_axisN(self):
        """Tests for the ``axis`` and ``N`` arguments.

        Rewritten from nose-style ``yield`` generator tests — which modern
        test runners silently skip — into plain assertions, so the checks
        actually execute.
        """
        a = np.arange(18).reshape(3,6)
        # test axis
        aa = hilbert(a, axis=-1)
        assert_equal(hilbert(a.T, axis=0), aa.T)
        # test 1d
        assert_equal(hilbert(a[0]), aa[0])
        # test N
        aan = hilbert(a, N=20, axis=-1)
        assert_equal(aan.shape, [3,20])
        assert_equal(hilbert(a.T, N=20, axis=0).shape, [20,3])
        #the next test is just a regression test,
        #no idea whether numbers make sense
        a0hilb = np.array([ 0.000000000000000e+00-1.72015830311905j ,
                            1.000000000000000e+00-2.047794505137069j,
                            1.999999999999999e+00-2.244055555687583j,
                            3.000000000000000e+00-1.262750302935009j,
                            4.000000000000000e+00-1.066489252384493j,
                            5.000000000000000e+00+2.918022706971047j,
                            8.881784197001253e-17+3.845658908989067j,
                            -9.444121133484362e-17+0.985044202202061j,
                            -1.776356839400251e-16+1.332257797702019j,
                            -3.996802888650564e-16+0.501905089898885j,
                            1.332267629550188e-16+0.668696078880782j,
                            -1.192678053963799e-16+0.235487067862679j,
                            -1.776356839400251e-16+0.286439612812121j,
                            3.108624468950438e-16+0.031676888064907j,
                            1.332267629550188e-16-0.019275656884536j,
                            -2.360035624836702e-16-0.1652588660287j ,
                            0.000000000000000e+00-0.332049855010597j,
                            3.552713678800501e-16-0.403810179797771j,
                            8.881784197001253e-17-0.751023775297729j,
                            9.444121133484362e-17-0.79252210110103j ])
        assert_almost_equal(aan[0], a0hilb, 14, 'N regression')
class TestHilbert2(object):
    """Argument validation for hilbert2."""
    def test_bad_args(self):
        # Complex input is rejected: x must be real.
        assert_raises(ValueError, hilbert2, np.array([[1.0 + 0.0j]]))
        # Input must be rank 2.
        assert_raises(ValueError, hilbert2, np.arange(24).reshape(2, 3, 4))
        # N must be a positive scalar or a pair of positive ints.
        square = np.arange(16).reshape(4, 4)
        assert_raises(ValueError, hilbert2, square, N=0)
        assert_raises(ValueError, hilbert2, square, N=(2,0))
        assert_raises(ValueError, hilbert2, square, N=(2,))
if __name__ == "__main__":
run_module_suite()
|
|
"""
Filename: plot_hemispheric_timeseries.py
Author: Damien Irving, irving.damien@gmail.com
Description: Plot ensemble aggregated hemispheric timeseries
"""
# Import general Python modules
import sys, os, pdb, re
import argparse
import numpy
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import iris.plot as iplt
import matplotlib.pyplot as plt
import seaborn
import cmdline_provenance as cmdprov
# Import my modules
#
# Walk up from the current working directory until the 'ocean-analysis'
# repo root is found, then make its 'modules' directory importable.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
    import timeseries
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions

# Plot colour for each variable key.
colors = {'ohc': 'blue', 'hfds': 'orange', 'rndt': 'red'}
# Human-readable variable names; used to build the iris load constraints.
names = {'ohc': 'ocean heat content',
         'hfds': 'Downward Heat Flux at Sea Water Surface',
         'rndt': 'TOA Incoming Net Radiation'}
def ensemble_aggregate(cube_list, operator):
    """Aggregate an ensemble of cubes with the requested statistic.

    Args:
      cube_list (iris.cube.CubeList): one cube per ensemble member, each
        carrying an 'ensemble_member' auxiliary coordinate
      operator (str): 'mean' or 'median'

    Returns:
      iris.cube.Cube: the ensemble aggregate, or the single member
        unchanged when only one cube was supplied
    """
    aggregators = {'mean': iris.analysis.MEAN, 'median': iris.analysis.MEDIAN}

    # Force a common var_name across members so merge_cube() can combine them.
    var_name = cube_list[0].var_name
    for cube in cube_list:
        cube.var_name = var_name

    if len(cube_list) > 1:
        equalise_attributes(cube_list)
        timeseries.equalise_time_axes(cube_list)
        ensemble_cube = cube_list.merge_cube()
        ensemble_agg = ensemble_cube.collapsed('ensemble_member', aggregators[operator])
    else:
        ensemble_agg = cube_list[0]

    return ensemble_agg
def calc_anomaly(cube):
    """Return a copy of `cube` expressed as an anomaly from its first value."""
    anomaly = cube.copy()
    baseline = anomaly.data[0]
    anomaly.data = anomaly.data - baseline
    return anomaly
def read_hemisphere_data(file_pairs, variable, time_constraint, ensagg):
    """Read NH/SH anomaly timeseries for `variable` and ensemble-aggregate each.

    Each item of `file_pairs` is a (NH file, SH file) pair for one ensemble
    member. Returns the aggregated (nh_cube, sh_cube) pair.
    """
    cube_lists = {'nh': iris.cube.CubeList([]), 'sh': iris.cube.CubeList([])}
    for ensnum, file_pair in enumerate(file_pairs):
        # Tag every cube with its ensemble member number for the later merge.
        member_coord = iris.coords.AuxCoord(ensnum, long_name='ensemble_member', units='no_unit')
        for hemisphere, infile in zip(('nh', 'sh'), file_pair):
            print(infile)
            var = '%s %s sum' % (names[variable], hemisphere)
            cube = iris.load_cube(infile, var & time_constraint)
            cube = calc_anomaly(cube)
            cube.add_aux_coord(member_coord)
            # Drop cell methods so members merge cleanly.
            cube.cell_methods = ()
            cube_lists[hemisphere].append(cube)

    ensagg_nh_cube = ensemble_aggregate(cube_lists['nh'], ensagg)
    ensagg_sh_cube = ensemble_aggregate(cube_lists['sh'], ensagg)

    return ensagg_nh_cube, ensagg_sh_cube
def read_guide_data(infiles, variable, time_constraint, ensagg):
    """Read global data for `variable` and derive hemispheric guide lines.

    The ensemble-aggregated global anomaly is scaled by fixed fractions
    (0.41 for the NH guide, 0.59 for the SH guide) — presumably hemispheric
    shares of the global total; confirm against the analysis method.
    """
    member_cubes = iris.cube.CubeList([])
    for ensnum, infile in enumerate(infiles):
        member_coord = iris.coords.AuxCoord(ensnum, long_name='ensemble_member', units='no_unit')
        print(infile)
        var = '%s globe sum' % (names[variable])
        cube = iris.load_cube(infile, var & time_constraint)
        cube = calc_anomaly(cube)
        cube.add_aux_coord(member_coord)
        cube.cell_methods = ()
        member_cubes.append(cube)

    global_agg = ensemble_aggregate(member_cubes, ensagg)
    nh_guide = global_agg * 0.41
    sh_guide = global_agg * 0.59

    return nh_guide, sh_guide
def main(inargs):
    """Run the program: plot the requested hemispheric timeseries and save.

    Args:
      inargs (argparse.Namespace): parsed command-line arguments
    """
    time_constraint = gio.get_time_constraint([inargs.start_date, inargs.end_date])

    plt.figure(figsize=[11, 10])

    if inargs.rndt_files:
        rndt_nh, rndt_sh = read_hemisphere_data(inargs.rndt_files, 'rndt', time_constraint, inargs.ensagg)
        iplt.plot(rndt_nh, label='netTOA, NH', color='red', linestyle='solid')
        iplt.plot(rndt_sh, label='netTOA, SH', color='red', linestyle='dashed')

    if inargs.hfds_files:
        hfds_nh, hfds_sh = read_hemisphere_data(inargs.hfds_files, 'hfds', time_constraint, inargs.ensagg)
        iplt.plot(hfds_nh, label='OHU, NH', color='orange', linestyle='solid')
        iplt.plot(hfds_sh, label='OHU, SH', color='orange', linestyle='dashed')

    if inargs.ohc_files:
        ohc_nh, ohc_sh = read_hemisphere_data(inargs.ohc_files, 'ohc', time_constraint, inargs.ensagg)
        iplt.plot(ohc_nh, label='OHC, NH', color='blue', linestyle='solid')
        iplt.plot(ohc_sh, label='OHC, SH', color='blue', linestyle='dashed')

    if inargs.ohc_guide_files:
        guide_nh, guide_sh = read_guide_data(inargs.ohc_guide_files, 'ohc', time_constraint, inargs.ensagg)
        iplt.plot(guide_nh, label='OHC guide, NH', color='0.5', linestyle='solid')
        iplt.plot(guide_sh, label='OHC guide, SH', color='0.5', linestyle='dashed')

    plt.legend()
    if inargs.ybounds:
        ymin, ymax = inargs.ybounds
        plt.ylim([ymin, ymax])

    # Fall back to matplotlib's configured default resolution when --dpi was
    # not given. plt.rcParams is the public API; the previous
    # plt.savefig.__globals__['rcParams'] hack relied on an implementation
    # detail of the pyplot module.
    dpi = inargs.dpi if inargs.dpi else plt.rcParams['figure.dpi']
    print('dpi =', dpi)
    plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi)

    # Write a provenance log next to the image (anchored so only a trailing
    # '.png' extension is replaced).
    log_text = cmdprov.new_log(git_repo=repo_dir)
    log_file = re.sub(r'\.png$', '.met', inargs.outfile)
    cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':

    extra_info = """
author:
    Damien Irving, irving.damien@gmail.com
"""
    # Fixed typo: "hemisperhic" -> "hemispheric".
    description = 'Plot hemispheric accumulation of OHC, hfds and rndt'
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("outfile", type=str, help="output file")
    parser.add_argument("start_date", type=str, help="Start date (e.g. 1861-01-01)")
    parser.add_argument("end_date", type=str, help="End date (e.g. 2005-12-31)")

    parser.add_argument("--rndt_files", type=str, nargs=2, action='append', default=[],
                        help="netTOA file pair for a given model (NH, SH)")
    parser.add_argument("--hfds_files", type=str, nargs=2, action='append', default=[],
                        help="OHU file pair for a given model (NH, SH)")
    parser.add_argument("--ohc_files", type=str, nargs=2, action='append', default=[],
                        help="OHC file pair for a given model (NH, SH)")
    parser.add_argument("--ohc_guide_files", type=str, nargs='*', default=None,
                        help="global files for OHC guidelines to be plotted")

    # This option sets plt.ylim, so document it as the y-axis bound
    # (the previous help text said "x-axis").
    parser.add_argument("--ybounds", type=gio.two_floats, default=None,
                        help="""Bounds for y-axis. e.g. "-5e20 5e20" """)
    parser.add_argument("--ensagg", type=str, default='median', choices=('mean', 'median'),
                        help="Ensemble mean or median [default=median]")
    parser.add_argument("--dpi", type=float, default=None,
                        help="Figure resolution in dots per inch [default=auto]")

    args = parser.parse_args()
    main(args)
|
|
import logging
import requests
from six.moves import urllib
from . import dcos_url, rpcclient
from ..clients.rpcclient import verify_ssl
from ..errors import (DCOSAuthenticationException,
DCOSAuthorizationException,
DCOSBadRequest,
DCOSException,
DCOSHTTPException)
logger = logging.getLogger(__name__)
class Cosmos(object):
    """
    A wrapper on cosmos that abstracts away http requests

    :param cosmos_url: the url of cosmos
    :type cosmos_url: str
    """

    def __init__(self, cosmos_url=None):
        if cosmos_url is None:
            self.cosmos_url = get_cosmos_url()
        else:
            self.cosmos_url = cosmos_url
        self._rpc = rpcclient.create_client(self.cosmos_url)

        def _data(versions, http_method):
            """
            Create an object with the data for an endpoint.

            :param versions: the accept versions in order
            of priority.
            :type versions: list[str]
            :param http_method: should be 'post' or 'get'
            :type http_method: str
            :return: endpoint description
            :rtype: dict
            """
            return {'versions': versions, 'http_method': http_method}

        # This structure holds information about an endpoint. Currently
        # the information stored is the return type versions, and the
        # http request method. These two fields should be stored in a
        # dictionary of the form:
        # {'versions': versions, 'http_method': http_method}.
        self._endpoint_data = {
            'capabilities': _data(['v1'], 'get'),
            'package/add': _data(['v1'], 'post'),
            'package/describe': _data(['v3', 'v2'], 'post'),
            'package/install': _data(['v2'], 'post'),
            'package/list': _data(['v1'], 'post'),
            'package/list-versions': _data(['v1'], 'post'),
            'package/render': _data(['v1'], 'post'),
            'package/repository/add': _data(['v1'], 'post'),
            'package/repository/delete': _data(['v1'], 'post'),
            'package/repository/list': _data(['v1'], 'post'),
            'package/search': _data(['v1'], 'post'),
            'package/uninstall': _data(['v1'], 'post'),
            'service/start': _data(['v1'], 'post')
        }

        # Endpoints whose Content-Type/Accept values do not follow the
        # usual request/response media-type pattern.
        self._special_content_types = {
            ('capabilities', 'v1'):
                _format_media_type('capabilities', 'v1', '')
        }
        self._special_accepts = {
            ('capabilities', 'v1'):
                _format_media_type('capabilities', 'v1', '')
        }

    def enabled(self):
        """
        Returns whether or not cosmos is enabled on specified dcos cluster

        :return: true if cosmos is enabled, false otherwise
        :rtype: bool
        """
        try:
            response = self.call_endpoint(
                'capabilities')
        # return `Authentication failed` error messages
        except DCOSAuthenticationException:
            raise
        # Authorization errors mean endpoint exists, and user could be
        # authorized for the command specified, not this endpoint
        except DCOSAuthorizationException:
            return True
        # allow exception through so we can show user actual http exception
        # except 404, because then the url is fine, just not cosmos enabled
        except DCOSHTTPException as e:
            logger.exception(e)
            return e.status() != 404
        except Exception as e:
            logger.exception(e)
            return True
        return response.status_code == 200

    def call_endpoint(self,
                      endpoint,
                      headers=None,
                      data=None,
                      json=None,
                      **kwargs):
        """
        Gets the Response object returned by cosmos at endpoint

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :param headers: these header values will appear
        in the request headers.
        :type headers: None | dict[str, str]
        :param data: the request's body
        :type data: dict | bytes | file-like object
        :param json: JSON request body
        :type json: dict
        :param kwargs: Additional arguments to requests.request
        (see py:func:`request`)
        :type kwargs: dict
        :return: the Response object returned by cosmos
        :rtype: requests.Response
        """
        url = self._get_endpoint_url(endpoint)
        request_versions = self._get_request_version_preferences(endpoint)
        # One candidate header per accepted version, most preferred first.
        headers_preference = list(map(
            lambda version: self._get_header(
                endpoint, version, headers),
            request_versions))
        http_request_type = self._get_http_method(endpoint)
        return self._cosmos_request(
            url,
            http_request_type,
            headers_preference,
            data,
            json,
            **kwargs)

    def _cosmos_request(self,
                        url,
                        http_request_type,
                        headers_preference,
                        data=None,
                        json=None,
                        **kwargs):
        """
        Gets a Response object obtained by calling cosmos
        at the url 'url'. Will attempt each of the headers
        in headers_preference in order until success.

        :param url: the url of a cosmos endpoint
        :type url: str
        :param http_request_type: 'post' or 'get'
        :type http_request_type: str
        :param headers_preference: a list of request headers
        in order of preference. Each header
        will be attempted until they all fail or the request succeeds.
        :type headers_preference: list[dict[str, str]]
        :param data: the request's body
        :type data: dict | bytes | file-like object
        :param json: JSON request body
        :type json: dict
        :param kwargs: Additional arguments to requests.request
        (see py:func:`request`)
        :type kwargs: dict
        :return: response returned by calling cosmos at url
        :rtype: requests.Response
        """
        try:
            headers = headers_preference[0]
            kwargs['verify'] = verify_ssl()
            # NOTE: use '==' for string comparison; the previous 'is' check
            # relied on CPython string interning.
            if http_request_type == 'post':
                response = requests.post(url, data=data, json=json, headers=headers, auth=self._rpc.session.auth,
                                         **kwargs)
            else:
                response = requests.get(url, data=data, json=json, headers=headers, auth=self._rpc.session.auth,
                                        **kwargs)
            if not _matches_expected_response_header(headers,
                                                    response.headers):
                raise DCOSException(
                    'Server returned incorrect response type, '
                    'expected {} but got {}'.format(
                        headers.get('Accept'),
                        response.headers.get('Content-Type')))
            return response
        except DCOSBadRequest as e:
            if len(headers_preference) > 1:
                # reattempt with one less item in headers_preference
                return self._cosmos_request(url,
                                            http_request_type,
                                            headers_preference[1:],
                                            data,
                                            json,
                                            **kwargs)
            else:
                raise e

    def _get_endpoint_url(self, endpoint):
        """
        Gets the url for the cosmos endpoint 'endpoint'

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :return: the url of endpoint
        :rtype: str
        """
        return urllib.parse.urljoin(self.cosmos_url, endpoint)

    def _get_request_version_preferences(self, endpoint):
        """
        Gets the list of versions for endpoint in preference order.
        The first item is most preferred, and last is least preferred.

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :return: list of versions in preference order
        :rtype: list[str]
        """
        return self._endpoint_data.get(endpoint).get('versions')

    def _get_http_method(self, endpoint):
        """
        Gets the http method cosmos expects for the endpoint

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :return: http method type
        :rtype: str
        """
        return self._endpoint_data.get(endpoint).get('http_method')

    def _get_header(self, endpoint, version, headers=None):
        """
        Given an cosmos endpoint, a version, and any extra header values,
        gets the header that can be used to query cosmos at endpoint.
        Any key in headers will appear in the final header. In effect the
        user function can overwrite the default header.

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :param version: The version of the request
        :type version: str
        :param headers: extra keys for the header
        :type headers: dict[str, str]
        :return: a header that can be used to query cosmos at endpoint
        :rtype: dict[str, str]
        """
        simple_header = {
            'Content-Type': self._get_content_type(endpoint),
            'Accept': self._get_accept(endpoint, version)
        }
        return _merge_dict(simple_header, headers)

    def _endpoint_exists(self, endpoint):
        """
        :param endpoint: a possible cosmos endpoint
        :type endpoint: str
        :return: true if endpoint is a valid cosmos endpoint,
        false otherwise
        :rtype: bool
        """
        return endpoint in self._endpoint_data

    def _get_accept(self, endpoint, version):
        """
        Gets the value for the Accept header key for
        the cosmos request at endpoint.

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :param version: The version of the request
        :type version: str
        :return: the value for the Accept header key for endpoint
        :rtype: str
        """
        if (endpoint, version) in self._special_accepts:
            return self._special_accepts[(endpoint, version)]
        return _format_media_type(endpoint, version, 'response')

    def _get_content_type(self, endpoint):
        """
        Gets the value for the Content-Type header key for
        the cosmos request at endpoint.

        :param endpoint: a cosmos endpoint, of the form 'x/y',
        for example 'package/repo/add' or 'service/start'
        :type endpoint: str
        :return: the value for the Content-Type header key for endpoint
        :rtype: str
        """
        # Request bodies are always encoded with the v1 media type.
        version = 'v1'
        if (endpoint, version) in self._special_content_types:
            return self._special_content_types[(endpoint, version)]
        return _format_media_type(endpoint, version, 'request')
def _format_media_type(endpoint, version, suffix):
"""
Formats a value for a cosmos Content-Type or Accept header key.
:param endpoint: a cosmos endpoint, of the form 'x/y',
for example 'package/repo/add', 'service/start', or 'package/error'
:type endpoint: str
:param version: The version of the request
:type version: str
:param suffix: The string that will be appended to
endpoint type, most commonly 'request' or 'response'
:type suffix: str
:return: a formatted value for a Content-Type or Accept header key
:rtype: str
"""
prefix = endpoint.replace('/', '.')
separator = '-' if suffix else ''
return ('application/vnd.dcos.{}{}{}'
'+json;charset=utf-8;version={}').format(prefix,
separator,
suffix,
version)
def _matches_expected_response_header(request_headers, response_headers):
"""
Returns true if the Content-Type value of the response header matches the
Accept value of the request header, false otherwise
:param request_headers: the headers for a cosmos request
:type request_headers: dict[str, str]
:param response_headers: the headers for a cosmos response
:type response_headers: dict[str, str]
:return: true if the Content-Type value of the response header matches the
Accept value of the request header, false otherwise
:rtype: bool
"""
return (request_headers.get('Accept')
in response_headers.get('Content-Type'))
def get_cosmos_url():
    """
    Gets the cosmos url

    :returns: cosmos base url
    :rtype: str
    """
    # Cosmos is served from the DC/OS cluster base URL.
    return dcos_url()
def _merge_dict(a, b):
"""
Given two dicts, merge them into a new dict as a
shallow copy. Keys on dictionary b will overwrite keys
on dictionary a.
:param a: a dictionary, may be None
:type a: None | dict
:param b: a dictionary, may be None
:type b: None | dict
:return: the result of merging a with b
:rtype: dict
"""
if a is None and b is None:
return {}
if a is None:
return b.copy()
if b is None:
return a.copy()
z = a.copy()
z.update(b)
return z
|
|
#!/router/bin/python
# NOTE: the wildcard import stays first so that the explicit imports below
# win any name collisions (preserves the original import order semantics).
from .trex_stl_exceptions import *
from .trex_stl_types import verify_exclusive_arg, validate_type
from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
from .trex_stl_packet_builder_scapy import STLPktBuilder, Ether, IP, UDP, TCP, RawPcapReader

import base64
import copy
import imp
import json  # used by STLStream.__str__ (was missing, causing a NameError)
import os    # used by YAMLLoader for path handling (was missing)
import random
import string
import traceback
from collections import OrderedDict, namedtuple

import yaml
from scapy.utils import ltoa
from scapy.error import Scapy_Exception
# base class for TX mode
class STLTXMode(object):
    """ mode rate speed """

    def __init__ (self, pps = None, bps_L1 = None, bps_L2 = None, percentage = None):
        """
        Transmit rate for a stream. Exactly one unit may be given:
        packets per second (pps), L1/L2 bits per second, or a percentage
        of the port's link speed. For example pps = 10000 or bps_L1 = 10.

        :parameters:
            pps : float
                Packets per second

            bps_L1 : float
                Bits per second L1 (with IPG)

            bps_L2 : float
                Bits per second L2 (Ethernet-FCS)

            percentage : float
                Link interface percent (0-100). Example: 10 is 10% of the port link setup

        .. code-block:: python

            :caption: STLTXMode Example

            mode = STLTXCont(pps = 10)

            mode = STLTXCont(bps_L1 = 10000000) #10mbps L1

            mode = STLTXCont(bps_L2 = 10000000) #10mbps L2

            mode = STLTXCont(percentage = 10)   #10%
        """

        rate_args = [pps, bps_L1, bps_L2, percentage]

        # No rate given at all -> default to 1 pps; otherwise exactly one
        # of the units must be set.
        if all(arg is None for arg in rate_args):
            pps = 1.0
        else:
            verify_exclusive_arg(rate_args)

        self.fields = {'rate': {}}

        if pps is not None:
            validate_type('pps', pps, [float, int])
            rate_type, rate_value = 'pps', pps
        elif bps_L1 is not None:
            validate_type('bps_L1', bps_L1, [float, int])
            rate_type, rate_value = 'bps_L1', bps_L1
        elif bps_L2 is not None:
            validate_type('bps_L2', bps_L2, [float, int])
            rate_type, rate_value = 'bps_L2', bps_L2
        elif percentage is not None:
            validate_type('percentage', percentage, [float, int])
            if not (0 < percentage <= 100):
                raise STLArgumentError('percentage', percentage)
            rate_type, rate_value = 'percentage', percentage

        self.fields['rate']['type'] = rate_type
        self.fields['rate']['value'] = rate_value

    def to_json (self):
        return self.fields
# continuous mode
class STLTXCont(STLTXMode):
    """ Continuous mode """

    def __init__ (self, **kwargs):
        """
        Continuous mode

        see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate

        .. code-block:: python

            :caption: STLTXCont Example

            mode = STLTXCont(pps = 10)
        """
        super(STLTXCont, self).__init__(**kwargs)

        self.fields['type'] = 'continuous'

    # NOTE(review): __str__ as a @staticmethod is unconventional but is
    # relied upon elsewhere (e.g. STLStream.to_code calls STLTXCont.__str__()
    # without an instance) — do not change to a regular method.
    @staticmethod
    def __str__ ():
        return "Continuous"
# single burst mode
class STLTXSingleBurst(STLTXMode):
    """ Single burst mode """

    def __init__ (self, total_pkts = 1, **kwargs):
        """
        Single burst mode

        :parameters:
            total_pkts : int
                Number of packets for this burst

        see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate

        .. code-block:: python

            :caption: STLTXSingleBurst Example

            mode = STLTXSingleBurst( pps = 10, total_pkts = 1)
        """
        if not isinstance(total_pkts, int):
            raise STLArgumentError('total_pkts', total_pkts)

        super(STLTXSingleBurst, self).__init__(**kwargs)

        self.fields['type'] = 'single_burst'
        self.fields['total_pkts'] = total_pkts

    # Static by design: see note on STLTXCont.__str__ in the original source.
    @staticmethod
    def __str__ ():
        return "Single Burst"
# multi burst mode
class STLTXMultiBurst(STLTXMode):
    """ Multi-burst mode """

    def __init__ (self,
                  pkts_per_burst = 1,
                  ibg = 0.0,    # usec not SEC
                  count = 1,
                  **kwargs):
        """
        Multi-burst mode

        :parameters:

            pkts_per_burst: int
                Number of packets per burst

            ibg : float
                Inter-burst gap in usec 1,000,000.0 is 1 sec

            count : int
                Number of bursts

        see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate

        .. code-block:: python

            :caption: STLTXMultiBurst Example

            mode = STLTXMultiBurst(pps = 10, pkts_per_burst = 1, count = 10, ibg = 10.0)
        """
        if not isinstance(pkts_per_burst, int):
            raise STLArgumentError('pkts_per_burst', pkts_per_burst)

        if not isinstance(ibg, (int, float)):
            raise STLArgumentError('ibg', ibg)

        if not isinstance(count, int):
            raise STLArgumentError('count', count)

        super(STLTXMultiBurst, self).__init__(**kwargs)

        self.fields['type'] = 'multi_burst'
        self.fields['pkts_per_burst'] = pkts_per_burst
        self.fields['ibg'] = ibg
        self.fields['count'] = count

    # Static by design: called without an instance elsewhere in this module.
    @staticmethod
    def __str__ ():
        return "Multi Burst"
# Destination-MAC resolution modes for a stream (encoded into the stream's
# 'flags' field; see STLStream.__init__).
STLStreamDstMAC_CFG_FILE=0   # take dst MAC from the port config file
STLStreamDstMAC_PKT     =1   # take dst MAC from the template packet
STLStreamDstMAC_ARP     =2   # resolve dst MAC via ARP
class STLFlowStatsInterface(object):
    """Base class for per-stream RX statistics configuration."""

    def __init__ (self, pg_id):
        # 'stream_id' keys the stats to a packet-group id.
        self.fields = {'enabled': True, 'stream_id': pg_id}

    def to_json (self):
        """ Dump as json"""
        return dict(self.fields)

    @staticmethod
    def defaults ():
        """Field values used when a stream has no flow stats configured."""
        return {'enabled' : False}
class STLFlowStats(STLFlowStatsInterface):
    """ Define per stream basic stats

    .. code-block:: python

        :caption: STLFlowStats Example

        flow_stats = STLFlowStats(pg_id = 7)
    """

    def __init__(self, pg_id):
        super(STLFlowStats, self).__init__(pg_id)
        # Marks this packet group for basic (counter-only) RX stats.
        self.fields['rule_type'] = 'stats'
class STLFlowLatencyStats(STLFlowStatsInterface):
    """ Define per stream basic stats + latency, jitter, packet reorder/loss

    .. code-block:: python

        :caption: STLFlowLatencyStats Example

        flow_stats = STLFlowLatencyStats(pg_id = 7)
    """

    def __init__(self, pg_id):
        super(STLFlowLatencyStats, self).__init__(pg_id)
        # Marks this packet group for latency-capable RX stats.
        self.fields['rule_type'] = 'latency'
class STLStream(object):
""" One stream object. Includes mode, Field Engine mode packet template and Rx stats
.. code-block:: python
:caption: STLStream Example
base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
pad = max(0, size - len(base_pkt)) * 'x'
STLStream( isg = 10.0, # star in delay
name ='S0',
packet = STLPktBuilder(pkt = base_pkt/pad),
mode = STLTXSingleBurst( pps = 10, total_pkts = 1),
next = 'S1'), # point to next stream
"""
def __init__ (self,
name = None,
packet = None,
mode = STLTXCont(pps = 1),
enabled = True,
self_start = True,
isg = 0.0,
flow_stats = None,
next = None,
stream_id = None,
action_count = 0,
random_seed =0,
mac_src_override_by_pkt=None,
mac_dst_override_mode=None #see STLStreamDstMAC_xx
):
"""
Stream object
:parameters:
name : string
Name of the stream. Required if this stream is dependent on another stream, and another stream needs to refer to this stream by name.
packet : STLPktBuilder see :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLPktBuilder`
Template packet and field engine program. Example: packet = STLPktBuilder(pkt = base_pkt/pad)
mode : :class:`trex_stl_lib.trex_stl_streams.STLTXCont` or :class:`trex_stl_lib.trex_stl_streams.STLTXSingleBurst` or :class:`trex_stl_lib.trex_stl_streams.STLTXMultiBurst`
enabled : bool
Indicates whether the stream is enabled.
self_start : bool
If False, another stream activates it.
isg : float
Inter-stream gap in usec. Time to wait until the stream sends the first packet.
flow_stats : :class:`trex_stl_lib.trex_stl_streams.STLFlowStats`
Per stream statistic object. See: STLFlowStats
next : string
Name of the stream to activate.
stream_id :
For use by HLTAPI.
action_count : uint16_t
If there is a next stream, number of loops before stopping. Default: 0 (unlimited).
random_seed: uint16_t
If given, the seed for this stream will be this value. Useful if you need a deterministic random value.
mac_src_override_by_pkt : bool
Template packet sets src MAC.
mac_dst_override_mode=None : STLStreamDstMAC_xx
Template packet sets dst MAC.
"""
# type checking
validate_type('mode', mode, STLTXMode)
validate_type('packet', packet, (type(None), CTrexPktBuilderInterface))
validate_type('flow_stats', flow_stats, (type(None), STLFlowStatsInterface))
validate_type('enabled', enabled, bool)
validate_type('self_start', self_start, bool)
validate_type('isg', isg, (int, float))
validate_type('stream_id', stream_id, (type(None), int))
validate_type('random_seed',random_seed,int);
if (type(mode) == STLTXCont) and (next != None):
raise STLError("Continuous stream cannot have a next stream ID")
# tag for the stream and next - can be anything
self.name = name
self.next = next
self.mac_src_override_by_pkt = mac_src_override_by_pkt # save for easy construct code from stream object
self.mac_dst_override_mode = mac_dst_override_mode
self.id = stream_id
self.fields = {}
int_mac_src_override_by_pkt = 0;
int_mac_dst_override_mode = 0;
if mac_src_override_by_pkt == None:
int_mac_src_override_by_pkt=0
if packet :
if packet.is_default_src_mac ()==False:
int_mac_src_override_by_pkt=1
else:
int_mac_src_override_by_pkt = int(mac_src_override_by_pkt);
if mac_dst_override_mode == None:
int_mac_dst_override_mode = 0;
if packet :
if packet.is_default_dst_mac ()==False:
int_mac_dst_override_mode=STLStreamDstMAC_PKT
else:
int_mac_dst_override_mode = int(mac_dst_override_mode);
self.is_default_mac = not (int_mac_src_override_by_pkt or int_mac_dst_override_mode)
self.fields['flags'] = (int_mac_src_override_by_pkt&1) + ((int_mac_dst_override_mode&3)<<1)
self.fields['action_count'] = action_count
# basic fields
self.fields['enabled'] = enabled
self.fields['self_start'] = self_start
self.fields['isg'] = isg
if random_seed !=0 :
self.fields['random_seed'] = random_seed # optional
# mode
self.fields['mode'] = mode.to_json()
self.mode_desc = str(mode)
# packet
self.fields['packet'] = {}
self.fields['vm'] = {}
if not packet:
packet = STLPktBuilder(pkt = Ether()/IP())
self.scapy_pkt_builder = packet
# packet builder
packet.compile()
# packet and VM
self.fields['packet'] = packet.dump_pkt()
self.fields['vm'] = packet.get_vm_data()
self.pkt = base64.b64decode(self.fields['packet']['binary'])
# this is heavy, calculate lazy
self.packet_desc = None
if not flow_stats:
self.fields['flow_stats'] = STLFlowStats.defaults()
else:
self.fields['flow_stats'] = flow_stats.to_json()
def __str__ (self):
s = "Stream Name: {0}\n".format(self.name)
s += "Stream Next: {0}\n".format(self.next)
s += "Stream JSON:\n{0}\n".format(json.dumps(self.fields, indent = 4, separators=(',', ': '), sort_keys = True))
return s
def to_json (self):
"""
Return json format
"""
return dict(self.fields)
def get_id (self):
""" Get the stream id after resolution """
return self.id
def has_custom_mac_addr (self):
""" Return True if src or dst MAC were set as custom """
return not self.is_default_mac
def get_name (self):
""" Get the stream name """
return self.name
def get_next (self):
""" Get next stream object """
return self.next
def has_flow_stats (self):
""" Return True if stream was configured with flow stats """
return self.fields['flow_stats']['enabled']
def get_pkt (self):
""" Get packet as string """
return self.pkt
def get_pkt_len (self, count_crc = True):
""" Get packet number of bytes """
pkt_len = len(self.get_pkt())
if count_crc:
pkt_len += 4
return pkt_len
def get_pkt_type (self):
""" Get packet description. Example: IP:UDP """
if self.packet_desc == None:
self.packet_desc = STLPktBuilder.pkt_layers_desc_from_buffer(self.get_pkt())
return self.packet_desc
def get_mode (self):
return self.mode_desc
@staticmethod
def get_rate_from_field (rate_json):
""" Get rate from json """
t = rate_json['type']
v = rate_json['value']
if t == "pps":
return format_num(v, suffix = "pps")
elif t == "bps_L1":
return format_num(v, suffix = "bps (L1)")
elif t == "bps_L2":
return format_num(v, suffix = "bps (L2)")
elif t == "percentage":
return format_num(v, suffix = "%")
def get_rate (self):
return self.get_rate_from_field(self.fields['mode']['rate'])
def to_pkt_dump (self):
""" Print packet description from Scapy """
if self.name:
print("Stream Name: ",self.name)
scapy_b = self.scapy_pkt_builder;
if scapy_b and isinstance(scapy_b,STLPktBuilder):
scapy_b.to_pkt_dump()
else:
print("Nothing to dump")
def to_yaml (self):
""" Convert to YAML """
y = {}
if self.name:
y['name'] = self.name
if self.next:
y['next'] = self.next
y['stream'] = copy.deepcopy(self.fields)
# some shortcuts for YAML
rate_type = self.fields['mode']['rate']['type']
rate_value = self.fields['mode']['rate']['value']
y['stream']['mode'][rate_type] = rate_value
del y['stream']['mode']['rate']
return y
# returns the Python code (text) to build this stream, inside the code it will be in variable "stream"
def to_code (self):
""" Convert to Python code as profile """
packet = Ether(self.pkt)
layer = packet
while layer: # remove checksums
for chksum_name in ('cksum', 'chksum'):
if chksum_name in layer.fields:
del layer.fields[chksum_name]
layer = layer.payload
packet.hide_defaults() # remove fields with default values
payload = packet.getlayer('Raw')
packet_command = packet.command()
imports_arr = []
if 'MPLS(' in packet_command:
imports_arr.append('from scapy.contrib.mpls import MPLS')
imports = '\n'.join(imports_arr)
if payload:
payload.remove_payload() # fcs etc.
data = payload.fields.get('load', '')
good_printable = [c for c in string.printable if ord(c) not in range(32)]
good_printable.remove("'")
if type(data) is str:
new_data = ''.join([c if c in good_printable else r'\x{0:02x}'.format(ord(c)) for c in data])
else:
new_data = ''.join([chr(c) if chr(c) in good_printable else r'\x{0:02x}'.format(c) for c in data])
payload_start = packet_command.find("Raw(load=")
if payload_start != -1:
packet_command = packet_command[:payload_start-1]
layers = packet_command.split('/')
if payload:
if len(new_data) and new_data == new_data[0] * len(new_data):
layers.append("Raw(load='%s' * %s)" % (new_data[0], len(new_data)))
else:
layers.append("Raw(load='%s')" % new_data)
packet_code = 'packet = (' + (' / \n ').join(layers) + ')'
vm_list = []
for inst in self.fields['vm']['instructions']:
if inst['type'] == 'flow_var':
vm_list.append("STLVmFlowVar(name='{name}', size={size}, op='{op}', init_value={init_value}, min_value={min_value}, max_value={max_value}, step={step})".format(**inst))
elif inst['type'] == 'write_flow_var':
vm_list.append("STLVmWrFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, add_val={add_value}, is_big={is_big_endian})".format(**inst))
elif inst['type'] == 'write_mask_flow_var':
inst = copy.copy(inst)
inst['mask'] = hex(inst['mask'])
vm_list.append("STLVmWrMaskFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, pkt_cast_size={pkt_cast_size}, mask={mask}, shift={shift}, add_value={add_value}, is_big={is_big_endian})".format(**inst))
elif inst['type'] == 'fix_checksum_ipv4':
vm_list.append("STLVmFixIpv4(offset={pkt_offset})".format(**inst))
elif inst['type'] == 'trim_pkt_size':
vm_list.append("STLVmTrimPktSize(fv_name='{name}')".format(**inst))
elif inst['type'] == 'tuple_flow_var':
inst = copy.copy(inst)
inst['ip_min'] = ltoa(inst['ip_min'])
inst['ip_max'] = ltoa(inst['ip_max'])
vm_list.append("STLVmTupleGen(name='{name}', ip_min='{ip_min}', ip_max='{ip_max}', port_min={port_min}, port_max={port_max}, limit_flows={limit_flows}, flags={flags})".format(**inst))
vm_code = 'vm = STLScVmRaw([' + ',\n '.join(vm_list) + '], split_by_field = %s)' % STLStream.__add_quotes(self.fields['vm'].get('split_by_var'))
stream_params_list = []
stream_params_list.append('packet = STLPktBuilder(pkt = packet, vm = vm)')
if default_STLStream.name != self.name:
stream_params_list.append('name = %s' % STLStream.__add_quotes(self.name))
if default_STLStream.fields['enabled'] != self.fields['enabled']:
stream_params_list.append('enabled = %s' % self.fields['enabled'])
if default_STLStream.fields['self_start'] != self.fields['self_start']:
stream_params_list.append('self_start = %s' % self.fields['self_start'])
if default_STLStream.fields['isg'] != self.fields['isg']:
stream_params_list.append('isg = %s' % self.fields['isg'])
if default_STLStream.fields['flow_stats'] != self.fields['flow_stats']:
stream_params_list.append('flow_stats = STLFlowStats(%s)' % self.fields['flow_stats']['stream_id'])
if default_STLStream.next != self.next:
stream_params_list.append('next = %s' % STLStream.__add_quotes(self.next))
if default_STLStream.id != self.id:
stream_params_list.append('stream_id = %s' % self.id)
if default_STLStream.fields['action_count'] != self.fields['action_count']:
stream_params_list.append('action_count = %s' % self.fields['action_count'])
if 'random_seed' in self.fields:
stream_params_list.append('random_seed = %s' % self.fields.get('random_seed', 0))
if default_STLStream.mac_src_override_by_pkt != self.mac_src_override_by_pkt:
stream_params_list.append('mac_src_override_by_pkt = %s' % self.mac_src_override_by_pkt)
if default_STLStream.mac_dst_override_mode != self.mac_dst_override_mode:
stream_params_list.append('mac_dst_override_mode = %s' % self.mac_dst_override_mode)
mode_args = ''
for key, value in self.fields['mode'].items():
if key not in ('rate', 'type'):
mode_args += '%s = %s, ' % (key, value)
mode_args += '%s = %s' % (self.fields['mode']['rate']['type'], self.fields['mode']['rate']['value'])
if self.mode_desc == STLTXCont.__str__():
stream_params_list.append('mode = STLTXCont(%s)' % mode_args)
elif self.mode_desc == STLTXSingleBurst().__str__():
stream_params_list.append('mode = STLTXSingleBurst(%s)' % mode_args)
elif self.mode_desc == STLTXMultiBurst().__str__():
stream_params_list.append('mode = STLTXMultiBurst(%s)' % mode_args)
else:
raise STLError('Could not determine mode: %s' % self.mode_desc)
stream = "stream = STLStream(" + ',\n '.join(stream_params_list) + ')'
return '\n'.join([imports, packet_code, vm_code, stream])
# Add quotes for strings, or leave the value as-is for any other type.
@staticmethod
def __add_quotes(arg):
    """Quote *arg* for generated Python source.

    Used by to_code() when emitting keyword arguments: string values must
    appear quoted in the generated code, everything else is emitted verbatim.
    """
    # isinstance instead of `type(arg) is str` so str subclasses are
    # also quoted correctly.
    if isinstance(arg, str):
        return "'%s'" % arg
    return arg
# Used to replace non-printable characters with their hex escape.
@staticmethod
def __replchars_to_hex(match):
    """Return the ``\\xNN`` escape for the single character in *match*.

    Intended as the replacement callable of a ``re.sub`` over packet bytes.
    """
    char_code = ord(match.group())
    return r'\x{0:02x}'.format(char_code)
def dump_to_yaml(self, yaml_file=None):
    """Render this stream as YAML and return the string.

    When *yaml_file* is given (truthy), the YAML text is also written to
    that path.
    """
    dumped = yaml.dump([self.to_yaml()], default_flow_style=False)

    # Persist only when a destination path was supplied.
    if yaml_file:
        with open(yaml_file, 'w') as out:
            out.write(dumped)

    return dumped
class YAMLLoader(object):
    """Parses a YAML stream-profile file into a list of STLStream objects."""

    def __init__ (self, yaml_file):
        # Remember the directory so relative 'pcap' paths resolve next to the file.
        self.yaml_path = os.path.dirname(yaml_file)
        self.yaml_file = yaml_file

    def __parse_packet (self, packet_dict):
        """Build an STLPktBuilder from a 'packet' section (inline base64 or pcap)."""
        # A packet comes from exactly one source: inline base64 or a pcap file.
        packet_type = set(packet_dict).intersection(['binary', 'pcap'])
        if len(packet_type) != 1:
            raise STLError("Packet section must contain either 'binary' or 'pcap'")

        if 'binary' in packet_type:
            try:
                pkt_str = base64.b64decode(packet_dict['binary'])
            except TypeError:
                raise STLError("'binary' field is not a valid packet format")

            builder = STLPktBuilder(pkt_buffer = pkt_str)

        elif 'pcap' in packet_type:
            pcap = os.path.join(self.yaml_path, packet_dict['pcap'])

            if not os.path.exists(pcap):
                raise STLError("'pcap' - cannot find '{0}'".format(pcap))

            builder = STLPktBuilder(pkt = pcap)

        return builder

    def __parse_mode (self, mode_obj):
        """Build a TX-mode object from a 'mode' section; None when absent."""
        if not mode_obj:
            return None

        # Exactly one rate specification is allowed.
        rate_parser = set(mode_obj).intersection(['pps', 'bps_L1', 'bps_L2', 'percentage'])
        if len(rate_parser) != 1:
            raise STLError("'rate' must contain exactly one from 'pps', 'bps_L1', 'bps_L2', 'percentage'")

        rate_type = rate_parser.pop()
        rate = {rate_type : mode_obj[rate_type]}

        mode_type = mode_obj.get('type')

        if mode_type == 'continuous':
            mode = STLTXCont(**rate)

        elif mode_type == 'single_burst':
            # Missing fields fall back to the mode object's own defaults.
            defaults = STLTXSingleBurst()
            mode = STLTXSingleBurst(total_pkts = mode_obj.get('total_pkts', defaults.fields['total_pkts']),
                                    **rate)

        elif mode_type == 'multi_burst':
            defaults = STLTXMultiBurst()
            mode = STLTXMultiBurst(pkts_per_burst = mode_obj.get('pkts_per_burst', defaults.fields['pkts_per_burst']),
                                   ibg = mode_obj.get('ibg', defaults.fields['ibg']),
                                   count = mode_obj.get('count', defaults.fields['count']),
                                   **rate)

        else:
            raise STLError("mode type can be 'continuous', 'single_burst' or 'multi_burst")

        return mode

    def __parse_flow_stats (self, flow_stats_obj):
        """Build STLFlowStats from a 'flow_stats' section; None when absent/disabled."""
        # no such object
        if not flow_stats_obj or flow_stats_obj.get('enabled') == False:
            return None

        pg_id = flow_stats_obj.get('stream_id')
        if pg_id == None:
            raise STLError("Enabled RX stats section must contain 'stream_id' field")

        return STLFlowStats(pg_id = pg_id)

    def __parse_stream (self, yaml_object):
        """Convert one YAML stream object into an STLStream."""
        s_obj = yaml_object['stream']

        # parse packet
        packet = s_obj.get('packet')
        if not packet:
            raise STLError("YAML file must contain 'packet' field")

        builder = self.__parse_packet(packet)

        # mode
        mode = self.__parse_mode(s_obj.get('mode'))

        # rx stats
        flow_stats = self.__parse_flow_stats(s_obj.get('flow_stats'))

        # Fields missing from the YAML fall back to the default stream's values.
        defaults = default_STLStream

        # create the stream
        stream = STLStream(name = yaml_object.get('name'),
                           packet = builder,
                           mode = mode,
                           flow_stats = flow_stats,
                           enabled = s_obj.get('enabled', defaults.fields['enabled']),
                           self_start = s_obj.get('self_start', defaults.fields['self_start']),
                           isg = s_obj.get('isg', defaults.fields['isg']),
                           next = yaml_object.get('next'),
                           action_count = s_obj.get('action_count', defaults.fields['action_count']),
                           mac_src_override_by_pkt = s_obj.get('mac_src_override_by_pkt', 0),
                           # BUG FIX: this previously read the
                           # 'mac_src_override_by_pkt' key (copy-paste error),
                           # silently ignoring any 'mac_dst_override_mode'
                           # value present in the YAML.
                           mac_dst_override_mode = s_obj.get('mac_dst_override_mode', 0)
                           )

        # hack the VM fields for now
        if 'vm' in s_obj:
            stream.fields['vm'].update(s_obj['vm'])

        return stream

    def parse (self):
        """Parse the YAML file and return the list of STLStream objects."""
        with open(self.yaml_file, 'r') as f:
            # read YAML and pass it down to stream object
            yaml_str = f.read()

            try:
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary Python objects -- only feed it trusted
                # profile files (yaml.safe_load would be the hardened choice).
                objects = yaml.load(yaml_str)
            except yaml.parser.ParserError as e:
                raise STLError(str(e))

            # 'obj' rather than shadowing the builtin 'object'
            streams = [self.__parse_stream(obj) for obj in objects]

            return streams
# profile class
class STLProfile(object):
    """ Describe a list of streams

        .. code-block:: python

            :caption: STLProfile Example

            profile =  STLProfile( [ STLStream( isg = 10.0, # start in delay
                                                name    ='S0',
                                                packet = STLPktBuilder(pkt = base_pkt/pad),
                                                mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size),
                                                next = 'S1'), # point to next stream

                                     STLStream( self_start = False, # stream is disabled, enabled through S0
                                                name    ='S1',
                                                packet  = STLPktBuilder(pkt = base_pkt1/pad),
                                                mode    = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size),
                                                next    = 'S2' ),

                                     STLStream(  self_start = False, # stream is disabled, enabled through S0
                                                 name   ='S2',
                                                 packet = STLPktBuilder(pkt = base_pkt2/pad),
                                                 mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size )
                                                )
                                    ]).get_streams()

    """

    def __init__ (self, streams = None):
        """
        :parameters:
              streams  : list of :class:`trex_stl_lib.trex_stl_streams.STLStream`
                         a list of stream objects
        """
        if streams == None:
            streams = []

        # a single stream is accepted and normalized to a one-element list
        if not type(streams) == list:
            streams = [streams]

        if not all([isinstance(stream, STLStream) for stream in streams]):
            raise STLArgumentError('streams', streams, valid_values = STLStream)

        self.streams = streams
        # populated by the load_* factory methods: origin of the profile
        # ('yaml' / 'python' / 'pcap') plus related metadata
        self.meta = None

    def get_streams (self):
        """ Get the list of streams"""
        return self.streams

    def __str__ (self):
        return '\n'.join([str(stream) for stream in self.streams])

    def is_pauseable (self):
        # a profile can be paused only if every stream transmits continuously
        return all([x.get_mode() == "Continuous" for x in self.get_streams()])

    def has_custom_mac_addr (self):
        # True if any stream overrides the port's source MAC from the packet
        return any([x.has_custom_mac_addr() for x in self.get_streams()])

    def has_flow_stats (self):
        return any([x.has_flow_stats() for x in self.get_streams()])

    @staticmethod
    def load_yaml (yaml_file):
        """ Load (from YAML file) a profile with a number of streams"""

        # check filename
        if not os.path.isfile(yaml_file):
            raise STLError("file '{0}' does not exists".format(yaml_file))

        yaml_loader = YAMLLoader(yaml_file)
        streams = yaml_loader.parse()

        profile = STLProfile(streams)
        profile.meta = {'type': 'yaml'}

        return profile

    @staticmethod
    def get_module_tunables(module):
        """Map tunable argument names of the module's get_streams() to their defaults."""
        # remove self and variables: co_varnames lists args first, skip 'self'
        func = module.register().get_streams
        argc = func.__code__.co_argcount
        tunables = func.__code__.co_varnames[1:argc]

        # fetch defaults
        defaults = func.__defaults__
        if len(defaults) != (argc - 1):
            raise STLError("Module should provide default values for all arguments on get_streams()")

        output = {}
        for t, d in zip(tunables, defaults):
            output[t] = d

        return output

    @staticmethod
    def load_py (python_file, direction = 0, port_id = 0, **kwargs):
        """ Load from Python profile """

        # check filename
        if not os.path.isfile(python_file):
            raise STLError("File '{0}' does not exist".format(python_file))

        # temporarily extend sys.path so the profile module can be imported
        basedir = os.path.dirname(python_file)
        sys.path.append(basedir)

        try:
            # NOTE(review): 'file' shadows the Python 2 builtin; harmless here
            file = os.path.basename(python_file).split('.')[0]
            module = __import__(file, globals(), locals(), [], 0)
            imp.reload(module) # reload the update

            # validate requested tunables against what the profile supports
            t = STLProfile.get_module_tunables(module)
            for arg in kwargs:
                if not arg in t:
                    raise STLError("Profile {0} does not support tunable '{1}' - supported tunables are: '{2}'".format(python_file, arg, t))

            streams = module.register().get_streams(direction = direction,
                                                    port_id = port_id,
                                                    **kwargs)

            profile = STLProfile(streams)

            profile.meta = {'type': 'python',
                            'tunables': t}

            return profile

        except Exception as e:
            # re-wrap any profile error with a trimmed traceback for display
            a, b, tb = sys.exc_info()
            x =''.join(traceback.format_list(traceback.extract_tb(tb)[1:])) + a.__name__ + ": " + str(b) + "\n"

            summary = "\nPython Traceback follows:\n\n" + x
            raise STLError(summary)

        finally:
            # always undo the sys.path mutation
            sys.path.remove(basedir)

    # loop_count = 0 means loop forever
    @staticmethod
    def load_pcap (pcap_file, ipg_usec = None, speedup = 1.0, loop_count = 1, vm = None, packet_hook = None):
        """ Convert a pcap file with a number of packets to a list of connected streams.

        packet1->packet2->packet3 etc

                :parameters:

                  pcap_file  : string
                       Name of the pcap file

                  ipg_usec   : float
                       Inter packet gap in usec. If IPG is None, IPG is taken from pcap file

                  speedup   : float
                       When reading the pcap file, divide IPG by this "speedup" factor. Resulting IPG is sped up by this factor.

                  loop_count : uint16_t
                       Number of loops to repeat the pcap file

                  vm        :  list
                       List of Field engine instructions

                  packet_hook : Callable or function
                       will be applied to every packet

                 :return: STLProfile

        """

        # check filename
        if not os.path.isfile(pcap_file):
            raise STLError("file '{0}' does not exists".format(pcap_file))

        # make sure IPG is not less than 1 usec
        if ipg_usec is not None and ipg_usec < 0.001:
            raise STLError("ipg_usec cannot be less than 0.001 usec: '{0}'".format(ipg_usec))

        if loop_count < 0:
            raise STLError("'loop_count' cannot be negative")

        streams = []
        last_ts_usec = 0

        try:
            pkts = RawPcapReader(pcap_file).read_all()
        except Scapy_Exception as e:
            raise STLError("failed to open PCAP file '{0}'".format(pcap_file))

        # optionally transform every captured packet before building streams
        if packet_hook:
            pkts = [(packet_hook(cap), meta) for (cap, meta) in pkts]

        for i, (cap, meta) in enumerate(pkts, start = 1):
            # IPG - if not provided, take from cap
            if ipg_usec == None:
                # assumes meta is (sec, usec) of the capture timestamp -- TODO confirm
                ts_usec = (meta[0] * 1e6 + meta[1]) / float(speedup)
            else:
                ts_usec = (ipg_usec * i) / float(speedup)

            # handle last packet: chain back to the first stream so the pcap
            # replays 'loop_count' times (0 == forever)
            if i == len(pkts):
                next = 1
                action_count = loop_count
            else:
                next = i + 1
                action_count = 0

            streams.append(STLStream(name = i,
                                     packet = STLPktBuilder(pkt_buffer = cap, vm = vm),
                                     mode = STLTXSingleBurst(total_pkts = 1, percentage = 100),
                                     self_start = True if (i == 1) else False,
                                     isg = (ts_usec - last_ts_usec),  # seconds to usec
                                     action_count = action_count,
                                     next = next))

            last_ts_usec = ts_usec

        profile = STLProfile(streams)
        profile.meta = {'type': 'pcap'}

        return profile

    @staticmethod
    def load (filename, direction = 0, port_id = 0, **kwargs):
        """ Load a profile by its type. Supported types are:
           * py
           * yaml
           * pcap file that converted to profile automaticly

           :Parameters:
              filename  : string as filename
              direction : profile's direction (if supported by the profile)
              port_id   : which port ID this profile is being loaded to
              kwargs    : forward those key-value pairs to the profile

        """

        # NOTE(review): assumes exactly one dot in the basename; a name such
        # as 'profile.tar.gz' yields suffix None and is rejected below
        x = os.path.basename(filename).split('.')
        suffix = x[1] if (len(x) == 2) else None

        if suffix == 'py':
            profile = STLProfile.load_py(filename, direction, port_id, **kwargs)

        elif suffix == 'yaml':
            profile = STLProfile.load_yaml(filename)

        elif suffix in ['cap', 'pcap']:
            profile = STLProfile.load_pcap(filename, speedup = 1, ipg_usec = 1e6)

        else:
            raise STLError("unknown profile file type: '{0}'".format(suffix))

        profile.meta['stream_count'] = len(profile.get_streams()) if isinstance(profile.get_streams(), list) else 1

        return profile

    @staticmethod
    def get_info (filename):
        """Load *filename* and return only its meta dictionary."""
        profile = STLProfile.load(filename)
        return profile.meta

    def dump_as_pkt (self):
        """ Dump the profile as Scapy packet. If the packet is raw, convert it to Scapy before dumping it."""
        cnt=0;
        for stream in self.streams:
            print("=======================")
            print("Stream %d" % cnt)
            print("=======================")

            cnt = cnt +1
            stream.to_pkt_dump()

    def dump_to_yaml (self, yaml_file = None):
        """ Convert the profile to yaml """
        yaml_list = [stream.to_yaml() for stream in self.streams]
        yaml_str = yaml.dump(yaml_list, default_flow_style = False)

        # write to file if provided
        if yaml_file:
            with open(yaml_file, 'w') as f:
                f.write(yaml_str)

        return yaml_str

    def dump_to_code (self, profile_file = None):
        """ Convert the profile to Python native profile. """
        # header of the generated module; stream code is appended per stream
        profile_dump = '''# !!! Auto-generated code !!!
from trex_stl_lib.api import *

class STLS1(object):

    def get_streams(self, direction = 0, **kwargs):
        streams = []
'''
        for stream in self.streams:
            # indent each generated stream body into the get_streams() method
            profile_dump += ' '*8 + stream.to_code().replace('\n', '\n' + ' '*8) + '\n'
            profile_dump += ' '*8 + 'streams.append(stream)\n'
        profile_dump += '''
        return streams

def register():
    return STLS1()
'''
        # write to file if provided
        if profile_file:
            with open(profile_file, 'w') as f:
                f.write(profile_dump)

        return profile_dump

    def __len__ (self):
        return len(self.streams)
# Module-level template stream holding every STLStream default value.
# to_code() and YAMLLoader diff against it to emit/read only non-default
# fields, so it must be constructed with no arguments and stay after the
# STLStream class definition.
default_STLStream = STLStream()
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2012 Mike Lewis
import logging; log = logging.getLogger(__name__)
from . import image_util
class Connection(object):
    """Enclosing object through which the system is managed"""

    def __init__(self, s3_connection, bucket_name, bucket_url):
        """Set up the AWS creds and bucket details"""
        self.s3 = s3_connection
        self.bucket_name = bucket_name
        self.bucket_url = bucket_url

    @property
    def bucket(self):
        """Get the S3 bucket being used by this manager (resolved lazily, once)."""
        try:
            return self._bucket
        except AttributeError:
            self._bucket = self.s3.get_bucket(self.bucket_name)
            return self._bucket

    def save_image(self, key_name, image, format):
        """Saves this image to an S3 bucket, using a specified file format"""
        log.debug(u'Saving key [{key_name} to {bucket}]'.format(
            key_name=key_name,
            bucket=self.bucket))
        key = self.bucket.new_key(key_name)
        format = format.upper()

        # Serialize the image and pick the matching MIME type.
        if format == u'JPEG':
            image_str = image_util.ImageHelper(image).to_jpeg_string()
            content_type = 'image/jpeg'
        elif format == u'PNG':
            image_str = image_util.ImageHelper(image).to_png_string()
            content_type = 'image/png'
        else:
            # Unsupported format: log and bail out without uploading.
            log.error(u'{0} is not a supported image format'.format(format))
            return

        key.set_metadata('Content-Type', content_type)
        return key.set_contents_from_string(image_str)

    def delete_image(self, key):
        """Remove this image from S3"""
        return self.bucket.delete_key(key)

    def get_collection_manager(self, *args, **kwargs):
        """Gets a collection manager stemming from this connection"""
        return CollectionManager(self, *args, **kwargs)
class CollectionManager(object):
    """Management object through which collections of images are processed using the same settings.
    The collection_spec dictates the functionality that an instance will provide.
    Parameters (with examples):
        key_prefix=u'users/' # Allows "foldering" of different collections within the same bucket
        format=u'JPEG', # format to save original and derivatives in
        derivative_specs=[ # A list of specs to create derivative images
            {
                'filters': [ThumbnailFilter(120, 80)], # chained list of filters to apply
                'key_suffix': u'(120x80)', # suffix to apply to the key, identifying this derivative
            },
            {
                'filters': [ThumbnailFilter(20, 10)],
                'key_suffix': u'(20x10)',
            },
        ]
        blacklist=[ # Blacklist of md5 hashes to ignore incoming images of
            u'917aa09622f73d57a50294dde50cfdc8',
            u'404b31849f87463d1b51284a0a1c6b65',
            u'59610c7d0716126dc89c299bb92e4ca8',
            u'49f83104c9a168a633314f64723ee7a5',
        ]
    """
    def __init__(self, connection, key_prefix=u'', default_image=None, format=u'JPEG', derivative_specs=None, blacklist=None):
        """Stash the parameters for use on individual processing.

        BUG FIX: derivative_specs and blacklist previously defaulted to
        mutable ``[]`` literals shared by every instance; they now default
        to None and each instance gets its own fresh list.
        """
        self.connection = connection
        self.key_prefix = key_prefix
        self.default_image = default_image
        self.format = format
        self.derivative_specs = derivative_specs if derivative_specs is not None else []
        self.blacklist = blacklist if blacklist is not None else []

    def process_image_string(self, image_string):
        """Process an image string"""
        image = image_util.load_image_from_string(image_string)
        return self.process_image(image)

    def process_image_url(self, image_url):
        """Process an image url"""
        image = image_util.load_image_from_url(image_url)
        return self.process_image(image)

    def process_image_file(self, image_file):
        """Process an image file"""
        image = image_util.load_image_from_file(image_file)
        return self.process_image(image)

    def save_image(self, image, image_id):
        """Save this image to persistence"""
        # Make sure we're playing with a valid image
        if not image:
            log.error(u'image is invalid: {0}'.format(image))
            return None
        key = self.id_to_key(image_id)
        self.connection.save_image(key, image, self.format)

    def process_derivatives(self, image, image_id, **kwargs):
        """Did your spec change? Make sure your derivatives are up to date"""
        if not image:
            log.error(u'image is invalid: {0}'.format(image))
            return None
        key = self.id_to_key(image_id)
        # Map each spec's key_suffix to the derivative image it produced.
        derivs = dict()
        for derivative_spec in self.derivative_specs:
            deriv = self._save_derivative_image(key, image, derivative_spec, **kwargs)
            if deriv:
                derivs[derivative_spec.get('key_suffix')] = deriv
        return derivs

    def id_to_key(self, image_id):
        """Combines self.key_prefix with this id"""
        return u'{key_prefix}{id}'.format(
            key_prefix=self.key_prefix,
            id=image_id)

    def get_url(self, image_id):
        """Get the url, given this hash. Gets default if present and needed"""
        key = image_id if image_id else self.default_image
        if key:
            return u'{bucket_url}{key}'.format(
                bucket_url=self.connection.bucket_url,
                key=self.id_to_key(key))
        else:
            return None

    def get_image(self, image_id):
        """Get the actual image of this id"""
        url = self.get_url(image_id)
        return image_util.load_image_from_url(url) if url else None

    def delete_image_by_id(self, image_id):
        """Removes this image and derivatives from S3"""
        base_key = self.id_to_key(image_id)
        self.connection.delete_image(base_key)
        for spec in self.derivative_specs:
            derivative_key = u'{base_key}{suffix}'.format(
                base_key=base_key,
                suffix=spec.get('key_suffix', u''))
            self.connection.delete_image(derivative_key)

    def _save_derivative_image(self, base_key, image, spec, force=False):
        """Generates and stores the derivative based upon a spec"""
        derivative_key = u'{base_key}{suffix}'.format(
            base_key=base_key,
            suffix=spec.get('key_suffix', u''))
        # If force or if key does not exist
        if force or not self.connection.bucket.get_key(derivative_key):
            derivative_image = self._apply_image_filters(image, spec['filters'])
            self.connection.save_image(derivative_key, derivative_image, self.format)
            return derivative_image
        return None

    def _apply_image_filters(self, image, filters=None):
        """Creates a derivative image from an original using a filter chain (first-to-last)"""
        # filters=None instead of a shared mutable [] default.
        derivative = image
        for image_filter in (filters or []):
            derivative = image_filter(derivative)
        return derivative

    # ---- Old-style API (kept for backwards compatibility) ----

    def process_image(self, image, save_original=True):
        """Process this image according to this collection's spec"""
        # Make sure we're playing with a valid image
        if not image:
            log.error(u'image is invalid: {0}'.format(image))
            return None
        # Get the md5 hash of the original image. We'll use this as the base s3 key.
        image_hash = image_util.ImageHelper(image).md5_hash()
        # Make sure this isn't in the blacklist
        if image_hash in self.blacklist:
            log.debug(u'image found in blacklist: {0}'.format(image_hash))
            return None
        key = self.id_to_key(image_hash)
        # Store the original
        if save_original:
            self.connection.save_image(key, image, self.format)
        # Process each requested derivative
        for derivative_spec in self.derivative_specs:
            self._save_derivative_image(key, image, derivative_spec)
        # Return the image hash used
        return image_hash

    def reprocess_derivatives(self, hash, force=False):
        """Did your spec change? Make sure your derivatives are up to date"""
        image = self.get_image(hash)
        key = self.id_to_key(hash)
        if image:
            for derivative_spec in self.derivative_specs:
                self._save_derivative_image(key, image, derivative_spec, force)
        else:
            log.warning(u'Couldn\'t find image: {0}'.format(hash))
|
|
'''
Created on Aug 19, 2012
@author: eric
'''
from ambry.sourcesupport.uscensus import UsCensusDimBundle
from ambry.sourcesupport.uscensus import UsCensusFactBundle
class Us2000CensusDimBundle(UsCensusDimBundle):
    '''
    Bundle code for US 2000 Census, Summary File 1
    '''

    def __init__(self, directory=None):
        # Cache the bound super() proxy, mirroring the sibling FactBundle.
        self.super_ = super(Us2000CensusDimBundle, self)
        self.super_.__init__(directory)

    def _scrape_urls(self, rootUrl, states_file, suffix='_uf1'):
        '''Extract all of the URLS from the Census website and store them.

        Returns ``{'tables': {}, 'geos': {state: url}}`` -- only the geo
        files matter for the dimension bundle, 'tables' is kept empty so the
        return shape matches Us2000CensusFactBundle._scrape_urls.
        '''
        # NOTE(review): Python 2 only -- urllib.urlretrieve and the urlparse
        # module do not exist under these names in Python 3.
        import urllib
        import urlparse
        import re
        from bs4 import BeautifulSoup

        log = self.log
        tick = self.ptick

        # Load in a list of states, so we know which links to follow.
        # Materialized with list() because 'state in states' below is tested
        # once per link; a bare iterator would be exhausted after one test.
        with open(states_file) as f:
            states = list(map(lambda s: s.strip(), f.readlines()))

        # Root URL for downloading files.
        self.log('Getting URLS from ' + rootUrl)
        doc = urllib.urlretrieve(rootUrl)

        # Get all of the links
        log('S = state, T = segment table, g = geo')
        tables = {}
        geos = {}

        with open(doc[0]) as df:
            for link in BeautifulSoup(df).find_all('a'):
                tick('S')
                if not link.get('href') or not link.string or not link.contents:
                    continue  # Didn't get a sensible link

                # Only descend into links that name a state
                state = link.get('href').strip('/')
                if link.string and link.contents[0] and state in states:
                    stateUrl = urlparse.urljoin(rootUrl, link.get('href'))
                    stateIndex = urllib.urlretrieve(stateUrl)
                    # Get all of the zip files in the directory
                    with open(stateIndex[0]) as f:
                        for link in BeautifulSoup(f).find_all('a'):
                            if link.get('href') and '.zip' in link.get('href'):
                                final_url = urlparse.urljoin(stateUrl, link.get('href')).encode('ascii', 'ignore')
                                if 'geo' + suffix in final_url:
                                    tick('g')
                                    # raw string avoids the invalid '\w' escape warning
                                    state = re.match(r'.*/(\w{2})geo' + suffix, final_url).group(1)
                                    geos[state] = final_url

        return {'tables': tables, 'geos': geos}

    def build_generate_rows(self, state):
        '''A generator that yields rows from the state geo files. It will
        unpack the fixed width file and return a dict'''
        import struct
        import zipfile

        table = self.schema.table('geofile')
        header, unpack_str, length = table.get_fixed_unpack()  # @UnusedVariable

        rows = 0

        def test_zip_file(f):
            # Download validator: accept only a structurally sound zip file.
            try:
                with zipfile.ZipFile(f) as zf:
                    return zf.testzip() is None
            except zipfile.BadZipfile:
                return False

        geo_source = self.urls['geos'][state]
        geo_zip_file = self.filesystem.download(geo_source, test_zip_file)
        grf = self.filesystem.unzip(geo_zip_file)

        geofile = open(grf, 'rbU', buffering=1 * 1024 * 1024)

        for line in geofile.readlines():
            rows += 1

            # Short-circuit long runs in test mode.
            if rows > 20000 and self.run_args.test:
                break

            try:
                geo = struct.unpack(unpack_str, line[:-1])
            except struct.error as e:
                self.error("Struct error for state={}, file={}, line_len={}, row={}, \nline={}"
                           .format(state, grf, len(line), rows, line))
                # BUG FIX: previously fell through after logging, so 'geo'
                # was unbound (first line) or stale (later lines) on the next
                # statement. Re-raise, matching Us2000CensusFactBundle.
                raise e

            if not geo:
                raise ValueError("Failed to match regex on line: " + line)

            yield dict(zip(header, geo))

        geofile.close()
class Us2000CensusFactBundle(UsCensusFactBundle):
    '''
    Bundle code for US 2000 Census, Summary File 1
    '''

    def __init__(self,directory=None):
        # Cache the bound super() proxy; the rest of the class calls through it.
        self.super_ = super(Us2000CensusFactBundle, self)
        self.super_.__init__(directory)

    def _scrape_urls(self, rootUrl, states_file, suffix='_uf1'):
        '''Extract all of the URLS from the Census website and store them.

        Returns ``{'tables': {state: {segment: url}}, 'geos': {state: url}}``.
        '''
        # NOTE(review): Python 2 only -- urllib.urlretrieve and the urlparse
        # module do not exist under these names in Python 3.
        import urllib
        import urlparse
        import re
        from bs4 import BeautifulSoup

        log = self.log
        tick = self.ptick

        # Load in a list of states, so we know which links to follow
        with open(states_file) as f:
            states = map(lambda s: s.strip(),f.readlines())

        # Root URL for downloading files.
        doc = urllib.urlretrieve(rootUrl)
        log('Getting URLS from '+rootUrl)

        # Get all of the links
        log('S = state, T = segment table, g = geo')
        tables = {}
        geos = {}

        with open(doc[0]) as df:
            for link in BeautifulSoup(df).find_all('a'):
                tick('S')
                if not link.get('href') or not link.string or not link.contents:
                    continue# Didn't get a sensible link

                # Only descend into links that name a state
                state = link.get('href').strip('/')
                if link.string and link.contents[0] and state in states :
                    stateUrl = urlparse.urljoin(rootUrl, link.get('href'))
                    stateIndex = urllib.urlretrieve(stateUrl)
                    # Get all of the zip files in the directory
                    with open(stateIndex[0]) as f:
                        for link in BeautifulSoup(f).find_all('a'):
                            if link.get('href') and '.zip' in link.get('href'):
                                final_url = urlparse.urljoin(stateUrl, link.get('href')).encode('ascii', 'ignore')
                                if 'geo'+suffix in final_url:
                                    tick('g')
                                    state = re.match('.*/(\w{2})geo'+suffix, final_url).group(1)
                                    geos[state] = final_url
                                else:
                                    tick('T')
                                    res = '.*/(\w{2})(\d{5})'+suffix
                                    m = re.match(res, final_url)
                                    if not m:
                                        raise Exception("Failed to match {} to {} ".format(res, final_url))
                                    state,segment = m.groups()
                                    # zero-padded segment id, e.g. '00001' -> 1
                                    segment = int(segment.lstrip('0'))

                                    if not state in tables:
                                        tables[state] = {}

                                    tables[state][segment] = final_url

        return {'tables':tables,'geos':geos}

    def generate_schema_rows(self):
        '''This generator yields schema rows from the schema definition
        files. This one is specific to the files produced by dumping the Access97
        shell for the 2000 census '''
        import csv

        # NOTE(review): 'rbU' open mode and str.decode are Python 2 idioms.
        with open(self.headers_file, 'rbU') as f:
            reader = csv.DictReader(f )
            last_seg = None
            table = None
            for row in reader:
                if not row['TABLE']:
                    continue
                # Track segment transitions between header rows.
                if row['SEG'] and row['SEG'] != last_seg:
                    last_seg = row['SEG']

                text = row['TEXT'].decode('utf8','ignore').strip()

                # The first two rows for the table give information about the title
                # and population universe, but don't have any column info.
                if( not row['FIELDNUM'] or row['FIELDNUM'] == 'A' ):
                    if row['TABNO']:
                        table = {'type': 'table',
                                 'name':row['TABLE'],'description':text
                                 }
                    else:
                        table['universe'] = text.replace('Universe:','').strip()
                else:
                    # The whole table will exist in one segment ( file number )
                    # but the segment id is not included on the same lines as the
                    # table name.
                    if table:
                        # This is yielded here so we can get the segment number.
                        table['segment'] = row['SEG']
                        table['data'] = {'segment':row['SEG'], 'fact':True}
                        yield table
                        table = None

                    # Column position is encoded in the last 3 digits of FIELDNUM.
                    col_pos = int(row['FIELDNUM'][-3:])

                    yield {
                        'type':'column','name':row['FIELDNUM'],
                        'description':text.strip(),
                        'segment':int(row['SEG']),
                        'col_pos':col_pos,
                        'decimal':int(row['DECIMAL'])
                        }

    def build_generate_seg_rows(self, seg_number, source):
        '''Generate rows for a segment file. Call this generator with send(),
        passing in the expected logrecno. If the next row does not have that
        value, return a blank row until the logrecno values match. '''
        import csv, io, zipfile
        next_logrecno = None
        l = 0  # number of data lines consumed; used for the sanity check below

        def test_zip_file(f):
            # Download validator: accept only a structurally sound zip file.
            try:
                with zipfile.ZipFile(f) as zf:
                    return zf.testzip() is None
            except zipfile.BadZipfile:
                return False

        zip_file = self.filesystem.download(source, test_zip_file)
        rf = self.filesystem.unzip(zip_file)

        of = open(rf, 'rbU', buffering=1*1024*1024)
        for row in csv.reader(of):
            l += 1
            # The next_logrec bit takes care of a difference in the
            # segment files -- the PCT tables do not have entries for
            # tracts, so there are gaps in the logrecno sequence for those files.
            # row[4] is the logrecno column of the segment record.
            while next_logrecno is not None and next_logrecno != row[4]:
                next_logrecno = (yield seg_number, [])

            next_logrecno = (yield seg_number, row)

        of.close()

        if l == 0:
            raise RuntimeError("Didn't get any lines from {} ".format(zip_file))

        return

    def build_generate_rows(self, state, geodim=False):
        '''A Generator that yields a tuple that has the logrecno row
        for all of the segment files and the geo file. '''
        import struct

        table = self.schema.table('geofile')
        header, unpack_str, length = table.get_fixed_unpack() #@UnusedVariable

        geo_source = self.urls['geos'][state]

        # One sub-generator per segment file, driven in lockstep via send().
        gens = [self.build_generate_seg_rows(n,source) for n,source in self.urls['tables'][state].items() ]

        geodim_gen = self.build_generate_geodim_rows(state) if geodim else None

        rows = 0

        def test_zip_file(f):
            import zipfile
            # Download validator: accept only a structurally sound zip file.
            try:
                with zipfile.ZipFile(f) as zf:
                    return zf.testzip() is None
            except zipfile.BadZipfile:
                return False

        geo_zip_file = self.filesystem.download(geo_source, test_zip_file)
        grf = self.filesystem.unzip(geo_zip_file)

        geofile = open(grf, 'rbU', buffering=1*1024*1024)

        first = True
        for line in geofile.readlines():
            rows += 1

            # Short-circuit long runs in test mode.
            if rows > 20000 and self.run_args.test:
                break

            try:
                geo = struct.unpack(unpack_str, line[:-1])
            except struct.error as e:
                self.error("Struct error for state={}, file={}, line_len={}, row={}, \nline={}"
                           .format(state,grf,len(line),rows, line))
                raise e

            if not geo:
                raise ValueError("Failed to match regex on line: "+line)

            segments = {}

            # presumably field 6 of the unpacked geo record is the logrecno
            # -- verify against the geofile schema
            lrn = geo[6]

            # load segment data from all of the files.
            for index, g in enumerate(gens):
                try:
                    # First iteration primes each generator with send(None).
                    seg_number, row = g.send(None if first else lrn)
                    segments[seg_number] = row
                    # The logrecno must match up across all files, except
                    # when ( in PCT tables ) there is no entry
                    if len(row) > 5 and row[4] != lrn:
                        raise Exception("Logrecno mismatch for seg {} : {} != {}"
                                        .format(seg_number, row[4],lrn))
                except StopIteration:
                    # Apparently, the StopIteration exception, raised in
                    # a generator function, gets propagated all the way up,
                    # ending all higher level generators. thanks for nuthin.
                    #self.log("Got StopIteration in build_generate_rows at logrec={}. Is seg file state={} index={} seg_number={} shorter?"
                    #    .format(lrn,state, index, seg_number))
                    break

            # NOTE(review): rebinds the 'geodim' parameter to the current row;
            # .next() is the Python 2 generator protocol.
            geodim = geodim_gen.next() if geodim_gen is not None else None

            if geodim and geodim[0] != int(lrn):
                m = "Logrecno mismatch for geodim : {} != {}".format(geodim[0],lrn)
                self.error(m)
                raise Exception(m)

            first = False

            if not 1 in segments:
                # There are segments that are shorter than others ( There are two groups
                # of sizes, but the first segment is always the same size ( in lines )
                # as the geo file. If not, it is an error.
                m = "Segment 1 is short for state={}".format(state)
                self.error(m)
                raise Exception(m)

            yield state, segments[1][4], dict(zip(header,geo)), segments, geodim

        geofile.close()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpRetry(object):
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head408(
self, custom_headers={}, raw=False, **operation_config):
"""
Return 408 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/408'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put500(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch500(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get502(
self, custom_headers={}, raw=False, **operation_config):
"""
Return 502 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/502'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post503(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete503(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put504(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 504 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch504(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 504 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
|
|
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, Tuple, cast
import attr
from synapse.api.constants import EventContentFields, RelationTypes
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_tuple_comparison_clause,
)
from synapse.storage.databases.main.events import PersistEventsStore
from synapse.storage.types import Cursor
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# SQL run once the `stream_ordering2` column has been fully populated (these
# background updates only run on postgres — see __init__ below): swaps the new
# column into place of the old `stream_ordering` and renames the shadow
# indexes to the canonical names, for consistency with sqlite.
_REPLACE_STREAM_ORDERING_SQL_COMMANDS = (
    # there should be no leftover rows without a stream_ordering2, but just in case...
    "UPDATE events SET stream_ordering2 = stream_ordering WHERE stream_ordering2 IS NULL",
    # now we can drop the rule and switch the columns
    "DROP RULE populate_stream_ordering2 ON events",
    "ALTER TABLE events DROP COLUMN stream_ordering",
    "ALTER TABLE events RENAME COLUMN stream_ordering2 TO stream_ordering",
    # ... and finally, rename the indexes into place for consistency with sqlite
    "ALTER INDEX event_contains_url_index2 RENAME TO event_contains_url_index",
    "ALTER INDEX events_order_room2 RENAME TO events_order_room",
    "ALTER INDEX events_room_stream2 RENAME TO events_room_stream",
    "ALTER INDEX events_ts2 RENAME TO events_ts",
)
class _BackgroundUpdates:
    """Names of background updates registered by this store.

    Kept as constants so that the registration calls and the
    progress/completion bookkeeping in the handlers use the same strings.
    """

    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
    POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2"
    INDEX_STREAM_ORDERING2 = "index_stream_ordering2"
    INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url"
    INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order"
    INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream"
    INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts"
    REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _CalculateChainCover:
    """Return value for _calculate_chain_cover_txn.

    Carries the resume position for the next batch plus the set of rooms
    whose events have all been processed.
    """

    # The last room_id/depth/stream processed.
    room_id: str
    depth: int
    stream: int

    # Number of rows processed
    processed_count: int

    # Map from room_id to last depth/stream processed for each room that we have
    # processed all events for (i.e. the rooms we can flip the
    # `has_auth_chain_index` for)
    finished_room_map: Dict[str, Tuple[int, int]]
class EventsBackgroundUpdatesStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        """Register the background updates and index builds this store owns.

        Handlers registered here are looked up by name; the names must match
        the strings used in each handler's progress/end bookkeeping.
        """
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME,
            self._background_reindex_origin_server_ts,
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
            self._background_reindex_fields_sender,
        )

        self.db_pool.updates.register_background_index_update(
            "event_contains_url_index",
            index_name="event_contains_url_index",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering"],
            where_clause="contains_url = true AND outlier = false",
        )

        # an event_id index on event_search is useful for the purge_history
        # api. Plus it means we get to enforce some integrity with a UNIQUE
        # clause
        self.db_pool.updates.register_background_index_update(
            "event_search_event_id_idx",
            index_name="event_search_event_id_idx",
            table="event_search",
            columns=["event_id"],
            unique=True,
            psql_only=True,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES,
            self._cleanup_extremities_bg_update,
        )

        self.db_pool.updates.register_background_update_handler(
            "redactions_received_ts", self._redactions_received_ts
        )

        # This index gets deleted in `event_fix_redactions_bytes` update
        self.db_pool.updates.register_background_index_update(
            "event_fix_redactions_bytes_create_index",
            index_name="redactions_censored_redacts",
            table="redactions",
            columns=["redacts"],
            where_clause="have_censored",
        )

        self.db_pool.updates.register_background_update_handler(
            "event_fix_redactions_bytes", self._event_fix_redactions_bytes
        )

        self.db_pool.updates.register_background_update_handler(
            "event_store_labels", self._event_store_labels
        )

        self.db_pool.updates.register_background_index_update(
            "redactions_have_censored_ts_idx",
            index_name="redactions_have_censored_ts",
            table="redactions",
            columns=["received_ts"],
            where_clause="NOT have_censored",
        )

        self.db_pool.updates.register_background_index_update(
            "users_have_local_media",
            index_name="users_have_local_media",
            table="local_media_repository",
            columns=["user_id", "created_ts"],
        )

        self.db_pool.updates.register_background_update_handler(
            "rejected_events_metadata",
            self._rejected_events_metadata,
        )

        self.db_pool.updates.register_background_update_handler(
            "chain_cover",
            self._chain_cover_index,
        )

        self.db_pool.updates.register_background_update_handler(
            "purged_chain_cover",
            self._purged_chain_cover_index,
        )

        # The event_thread_relation background update was replaced with the
        # event_arbitrary_relations one, which handles any relation to avoid
        # needed to potentially crawl the entire events table in the future.
        self.db_pool.updates.register_noop_background_update("event_thread_relation")

        self.db_pool.updates.register_background_update_handler(
            "event_arbitrary_relations",
            self._event_arbitrary_relations,
        )

        ################################################################################

        # bg updates for replacing stream_ordering with a BIGINT
        # (these only run on postgres.)

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
            self._background_populate_stream_ordering2,
        )
        # CREATE UNIQUE INDEX events_stream_ordering ON events(stream_ordering2);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2,
            index_name="events_stream_ordering",
            table="events",
            columns=["stream_ordering2"],
            unique=True,
        )
        # CREATE INDEX event_contains_url_index ON events(room_id, topological_ordering, stream_ordering) WHERE contains_url = true AND outlier = false;
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_CONTAINS_URL,
            index_name="event_contains_url_index2",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering2"],
            where_clause="contains_url = true AND outlier = false",
        )
        # CREATE INDEX events_order_room ON events(room_id, topological_ordering, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_ORDER,
            index_name="events_order_room2",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering2"],
        )
        # CREATE INDEX events_room_stream ON events(room_id, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_STREAM,
            index_name="events_room_stream2",
            table="events",
            columns=["room_id", "stream_ordering2"],
        )
        # CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_TS,
            index_name="events_ts2",
            table="events",
            columns=["origin_server_ts", "stream_ordering2"],
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN,
            self._background_replace_stream_ordering_column,
        )

        ################################################################################
    async def _background_reindex_fields_sender(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Backfill the `sender` and `contains_url` columns of `events`.

        Scans `events` in descending `stream_ordering` order, `batch_size`
        rows at a time, copying the values out of the stored event JSON.
        Progress is recorded so the scan resumes where it left off; returns
        the number of rows scanned (0 ends the background update).
        """
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        def reindex_txn(txn: LoggingTransaction) -> int:
            sql = (
                "SELECT stream_ordering, event_id, json FROM events"
                " INNER JOIN event_json USING (event_id)"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            )

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            # Scanning descends, so the last row is the new exclusive upper
            # bound for the next batch.
            min_stream_id = rows[-1][0]

            update_rows = []
            for row in rows:
                try:
                    event_id = row[1]
                    event_json = db_to_json(row[2])
                    sender = event_json["sender"]
                    content = event_json["content"]

                    # Only count a "url" key if its value is a string.
                    contains_url = "url" in content
                    if contains_url:
                        contains_url &= isinstance(content["url"], str)
                except (KeyError, AttributeError):
                    # If the event is missing a necessary field then
                    # skip over it.
                    continue

                update_rows.append((sender, contains_url, event_id))

            sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"

            txn.execute_batch(sql, update_rows)

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows),
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
            )

            return len(rows)

        result = await self.db_pool.runInteraction(
            _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
        )

        if not result:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
            )

        return result
    async def _background_reindex_origin_server_ts(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Backfill the `origin_server_ts` column of `events` from event JSON.

        Scans `events` in descending `stream_ordering` order, `batch_size`
        rows at a time, fetching the JSON in chunks of 100 event IDs.
        Returns the number of rows updated (0 ends the background update).
        """
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        def reindex_search_txn(txn: LoggingTransaction) -> int:
            sql = (
                "SELECT stream_ordering, event_id FROM events"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            )

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            # Scanning descends, so the last row is the new exclusive upper
            # bound for the next batch.
            min_stream_id = rows[-1][0]
            event_ids = [row[1] for row in rows]

            rows_to_update = []

            # Fetch event JSON 100 ids at a time to bound query size.
            chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
            for chunk in chunks:
                ev_rows = self.db_pool.simple_select_many_txn(
                    txn,
                    table="event_json",
                    column="event_id",
                    iterable=chunk,
                    retcols=["event_id", "json"],
                    keyvalues={},
                )

                for row in ev_rows:
                    event_id = row["event_id"]
                    event_json = db_to_json(row["json"])
                    try:
                        origin_server_ts = event_json["origin_server_ts"]
                    except (KeyError, AttributeError):
                        # If the event is missing a necessary field then
                        # skip over it.
                        continue

                    rows_to_update.append((origin_server_ts, event_id))

            sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"

            txn.execute_batch(sql, rows_to_update)

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows_to_update),
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, progress
            )

            return len(rows_to_update)

        result = await self.db_pool.runInteraction(
            _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
        )

        if not result:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME
            )

        return result
    async def _cleanup_extremities_bg_update(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Background update to clean out extremities that should have been
        deleted previously.

        Mainly used to deal with the aftermath of #5269.

        Returns the number of extremities checked this batch; once that hits
        zero the update is ended and the scratch table is dropped.
        """

        # This works by first copying all existing forward extremities into the
        # `_extremities_to_check` table at start up, and then checking each
        # event in that table whether we have any descendants that are not
        # soft-failed/rejected. If that is the case then we delete that event
        # from the forward extremities table.
        #
        # For efficiency, we do this in batches by recursively pulling out all
        # descendants of a batch until we find the non soft-failed/rejected
        # events, i.e. the set of descendants whose chain of prev events back
        # to the batch of extremities are all soft-failed or rejected.
        # Typically, we won't find any such events as extremities will rarely
        # have any descendants, but if they do then we should delete those
        # extremities.

        def _cleanup_extremities_bg_update_txn(txn: LoggingTransaction) -> int:
            # The set of extremity event IDs that we're checking this round
            original_set = set()

            # A dict[str, Set[str]] of event ID to their prev events.
            graph: Dict[str, Set[str]] = {}

            # The set of descendants of the original set that are not rejected
            # nor soft-failed. Ancestors of these events should be removed
            # from the forward extremities table.
            non_rejected_leaves = set()

            # Set of event IDs that have been soft failed, and for which we
            # should check if they have descendants which haven't been soft
            # failed.
            soft_failed_events_to_lookup = set()

            # First, we get `batch_size` events from the table, pulling out
            # their successor events, if any, and the successor events'
            # rejection status.
            txn.execute(
                """SELECT prev_event_id, event_id, internal_metadata,
                    rejections.event_id IS NOT NULL, events.outlier
                FROM (
                    SELECT event_id AS prev_event_id
                    FROM _extremities_to_check
                    LIMIT ?
                ) AS f
                LEFT JOIN event_edges USING (prev_event_id)
                LEFT JOIN events USING (event_id)
                LEFT JOIN event_json USING (event_id)
                LEFT JOIN rejections USING (event_id)
                """,
                (batch_size,),
            )

            for prev_event_id, event_id, metadata, rejected, outlier in txn:
                original_set.add(prev_event_id)

                if not event_id or outlier:
                    # Common case where the forward extremity doesn't have any
                    # descendants.
                    continue

                graph.setdefault(event_id, set()).add(prev_event_id)

                soft_failed = False
                if metadata:
                    soft_failed = db_to_json(metadata).get("soft_failed")

                if soft_failed or rejected:
                    soft_failed_events_to_lookup.add(event_id)
                else:
                    non_rejected_leaves.add(event_id)

            # Now we recursively check all the soft-failed descendants we
            # found above in the same way, until we have nothing left to
            # check.
            while soft_failed_events_to_lookup:
                # We only want to do 100 at a time, so we split given list
                # into two.
                batch = list(soft_failed_events_to_lookup)
                to_check, to_defer = batch[:100], batch[100:]
                soft_failed_events_to_lookup = set(to_defer)

                sql = """SELECT prev_event_id, event_id, internal_metadata,
                    rejections.event_id IS NOT NULL
                    FROM event_edges
                    INNER JOIN events USING (event_id)
                    INNER JOIN event_json USING (event_id)
                    LEFT JOIN rejections USING (event_id)
                    WHERE
                        NOT events.outlier
                        AND
                """
                clause, args = make_in_list_sql_clause(
                    self.database_engine, "prev_event_id", to_check
                )
                txn.execute(sql + clause, list(args))

                for prev_event_id, event_id, metadata, rejected in txn:
                    if event_id in graph:
                        # Already handled this event previously, but we still
                        # want to record the edge.
                        graph[event_id].add(prev_event_id)
                        continue

                    graph[event_id] = {prev_event_id}

                    soft_failed = db_to_json(metadata).get("soft_failed")
                    if soft_failed or rejected:
                        soft_failed_events_to_lookup.add(event_id)
                    else:
                        non_rejected_leaves.add(event_id)

            # We have a set of non-soft-failed descendants, so we recurse up
            # the graph to find all ancestors and add them to the set of event
            # IDs that we can delete from forward extremities table.
            to_delete = set()
            while non_rejected_leaves:
                event_id = non_rejected_leaves.pop()
                prev_event_ids = graph.get(event_id, set())
                non_rejected_leaves.update(prev_event_ids)
                to_delete.update(prev_event_ids)

            # Only delete IDs that were actually in this batch's extremity set.
            to_delete.intersection_update(original_set)

            deleted = self.db_pool.simple_delete_many_txn(
                txn=txn,
                table="event_forward_extremities",
                column="event_id",
                values=to_delete,
                keyvalues={},
            )

            logger.info(
                "Deleted %d forward extremities of %d checked, to clean up #5269",
                deleted,
                len(original_set),
            )

            if deleted:
                # We now need to invalidate the caches of these rooms
                rows = self.db_pool.simple_select_many_txn(
                    txn,
                    table="events",
                    column="event_id",
                    iterable=to_delete,
                    keyvalues={},
                    retcols=("room_id",),
                )
                room_ids = {row["room_id"] for row in rows}
                for room_id in room_ids:
                    txn.call_after(
                        self.get_latest_event_ids_in_room.invalidate, (room_id,)  # type: ignore[attr-defined]
                    )

            # This batch of extremities has now been checked; remove them from
            # the scratch table.
            self.db_pool.simple_delete_many_txn(
                txn=txn,
                table="_extremities_to_check",
                column="event_id",
                values=original_set,
                keyvalues={},
            )

            return len(original_set)

        num_handled = await self.db_pool.runInteraction(
            "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn
        )

        if not num_handled:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES
            )

            def _drop_table_txn(txn: LoggingTransaction) -> None:
                txn.execute("DROP TABLE _extremities_to_check")

            await self.db_pool.runInteraction(
                "_cleanup_extremities_bg_update_drop_table", _drop_table_txn
            )

        return num_handled
async def _redactions_received_ts(self, progress: JsonDict, batch_size: int) -> int:
"""Handles filling out the `received_ts` column in redactions."""
last_event_id = progress.get("last_event_id", "")
def _redactions_received_ts_txn(txn: LoggingTransaction) -> int:
# Fetch the set of event IDs that we want to update
sql = """
SELECT event_id FROM redactions
WHERE event_id > ?
ORDER BY event_id ASC
LIMIT ?
"""
txn.execute(sql, (last_event_id, batch_size))
rows = txn.fetchall()
if not rows:
return 0
(upper_event_id,) = rows[-1]
# Update the redactions with the received_ts.
#
# Note: Not all events have an associated received_ts, so we
# fallback to using origin_server_ts. If we for some reason don't
# have an origin_server_ts, lets just use the current timestamp.
#
# We don't want to leave it null, as then we'll never try and
# censor those redactions.
sql = """
UPDATE redactions
SET received_ts = (
SELECT COALESCE(received_ts, origin_server_ts, ?) FROM events
WHERE events.event_id = redactions.event_id
)
WHERE ? <= event_id AND event_id <= ?
"""
txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))
self.db_pool.updates._background_update_progress_txn(
txn, "redactions_received_ts", {"last_event_id": upper_event_id}
)
return len(rows)
count = await self.db_pool.runInteraction(
"_redactions_received_ts", _redactions_received_ts_txn
)
if not count:
await self.db_pool.updates._end_background_update("redactions_received_ts")
return count
async def _event_fix_redactions_bytes(
self, progress: JsonDict, batch_size: int
) -> int:
"""Undoes hex encoded censored redacted event JSON."""
def _event_fix_redactions_bytes_txn(txn: LoggingTransaction) -> None:
# This update is quite fast due to new index.
txn.execute(
"""
UPDATE event_json
SET
json = convert_from(json::bytea, 'utf8')
FROM redactions
WHERE
redactions.have_censored
AND event_json.event_id = redactions.redacts
AND json NOT LIKE '{%';
"""
)
txn.execute("DROP INDEX redactions_censored_redacts")
await self.db_pool.runInteraction(
"_event_fix_redactions_bytes", _event_fix_redactions_bytes_txn
)
await self.db_pool.updates._end_background_update("event_fix_redactions_bytes")
return 1
    async def _event_store_labels(self, progress: JsonDict, batch_size: int) -> int:
        """Background update handler which will store labels for existing events.

        Walks `event_json` in `event_id` order for events with no rows in
        `event_labels`, inserting one row per string label found in the event
        content. Returns the number of events handled (0 ends the update).
        """
        last_event_id = progress.get("last_event_id", "")

        def _event_store_labels_txn(txn: LoggingTransaction) -> int:
            txn.execute(
                """
                SELECT event_id, json FROM event_json
                LEFT JOIN event_labels USING (event_id)
                WHERE event_id > ? AND label IS NULL
                ORDER BY event_id LIMIT ?
                """,
                (last_event_id, batch_size),
            )

            results = list(txn)

            nbrows = 0
            last_row_event_id = ""
            for (event_id, event_json_raw) in results:
                try:
                    event_json = db_to_json(event_json_raw)

                    self.db_pool.simple_insert_many_txn(
                        txn=txn,
                        table="event_labels",
                        keys=("event_id", "label", "room_id", "topological_ordering"),
                        values=[
                            (
                                event_id,
                                label,
                                event_json["room_id"],
                                event_json["depth"],
                            )
                            # Non-string labels are silently dropped.
                            for label in event_json["content"].get(
                                EventContentFields.LABELS, []
                            )
                            if isinstance(label, str)
                        ],
                    )
                except Exception as e:
                    # Best-effort: a malformed event loses its labels but does
                    # not abort the whole batch.
                    logger.warning(
                        "Unable to load event %s (no labels will be imported): %s",
                        event_id,
                        e,
                    )

                nbrows += 1
                last_row_event_id = event_id

            self.db_pool.updates._background_update_progress_txn(
                txn, "event_store_labels", {"last_event_id": last_row_event_id}
            )

            return nbrows

        num_rows = await self.db_pool.runInteraction(
            desc="event_store_labels", func=_event_store_labels_txn
        )

        if not num_rows:
            await self.db_pool.updates._end_background_update("event_store_labels")

        return num_rows
    async def _rejected_events_metadata(self, progress: dict, batch_size: int) -> int:
        """Adds rejected events to the `state_events` and `event_auth` metadata
        tables.

        Walks `rejections` in `event_id` order, re-parsing each event's JSON
        to decide whether metadata rows are missing. Returns the number of
        rejected events examined; the update ends when a batch comes back
        smaller than `batch_size` (or empty).
        """
        last_event_id = progress.get("last_event_id", "")

        def get_rejected_events(
            txn: Cursor,
        ) -> List[Tuple[str, str, JsonDict, bool, bool]]:
            # Fetch rejected event json, their room version and whether we have
            # inserted them into the state_events or auth_events tables.
            #
            # Note we can assume that events that don't have a corresponding
            # room version are V1 rooms.
            sql = """
                SELECT DISTINCT
                    event_id,
                    COALESCE(room_version, '1'),
                    json,
                    state_events.event_id IS NOT NULL,
                    event_auth.event_id IS NOT NULL
                FROM rejections
                INNER JOIN event_json USING (event_id)
                LEFT JOIN rooms USING (room_id)
                LEFT JOIN state_events USING (event_id)
                LEFT JOIN event_auth USING (event_id)
                WHERE event_id > ?
                ORDER BY event_id
                LIMIT ?
            """

            txn.execute(
                sql,
                (
                    last_event_id,
                    batch_size,
                ),
            )

            return cast(
                List[Tuple[str, str, JsonDict, bool, bool]],
                [(row[0], row[1], db_to_json(row[2]), row[3], row[4]) for row in txn],
            )

        results = await self.db_pool.runInteraction(
            desc="_rejected_events_metadata_get", func=get_rejected_events
        )

        if not results:
            await self.db_pool.updates._end_background_update(
                "rejected_events_metadata"
            )
            return 0

        state_events = []
        auth_events = []
        for event_id, room_version, event_json, has_state, has_event_auth in results:
            last_event_id = event_id

            if has_state and has_event_auth:
                continue

            room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version)
            if not room_version_obj:
                # We no longer support this room version, so we just ignore the
                # events entirely.
                logger.info(
                    "Ignoring event with unknown room version %r: %r",
                    room_version,
                    event_id,
                )
                continue

            event = make_event_from_dict(event_json, room_version_obj)

            if not event.is_state():
                continue

            if not has_state:
                state_events.append(
                    (event.event_id, event.room_id, event.type, event.state_key)
                )

            if not has_event_auth:
                # Old, dodgy, events may have duplicate auth events, which we
                # need to deduplicate as we have a unique constraint.
                for auth_id in set(event.auth_event_ids()):
                    auth_events.append((event.event_id, event.room_id, auth_id))

        if state_events:
            await self.db_pool.simple_insert_many(
                table="state_events",
                keys=("event_id", "room_id", "type", "state_key"),
                values=state_events,
                desc="_rejected_events_metadata_state_events",
            )

        if auth_events:
            await self.db_pool.simple_insert_many(
                table="event_auth",
                keys=("event_id", "room_id", "auth_id"),
                values=auth_events,
                desc="_rejected_events_metadata_event_auth",
            )

        await self.db_pool.updates._background_update_progress(
            "rejected_events_metadata", {"last_event_id": last_event_id}
        )

        if len(results) < batch_size:
            await self.db_pool.updates._end_background_update(
                "rejected_events_metadata"
            )

        return len(results)
    async def _chain_cover_index(self, progress: dict, batch_size: int) -> int:
        """A background update that iterates over all rooms and generates the
        chain cover index for them.

        Delegates the per-batch work to `_calculate_chain_cover_txn`, flips
        `rooms.has_auth_chain_index` for rooms it has fully processed, and
        returns the total number of rows processed (0 ends the update).
        """
        current_room_id = progress.get("current_room_id", "")

        # Where we've processed up to in the room, defaults to the start of the
        # room.
        last_depth = progress.get("last_depth", -1)
        last_stream = progress.get("last_stream", -1)

        result = await self.db_pool.runInteraction(
            "_chain_cover_index",
            self._calculate_chain_cover_txn,
            current_room_id,
            last_depth,
            last_stream,
            batch_size,
            single_room=False,
        )

        finished = result.processed_count == 0

        total_rows_processed = result.processed_count
        current_room_id = result.room_id
        last_depth = result.depth
        last_stream = result.stream

        for room_id, (depth, stream) in result.finished_room_map.items():
            # If we've done all the events in the room we flip the
            # `has_auth_chain_index` in the DB. Note that its possible for
            # further events to be persisted between the above and setting the
            # flag without having the chain cover calculated for them. This is
            # fine as a) the code gracefully handles these cases and b) we'll
            # calculate them below.

            await self.db_pool.simple_update(
                table="rooms",
                keyvalues={"room_id": room_id},
                updatevalues={"has_auth_chain_index": True},
                desc="_chain_cover_index",
            )

            # Handle any events that might have raced with us flipping the
            # bit above.
            result = await self.db_pool.runInteraction(
                "_chain_cover_index",
                self._calculate_chain_cover_txn,
                room_id,
                depth,
                stream,
                batch_size=None,
                single_room=True,
            )

            total_rows_processed += result.processed_count

        if finished:
            await self.db_pool.updates._end_background_update("chain_cover")
            return total_rows_processed

        await self.db_pool.updates._background_update_progress(
            "chain_cover",
            {
                "current_room_id": current_room_id,
                "last_depth": last_depth,
                "last_stream": last_stream,
            },
        )

        return total_rows_processed
    def _calculate_chain_cover_txn(
        self,
        txn: LoggingTransaction,
        last_room_id: str,
        last_depth: int,
        last_stream: int,
        batch_size: Optional[int],
        single_room: bool,
    ) -> _CalculateChainCover:
        """Calculate the chain cover for `batch_size` events, ordered by
        `(room_id, depth, stream)`.

        Args:
            txn,
            last_room_id, last_depth, last_stream: The `(room_id, depth, stream)`
                tuple to fetch results after.
            batch_size: The maximum number of events to process. If None then
                no limit.
            single_room: Whether to calculate the index for just the given
                room.
        """

        # Get the next set of events in the room (that we haven't already
        # computed chain cover for). We do this in topological order.

        # We want to do a `(topological_ordering, stream_ordering) > (?,?)`
        # comparison, but that is not supported on older SQLite versions
        tuple_clause, tuple_args = make_tuple_comparison_clause(
            [
                ("events.room_id", last_room_id),
                ("topological_ordering", last_depth),
                ("stream_ordering", last_stream),
            ],
        )

        extra_clause = ""
        if single_room:
            extra_clause = "AND events.room_id = ?"
            tuple_args.append(last_room_id)

        # Only state events are eligible; events already in `event_auth_chains`
        # or queued in `event_auth_chain_to_calculate` are skipped.
        sql = """
            SELECT
                event_id, state_events.type, state_events.state_key,
                topological_ordering, stream_ordering,
                events.room_id
            FROM events
            INNER JOIN state_events USING (event_id)
            LEFT JOIN event_auth_chains USING (event_id)
            LEFT JOIN event_auth_chain_to_calculate USING (event_id)
            WHERE event_auth_chains.event_id IS NULL
                AND event_auth_chain_to_calculate.event_id IS NULL
                AND %(tuple_cmp)s
                %(extra)s
            ORDER BY events.room_id, topological_ordering, stream_ordering
            %(limit)s
        """ % {
            "tuple_cmp": tuple_clause,
            "limit": "LIMIT ?" if batch_size is not None else "",
            "extra": extra_clause,
        }

        if batch_size is not None:
            tuple_args.append(batch_size)

        txn.execute(sql, tuple_args)

        rows = txn.fetchall()

        # Put the results in the necessary format for
        # `_add_chain_cover_index`
        event_to_room_id = {row[0]: row[5] for row in rows}
        event_to_types = {row[0]: (row[1], row[2]) for row in rows}

        # Calculate the new last position we've processed up to.
        new_last_depth: int = rows[-1][3] if rows else last_depth
        new_last_stream: int = rows[-1][4] if rows else last_stream
        new_last_room_id: str = rows[-1][5] if rows else ""

        # Map from room_id to last depth/stream_ordering processed for the room,
        # excluding the last room (which we're likely still processing). We also
        # need to include the room passed in if it's not included in the result
        # set (as we then know we've processed all events in said room).
        #
        # This is the set of rooms that we can now safely flip the
        # `has_auth_chain_index` bit for.
        finished_rooms = {
            row[5]: (row[3], row[4]) for row in rows if row[5] != new_last_room_id
        }
        if last_room_id not in finished_rooms and last_room_id != new_last_room_id:
            finished_rooms[last_room_id] = (last_depth, last_stream)

        count = len(rows)

        # We also need to fetch the auth events for them.
        auth_events = self.db_pool.simple_select_many_txn(
            txn,
            table="event_auth",
            column="event_id",
            iterable=event_to_room_id,
            keyvalues={},
            retcols=("event_id", "auth_id"),
        )

        # Group auth event IDs by the event they authorise.
        event_to_auth_chain: Dict[str, List[str]] = {}
        for row in auth_events:
            event_to_auth_chain.setdefault(row["event_id"], []).append(row["auth_id"])

        # Calculate and persist the chain cover index for this set of events.
        #
        # Annoyingly we need to gut wrench into the persist event store so that
        # we can reuse the function to calculate the chain cover for rooms.
        PersistEventsStore._add_chain_cover_index(
            txn,
            self.db_pool,
            self.event_chain_id_gen,  # type: ignore[attr-defined]
            event_to_room_id,
            event_to_types,
            cast(Dict[str, Sequence[str]], event_to_auth_chain),
        )

        return _CalculateChainCover(
            room_id=new_last_room_id,
            depth=new_last_depth,
            stream=new_last_stream,
            processed_count=count,
            finished_room_map=finished_rooms,
        )
    async def _purged_chain_cover_index(self, progress: dict, batch_size: int) -> int:
        """
        A background updates that iterates over the chain cover and deletes the
        chain cover for events that have been purged.

        This may be due to fully purging a room or via setting a retention policy.

        Args:
            progress: Serialised progress; may contain `current_event_id`.
            batch_size: Maximum number of `event_auth_chains` rows to scan.

        Returns:
            The number of rows scanned (0 ends the background update).
        """
        current_event_id = progress.get("current_event_id", "")

        def purged_chain_cover_txn(txn: LoggingTransaction) -> int:
            # The event ID from events will be null if the chain ID / sequence
            # number points to a purged event.
            sql = """
                SELECT event_id, chain_id, sequence_number, e.event_id IS NOT NULL
                FROM event_auth_chains
                LEFT JOIN events AS e USING (event_id)
                WHERE event_id > ? ORDER BY event_auth_chains.event_id ASC LIMIT ?
            """
            txn.execute(sql, (current_event_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            # The event IDs and chain IDs / sequence numbers where the event has
            # been purged.
            unreferenced_event_ids = []
            unreferenced_chain_id_tuples = []
            # After the loop, `event_id` holds the last scanned row's ID, which
            # is used as the resume point for the next batch.
            event_id = ""
            for event_id, chain_id, sequence_number, has_event in rows:
                if not has_event:
                    unreferenced_event_ids.append((event_id,))
                    unreferenced_chain_id_tuples.append((chain_id, sequence_number))

            # Delete the unreferenced auth chains from event_auth_chain_links and
            # event_auth_chains.
            txn.executemany(
                """
                DELETE FROM event_auth_chains WHERE event_id = ?
                """,
                unreferenced_event_ids,
            )
            # We should also delete matching target_*, but there is no index on
            # target_chain_id. Hopefully any purged events are due to a room
            # being fully purged and they will be removed from the origin_*
            # searches.
            txn.executemany(
                """
                DELETE FROM event_auth_chain_links WHERE
                origin_chain_id = ? AND origin_sequence_number = ?
                """,
                unreferenced_chain_id_tuples,
            )

            progress = {
                "current_event_id": event_id,
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, "purged_chain_cover", progress
            )

            return len(rows)

        result = await self.db_pool.runInteraction(
            "_purged_chain_cover_index",
            purged_chain_cover_txn,
        )

        if not result:
            await self.db_pool.updates._end_background_update("purged_chain_cover")

        return result
    async def _event_arbitrary_relations(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Background update handler which will store previously unknown relations for existing events.

        Scans `event_json` in event ID order and backfills `event_relations`
        rows for relation types that were not recognised when the events were
        originally persisted.
        """
        last_event_id = progress.get("last_event_id", "")

        def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int:
            # Fetch events and then filter based on whether the event has a
            # relation or not.
            txn.execute(
                """
                SELECT event_id, json FROM event_json
                WHERE event_id > ?
                ORDER BY event_id LIMIT ?
                """,
                (last_event_id, batch_size),
            )

            results = list(txn)
            # (event_id, parent_id, rel_type) for each relation
            relations_to_insert: List[Tuple[str, str, str]] = []
            for (event_id, event_json_raw) in results:
                try:
                    event_json = db_to_json(event_json_raw)
                except Exception as e:
                    # Corrupt/undecodable JSON: log and move on rather than
                    # wedging the whole background update.
                    logger.warning(
                        "Unable to load event %s (no relations will be updated): %s",
                        event_id,
                        e,
                    )
                    continue

                # If there's no relation, skip!
                relates_to = event_json["content"].get("m.relates_to")
                if not relates_to or not isinstance(relates_to, dict):
                    continue

                # If the relation type or parent event ID is not a string, skip it.
                #
                # Do not consider relation types that have existed for a long time,
                # since they will already be listed in the `event_relations` table.
                rel_type = relates_to.get("rel_type")
                if not isinstance(rel_type, str) or rel_type in (
                    RelationTypes.ANNOTATION,
                    RelationTypes.REFERENCE,
                    RelationTypes.REPLACE,
                ):
                    continue

                parent_id = relates_to.get("event_id")
                if not isinstance(parent_id, str):
                    continue

                relations_to_insert.append((event_id, parent_id, rel_type))

            # Insert the missing data, note that we upsert here in case the event
            # has already been processed.
            if relations_to_insert:
                self.db_pool.simple_upsert_many_txn(
                    txn=txn,
                    table="event_relations",
                    key_names=("event_id",),
                    key_values=[(r[0],) for r in relations_to_insert],
                    value_names=("relates_to_id", "relation_type"),
                    value_values=[r[1:] for r in relations_to_insert],
                )

                # Iterate the parent IDs and invalidate caches.
                for parent_id in {r[1] for r in relations_to_insert}:
                    cache_tuple = (parent_id,)
                    self._invalidate_cache_and_stream(  # type: ignore[attr-defined]
                        txn, self.get_relations_for_event, cache_tuple  # type: ignore[attr-defined]
                    )
                    self._invalidate_cache_and_stream(  # type: ignore[attr-defined]
                        txn, self.get_aggregation_groups_for_event, cache_tuple  # type: ignore[attr-defined]
                    )
                    self._invalidate_cache_and_stream(  # type: ignore[attr-defined]
                        txn, self.get_thread_summary, cache_tuple  # type: ignore[attr-defined]
                    )

            if results:
                # Record the last event ID scanned (even if it had no
                # relation) so the next batch resumes after it.
                latest_event_id = results[-1][0]
                self.db_pool.updates._background_update_progress_txn(
                    txn, "event_arbitrary_relations", {"last_event_id": latest_event_id}
                )

            return len(results)

        num_rows = await self.db_pool.runInteraction(
            desc="event_arbitrary_relations", func=_event_arbitrary_relations_txn
        )
        if not num_rows:
            await self.db_pool.updates._end_background_update(
                "event_arbitrary_relations"
            )
        return num_rows
    async def _background_populate_stream_ordering2(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Populate events.stream_ordering2, then replace stream_ordering

        This is to deal with the fact that stream_ordering was initially created as a
        32-bit integer field.
        """
        # Guard against a zero/negative batch size, which would make the
        # LIMIT clause a no-op and the update never terminate.
        batch_size = max(batch_size, 1)

        def process(txn: LoggingTransaction) -> int:
            # Default resume point is below the smallest possible 32-bit value,
            # so the first run starts at the beginning of the table.
            last_stream = progress.get("last_stream", -(1 << 31))
            txn.execute(
                """
                UPDATE events SET stream_ordering2=stream_ordering
                WHERE stream_ordering IN (
                   SELECT stream_ordering FROM events WHERE stream_ordering > ?
                   ORDER BY stream_ordering LIMIT ?
                )
                RETURNING stream_ordering;
                """,
                (last_stream, batch_size),
            )
            row_count = txn.rowcount
            if row_count == 0:
                return 0
            # RETURNING gives us the updated orderings; the max is the new
            # resume point.
            last_stream = max(row[0] for row in txn)
            logger.info("populated stream_ordering2 up to %i", last_stream)

            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
                {"last_stream": last_stream},
            )
            return row_count

        result = await self.db_pool.runInteraction(
            "_background_populate_stream_ordering2", process
        )

        if result != 0:
            return result

        # Nothing left to copy: mark the update as complete.
        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.POPULATE_STREAM_ORDERING2
        )
        return 0
    async def _background_replace_stream_ordering_column(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Drop the old 'stream_ordering' column and rename 'stream_ordering2' into its place."""

        def process(txn: Cursor) -> None:
            # Run the pre-baked DDL statements that swap the columns over.
            for sql in _REPLACE_STREAM_ORDERING_SQL_COMMANDS:
                logger.info("completing stream_ordering migration: %s", sql)
                txn.execute(sql)

        # ANALYZE the new column to build stats on it, to encourage PostgreSQL to use the
        # indexes on it.
        # We need to pass execute a dummy function to handle the txn's result otherwise
        # it tries to call fetchall() on it and fails because there's no result to fetch.
        await self.db_pool.execute(
            "background_analyze_new_stream_ordering_column",
            lambda txn: None,
            "ANALYZE events(stream_ordering2)",
        )

        await self.db_pool.runInteraction(
            "_background_replace_stream_ordering_column", process
        )

        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN
        )

        return 0
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for exploration-related statistics."""
__author__ = 'Sean Lip'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_jobs
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
import feconf
# Categories of improvement suggestions surfaced by get_state_improvements().
IMPROVE_TYPE_DEFAULT = 'default'
IMPROVE_TYPE_INCOMPLETE = 'incomplete'
def get_top_unresolved_answers_for_default_rule(exploration_id, state_name):
    """Returns a dict mapping each of the top three answers submitted to the
    default rule of the given state to its submission count.
    """
    answer_log = stats_domain.StateRuleAnswerLog.get(
        exploration_id, state_name, exp_domain.DEFAULT_RULESPEC_STR)
    return dict(answer_log.get_top_answers(3))
def get_state_rules_stats(exploration_id, state_name):
    """Gets statistics for the answer groups and rules of this state.

    Returns:
        A dict, keyed by the string '{HANDLER_NAME}.{RULE_STR}', whose
        values are dicts with the top answers and total hit count drawn
        from the corresponding stats_domain.StateRuleAnswerLog instances.
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    state = exploration.states[state_name]

    # TODO(bhenning): Everything is handler name submit; therefore, it is
    # pointless and should be removed.
    _OLD_SUBMIT_HANDLER_NAME = 'submit'
    rule_keys = [
        (_OLD_SUBMIT_HANDLER_NAME, rule.stringify_classified_rule())
        for group in state.interaction.answer_groups
        for rule in group.rule_specs]
    if state.interaction.default_outcome:
        rule_keys.append(
            (_OLD_SUBMIT_HANDLER_NAME, exp_domain.DEFAULT_RULESPEC_STR))

    answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id,
        [{'state_name': state_name, 'rule_str': key[1]}
         for key in rule_keys])

    return {
        '.'.join(key): {
            'answers': answer_log.get_top_answers(5),
            'rule_hits': answer_log.total_answer_count,
        }
        for key, answer_log in zip(rule_keys, answer_logs)
    }
def get_top_state_rule_answers(
        exploration_id, state_name, rule_str_list, top_answer_count_per_rule):
    """Returns a list of top answers (by submission frequency) submitted to the
    given state in the given exploration which were mapped to any of the rules
    listed in 'rule_str_list'. At most len(rule_str_list) multiplied by
    top_answer_count_per_rule answers are returned.
    """
    answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id,
        [{'state_name': state_name, 'rule_str': rule_str}
         for rule_str in rule_str_list])
    return [
        {'value': value, 'count': count}
        for answer_log in answer_logs
        for (value, count) in answer_log.get_top_answers(
            top_answer_count_per_rule)
    ]
def get_state_improvements(exploration_id, exploration_version):
    """Returns a list of dicts, each representing a suggestion for improvement
    to a particular state.

    Each dict has keys 'rank', 'state_name' and 'type'; the list is ordered
    by descending rank.
    """
    ranked_states = []

    exploration = exp_services.get_exploration_by_id(exploration_id)
    state_names = exploration.states.keys()

    # One answer log per state, in the same order as state_names; the
    # enumerate() below relies on this index alignment.
    default_rule_answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'rule_str': exp_domain.DEFAULT_RULESPEC_STR
        } for state_name in state_names])

    statistics = stats_jobs.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)
    state_hit_counts = statistics['state_hit_counts']

    for ind, state_name in enumerate(state_names):
        total_entry_count = 0
        no_answer_submitted_count = 0
        if state_name in state_hit_counts:
            total_entry_count = (
                state_hit_counts[state_name]['total_entry_count'])
            no_answer_submitted_count = state_hit_counts[state_name].get(
                'no_answer_count', 0)

        # States that were never entered can't be ranked.
        if total_entry_count == 0:
            continue

        # A state is flagged when more than 20% of entries hit the problem.
        threshold = 0.2 * total_entry_count
        default_rule_answer_log = default_rule_answer_logs[ind]
        default_count = default_rule_answer_log.total_answer_count

        eligible_flags = []

        state = exploration.states[state_name]
        # Flag states whose default outcome self-loops: many answers falling
        # through to the default rule suggests the rules don't cover enough.
        if (default_count > threshold and
                state.interaction.default_outcome is not None and
                state.interaction.default_outcome.dest == state_name):
            eligible_flags.append({
                'rank': default_count,
                'improve_type': IMPROVE_TYPE_DEFAULT})
        # Flag states that many learners leave without submitting an answer.
        if no_answer_submitted_count > threshold:
            eligible_flags.append({
                'rank': no_answer_submitted_count,
                'improve_type': IMPROVE_TYPE_INCOMPLETE})
        if eligible_flags:
            # Keep only the highest-ranked flag for this state.
            eligible_flags = sorted(
                eligible_flags, key=lambda flag: flag['rank'], reverse=True)
            ranked_states.append({
                'rank': eligible_flags[0]['rank'],
                'state_name': state_name,
                'type': eligible_flags[0]['improve_type'],
            })

    return sorted(
        [state for state in ranked_states if state['rank'] != 0],
        key=lambda x: -x['rank'])
def get_versions_for_exploration_stats(exploration_id):
    """Returns list of versions for this exploration."""
    annotations_model = stats_models.ExplorationAnnotationsModel
    return annotations_model.get_versions(exploration_id)
def get_exploration_stats(exploration_id, exploration_version):
    """Returns a dict with state statistics for the given exploration id.

    Note that exploration_version should be a string.
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)

    exp_stats = stats_jobs.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)

    state_hit_counts = exp_stats['state_hit_counts']

    # Build per-state entry counts, defaulting to zero for states that
    # have never been hit.
    state_stats = {}
    for state_name in exploration.states:
        if state_name in state_hit_counts:
            hit_counts = state_hit_counts[state_name]
            first_entry_count = hit_counts['first_entry_count']
            total_entry_count = hit_counts['total_entry_count']
        else:
            first_entry_count = 0
            total_entry_count = 0
        state_stats[state_name] = {
            'name': state_name,
            'firstEntryCount': first_entry_count,
            'totalEntryCount': total_entry_count,
        }

    return {
        'improvements': get_state_improvements(
            exploration_id, exploration_version),
        'last_updated': exp_stats['last_updated'],
        'num_completions': exp_stats['complete_exploration_count'],
        'num_starts': exp_stats['start_exploration_count'],
        'state_stats': state_stats,
    }
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for converting proto to other formats, such as JSON."""
import base64
import collections
import datetime
import json
import logging
import os
import sys
import six
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import protojson
from apitools.base.py import exceptions
__all__ = [
'CopyProtoMessage',
'JsonToMessage',
'MessageToJson',
'DictToMessage',
'MessageToDict',
'PyValueToMessage',
'MessageToPyValue',
'MessageToRepr',
'GetCustomJsonFieldMapping',
'AddCustomJsonFieldMapping',
'GetCustomJsonEnumMapping',
'AddCustomJsonEnumMapping',
]
# Internal record pairing an encoder callable with its matching decoder.
_Codec = collections.namedtuple('_Codec', ['encoder', 'decoder'])

# Result of one codec step: `value` is the (possibly transformed) data and
# `complete` indicates whether further encoding/decoding should be skipped.
CodecResult = collections.namedtuple('CodecResult', ['value', 'complete'])


# Registries populated by the Register* decorators below.
# TODO(craigcitro): Make these non-global.
_UNRECOGNIZED_FIELD_MAPPINGS = {}
_CUSTOM_MESSAGE_CODECS = {}
_CUSTOM_FIELD_CODECS = {}
_FIELD_TYPE_CODECS = {}
def MapUnrecognizedFields(field_name):
    """Register field_name as a container for unrecognized fields."""
    def _Register(cls):
        _UNRECOGNIZED_FIELD_MAPPINGS[cls] = field_name
        return cls
    return _Register
def RegisterCustomMessageCodec(encoder, decoder):
    """Register a custom encoder/decoder for this message class."""
    codec = _Codec(encoder=encoder, decoder=decoder)

    def _Register(cls):
        _CUSTOM_MESSAGE_CODECS[cls] = codec
        return cls
    return _Register
def RegisterCustomFieldCodec(encoder, decoder):
    """Register a custom encoder/decoder for this field."""
    codec = _Codec(encoder=encoder, decoder=decoder)

    def _Register(field):
        _CUSTOM_FIELD_CODECS[field] = codec
        return field
    return _Register
def RegisterFieldTypeCodec(encoder, decoder):
    """Register a custom encoder/decoder for all fields of this type."""
    codec = _Codec(encoder=encoder, decoder=decoder)

    def _Register(field_type):
        _FIELD_TYPE_CODECS[field_type] = codec
        return field_type
    return _Register
# TODO(craigcitro): Delete this function with the switch to proto2.
def CopyProtoMessage(message):
    """Return a copy of message by round-tripping it through JSON."""
    json_codec = protojson.ProtoJson()
    encoded = json_codec.encode_message(message)
    return json_codec.decode_message(type(message), encoded)
def MessageToJson(message, include_fields=None):
    """Convert the given message to JSON."""
    encoded = _ProtoJsonApiTools.Get().encode_message(message)
    return _IncludeFields(encoded, message, include_fields)
def JsonToMessage(message_type, message):
    """Convert the given JSON to a message of type message_type."""
    decoder = _ProtoJsonApiTools.Get()
    return decoder.decode_message(message_type, message)
# TODO(craigcitro): Do this directly, instead of via JSON.
def DictToMessage(d, message_type):
    """Convert the given dictionary to a message of type message_type."""
    as_json = json.dumps(d)
    return JsonToMessage(message_type, as_json)
def MessageToDict(message):
    """Convert the given message to a dictionary."""
    as_json = MessageToJson(message)
    return json.loads(as_json)
def PyValueToMessage(message_type, value):
    """Convert the given python value to a message of type message_type."""
    as_json = json.dumps(value)
    return JsonToMessage(message_type, as_json)
def MessageToPyValue(message):
    """Convert the given message to a python value."""
    as_json = MessageToJson(message)
    return json.loads(as_json)
def MessageToRepr(msg, multiline=False, **kwargs):
    """Return a repr-style string for a protorpc message.

    protorpc.Message.__repr__ does not return anything that could be considered
    python code. Adding this function lets us print a protorpc message in such
    a way that it could be pasted into code later, and used to compare against
    other things.

    Args:
      msg: protorpc.Message, the message to be repr'd.
      multiline: bool, True if the returned string should have each field
          assignment on its own line.
      **kwargs: {str:str}, Additional flags for how to format the string.

    Known **kwargs:
      shortstrings: bool, True if all string values should be
          truncated at 100 characters, since when mocking the contents
          typically don't matter except for IDs, and IDs are usually
          less than 100 characters.
      no_modules: bool, True if the long module name should not be printed with
          each type.

    Returns:
      str, A string of valid python (assuming the right imports have been made)
      that recreates the message passed into this function.

    """
    # TODO(user): craigcitro suggests a pretty-printer from apitools/gen.

    # 'indent' is an internal kwarg used for the recursive calls; it tracks
    # the current indentation depth for multiline output.
    indent = kwargs.get('indent', 0)

    def IndentKwargs(kwargs):
        # Return a copy of kwargs with the indent bumped one level deeper,
        # for recursive calls on nested values.
        kwargs = dict(kwargs)
        kwargs['indent'] = kwargs.get('indent', 0) + 4
        return kwargs

    if isinstance(msg, list):
        # Lists are rendered element-by-element, recursing on each item.
        s = '['
        for item in msg:
            if multiline:
                s += '\n' + ' ' * (indent + 4)
            s += MessageToRepr(
                item, multiline=multiline, **IndentKwargs(kwargs)) + ','
        if multiline:
            s += '\n' + ' ' * indent
        s += ']'
        return s

    if isinstance(msg, messages.Message):
        # Messages render as ClassName(field=..., ...), with fields in
        # sorted-name order so output is deterministic.
        s = type(msg).__name__ + '('
        if not kwargs.get('no_modules'):
            s = msg.__module__ + '.' + s
        names = sorted([field.name for field in msg.all_fields()])
        for name in names:
            field = msg.field_by_name(name)
            if multiline:
                s += '\n' + ' ' * (indent + 4)
            value = getattr(msg, field.name)
            s += field.name + '=' + MessageToRepr(
                value, multiline=multiline, **IndentKwargs(kwargs)) + ','
        if multiline:
            s += '\n' + ' ' * indent
        s += ')'
        return s

    if isinstance(msg, six.string_types):
        # Note this branch only truncates; it deliberately falls through to
        # the final repr() call below.
        if kwargs.get('shortstrings') and len(msg) > 100:
            msg = msg[:100]

    if isinstance(msg, datetime.datetime):

        # Replace the tzinfo with one whose repr() is valid python code
        # (protorpc's TimeZoneOffset), so the datetime round-trips.
        class SpecialTZInfo(datetime.tzinfo):

            def __init__(self, offset):
                super(SpecialTZInfo, self).__init__()
                self.offset = offset

            def __repr__(self):
                s = 'TimeZoneOffset(' + repr(self.offset) + ')'
                if not kwargs.get('no_modules'):
                    s = 'apitools.base.protorpclite.util.' + s
                return s

        msg = datetime.datetime(
            msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second,
            msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))

    # Fallback: scalars (and the possibly-truncated string / rebuilt
    # datetime) are rendered with the builtin repr.
    return repr(msg)
def _GetField(message, field_path):
for field in field_path:
if field not in dir(message):
raise KeyError('no field "%s"' % field)
message = getattr(message, field)
return message
def _SetField(dictblob, field_path, value):
for field in field_path[:-1]:
dictblob = dictblob.setdefault(field, {})
dictblob[field_path[-1]] = value
def _IncludeFields(encoded_message, message, include_fields):
    """Add the requested fields to the encoded message."""
    if include_fields is None:
        return encoded_message
    result = json.loads(encoded_message)
    for field_name in include_fields:
        path = field_name.split('.')
        try:
            value = _GetField(message, path)
        except KeyError:
            raise exceptions.InvalidDataError(
                'No field named %s in message of type %s' % (
                    field_name, type(message)))
        # Included-but-unset fields are emitted as null (or [] for repeated
        # fields) rather than their actual value.
        nullvalue = [] if isinstance(value, list) else None
        _SetField(result, path, nullvalue)
    return json.dumps(result)
def _GetFieldCodecs(field, attr):
    """Return the registered codec callables (attr is 'encoder' or
    'decoder') that apply to this field, per-field first, then per-type.
    """
    per_field = _CUSTOM_FIELD_CODECS.get(field)
    per_type = _FIELD_TYPE_CODECS.get(type(field))
    candidates = (
        getattr(per_field, attr, None),
        getattr(per_type, attr, None),
    )
    return [codec for codec in candidates if codec is not None]
class _ProtoJsonApiTools(protojson.ProtoJson):

    """JSON encoder used by apitools clients.

    Extends the stock protojson codec with custom message/field codecs,
    custom JSON field and enum name mappings, and preservation of fields
    that the message type does not declare.
    """
    # Lazily-created singleton, accessed via Get().
    _INSTANCE = None

    @classmethod
    def Get(cls):
        if cls._INSTANCE is None:
            cls._INSTANCE = cls()
        return cls._INSTANCE

    def decode_message(self, message_type, encoded_message):
        # A registered whole-message codec bypasses the generic path.
        if message_type in _CUSTOM_MESSAGE_CODECS:
            return _CUSTOM_MESSAGE_CODECS[
                message_type].decoder(encoded_message)
        # We turn off the default logging in protorpc. We may want to
        # remove this later.
        old_level = logging.getLogger().level
        logging.getLogger().setLevel(logging.ERROR)
        try:
            result = _DecodeCustomFieldNames(message_type, encoded_message)
            result = super(_ProtoJsonApiTools, self).decode_message(
                message_type, result)
        finally:
            # Always restore the logging level, even if decoding raised.
            logging.getLogger().setLevel(old_level)
        # Post-process to keep enum values, messages and fields that the
        # declared schema doesn't know about.
        result = _ProcessUnknownEnums(result, encoded_message)
        result = _ProcessUnknownMessages(result, encoded_message)
        return _DecodeUnknownFields(result, encoded_message)

    def decode_field(self, field, value):
        """Decode the given JSON value.

        Args:
          field: a messages.Field for the field we're decoding.
          value: a python value we'd like to decode.

        Returns:
          A value suitable for assignment to field.
        """
        # Registered codecs run first; a "complete" result short-circuits.
        for decoder in _GetFieldCodecs(field, 'decoder'):
            result = decoder(field, value)
            value = result.value
            if result.complete:
                return value
        if isinstance(field, messages.MessageField):
            field_value = self.decode_message(
                field.message_type, json.dumps(value))
        elif isinstance(field, messages.EnumField):
            # Map a custom JSON enum name back to its python name, if any.
            value = GetCustomJsonEnumMapping(
                field.type, json_name=value) or value
            try:
                field_value = super(
                    _ProtoJsonApiTools, self).decode_field(field, value)
            except messages.DecodeError:
                # Unknown string enum values are tolerated (stored as
                # unrecognized fields elsewhere); anything else re-raises.
                if not isinstance(value, six.string_types):
                    raise
                field_value = None
        else:
            field_value = super(
                _ProtoJsonApiTools, self).decode_field(field, value)
        return field_value

    def encode_message(self, message):
        # FieldList values are encoded element-wise into a JSON array.
        if isinstance(message, messages.FieldList):
            return '[%s]' % (', '.join(self.encode_message(x)
                                       for x in message))

        # pylint: disable=unidiomatic-typecheck
        if type(message) in _CUSTOM_MESSAGE_CODECS:
            return _CUSTOM_MESSAGE_CODECS[type(message)].encoder(message)

        message = _EncodeUnknownFields(message)
        result = super(_ProtoJsonApiTools, self).encode_message(message)
        result = _EncodeCustomFieldNames(message, result)
        # Re-serialise with sorted keys for deterministic output.
        return json.dumps(json.loads(result), sort_keys=True)

    def encode_field(self, field, value):
        """Encode the given value as JSON.

        Args:
          field: a messages.Field for the field we're encoding.
          value: a value for field.

        Returns:
          A python value suitable for json.dumps.
        """
        for encoder in _GetFieldCodecs(field, 'encoder'):
            result = encoder(field, value)
            value = result.value
            if result.complete:
                return value
        if isinstance(field, messages.EnumField):
            # Apply any custom python-name -> JSON-name enum mapping.
            if field.repeated:
                remapped_value = [GetCustomJsonEnumMapping(
                    field.type, python_name=e.name) or e.name for e in value]
            else:
                remapped_value = GetCustomJsonEnumMapping(
                    field.type, python_name=value.name)
            if remapped_value:
                return remapped_value
        if (isinstance(field, messages.MessageField) and
                not isinstance(field, message_types.DateTimeField)):
            value = json.loads(self.encode_message(value))
        return super(_ProtoJsonApiTools, self).encode_field(field, value)
# TODO(craigcitro): Fold this and _IncludeFields in as codecs.
def _DecodeUnknownFields(message, encoded_message):
    """Rewrite unknown fields in message into message.destination.

    The destination field must be registered via MapUnrecognizedFields and
    must be a message field whose type holds key/value pairs.
    """
    destination = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
    if destination is None:
        # No mapping registered for this message class: nothing to do.
        return message
    pair_field = message.field_by_name(destination)
    if not isinstance(pair_field, messages.MessageField):
        raise exceptions.InvalidDataFromServerError(
            'Unrecognized fields must be mapped to a compound '
            'message type.')
    pair_type = pair_field.message_type
    # TODO(craigcitro): Add more error checking around the pair
    # type being exactly what we suspect (field names, etc).
    if isinstance(pair_type.value, messages.MessageField):
        new_values = _DecodeUnknownMessages(
            message, json.loads(encoded_message), pair_type)
    else:
        new_values = _DecodeUnrecognizedFields(message, pair_type)
    setattr(message, destination, new_values)
    # We could probably get away with not setting this, but
    # why not clear it?
    setattr(message, '_Message__unrecognized_fields', {})
    return message
def _DecodeUnknownMessages(message, encoded_message, pair_type):
    """Process unknown fields in encoded_message of a message type.

    Returns a list of pair_type instances, one per JSON key that does not
    correspond to a declared field of message.
    """
    field_type = pair_type.value.type
    new_values = []
    all_field_names = [x.name for x in message.all_fields()]
    for name, value_dict in six.iteritems(encoded_message):
        if name in all_field_names:
            # Declared fields were already decoded normally; skip them.
            continue
        value = PyValueToMessage(field_type, value_dict)
        if pair_type.value.repeated:
            # Coerce the decoded value into a message list for repeated
            # pair values (helper defined elsewhere in this module).
            value = _AsMessageList(value)
        new_pair = pair_type(key=name, value=value)
        new_values.append(new_pair)
    return new_values
def _DecodeUnrecognizedFields(message, pair_type):
    """Process unrecognized fields in message.

    Converts every unrecognized field recorded on message into a pair_type
    (key/value) instance and returns them as a list.
    """
    new_values = []
    for unknown_field in message.all_unrecognized_fields():
        # TODO(craigcitro): Consider validating the variant if
        # the assignment below doesn't take care of it. It may
        # also be necessary to check it in the case that the
        # type has multiple encodings.
        value, _ = message.get_unrecognized_field_info(unknown_field)
        value_type = pair_type.field_by_name('value')
        if isinstance(value_type, messages.MessageField):
            decoded_value = DictToMessage(value, pair_type.value.message_type)
        else:
            # Plain scalar value: decode with the stock protojson codec.
            decoded_value = protojson.ProtoJson().decode_field(
                pair_type.value, value)
        new_pair = pair_type(key=str(unknown_field), value=decoded_value)
        new_values.append(new_pair)
    return new_values
def _EncodeUnknownFields(message):
    """Remap unknown fields in message out of message.source.

    Inverse of _DecodeUnknownFields: moves the key/value pairs stored in
    the registered source field back into protorpc's unrecognized-field
    storage on a copy of message, so they are serialised as top-level
    JSON keys.
    """
    source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
    if source is None:
        return message
    # Work on a copy so the caller's message is left untouched.
    result = CopyProtoMessage(message)
    pairs_field = message.field_by_name(source)
    if not isinstance(pairs_field, messages.MessageField):
        raise exceptions.InvalidUserInputError(
            'Invalid pairs field %s' % pairs_field)
    pairs_type = pairs_field.message_type
    value_variant = pairs_type.field_by_name('value').variant
    pairs = getattr(message, source)
    for pair in pairs:
        if value_variant == messages.Variant.MESSAGE:
            encoded_value = MessageToDict(pair.value)
        else:
            encoded_value = pair.value
        result.set_unrecognized_field(pair.key, encoded_value, value_variant)
    # Clear the source field on the copy so the pairs aren't emitted twice.
    setattr(result, source, [])
    return result
def _SafeEncodeBytes(field, value):
    """Encode the bytes in value as urlsafe base64.

    On a TypeError (value is not bytes-like) the original value is
    returned unmodified with complete=False.
    """
    try:
        if field.repeated:
            encoded = [base64.urlsafe_b64encode(item) for item in value]
        else:
            encoded = base64.urlsafe_b64encode(value)
    except TypeError:
        return CodecResult(value=value, complete=False)
    return CodecResult(value=encoded, complete=True)
def _SafeDecodeBytes(unused_field, value):
    """Decode the urlsafe base64 value into bytes.

    On a TypeError the original value is returned unmodified with
    complete=False.
    """
    try:
        decoded = base64.urlsafe_b64decode(str(value))
    except TypeError:
        return CodecResult(value=value, complete=False)
    return CodecResult(value=decoded, complete=True)
def _ProcessUnknownEnums(message, encoded_message):
    """Add unknown enum values from encoded_message as unknown fields.

    ProtoRPC diverges from the usual protocol buffer behavior here and
    doesn't allow unknown fields. Throwing on unknown fields makes it
    impossible to let servers add new enum values and stay compatible
    with older clients, which isn't reasonable for us. We simply store
    unrecognized enum values as unknown fields, and all is well.

    Args:
      message: Proto message we've decoded thus far.
      encoded_message: JSON string we're decoding.

    Returns:
      message, with any unknown enums stored as unrecognized fields.
    """
    if not encoded_message:
        return message
    decoded_message = json.loads(encoded_message)
    for field in message.all_fields():
        # An enum field that is present in the JSON but came out unset after
        # decoding must have held a value the schema doesn't know about.
        if (isinstance(field, messages.EnumField) and
                field.name in decoded_message and
                message.get_assigned_value(field.name) is None):
            message.set_unrecognized_field(
                field.name, decoded_message[field.name], messages.Variant.ENUM)
    return message
def _ProcessUnknownMessages(message, encoded_message):
"""Store any remaining unknown fields as strings.
ProtoRPC currently ignores unknown values for which no type can be
determined (and logs a "No variant found" message). For the purposes
of reserializing, this is quite harmful (since it throws away
information). Here we simply add those as unknown fields of type
string (so that they can easily be reserialized).
Args:
message: Proto message we've decoded thus far.
encoded_message: JSON string we're decoding.
Returns:
message, with any remaining unrecognized fields saved.
"""
if not encoded_message:
return message
decoded_message = json.loads(encoded_message)
message_fields = [x.name for x in message.all_fields()] + list(
message.all_unrecognized_fields())
missing_fields = [x for x in decoded_message.keys()
if x not in message_fields]
for field_name in missing_fields:
message.set_unrecognized_field(field_name, decoded_message[field_name],
messages.Variant.STRING)
return message
# Register the urlsafe-base64 codec above for every BytesField.
RegisterFieldTypeCodec(_SafeEncodeBytes, _SafeDecodeBytes)(messages.BytesField)
# Note that these could share a dictionary, since they're keyed by
# distinct types, but it's not really worth it.
# Both map a type key (see _GetTypeKey) to a {python_name: json_name} dict.
_JSON_ENUM_MAPPINGS = {}
_JSON_FIELD_MAPPINGS = {}
def _GetTypeKey(message_type, package):
"""Get the prefix for this message type in mapping dicts."""
key = message_type.definition_name()
if package and key.startswith(package + '.'):
module_name = message_type.__module__
# We normalize '__main__' to something unique, if possible.
if module_name == '__main__':
try:
file_name = sys.modules[module_name].__file__
except (AttributeError, KeyError):
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
module_name = unicode(base_name)
else:
module_name = u'.'.join(split_name[:-1])
key = module_name + '.' + key.partition('.')[2]
return key
def AddCustomJsonEnumMapping(enum_type, python_name, json_name,
                             package=''):
    """Register a custom wire encoding for a single enum value.

    Primarily used in generated code, to handle enum values which happen
    to collide with Python keywords.

    Args:
      enum_type: (messages.Enum) An enum type
      python_name: (basestring) Python name for this value.
      json_name: (basestring) JSON name to be used on the wire.
      package: (basestring, optional) Package prefix for this enum, if
          present. We strip this off the enum name in order to generate
          unique keys.
    """
    if not issubclass(enum_type, messages.Enum):
        raise exceptions.TypecheckError(
            'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
    enum_key = _GetTypeKey(enum_type, package)
    if python_name not in enum_type.names():
        raise exceptions.InvalidDataError(
            'Enum value %s not a value for type %s' % (python_name, enum_type))
    mappings_for_enum = _JSON_ENUM_MAPPINGS.setdefault(enum_key, {})
    _CheckForExistingMappings('enum', enum_type, python_name, json_name)
    mappings_for_enum[python_name] = json_name
def AddCustomJsonFieldMapping(message_type, python_name, json_name,
                              package=''):
    """Register a custom wire encoding for a single message field.

    Primarily used in generated code, to handle field names which happen
    to collide with Python keywords.

    Args:
      message_type: (messages.Message) A message type
      python_name: (basestring) Python name for this value.
      json_name: (basestring) JSON name to be used on the wire.
      package: (basestring, optional) Package prefix for this message, if
          present. We strip this off the message name in order to generate
          unique keys.
    """
    if not issubclass(message_type, messages.Message):
        raise exceptions.TypecheckError(
            'Cannot set JSON field mapping for '
            'non-message "%s"' % message_type)
    message_key = _GetTypeKey(message_type, package)
    try:
        # Only probing for existence; the field itself is not needed.
        message_type.field_by_name(python_name)
    except KeyError:
        raise exceptions.InvalidDataError(
            'Field %s not recognized for type %s' % (
                python_name, message_type))
    mappings_for_type = _JSON_FIELD_MAPPINGS.setdefault(message_key, {})
    _CheckForExistingMappings('field', message_type, python_name, json_name)
    mappings_for_type[python_name] = json_name
def GetCustomJsonEnumMapping(enum_type, python_name=None, json_name=None):
    """Return the appropriate remapping for the given enum, or None.

    Exactly one of python_name/json_name should be provided; passing
    both or neither makes _FetchRemapping raise InvalidDataError.
    """
    return _FetchRemapping(enum_type.definition_name(), 'enum',
                           python_name=python_name, json_name=json_name,
                           mappings=_JSON_ENUM_MAPPINGS)
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
    """Return the appropriate remapping for the given field, or None.

    Exactly one of python_name/json_name should be provided; passing
    both or neither makes _FetchRemapping raise InvalidDataError.
    """
    return _FetchRemapping(message_type.definition_name(), 'field',
                           python_name=python_name, json_name=json_name,
                           mappings=_JSON_FIELD_MAPPINGS)
def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None,
mappings=None):
"""Common code for fetching a key or value from a remapping dict."""
if python_name and json_name:
raise exceptions.InvalidDataError(
'Cannot specify both python_name and json_name '
'for %s remapping' % mapping_type)
if not (python_name or json_name):
raise exceptions.InvalidDataError(
'Must specify either python_name or json_name for %s remapping' % (
mapping_type,))
field_remappings = mappings.get(type_name, {})
if field_remappings:
if python_name:
return field_remappings.get(python_name)
elif json_name:
if json_name in list(field_remappings.values()):
return [k for k in field_remappings
if field_remappings[k] == json_name][0]
return None
def _CheckForExistingMappings(mapping_type, message_type,
                              python_name, json_name):
    """Validate that no conflicting mappings exist for the given values.

    Raises InvalidDataError if python_name is already bound to a different
    JSON name, or json_name is already bound to a different Python name.
    """
    if mapping_type == 'field':
        getter = GetCustomJsonFieldMapping
    elif mapping_type == 'enum':
        getter = GetCustomJsonEnumMapping
    existing = getter(message_type, python_name=python_name)
    if existing is not None and existing != json_name:
        raise exceptions.InvalidDataError(
            'Cannot add mapping for %s "%s", already mapped to "%s"' % (
                mapping_type, python_name, existing))
    existing = getter(message_type, json_name=json_name)
    if existing is not None and existing != python_name:
        raise exceptions.InvalidDataError(
            'Cannot add mapping for %s "%s", already mapped to "%s"' % (
                mapping_type, json_name, existing))
def _EncodeCustomFieldNames(message, encoded_value):
    """Rewrite remapped field names in encoded_value to their wire names.

    Args:
      message: Message instance whose type may have custom field mappings.
      encoded_value: JSON string encoding of message.

    Returns:
      encoded_value with any remapped Python field names replaced by their
      registered JSON wire names.
    """
    message_name = type(message).definition_name()
    field_remappings = list(_JSON_FIELD_MAPPINGS.get(message_name, {}).items())
    if field_remappings:
        decoded_value = json.loads(encoded_value)
        for python_name, json_name in field_remappings:
            # Bug fix: membership must be tested against the decoded dict,
            # not the raw JSON string. The old substring test fired when a
            # python_name merely appeared inside some value or longer key,
            # making decoded_value.pop(python_name) raise KeyError.
            if python_name in decoded_value:
                decoded_value[json_name] = decoded_value.pop(python_name)
        encoded_value = json.dumps(decoded_value)
    return encoded_value
def _DecodeCustomFieldNames(message_type, encoded_message):
    """Rewrite remapped wire names in encoded_message back to Python names."""
    remap = _JSON_FIELD_MAPPINGS.get(message_type.definition_name(), {})
    if not remap:
        return encoded_message
    decoded_message = json.loads(encoded_message)
    for python_name, json_name in list(remap.items()):
        if json_name in decoded_message:
            decoded_message[python_name] = decoded_message.pop(json_name)
    return json.dumps(decoded_message)
def _AsMessageList(msg):
    """Convert the provided list-as-JsonValue to a list."""
    # This really needs to live in extra_types, but extra_types needs
    # to import this file to be able to register codecs.
    # TODO(craigcitro): Split out a codecs module and fix this ugly
    # import.
    from apitools.base.py import extra_types

    def _IsRepeatedJsonValue(candidate):
        """Return True if candidate is a repeated value as a JsonValue."""
        return bool(
            isinstance(candidate, extra_types.JsonArray) or
            (isinstance(candidate, extra_types.JsonValue) and
             candidate.array_value))

    if not _IsRepeatedJsonValue(msg):
        raise ValueError('invalid argument to _AsMessageList')
    # Unwrap JsonValue -> JsonArray -> plain list of entries.
    if isinstance(msg, extra_types.JsonValue):
        msg = msg.array_value
    if isinstance(msg, extra_types.JsonArray):
        msg = msg.entries
    return msg
|
|
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from oslo_db import api as db_api
# Maps logical backend names to the modules implementing them; resolved
# lazily by oslo.db's DBAPI loader.
_BACKEND_MAPPING = {
    'sqlalchemy': 'mistral.db.v2.sqlalchemy.api',
}
# Backend facade: every public function in this module is a thin
# delegation shim around the corresponding IMPL attribute.
IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING)
def setup_db():
    """Delegate database setup to the active backend."""
    IMPL.setup_db()
def drop_db():
    """Delegate database teardown to the active backend."""
    IMPL.drop_db()
# Transaction control.
def start_tx():
    """Delegate to the backend's start_tx()."""
    IMPL.start_tx()
def commit_tx():
    """Delegate to the backend's commit_tx()."""
    IMPL.commit_tx()
def rollback_tx():
    """Delegate to the backend's rollback_tx()."""
    IMPL.rollback_tx()
def end_tx():
    """Delegate to the backend's end_tx()."""
    IMPL.end_tx()
@contextlib.contextmanager
def transaction(read_only=False):
    """Context manager wrapping a backend transaction scope.

    Args:
        read_only: passed through positionally to the backend.
    """
    with IMPL.transaction(read_only):
        yield
def refresh(model):
    """Re-load the given model's state via the backend."""
    IMPL.refresh(model)
# Locking.
def acquire_lock(model, id):
    """Acquire a lock for the given model/id pair (backend semantics)."""
    return IMPL.acquire_lock(model, id)
# Workbooks.
# Plain CRUD shims. Module convention: get_* is expected to fail in the
# backend when the object is missing, while load_* returns None instead
# (see the load_* docstrings).
def get_workbook(name):
    return IMPL.get_workbook(name)
def load_workbook(name):
    """Unlike get_workbook this method is allowed to return None."""
    return IMPL.load_workbook(name)
def get_workbooks(limit=None, marker=None, sort_keys=None,
                  sort_dirs=None, fields=None, **kwargs):
    return IMPL.get_workbooks(
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        fields=fields,
        **kwargs
    )
def create_workbook(values):
    return IMPL.create_workbook(values)
def update_workbook(name, values):
    return IMPL.update_workbook(name, values)
def create_or_update_workbook(name, values):
    return IMPL.create_or_update_workbook(name, values)
def delete_workbook(name):
    IMPL.delete_workbook(name)
def delete_workbooks(**kwargs):
    IMPL.delete_workbooks(**kwargs)
# Workflow definitions.
def get_workflow_definition(identifier, namespace=''):
    return IMPL.get_workflow_definition(identifier, namespace=namespace)
def get_workflow_definition_by_id(id):
    return IMPL.get_workflow_definition_by_id(id)
def load_workflow_definition(name, namespace=''):
    """Unlike get_workflow_definition this method is allowed to return None."""
    return IMPL.load_workflow_definition(name, namespace)
def get_workflow_definitions(limit=None, marker=None, sort_keys=None,
                             sort_dirs=None, fields=None, **kwargs):
    return IMPL.get_workflow_definitions(
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        fields=fields,
        **kwargs
    )
def create_workflow_definition(values):
    return IMPL.create_workflow_definition(values)
# NOTE(review): namespace is required here, unlike the other
# *_workflow_definition shims where it defaults to '' — confirm callers.
def update_workflow_definition(identifier, values, namespace):
    return IMPL.update_workflow_definition(identifier, values, namespace)
def create_or_update_workflow_definition(name, values):
    return IMPL.create_or_update_workflow_definition(name, values)
def delete_workflow_definition(identifier, namespace=''):
    IMPL.delete_workflow_definition(identifier, namespace)
def delete_workflow_definitions(**kwargs):
    IMPL.delete_workflow_definitions(**kwargs)
# Action definitions.
# CRUD delegation shims; parameter names (including `id`, which shadows
# the builtin) are part of the public keyword interface and kept as-is.
def get_action_definition_by_id(id):
    return IMPL.get_action_definition_by_id(id)
def get_action_definition(name):
    return IMPL.get_action_definition(name)
def load_action_definition(name):
    """Unlike get_action_definition this method is allowed to return None."""
    return IMPL.load_action_definition(name)
def get_action_definitions(limit=None, marker=None, sort_keys=None,
                           sort_dirs=None, **kwargs):
    return IMPL.get_action_definitions(
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        **kwargs
    )
def create_action_definition(values):
    return IMPL.create_action_definition(values)
def update_action_definition(identifier, values):
    return IMPL.update_action_definition(identifier, values)
def create_or_update_action_definition(name, values):
    return IMPL.create_or_update_action_definition(name, values)
def delete_action_definition(name):
    return IMPL.delete_action_definition(name)
def delete_action_definitions(**kwargs):
    return IMPL.delete_action_definitions(**kwargs)
# Action executions.
def get_action_execution(id):
    return IMPL.get_action_execution(id)
# NOTE(review): parameter is called `name` although get_action_execution
# takes `id` — presumably the same identifier; confirm against backend.
def load_action_execution(name):
    """Unlike get_action_execution this method is allowed to return None."""
    return IMPL.load_action_execution(name)
def get_action_executions(**kwargs):
    return IMPL.get_action_executions(**kwargs)
def create_action_execution(values):
    return IMPL.create_action_execution(values)
def update_action_execution(id, values):
    return IMPL.update_action_execution(id, values)
def create_or_update_action_execution(id, values):
    return IMPL.create_or_update_action_execution(id, values)
def delete_action_execution(id):
    return IMPL.delete_action_execution(id)
def delete_action_executions(**kwargs):
    IMPL.delete_action_executions(**kwargs)
# Workflow executions.
def get_workflow_execution(id):
    return IMPL.get_workflow_execution(id)
def load_workflow_execution(name):
    """Unlike get_workflow_execution this method is allowed to return None."""
    return IMPL.load_workflow_execution(name)
def get_workflow_executions(limit=None, marker=None, sort_keys=None,
                            sort_dirs=None, **kwargs):
    return IMPL.get_workflow_executions(
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        **kwargs
    )
def create_workflow_execution(values):
    return IMPL.create_workflow_execution(values)
def update_workflow_execution(id, values):
    return IMPL.update_workflow_execution(id, values)
def create_or_update_workflow_execution(id, values):
    return IMPL.create_or_update_workflow_execution(id, values)
def delete_workflow_execution(id):
    return IMPL.delete_workflow_execution(id)
def delete_workflow_executions(**kwargs):
    IMPL.delete_workflow_executions(**kwargs)
def update_workflow_execution_state(**kwargs):
    """Forward a state update (kwargs are backend-defined)."""
    return IMPL.update_workflow_execution_state(**kwargs)
# Tasks executions.
def get_task_execution(id):
    return IMPL.get_task_execution(id)
def load_task_execution(id):
    """Unlike get_task_execution this method is allowed to return None."""
    return IMPL.load_task_execution(id)
def get_task_executions(limit=None, marker=None, sort_keys=None,
                        sort_dirs=None, **kwargs):
    return IMPL.get_task_executions(
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        **kwargs
    )
def get_completed_task_executions(**kwargs):
    return IMPL.get_completed_task_executions(**kwargs)
def get_incomplete_task_executions(**kwargs):
    return IMPL.get_incomplete_task_executions(**kwargs)
def get_incomplete_task_executions_count(**kwargs):
    return IMPL.get_incomplete_task_executions_count(**kwargs)
def create_task_execution(values):
    return IMPL.create_task_execution(values)
def update_task_execution(id, values):
    return IMPL.update_task_execution(id, values)
def create_or_update_task_execution(id, values):
    return IMPL.create_or_update_task_execution(id, values)
def delete_task_execution(id):
    return IMPL.delete_task_execution(id)
def delete_task_executions(**kwargs):
    return IMPL.delete_task_executions(**kwargs)
def update_task_execution_state(**kwargs):
    """Forward a state update (kwargs are backend-defined)."""
    return IMPL.update_task_execution_state(**kwargs)
# Delayed calls.
def get_delayed_calls_to_start(time, batch_size=None):
    """Fetch delayed calls due at `time`; batch_size caps the batch."""
    return IMPL.get_delayed_calls_to_start(time, batch_size)
def create_delayed_call(values):
    return IMPL.create_delayed_call(values)
def delete_delayed_call(id):
    return IMPL.delete_delayed_call(id)
def update_delayed_call(id, values, query_filter=None):
    """Update a delayed call; query_filter is forwarded positionally."""
    return IMPL.update_delayed_call(id, values, query_filter)
def get_delayed_call(id):
    return IMPL.get_delayed_call(id)
def get_delayed_calls(**kwargs):
    return IMPL.get_delayed_calls(**kwargs)
def delete_delayed_calls(**kwargs):
    return IMPL.delete_delayed_calls(**kwargs)
# Cron triggers.
def get_cron_trigger(identifier):
    return IMPL.get_cron_trigger(identifier)
def get_cron_trigger_by_id(id):
    return IMPL.get_cron_trigger_by_id(id)
def load_cron_trigger(identifier):
    """Unlike get_cron_trigger this method is allowed to return None."""
    return IMPL.load_cron_trigger(identifier)
def get_cron_triggers(**kwargs):
    return IMPL.get_cron_triggers(**kwargs)
def get_next_cron_triggers(time):
    return IMPL.get_next_cron_triggers(time)
# NOTE(review): `session` is accepted but deliberately NOT forwarded to
# the backend in the next two shims — confirm this matches the backend's
# session-aware decorator convention before changing.
def get_expired_executions(expiration_time, limit=None, columns=(),
                           session=None):
    return IMPL.get_expired_executions(
        expiration_time,
        limit,
        columns
    )
def get_superfluous_executions(max_finished_executions, limit=None, columns=(),
                               session=None):
    return IMPL.get_superfluous_executions(
        max_finished_executions,
        limit,
        columns
    )
def create_cron_trigger(values):
    return IMPL.create_cron_trigger(values)
def update_cron_trigger(identifier, values, query_filter=None):
    return IMPL.update_cron_trigger(identifier, values,
                                    query_filter=query_filter)
def create_or_update_cron_trigger(identifier, values):
    return IMPL.create_or_update_cron_trigger(identifier, values)
def delete_cron_trigger(identifier):
    return IMPL.delete_cron_trigger(identifier)
def delete_cron_triggers(**kwargs):
    return IMPL.delete_cron_triggers(**kwargs)
# Environments.
def get_environment(name):
    return IMPL.get_environment(name)
def load_environment(name):
    """Unlike get_environment this method is allowed to return None."""
    return IMPL.load_environment(name)
def get_environments(limit=None, marker=None, sort_keys=None,
                     sort_dirs=None, **kwargs):
    return IMPL.get_environments(
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        **kwargs
    )
def create_environment(values):
    return IMPL.create_environment(values)
def update_environment(name, values):
    return IMPL.update_environment(name, values)
def create_or_update_environment(name, values):
    return IMPL.create_or_update_environment(name, values)
def delete_environment(name):
    IMPL.delete_environment(name)
def delete_environments(**kwargs):
    IMPL.delete_environments(**kwargs)
# Resource members.
def create_resource_member(values):
    return IMPL.create_resource_member(values)
def get_resource_member(resource_id, res_type, member_id):
    return IMPL.get_resource_member(resource_id, res_type, member_id)
def get_resource_members(resource_id, res_type):
    return IMPL.get_resource_members(resource_id, res_type)
def update_resource_member(resource_id, res_type, member_id, values):
    return IMPL.update_resource_member(
        resource_id,
        res_type,
        member_id,
        values
    )
def delete_resource_member(resource_id, res_type, member_id):
    IMPL.delete_resource_member(resource_id, res_type, member_id)
def delete_resource_members(**kwargs):
    IMPL.delete_resource_members(**kwargs)
# Event triggers.
def get_event_trigger(id, insecure=False):
    return IMPL.get_event_trigger(id, insecure)
# NOTE(review): presumably allowed to return None like the other load_*
# shims in this module — confirm against the backend implementation.
def load_event_trigger(id, insecure=False):
    return IMPL.load_event_trigger(id, insecure)
def get_event_triggers(insecure=False, limit=None, marker=None, sort_keys=None,
                       sort_dirs=None, fields=None, **kwargs):
    return IMPL.get_event_triggers(
        insecure=insecure,
        limit=limit,
        marker=marker,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        fields=fields,
        **kwargs
    )
def create_event_trigger(values):
    return IMPL.create_event_trigger(values)
def update_event_trigger(id, values):
    return IMPL.update_event_trigger(id, values)
def delete_event_trigger(id):
    return IMPL.delete_event_trigger(id)
def delete_event_triggers(**kwargs):
    return IMPL.delete_event_triggers(**kwargs)
# Locks.
def create_named_lock(name):
    return IMPL.create_named_lock(name)
def get_named_locks(limit=None, marker=None):
    return IMPL.get_named_locks(limit=limit, marker=marker)
def delete_named_lock(lock_id):
    return IMPL.delete_named_lock(lock_id)
@contextlib.contextmanager
def named_lock(name):
    """Context manager holding the backend named lock for its duration."""
    with IMPL.named_lock(name):
        yield
|
|
#!/usr/bin/env python
"""A command line client for Google Tasks
It's done by following the tutorials in Google Developers:
https://developers.google.com/google-apps/tasks/quickstart/python.
In order to use this script, please look at the "Using scripts involve Google
App API" section of the sorno-py-scripts README (can be found in
https://github.com/hermantai/sorno-scripts/tree/master/sorno-py-scripts). The
API needed for this script is "Tasks API" with the scope
'https://www.googleapis.com/auth/tasks'.
Examples:
To print tasks for all of your task lists:
$ sorno_gtasks.py get_tasks
To print task only for your task list "list1" and "list2":
$ sorno_gtasks.py get_tasks list1 list2
Copyright 2014 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import httplib2
import logging
import os
import pprint
import re
import subprocess
import sys
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client import tools
from sorno import consoleutil
from sorno import loggingutil
from sorno import stringutil
# The oauth scope needed for Google Tasks API
OAUTH_SCOPE = 'https://www.googleapis.com/auth/tasks'
# The file path that stores the access token returned by Google from oauth
# authentication
CREDENTIALS_FILE = os.path.expanduser("~/.sorno_gtasks-google-drive-api.cred")
# NOTE(review): this grabs the *root* logger (not __name__), so any other
# library configuring the root logger affects this script's output.
_log = logging.getLogger()
_plain_logger = None  # will be created in main()
_plain_error_logger = None  # will be created in main()
class GoogleTasksConsoleApp(object):
"""The controller of the sorno_gtasks script"""
# error codes
EXIT_CODE_USER_ABORT = 2
EXIT_CODE_USER_INPUT_ERROR = 3
def __init__(
self,
):
self.tasks_service = None
def auth(self, flags, use_credentials_cache=True):
"""
Authenticates either by an existing credentials or by prompting the
user to grant permissions. If succeeds, set self.tasks_service to the
service client that can call tasks api. Otherwise, it aborts the
script.
Args:
flags (argparse.Namespace): The flags for this script.
use_credentials_cache (Optional[bool]): If true, uses the
credentials stored in ``CREDENTIALS_FILE``.
"""
# Copy your credentials from the console
client_id = os.getenv('GOOGLE_APP_PROJECT_CLIENT_ID')
client_secret = os.getenv('GOOGLE_APP_PROJECT_CLIENT_SECRET')
if not client_id:
_log.info(
"Please set the environment variable"
" GOOGLE_APP_PROJECT_CLIENT_ID"
)
sys.exit(1)
if not client_secret:
_log.info(
"Please set the environment variable"
" GOOGLE_APP_PROJECT_CLIENT_SECRET"
)
sys.exit(1)
# Run through the OAuth flow and retrieve credentials
flow = OAuth2WebServerFlow(
client_id,
client_secret,
OAUTH_SCOPE,
)
# Indicate we need the user to grant us permissions and give the auth
# code or not
need_get_code = True
storage = Storage(CREDENTIALS_FILE)
if os.path.exists(CREDENTIALS_FILE) and use_credentials_cache:
credentials = storage.get()
_log.debug("Use old credentials")
need_get_code = False
if need_get_code:
credentials = tools.run_flow(flow, storage, flags)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
self.tasks_service = build('tasks', 'v1', http=http)
def copy_tasks_action(self, args):
self.auth(args, use_credentials_cache=args.use_credentials_cache)
# ask the user to choose a task list that contains the tasks
tasklists = self.get_tasklists()
for index, tasklist in enumerate(tasklists, 1):
print(
"{0}) {1} (id: {2})".format(
index,
tasklist['title'],
tasklist['id'],
)
)
ans = consoleutil.input(
"Please choose the list that contains the tasks: "
)
intvs = consoleutil.parse_intervals(ans)
list_number = intvs[0].start
chosen_tasklist = tasklists[list_number - 1]
_plain_logger.info("")
# ask the user to choose the tasks from the chosen task list
tasks = self.get_tasks_from_tasklist(chosen_tasklist['id'])
_plain_logger.info(
"Tasklist [%s] has the following tasks:",
chosen_tasklist['title'],
)
self._print_tasks_with_ids(tasks)
ans = consoleutil.input(
"Please choose the tasks that you want to copy over: "
)
intvs = consoleutil.parse_intervals(ans)
chosen_tasks = []
for intv in intvs:
chosen_tasks.extend(
# the start and end in interals are one-based, so need to
# reduce them each by 1
tasks[intv.start - 1:intv.end]
)
dups = self._get_duplicated_items(chosen_tasks)
if dups:
_plain_logger.error(
"The following tasks are chosen more than once:"
)
self._print_tasks_with_ids(dups, ref=tasks)
return GoogleTasksConsoleApp.EXIT_CODE_USER_INPUT_ERROR
_plain_logger.info("Chosen tasks:")
self._print_tasks_with_ids(chosen_tasks, ref=tasks)
_plain_logger.info("")
# ask the user to choose the destination task list
ans = consoleutil.input(
"Please choose the destination task list:"
)
intvs = consoleutil.parse_intervals(ans)
dest_list_number = intvs[0].start
chosen_dest_tasklist = tasklists[dest_list_number - 1]
if chosen_tasklist == chosen_dest_tasklist:
_plain_error_logger.error(
"Source and destination lists cannot be the same."
)
return GoogleTasksConsoleApp.EXIT_CODE_USER_INPUT_ERROR
_plain_logger.info("")
# ask the user to confirm
confirm_msg = re.sub(
r"\s+",
" ",
"""
Are you sure you want to copy the chosen tasks from task list
[{0}] to task list [{1}]
""".strip().format(
chosen_tasklist['title'],
chosen_dest_tasklist['title'],
),
)
if not consoleutil.confirm(confirm_msg):
_plain_error_logger.error("Aborted")
return GoogleTasksConsoleApp.EXIT_CODE_USER_ABORT
# copy the tasks
for task in chosen_tasks:
self.insert_task(
chosen_dest_tasklist['id'],
task,
)
return 0
def insert_task(self, tasklist_id, task):
"""Inserts a task.
Args:
tasklist_id (str): The tasklist that contains the task.
task (dict): The representation of a task. See
https://developers.google.com/google-apps/tasks/v1/reference/tasks#resource-representations.
"""
t = task.copy()
t.pop('id', ' ')
self.tasks_service.tasks().insert(
tasklist=tasklist_id,
body=t,
).execute()
def delete_task(self, tasklist_id, task_id):
"""Deletes a task.
Args:
tasklist_id (str): The id of the tasklist that contains the task.
task_id (str): The id of the task inside the tasklist that is to
be deleted.
"""
self.tasks_service.tasks().delete(
tasklist=tasklist_id,
task=task_id,
).execute()
def delete_tasks_action(self, args):
self.auth(args, use_credentials_cache=args.use_credentials_cache)
# ask the user to choose a task list that contains the tasks
tasklists = self.get_tasklists()
for index, tasklist in enumerate(tasklists, 1):
print(
"{0}) {1} (id: {2})".format(
index,
tasklist['title'],
tasklist['id'],
)
)
ans = consoleutil.input(
"Please choose the list that contains the tasks: "
)
intvs = consoleutil.parse_intervals(ans)
list_number = intvs[0].start
chosen_tasklist = tasklists[list_number - 1]
_plain_logger.info("")
# ask the user to choose the tasks from the chosen task list
tasks = self.get_tasks_from_tasklist(chosen_tasklist['id'])
_plain_logger.info(
"Tasklist [%s] has the following tasks:",
chosen_tasklist['title'],
)
self._print_tasks_with_ids(tasks)
ans = consoleutil.input(
"Please choose the tasks that you want to delete: "
)
intvs = consoleutil.parse_intervals(ans)
chosen_tasks = []
for intv in intvs:
chosen_tasks.extend(
# the start and end in interals are one-based, so need to
# reduce them each by 1
tasks[intv.start - 1:intv.end]
)
dups = self._get_duplicated_items(chosen_tasks)
if dups:
_plain_logger.error(
"The following tasks are chosen more than once:"
)
self._print_tasks_with_ids(dups, ref=tasks)
return GoogleTasksConsoleApp.EXIT_CODE_USER_INPUT_ERROR
_plain_logger.info("Chosen tasks:")
self._print_tasks_with_ids(chosen_tasks, ref=tasks)
_plain_logger.info("")
# ask the user to confirm
confirm_msg = re.sub(
r"\s+",
" ",
"""
Are you sure you want to delete the chosen tasks from task list?
[{0}]
""".strip().format(
chosen_tasklist['title'],
),
)
if not consoleutil.confirm(confirm_msg):
_plain_error_logger.error("Aborted")
return GoogleTasksConsoleApp.EXIT_CODE_USER_ABORT
# delete the tasks
for task in chosen_tasks:
self.delete_task(chosen_tasklist['id'], task['id'])
return 0
def _print_tasks_with_ids(self, tasks, ref=None):
"""Print tasks along with their id's.
Args:
tasks (sequence[task]): Tasks to be printed out.
ref (Optional[sequence[task]]): By default, this method simply
prints the sequence numbers of the tasks according to the
position of the task in the given tasks (one-based). If ref is
provided, the position of the task in ref is used instead
(one-based). If the task cannot be found in ref, ValueError is
thrown.
Raises:
ValueError: If ref is given but a task cannot be found in ref.
"""
if ref:
for task in tasks:
index = ref.index(task)
if index == -1:
raise ValueError(
"Task [{0}] cannot be found in {1}".format(
task,
ref,
)
)
print(
"{0}) {1} (id: {2})".format(
index + 1,
task['title'],
task['id'],
)
)
else:
for index, task in enumerate(tasks, 1):
print(
"{0}) {1} (id: {2})".format(
index,
task['title'],
task['id'],
)
)
def _get_duplicated_items(self, items):
seen = []
duplicated = []
for item in items:
# TODO(htaihm): optimize this
if item in seen:
duplicated.append(item)
else:
seen.append(item)
return duplicated
def get_tasks_action(self, args):
"""Handle the subcommand get_tasks
Print out the tasks for the task lists specified from the flags of the
script.
Args:
args (argparse.Namespace): The flags of the script.
"""
self.auth(args, use_credentials_cache=args.use_credentials_cache)
tasklists_names = args.tasklist or []
tasklists = self.get_tasklists()
tasklists_to_show = []
tasklists_map = {
tasklist['title']: tasklist for tasklist in tasklists
}
if not tasklists_names:
# assume all the task lists if no task lists are provided
tasklists_to_show.extend(tasklists)
else:
for tasklist_name in tasklists_names:
if tasklist_name not in tasklists_map:
_plain_error_logger.error(
"Task list [%s] does not exist. Avaliable task lists"
" are:",
tasklist_name,
)
for index, tasklist in enumerate(tasklists, 1):
if args.detail:
s = pprint.pformat(tasklist)
else:
s = tasklist['title']
_plain_logger.error("%d) %s", index, s)
return 1
tasklists_to_show.append(tasklists_map[tasklist_name])
for tasklist_to_show in tasklists_to_show:
tasklist_id = tasklist_to_show['id']
if args.detail:
s = pprint.pformat(tasklist_to_show)
else:
s = "[%s]:" % tasklist_to_show['title']
_plain_logger.info(
"Tasks for the list %s",
s,
)
tasks = self.get_tasks_from_tasklist(tasklist_id)
for index, task in enumerate(tasks, 1):
if args.detail:
s = pprint.pformat(task)
else:
try:
s = stringutil.format_with_default_value(
lambda k: "<%s:null>" % k,
args.task_format,
task,
)
except KeyError as ex:
s = "KeyError: %s, task: %s" % (
ex,
pprint.pformat(task),
)
if args.list_with_chars is not None:
_plain_logger.info("%s%s", args.list_with_chars, s)
else:
_plain_logger.info("%d) %s", index, s)
if args.show_notes:
_plain_logger.info("Notes: %s", task.get('notes', ""))
elif args.show_notes_if_presence:
if task.get('notes'):
_plain_logger.info("Notes: %s", task['notes'])
return 0
def get_tasklists(self):
"""Retrieve the task lists of the user
Returns:
A list of dictionaries each represents a Tasklist resource. The
exact representation is in
https://developers.google.com/google-apps/tasks/v1/reference/tasklists#resource-representations
Currently it is:
{
"kind": "tasks#taskList",
"id": string,
"etag": string,
"title": string,
"updated": datetime,
"selfLink": string
}
"""
results = self.tasks_service.tasklists().list().execute()
return results.get('items', [])
def get_tasks_from_tasklist(self, tasklist_id):
    """Retrieve all tasks of a Tasklist.

    Follows ``nextPageToken`` pagination so that tasklists with more
    tasks than a single API page are returned in full; the previous
    implementation returned only the first page.

    Args:
        tasklist_id (string): The ID of the Tasklist.

    Returns:
        A list of dictionaries each represents a Task resource. The exact
        representation is in
        https://developers.google.com/google-apps/tasks/v1/reference/tasks#resource-representations
        Currently it is:
            {
                "kind": "tasks#task",
                "id": string,
                "etag": etag,
                "title": string,
                "updated": datetime,
                "selfLink": string,
                "parent": string,
                "position": string,
                "notes": string,
                "status": string,
                "due": datetime,
                "completed": datetime,
                "deleted": boolean,
                "hidden": boolean,
                "links": [
                    {
                        "type": string,
                        "description": string,
                        "link": string
                    }
                ]
            }
    """
    tasks = []
    page_token = None
    while True:
        # A None pageToken is omitted by the API client, so the first
        # request fetches the first page.
        results = self.tasks_service.tasks().list(
            tasklist=tasklist_id,
            pageToken=page_token,
        ).execute()
        tasks.extend(results.get('items', []))
        page_token = results.get('nextPageToken')
        if not page_token:
            break
    return tasks
def parse_args(app_obj, cmd_args):
    """Build the command-line parser and parse ``cmd_args``.

    Args:
        app_obj: The GoogleTasksConsoleApp instance whose ``*_action``
            methods are bound as subcommand handlers via ``set_defaults``.
        cmd_args: Command-line arguments, excluding the program name.

    Returns:
        The parsed argparse namespace; ``func`` holds the chosen handler.
    """
    # Everything in the module docstring before the copyright notice is
    # used as the human-readable program description.
    description = __doc__.split("Copyright 2014")[0].strip()
    root_parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[tools.argparser],
    )
    root_parser.add_argument(
        "--no-credentials-cache",
        dest="use_credentials_cache",
        action="store_false",
        default=True,
        help="If specified, old credentials are not reused and you have to"
        " follow the instruction from this script to get the code every"
        " time you use this script.",
    )
    root_parser.add_argument(
        "--debug",
        action="store_true",
    )

    subparsers = root_parser.add_subparsers(
        title="Subcommands",
        description="Some description for subcommands",
    )

    # --- get_tasks subcommand -------------------------------------------
    get_tasks_description = """Print tasks for your task lists.
Examples:
To print tasks for all of your task lists:
$ sorno_gtasks.py get_tasks
To print task only for your task list "list1" and "list2":
$ sorno_gtasks.py get_tasks list1 list2
By default, get_tasks only prints the titles of your tasks. You can use
--show-notes option to print the notes as well. Use the --detail option to
show details.
Examples:
To show the details for all tasks and all task lists.
$ sorno_gtasks.py get_tasks --detail
"""
    get_tasks_parser = subparsers.add_parser(
        "get_tasks",
        help="Print your tasks",
        description=get_tasks_description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    get_tasks_parser.add_argument(
        "--show-notes",
        action="store_true",
        help="shows the notes for each task",
    )
    get_tasks_parser.add_argument(
        "--show-notes-if-presence",
        action="store_true",
        help="shows the notes for tasks with notes",
    )
    get_tasks_parser.add_argument(
        "--task-format",
        help="The format for printing out a task, default is '%(default)s'."
        " You can use --detail to get all the field names for tasks. If a key"
        " specified in the format is missing, it is printed with the"
        " form: <key:null>, in which the key is the actual name of the key.",
        default="{title}",
    )
    get_tasks_parser.add_argument(
        "--list-with-chars",
        help="instead of numerating the tasks, use the characters specified in"
        " this option instead"
    )
    get_tasks_parser.add_argument(
        "--detail",
        action="store_true",
        help="see the details for all tasks and task lists",
    )
    get_tasks_parser.add_argument(
        "tasklist",
        nargs="*",
        help="The tasks in which to be printed out. If not specified, "
        " assume all tasks in all task lists.",
    )
    get_tasks_parser.set_defaults(func=app_obj.get_tasks_action)

    # --- copy_tasks subcommand ------------------------------------------
    copy_tasks_parser = subparsers.add_parser(
        "copy_tasks",
        description="Copy tasks from one task list to another."
        " Simply follow the prompt instructions to finish the action.",
        help="Copy tasks from one task list to another.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    copy_tasks_parser.set_defaults(func=app_obj.copy_tasks_action)

    # --- delete_tasks subcommand ----------------------------------------
    delete_tasks_parser = subparsers.add_parser(
        "delete_tasks",
        description="Delete tasks in a task list."
        " Simply follow the prompt instructions to finish the action.",
        help="Delete tasks in a task list.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    delete_tasks_parser.set_defaults(func=app_obj.delete_tasks_action)

    return root_parser.parse_args(cmd_args)
def main():
    """Entry point: set up logging and dispatch to the chosen subcommand."""
    global _plain_logger, _plain_error_logger

    console_app = GoogleTasksConsoleApp()
    parsed = parse_args(console_app, sys.argv[1:])

    loggingutil.setup_logger(_log, debug=parsed.debug)
    # Plain loggers write user-facing output without the usual log prefix;
    # the error variant goes to stderr only.
    _plain_logger = loggingutil.create_plain_logger(
        "PLAIN",
        debug=parsed.debug,
    )
    _plain_error_logger = loggingutil.create_plain_logger(
        "PLAIN_ERROR",
        debug=parsed.debug,
        stdout=False,
    )

    parsed.func(parsed)


if __name__ == '__main__':
    main()
|
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReplicationUserAccountDataRestServlet(ReplicationEndpoint):
    """Add user account data on the appropriate account data worker.

    Request format:

        POST /_synapse/replication/add_user_account_data/:user_id/:type

        {
            "content": { ... },
        }
    """

    NAME = "add_user_account_data"
    PATH_ARGS = ("user_id", "account_data_type")
    CACHE = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.handler = hs.get_account_data_handler()
        self.clock = hs.get_clock()

    @staticmethod
    async def _serialize_payload(  # type: ignore[override]
        user_id: str, account_data_type: str, content: JsonDict
    ) -> JsonDict:
        # user_id and account_data_type travel in the URL path; only the
        # account data content goes in the request body.
        return {"content": content}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, account_data_type: str
    ) -> Tuple[int, JsonDict]:
        body = parse_json_object_from_request(request)

        max_stream_id = await self.handler.add_account_data_for_user(
            user_id, account_data_type, body["content"]
        )

        return 200, {"max_stream_id": max_stream_id}
class ReplicationRoomAccountDataRestServlet(ReplicationEndpoint):
    """Add room account data on the appropriate account data worker.

    Request format:

        POST /_synapse/replication/add_room_account_data/:user_id/:room_id/:account_data_type

        {
            "content": { ... },
        }
    """

    NAME = "add_room_account_data"
    PATH_ARGS = ("user_id", "room_id", "account_data_type")
    CACHE = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.handler = hs.get_account_data_handler()
        self.clock = hs.get_clock()

    @staticmethod
    async def _serialize_payload(  # type: ignore[override]
        user_id: str, room_id: str, account_data_type: str, content: JsonDict
    ) -> JsonDict:
        # All identifiers travel in the URL path; only the account data
        # content goes in the request body.
        return {"content": content}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, account_data_type: str
    ) -> Tuple[int, JsonDict]:
        body = parse_json_object_from_request(request)

        max_stream_id = await self.handler.add_account_data_to_room(
            user_id, room_id, account_data_type, body["content"]
        )

        return 200, {"max_stream_id": max_stream_id}
class ReplicationAddTagRestServlet(ReplicationEndpoint):
    """Add a room tag on the appropriate account data worker.

    Request format:

        POST /_synapse/replication/add_tag/:user_id/:room_id/:tag

        {
            "content": { ... },
        }
    """

    NAME = "add_tag"
    PATH_ARGS = ("user_id", "room_id", "tag")
    CACHE = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.handler = hs.get_account_data_handler()
        self.clock = hs.get_clock()

    @staticmethod
    async def _serialize_payload(  # type: ignore[override]
        user_id: str, room_id: str, tag: str, content: JsonDict
    ) -> JsonDict:
        # The identifiers travel in the URL path; only the tag content
        # goes in the request body.
        return {"content": content}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, tag: str
    ) -> Tuple[int, JsonDict]:
        body = parse_json_object_from_request(request)

        max_stream_id = await self.handler.add_tag_to_room(
            user_id, room_id, tag, body["content"]
        )

        return 200, {"max_stream_id": max_stream_id}
class ReplicationRemoveTagRestServlet(ReplicationEndpoint):
    """Remove a room tag on the appropriate account data worker.

    Request format:

        POST /_synapse/replication/remove_tag/:user_id/:room_id/:tag

        {}
    """

    NAME = "remove_tag"
    PATH_ARGS = (
        "user_id",
        "room_id",
        "tag",
    )
    CACHE = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.handler = hs.get_account_data_handler()
        self.clock = hs.get_clock()

    @staticmethod
    async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict:  # type: ignore[override]
        # Everything needed is carried in the URL path; the body is empty.
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, user_id: str, room_id: str, tag: str
    ) -> Tuple[int, JsonDict]:
        max_stream_id = await self.handler.remove_tag_from_room(
            user_id,
            room_id,
            tag,
        )

        return 200, {"max_stream_id": max_stream_id}
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    """Register every account-data replication servlet with the HTTP server."""
    for servlet_class in (
        ReplicationUserAccountDataRestServlet,
        ReplicationRoomAccountDataRestServlet,
        ReplicationAddTagRestServlet,
        ReplicationRemoveTagRestServlet,
    ):
        servlet_class(hs).register(http_server)
|
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import rabbitmq
from salt.exceptions import CommandExecutionError
# Globals
# __salt__ is normally injected by the Salt loader at runtime; stub it
# out with an empty dict so tests can patch.dict() mocks into it.
rabbitmq.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RabbitmqTestCase(TestCase):
    '''
    Unit tests for the salt.modules.rabbitmq execution module.

    Each test patches the ``__salt__`` dunder so that the underlying
    ``rabbitmqctl``/``rabbitmq-plugins`` shell-outs are replaced by mocks.
    '''

    @staticmethod
    def _cmd_run_patch(output):
        '''Patch context replacing __salt__['cmd.run'] with a mock returning ``output``.'''
        return patch.dict(rabbitmq.__salt__,
                          {'cmd.run': MagicMock(return_value=output)})

    @staticmethod
    def _plugin_patch(run_all_output):
        '''Patch context for plugin tests: mocks cmd.run_all and pkg.version.'''
        return patch.dict(
            rabbitmq.__salt__,
            {'cmd.run_all': MagicMock(return_value=run_all_output),
             'pkg.version': MagicMock(return_value='')})

    # 'list_users' function tests: 1

    def test_list_users(self):
        '''
        list_users parses the output of rabbitmqctl list_users.
        '''
        with self._cmd_run_patch('Listing users ...\nguest\t[administrator]\n'):
            self.assertDictEqual(rabbitmq.list_users(),
                                 {'guest': set(['administrator'])})

    # 'list_users_with_warning' function tests: 1

    def test_list_users_with_warning(self):
        '''
        A leading WARNING line does not prevent parsing of the user list.
        '''
        output = '\n'.join([
            'WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- location has moved to /etc/rabbitmq/rabbitmq-env.conf',
            'Listing users ...',
            'guest\t[administrator]\n',
        ])
        with self._cmd_run_patch(output):
            self.assertDictEqual(rabbitmq.list_users(),
                                 {'guest': set(['administrator'])})

    # 'list_vhosts' function tests: 1

    def test_list_vhosts(self):
        '''
        list_vhosts returns each line of rabbitmqctl list_vhosts output.
        '''
        with self._cmd_run_patch('...\nsaltstack\n...'):
            self.assertListEqual(rabbitmq.list_vhosts(),
                                 ['...', 'saltstack', '...'])

    # 'user_exists' function tests: 2

    def test_user_exists_negative(self):
        '''
        user_exists is False for a user absent from list_users output.
        '''
        with self._cmd_run_patch('Listing users ...\nsaltstack\t[administrator]\n...done'):
            self.assertFalse(rabbitmq.user_exists('rabbit_user'))

    def test_user_exists(self):
        '''
        user_exists is True for a user present in list_users output.
        '''
        with self._cmd_run_patch('Listing users ...\nsaltstack\t[administrator]\n...done'):
            self.assertTrue(rabbitmq.user_exists('saltstack'))

    # 'vhost_exists' function tests: 1

    def test_vhost_exists(self):
        '''
        vhost_exists is True for a vhost present in list_vhosts output.
        '''
        with self._cmd_run_patch('Listing vhosts ...\nsaltstack'):
            self.assertTrue(rabbitmq.vhost_exists('saltstack'))

    # 'add_user' function tests: 1

    def test_add_user(self):
        '''
        add_user reports the added user, and raises CommandExecutionError
        when clearing the freshly-set password fails.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.add_user('saltstack'),
                                 {'Added': 'saltstack'})

        with self._cmd_run_patch('Error'):
            with patch.object(rabbitmq, 'clear_password',
                              return_value={'Error': 'Error', 'retcode': 1}):
                self.assertRaises(CommandExecutionError,
                                  rabbitmq.add_user, 'saltstack')

    # 'delete_user' function tests: 1

    def test_delete_user(self):
        '''
        delete_user reports the user deleted via rabbitmqctl delete_user.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.delete_user('saltstack'),
                                 {'Deleted': 'saltstack'})

    # 'change_password' function tests: 1

    def test_change_password(self):
        '''
        change_password reports the user whose password changed.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(
                rabbitmq.change_password('saltstack', 'salt@123'),
                {'Password Changed': 'saltstack'})

    # 'clear_password' function tests: 1

    def test_clear_password(self):
        '''
        clear_password reports the user whose password was removed.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.clear_password('saltstack'),
                                 {'Password Cleared': 'saltstack'})

    # 'add_vhost' function tests: 1

    def test_add_vhost(self):
        '''
        add_vhost reports the vhost added via rabbitmqctl add_vhost.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.add_vhost('saltstack'),
                                 {'Added': 'saltstack'})

    # 'delete_vhost' function tests: 1

    def test_delete_vhost(self):
        '''
        delete_vhost reports the vhost deleted via rabbitmqctl delete_vhost.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.delete_vhost('saltstack'),
                                 {'Deleted': 'saltstack'})

    # 'set_permissions' function tests: 1

    def test_set_permissions(self):
        '''
        set_permissions reports the output of rabbitmqctl set_permissions.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.set_permissions('myvhost', 'myuser'),
                                 {'Permissions Set': 'saltstack'})

    # 'list_user_permissions' function tests: 1

    def test_list_user_permissions(self):
        '''
        list_user_permissions parses rabbitmqctl list_user_permissions output.
        '''
        with self._cmd_run_patch('Listing stuff ...\nsaltstack\tsaltstack\n...done'):
            self.assertDictEqual(rabbitmq.list_user_permissions('myuser'),
                                 {'saltstack': ['saltstack']})

    # 'set_user_tags' function tests: 1

    def test_set_user_tags(self):
        '''
        set_user_tags reports the output of rabbitmqctl set_user_tags.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.set_user_tags('myadmin', 'admin'),
                                 {'Tag(s) set': 'saltstack'})

    # 'status' function tests: 1

    def test_status(self):
        '''
        status returns the raw rabbitmqctl status output.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.status(), 'saltstack')

    # 'cluster_status' function tests: 1

    def test_cluster_status(self):
        '''
        cluster_status returns the raw rabbitmqctl cluster_status output.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.cluster_status(), 'saltstack')

    # 'join_cluster' function tests: 1

    def test_join_cluster(self):
        '''
        join_cluster reports the output of joining a rabbit cluster.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.join_cluster('rabbit.example.com'),
                                 {'Join': 'saltstack'})

    # 'stop_app' function tests: 1

    def test_stop_app(self):
        '''
        stop_app stops the RabbitMQ application, leaving the Erlang
        node running.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.stop_app(), 'saltstack')

    # 'start_app' function tests: 1

    def test_start_app(self):
        '''
        start_app starts the RabbitMQ application.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.start_app(), 'saltstack')

    # 'reset' function tests: 1

    def test_reset(self):
        '''
        reset returns a RabbitMQ node to its virgin state.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.reset(), 'saltstack')

    # 'force_reset' function tests: 1

    def test_force_reset(self):
        '''
        force_reset forcefully returns a RabbitMQ node to its virgin state.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.force_reset(), 'saltstack')

    # 'list_queues' function tests: 1

    def test_list_queues(self):
        '''
        list_queues returns queue details of the / virtual host.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.list_queues(), 'saltstack')

    # 'list_queues_vhost' function tests: 1

    def test_list_queues_vhost(self):
        '''
        list_queues_vhost returns queue details of a specific virtual host.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertEqual(rabbitmq.list_queues_vhost('consumers'),
                             'saltstack')

    # 'list_policies' function tests: 1

    def test_list_policies(self):
        '''
        list_policies returns a dict of policies nested by vhost and name;
        unparseable output yields an empty dict.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.list_policies(), {})

    # 'set_policy' function tests: 1

    def test_set_policy(self):
        '''
        set_policy reports the output of rabbitmqctl set_policy.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(
                rabbitmq.set_policy('/', 'HA', '.*', '{"ha-mode": "all"}'),
                {'Set': 'saltstack'})

    # 'delete_policy' function tests: 1

    def test_delete_policy(self):
        '''
        delete_policy reports the output of rabbitmqctl clear_policy.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertDictEqual(rabbitmq.delete_policy('/', 'HA'),
                                 {'Deleted': 'saltstack'})

    # 'policy_exists' function tests: 1

    def test_policy_exists(self):
        '''
        policy_exists is False when the policy is absent from list_policies.
        '''
        with self._cmd_run_patch('saltstack'):
            self.assertFalse(rabbitmq.policy_exists('/', 'HA'))

    # 'plugin_is_enabled' function tests: 1

    def test_plugin_is_enabled(self):
        '''
        plugin_is_enabled is True when rabbitmq-plugins lists the plugin.
        '''
        with self._plugin_patch({'retcode': 0, 'stdout': 'saltstack'}):
            self.assertTrue(rabbitmq.plugin_is_enabled('salt'))

    # 'enable_plugin' function tests: 1

    def test_enable_plugin(self):
        '''
        enable_plugin reports the plugin enabled via rabbitmq-plugins.
        '''
        with self._plugin_patch('saltstack'):
            self.assertDictEqual(rabbitmq.enable_plugin('salt'),
                                 {'Enabled': 'saltstack'})

    # 'disable_plugin' function tests: 1

    def test_disable_plugin(self):
        '''
        disable_plugin reports the plugin disabled via rabbitmq-plugins.
        '''
        with self._plugin_patch('saltstack'):
            self.assertDictEqual(rabbitmq.disable_plugin('salt'),
                                 {'Disabled': 'saltstack'})
if __name__ == '__main__':
    # Allow running this module directly via Salt's test runner; the
    # import lives here so normal test collection never pulls it in.
    from integration import run_tests
    run_tests(RabbitmqTestCase, needs_daemon=False)
|
|
# -*- coding: utf-8 -*-
import logging
import weakref
from collections import namedtuple
from typing import List, Iterable, Tuple
import pandas as pd
from PyQt5.QtCore import Qt, pyqtSignal, QTimer, QObject
from PyQt5.QtWidgets import QInputDialog, QMenu
from pyqtgraph import LinearRegionItem, TextItem, AxisItem, PlotItem
from pyqtgraph.GraphicsScene.mouseEvents import MouseClickEvent
from dgp.core import OID, StateAction
_log = logging.getLogger(__name__)
# Describes a mutation of a plot segment: `action` is a StateAction
# (UPDATE/DELETE), `uid` the segment group's OID, `start`/`stop` the
# segment's x-bounds, and `label` its display text.
LineUpdate = namedtuple('LineUpdate',
                        ['action', 'uid', 'start', 'stop', 'label'])
class PolyAxis(AxisItem):
    """AxisItem which can display tick strings formatted for a date/time value,
    or as scalar values.

    Parameters
    ----------
    orientation : str, optional
    timeaxis : bool, optional
        Enable the time-axis formatter, default is False
    kwargs
        See :class:`pyqtgraph.AxisItem` for permitted kwargs

    Attributes
    ----------
    timeaxis : bool
        If True format tick strings by their date values,
        If False use the default scalar formatter

    See Also
    --------
    :class:`pyqtgraph.AxisItem`
    :meth:`pyqtgraph.AxisItem.tickStrings`
    :meth:`pyqtgraph.AxisItem.tickSpacing`
    :meth:`pyqtgraph.AxisItem.tickValues`
    """
    def __init__(self, orientation='bottom', timeaxis=False, **kwargs):
        super().__init__(orientation, **kwargs)
        self.timeaxis = timeaxis
        # Map a maximum tick spacing (pandas Timedelta.value, i.e. ns) to
        # the strftime format used at or below that scale.
        self._timescales = {
            pd.Timedelta(seconds=1).value: '%M:%S:%f',
            pd.Timedelta(minutes=1).value: '%M:%S',
            pd.Timedelta(hours=1).value: '%H:%M:%S',
            pd.Timedelta(days=1).value: '%d %H:%M',
            pd.Timedelta(weeks=1).value: '%m-%d %H'
        }

    def dateTickStrings(self, values, spacing):
        """Create formatted date strings for the tick locations specified by
        values.

        Parameters
        ----------
        values : List
            List of values to generate tick strings for
        spacing : float
            Spacing between ticks, in the same units as the values (ns)

        Returns
        -------
        List[str]
            List of labels corresponding to each input value.
        """
        # Select the first formatter where the scale (sec/min/hour/day etc)
        # is greater than the range; fall back to month-day for wide spans.
        fmt = next((fmt for period, fmt in sorted(self._timescales.items())
                    if period >= spacing), '%m-%d')
        labels = []
        for i, loc in enumerate(values):
            try:
                ts: pd.Timestamp = pd.Timestamp(loc)
            except (OverflowError, ValueError, OSError):
                # Lazy %-formatting: the message is only built if emitted.
                _log.exception('Exception converting %s to date string.', loc)
                labels.append('')
                continue
            try:
                if i == 0 and len(values) > 2:
                    # First tick of a longer run anchors the axis with a
                    # full date/time label.
                    label = ts.strftime('%d-%b-%y %H:%M:%S')
                else:
                    label = ts.strftime(fmt)
            except ValueError:
                # strftime raises for timestamps outside its valid range
                _log.warning("Timestamp conversion out-of-bounds")
                label = 'OoB'
            labels.append(label)
        return labels

    def tickStrings(self, values, scale, spacing):
        """Return the tick strings that should be placed next to ticks.

        This method overrides the base implementation in :class:`AxisItem`, and
        will selectively provide date formatted strings if :attr:`timeaxis` is
        True. Otherwise the base method is called to provide the tick strings.

        Parameters
        ----------
        values : List
            List of values to generate tick strings for
        scale : Scalar
            Used to specify the scale of the values, useful when the axis label
            is configured to show the display as some SI fraction (e.g. milli),
            the scaled display value can be properly calculated.
        spacing : Scalar
            Spacing between values/ticks

        Returns
        -------
        List[str]
            List of strings used to label the plot at the given values

        Notes
        -----
        This function may be called multiple times for the same plot,
        where multiple tick-levels are defined i.e. Major/Minor/Sub-Minor ticks.
        The range of the values may also differ between invocations depending on
        the positioning of the chart. And the spacing will be different
        dependent on how the ticks were placed by the
        :meth:`pyqtgraph.AxisItem.tickSpacing` method.
        """
        if self.timeaxis:
            return self.dateTickStrings(values, spacing)
        else:  # pragma: no cover
            return super().tickStrings(values, scale, spacing)
class LinearSegment(LinearRegionItem):
    """Interactive vertical region used to select a segment of plotted data.

    Parameters
    ----------
    plot : :class:`~pyqtgraph.PlotItem` or :class:`.DgpPlotItem`
        PlotItem to add the LinearSegment to
    left, right : float
        Initial left/right values for the segment
    label : str, optional
        Set the initial label text for this segment
    movable : bool, optional
        Set the initial movable/editable state of the LinearSegment

    Attributes
    ----------
    sigLabelChanged : :class:`~pyqt.pyqtSignal` ( :class:`str` )
        Emitted when the label text of this segment has changed
    sigDeleteRequested : :class:`~pyqt.pyqtSignal` ()
        Emitted when a delete action is triggered for this segment
    """
    sigLabelChanged = pyqtSignal(str)
    sigDeleteRequested = pyqtSignal()

    def __init__(self, plot: PlotItem, left, right, label=None, movable=False):
        super().__init__(values=(left, right),
                         orientation=LinearRegionItem.Vertical,
                         movable=movable, brush=None, bounds=None)
        # Weak reference avoids keeping the owning plot alive via a cycle
        self._plot = weakref.ref(plot)
        self._label = TextItem(text=label or '', color=(0, 0, 0), anchor=(0, 0))
        self._update_label_pos()
        self.sigRegionChanged.connect(self._update_label_pos)

        # Context menu shown on right-click while the segment is movable
        self._menu = QMenu()
        self._menu.addAction('Remove', self.sigDeleteRequested.emit)
        self._menu.addAction('Set Label', self._get_label_dlg)

        plot.addItem(self)
        plot.addItem(self._label)
        plot.sigYRangeChanged.connect(self.y_rng_changed)

    @property
    def label_text(self) -> str:
        """@property Returns the current plain-text of the segment's label"""
        return self._label.textItem.toPlainText()

    @label_text.setter
    def label_text(self, value: str):
        """Set the label text, truncating input to at most 10 characters"""
        truncated = value[:10]
        self._label.setText(truncated)
        self._update_label_pos()

    def remove(self) -> None:
        """Detach this segment and its label from the owning plot"""
        plot = self._plot()
        plot.removeItem(self._label)
        plot.removeItem(self)
        try:
            plot.sigYRangeChanged.disconnect(self.y_rng_changed)
        except TypeError:
            # Signal was never connected or is already disconnected
            pass

    def mouseClickEvent(self, ev: MouseClickEvent):
        """Intercept right-click on segment to display context menu

        Clicks are ignored while the segment is not editable. A right-click
        (when not mid-drag) pops up the context menu; anything else is
        delegated to the base class for move/resize handling.
        """
        if not self.movable:
            return None
        if ev.button() == Qt.RightButton and not self.moving:
            ev.accept()
            self._menu.popup(ev.screenPos().toPoint())
            return None
        return super().mouseClickEvent(ev)

    def y_rng_changed(self, vb, ylims):  # pragma: no cover
        """:class:`pyqtSlot`: Update label position on change of ViewBox y-limits"""
        current_x = self._label.pos()[0]
        self._label.setPos(current_x, ylims[1])

    def _update_label_pos(self):
        """:class:`pyqtSlot`: Pin the label to the segment's left edge at the
        top of the current view range"""
        left_x, _ = self.getRegion()
        _, top_y = self._plot().viewRange()[1]
        self._label.setPos(left_x, top_y)

    def _get_label_dlg(self):  # pragma: no cover
        """:class:`pyqtSlot`: Prompt the user for a new label via input dialog

        Emits sigLabelChanged(str) with the stripped text if accepted.
        """
        # TODO: Assign parent or create dialog with Icon
        text, accepted = QInputDialog.getText(None, "Enter Label", "Segment Label:",
                                              text=self.label_text)
        if accepted:
            self.sigLabelChanged.emit(str(text).strip())
class LinearSegmentGroup(QObject):
    """Container for related LinearSegments which are linked across multiple
    plots.

    LinearSegmentGroup encapsulates the logic required to create and update a
    set of LinearSegment's across a group of plot items.

    Parameters
    ----------
    plots : Iterable of :class:`PlotItem`
        Iterable object containing plots to add LinearSegments to. Must have at
        least 1 item.
    uid : :class:`~dgp.core.OID`
        Unique identifier for this LinearSegmentGroup
    left, right : float
        Initial left/right (x) values for the segments in this group.
    label : str, optional
        Optional label to display on each segment
    movable : bool, optional
        Set the initial movable state of the segments, default is False

    Attributes
    ----------
    sigSegmentUpdate : pyqtSignal(LineUpdate)
        Qt Signal, emits a :class:`LineUpdate` object when the segment group has
        been mutated (Updated/Deleted)

    Notes
    -----
    An update timer (QTimer) rate-limits segment update signal emissions
    during resize operations: instead of emitting on every discrete
    drag-resize movement, updates are emitted only when the timer expires.
    The timer restarts with every movement, so updates fire once the user
    has paused or finished dragging.
    """
    sigSegmentUpdate = pyqtSignal(object)

    def __init__(self, plots: Iterable[PlotItem], uid: OID,
                 left: float, right: float, label: str = '',
                 movable: bool = False, parent: QObject = None):
        super().__init__(parent=parent)
        self._uid = uid
        self._segments: List[LinearSegment] = []
        self._label_text = label
        self._updating = False

        # Rate-limit timer: emit a single update once dragging pauses
        self._timer = QTimer(self)
        self._timer.setInterval(50)
        self._timer.timeout.connect(self._update_done)

        for plot in plots:
            seg = LinearSegment(plot, left, right, label=label,
                                movable=movable)
            seg.sigRegionChanged.connect(self._update_region)
            seg.sigLabelChanged.connect(self._update_label)
            seg.sigDeleteRequested.connect(self.delete)
            self._segments.append(seg)

    @property
    def left(self) -> pd.Timestamp:
        return pd.to_datetime(self._segments[0].getRegion()[0])

    @property
    def right(self) -> pd.Timestamp:
        return pd.to_datetime(self._segments[0].getRegion()[1])

    @property
    def region(self) -> Tuple[float, float]:
        """Return the left/right bounds of the group (None when empty)"""
        if self._segments:
            return self._segments[0].getRegion()

    @property
    def movable(self) -> bool:
        return self._segments[0].movable

    @property
    def label_text(self) -> str:
        return self._label_text

    def set_movable(self, movable: bool):
        """Set the movable property on every segment in this group"""
        for seg in self._segments:
            seg.setMovable(movable)

    def set_visibility(self, visible: bool):
        for seg in self._segments:
            seg.setVisible(visible)
            seg._label.setVisible(visible)

    def remove(self):
        self.delete(emit=False)

    def delete(self, emit=True):
        """Delete all child segments, optionally emitting a DELETE update"""
        for seg in self._segments:
            seg.remove()
        if emit:
            self.emit_update(StateAction.DELETE)

    def emit_update(self, action: StateAction = StateAction.UPDATE):
        """Emit a LineUpdate object with the current segment attributes

        Creates and emits a LineUpdate named-tuple with the current left and
        right x-values of the segment, and the current label-text.

        Parameters
        ----------
        action : StateAction, optional
            Optionally specify the action for the update, defaults to UPDATE.
            Use this parameter to trigger a DELETE action for instance.
        """
        self.sigSegmentUpdate.emit(
            LineUpdate(action, self._uid, self.left, self.right,
                       self._label_text))

    def _update_label(self, label: str):
        """Propagate new label text to all sibling segments, then emit"""
        for seg in self._segments:
            seg.label_text = label
        self._label_text = label
        self.emit_update(StateAction.UPDATE)

    def _update_region(self, segment: LinearSegment):
        """Synchronize sibling segments to the moved segment's bounds"""
        if self._updating:
            # Re-entrant call triggered by our own setRegion below
            return
        self._updating = True
        self._timer.start()
        bounds = segment.getRegion()
        for sibling in self._segments:
            if sibling is not segment:
                sibling.setRegion(bounds)
        self._updating = False

    def _update_done(self):
        """Emit an update object once the rate-limit timer has expired"""
        self._timer.stop()
        self.emit_update(StateAction.UPDATE)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists private endpoint connections of an IoT hub."""
    api_version = "2021-03-31"
    accept = "application/json"
    # Resolve the URL template against the serialized path arguments.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections',
    )
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }
    url = _format_url_section(url, **path_args)
    # Query string: api-version is mandatory.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON responses only.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single private endpoint connection."""
    api_version = "2021-03-31"
    accept = "application/json"
    # Resolve the URL template against the serialized path arguments.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}',
    )
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
    }
    url = _format_url_section(url, **path_args)
    # Query string: api-version is mandatory.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON responses only.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    private_endpoint_connection_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request of the update long-running operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-03-31"
    accept = "application/json"
    # Resolve the URL template against the serialized path arguments.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}',
    )
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
    }
    url = _format_url_section(url, **path_args)
    # Query string: api-version is mandatory.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request of the delete long-running operation."""
    api_version = "2021-03-31"
    accept = "application/json"
    # Resolve the URL template against the serialized path arguments.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}',
    )
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
    }
    url = _format_url_section(url, **path_args)
    # Query string: api-version is mandatory.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON responses only.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
class PrivateEndpointConnectionsOperations(object):
    """PrivateEndpointConnectionsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.iothub.v2021_03_31.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client and (de)serializers are injected by the generated
        # service client; config carries subscription_id and polling settings.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> List["_models.PrivateEndpointConnection"]:
        """List private endpoint connections.
        List private endpoint connection properties.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of PrivateEndpointConnection, or the result of cls(response)
        :rtype: list[~azure.mgmt.iothub.v2021_03_31.models.PrivateEndpointConnection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.PrivateEndpointConnection"]]
        # Map auth/conflict status codes to specific exception types; callers
        # may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_list_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request through the client pipeline synchronously.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[PrivateEndpointConnection]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections'}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Get private endpoint connection.
        Get private endpoint connection properties.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2021_03_31.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    def _update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        # Initial PUT of the update long-running operation; begin_update()
        # wraps this call with an LRO poller.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the connection body.
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> LROPoller["_models.PrivateEndpointConnection"]:
        """Update private endpoint connection.
        Update the status of a private endpoint connection with the specified name.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :param private_endpoint_connection: The private endpoint connection with updated properties.
        :type private_endpoint_connection:
         ~azure.mgmt.iothub.v2021_03_31.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2021_03_31.models.PrivateEndpointConnection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                private_endpoint_connection=private_endpoint_connection,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialization callback invoked with the final LRO response.
            response = pipeline_response.http_response
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> Optional["_models.PrivateEndpointConnection"]:
        # Initial DELETE of the delete long-running operation; begin_delete()
        # wraps this call with an LRO poller. 204 returns no body.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> LROPoller["_models.PrivateEndpointConnection"]:
        """Delete private endpoint connection.
        Delete private endpoint connection with the specified name.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2021_03_31.models.PrivateEndpointConnection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialization callback invoked with the final LRO response.
            response = pipeline_response.http_response
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
|
|
#python3
import requests
#import logging
#import json
from pprint import pprint
import re
from subprocess import Popen, PIPE
from bs4 import BeautifulSoup
import misc
#misc.py - personal info
#token = '*****'
#domen ='https://api.telegram.org/bot' + token + '/'
#login = '*****'
#psw = '*******'
#routerIp = 'http://192.168.1.1'
#mac ='*******'
#for router TPLINK-741N
#statusUrl = routerIp + '/userRpm/StatusRpm.htm'
#DisconnectUrl = routerIp + '/userRpm/StatusRpm.htm?Disconnect=Disconnect&wan=1'
#ConnectUrl = routerIp + '/userRpm/StatusRpm.htm?Connect=Connect&wan=1'
from time import sleep
import os
import sys
from datetime import datetime, date, time
import cv2
# GPIO access requires root privileges, so bail out early otherwise.
if not os.getegid() == 0:
    sys.exit('Script must be run as root')
# Imported after the root check so non-root runs fail with a clear message.
from pyA20.gpio import gpio
from pyA20.gpio import port
# NOTE(review): `global` at module level is a no-op; kept byte-identical.
global led
led = port.PA8  # LED output pin
global led_state
led_state = 0  # last driven LED level (toggled by change_led_state)
global last_update_id
last_update_id = 0  # highest Telegram update_id already processed
global button
button = port.PA7  # button input pin
global button_state
button_state = 0  # last sampled button level
gpio.init()
gpio.setcfg(led, gpio.OUTPUT)
gpio.setcfg(button, gpio.INPUT)
def get_updates():
    """Poll the Telegram Bot API ``getUpdates`` endpoint.

    Returns the decoded JSON payload, or None if the request or the JSON
    decoding failed (the failure is appended to the error log).
    """
    url = misc.domen + 'getupdates'
    try:
        # A timeout keeps the bot's main loop from hanging forever on a
        # stalled connection — requests has no default timeout.
        r = requests.get(url, timeout=30)
        return r.json()
    except Exception:
        save_err('get updates')
        return None
def save_err(Ex):
    """Append *Ex* plus the current timestamp to bot_except.log, then pause."""
    stamp = datetime.now()
    with open('bot_except.log', 'at') as log:
        log.write(Ex + '_' + str(stamp) + '\n')
    # Brief back-off so repeated failures do not spin the loop.
    sleep(1)
def get_message():
    """Return the newest unseen Telegram message as {'chat_id', 'text'}.

    Returns None when the API call failed, there are no updates, or the
    newest update was already processed (tracked via last_update_id).
    """
    global last_update_id
    data = get_updates()
    # Guard clauses instead of nested ifs: bail out on any miss.
    if data is None or data['ok'] != True:
        return None
    results = data['result']
    if len(results) == 0:
        return None
    newest = results[-1]
    current_update_id = newest['update_id']
    if current_update_id == last_update_id:
        return None
    last_update_id = current_update_id
    msg = newest['message']
    return {'chat_id': msg['chat']['id'], 'text': msg['text']}
def send_message(chat_id, text):
    """Send *text* to *chat_id* via the Telegram sendMessage endpoint.

    Failures are logged with save_err() rather than raised so the main
    loop keeps running.
    """
    url = misc.domen + 'sendmessage'
    try:
        # Let requests URL-encode the payload: the previous hand-built query
        # string broke on spaces, '&', '#' and non-ASCII text. Timeout keeps
        # the loop from hanging on a dead connection.
        requests.get(url, params={'chat_id': chat_id, 'text': text}, timeout=30)
    except Exception:
        save_err('send message')
def send_photo(chat_id):
    """Capture one frame from the default webcam and send it to *chat_id*.

    Any capture or upload failure is logged and reported back to the chat.
    """
    camera = cv2.VideoCapture(-1)
    try:
        ret, frame = camera.read()
        # cv2.read() returns (ok, ndarray); testing the array with `!= None`
        # raises ValueError, so check the boolean flag and identity instead.
        if ret and frame is not None:
            image_path = 'image.png'
            cv2.imwrite(image_path, frame)
            url = misc.domen + 'sendPhoto'
            data = {'chat_id': chat_id}
            try:
                # `with` closes the handle deterministically — the old code
                # called the undefined name close() and leaked the handle.
                with open(image_path, 'rb') as fh:
                    requests.post(url, data=data,
                                  files={'photo': (image_path, fh)},
                                  timeout=60)
            except Exception:
                save_err('send photo')
                send_message(chat_id, 'Err send photo')
        else:
            send_message(chat_id, 'Photo size = 0')
    except Exception:
        save_err('read camera')
        send_message(chat_id, 'Camera err')
    finally:
        # Release the capture device explicitly instead of relying on del().
        camera.release()
def get_ip():
    """Scrape the current external IPv4 address from yandex.ru/internet.

    Returns the address string, or None when the page is unreachable or its
    layout changed (the old code crashed with AttributeError in that case).
    """
    url = "https://yandex.ru/internet/"
    try:
        # Timeout keeps the bot loop from hanging on a dead connection.
        result = requests.get(url, timeout=30)
    except Exception:
        save_err('get_ip')
        return None
    if result.status_code != 200:
        return None
    soup = BeautifulSoup(result.content, 'html.parser')
    item = soup.find("li", attrs={"class": "client__item client__item_type_ipv4"})
    if item is None:
        return None
    div = item.find('div')
    return div.text if div is not None else None
def changeIp():
    """Force the router to drop and re-establish the WAN connection.

    Uses the TP-Link status page Disconnect/Connect actions from misc; the
    sleeps give the router time to tear down and bring up the link.
    """
    try:
        # Timeouts keep an unresponsive router from hanging the bot forever.
        requests.get(misc.DisconnectUrl, auth=(misc.login, misc.psw), timeout=30)
        sleep(6)
        requests.get(misc.ConnectUrl, auth=(misc.login, misc.psw), timeout=30)
        sleep(6)
    except Exception:
        save_err('change ip')
def change_led_state():
    """Toggle the LED pin and remember the new level in the global led_state."""
    global led_state
    try:
        led_state = not led_state
        gpio.output(led, led_state)
    except Exception:
        save_err('led')
def get_button_state():
    """Sample the button pin into the global button_state and return it.

    Returns None implicitly when the GPIO read fails (the error is logged).
    """
    global button
    global button_state
    try:
        button_state = gpio.input(button)
        return button_state
    except Exception:
        save_err('button')
def ping_url(url):
    """Return True if a single ICMP ping to *url* reports 0% packet loss."""
    # Pass '-c' and '1' as separate argv entries: the old single '-c 1'
    # token only worked by accident of getopt parsing. communicate() both
    # collects stdout and reaps the child (the old code left a zombie).
    p = Popen(['ping', '-c', '1', url], stdout=PIPE, stderr=None)
    out, _ = p.communicate()
    text = out.decode('utf-8', errors='replace') if out else ''
    # Leading ', ' prevents '100% packet loss' from matching.
    return bool(re.search(r', 0% packet loss,', text))
def check_yota_connections():
    """Re-activate the Yota 'light' tariff once if internet access is down.

    Performs a single connectivity check and returns; the caller (main loop)
    invokes this roughly once a minute. The previous version spun in a
    `while True` busy loop and never returned, starving Telegram polling.
    """
    activate_url = 'http://hello.yota.ru/php/go.php'
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36'}
    payload = {'accept_lte':'1', 'redirurl': 'http://www.yota.ru/','city':'khab','connection_type':'light','service_id':'Sliders_Free_Temp'}
    try:
        inetstat = ping_url('ya.ru')
        if inetstat is False:
            with requests.Session() as s:
                s.post(activate_url, headers=headers, timeout=(10, 10), data=payload)
    except Exception:
        save_err('yota_activate_light')
def main():
    """Main bot loop: poll Telegram, react to commands, watch the button.

    Roughly every 60 seconds (6 iterations x 10 s sleep) the Yota
    connection is checked and re-activated if needed.
    """
    print('Bot is started')
    global button_state
    global led_state
    last_button_state = 0
    last_chat_id = None  # most recent chat seen, used for button alerts
    loop = 0
    while True:
        answer = get_message()
        if answer is not None:
            chat_id = answer['chat_id']
            last_chat_id = chat_id
            text = answer['text']
            if text == '/getip':
                server_ip = get_ip()
                if server_ip is not None:
                    send_message(chat_id, server_ip)
            if text == '/led':
                change_led_state()
                send_message(chat_id, str(led_state))
            if text == '/changeip':
                changeIp()
                server_ip = get_ip()
                if server_ip is not None:
                    send_message(chat_id, 'new ip = ' + server_ip)
            if text == '/sendphoto':
                send_photo(chat_id)
            if text == '/ping':
                send_message(chat_id, str(ping_url('ya.ru')))
        get_button_state()
        # Only notify about button changes once a chat is known: the old
        # code raised NameError (undefined chat_id) when the button changed
        # before any message had ever arrived.
        if button_state is not None and button_state != last_button_state:
            if last_chat_id is not None:
                send_message(last_chat_id, str(button_state))
            last_button_state = button_state
            print(str(button_state))
        sleep(10)
        loop += 1
        if loop >= 6:  # roughly once per minute
            check_yota_connections()
            loop = 0
# Run the bot only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
|
from django.contrib.gis.geoip import GeoIPException
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template import loader
from django.template import Context
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core import serializers
from django.core.urlresolvers import reverse
from django_countries import countries
from api.processors import get_event_by_id
from api.processors import get_filtered_events
from api.processors import get_approved_events
from api.processors import get_pending_events
from api.processors import get_created_events
from api.processors import get_next_or_previous
from api.processors import get_nearby_events
from web.forms.event_form import AddEventForm
from web.forms.event_form import SearchEventForm
from web.processors.event import get_initial_data
from web.processors.event import change_event_status
from web.processors.event import reject_event_status
from web.processors.event import create_or_update_event
from web.processors.user import update_user_email
from web.processors.user import get_ambassadors
from web.processors.event import get_client_ip
from web.processors.event import get_lat_lon_from_user_ip
from web.processors.event import list_countries
from web.processors.event import get_country
from web.processors.event import get_country_from_user_ip
from web.processors.event import count_approved_events_for_country
from web.processors.media import process_image
from web.processors.media import ImageSizeTooLargeException
from web.processors.media import UploadImageError
from web.decorators.events import can_edit_event
from web.decorators.events import can_moderate_event
from web.decorators.events import is_ambassador
from django.http import Http404
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
"""
Do not Query the database directly from te view.
Use a processors file within the api app, put all of your queries there and
then call your newly created function in view!!! .-Erika
"""
def index(request):
    """Landing page: map centred on the visitor's geolocated position,
    plus the ambassadors for the visitor's country."""
    template = 'pages/index.html'
    past = request.GET.get('past', 'no')
    user_ip = get_client_ip(forwarded=request.META.get('HTTP_X_FORWARDED_FOR'),
                            remote=request.META.get('REMOTE_ADDR'))
    country = get_country_from_user_ip(user_ip)
    # Single fallback coordinate pair (previously duplicated as two magic
    # constants in both the success and the exception path).
    default_lat_lon = (58.08695, 5.58121)
    try:
        lan_lon = get_lat_lon_from_user_ip(user_ip) or default_lat_lon
    except GeoIPException:
        lan_lon = default_lat_lon
    ambassadors = get_ambassadors(country['country_code'])
    all_countries = list_countries()
    return render_to_response(
        template, {
            'lan_lon': lan_lon,
            'country': country,
            # all_countries minus two CUSTOM_COUNTRY_ENTRIES
            'all_countries': all_countries[2:],
            'past': past,
            'ambassadors': ambassadors,
        },
        context_instance=RequestContext(request))
@login_required
def add_event(request):
    """Create a new event from a submitted AddEventForm.

    On success, updates the user's email if a different one was entered,
    creates the event, and redirects to its page. Oversized (> 256 kB) or
    broken images are reported via the messages framework.
    """
    if request.method == 'POST':
        event_form = AddEventForm(data=request.POST, files=request.FILES)
        if event_form.is_valid():
            picture = request.FILES.get('picture', None)
            event_data = {}
            try:
                if picture:
                    # Uploaded pictures are limited to 256 kB.
                    if picture.size > (256 * 1024):
                        raise ImageSizeTooLargeException('Image size too large.')
                    event_data['picture'] = process_image(picture)
                event_data.update(event_form.cleaned_data)
                event_data['creator'] = request.user
                # checking if user entered a different email than in her profile
                if request.user.email != event_data['user_email']:
                    update_user_email(request.user.id, event_data['user_email'])
                event_data.pop('user_email')
                event = create_or_update_event(**event_data)
                t = loader.get_template('alerts/thank_you.html')
                c = Context({'event': event, })
                messages.info(request, t.render(c))
                return HttpResponseRedirect(reverse('web.view_event', args=[event.pk, event.slug]))
            except ImageSizeTooLargeException:
                # Fixed typo in the user-facing message ("agin" -> "again").
                messages.error(request, 'The image is just a bit too big for us. '
                                        'Please reduce your image size and try again.')
            except UploadImageError as e:
                messages.error(request, e.message)
    else:
        event_form = AddEventForm(initial={'user_email': request.user.email})
    return render_to_response("pages/add_event.html", {
        'form': event_form,
    }, context_instance=RequestContext(request))
@login_required
@can_edit_event
def edit_event(request, event_id):
    """Edit an existing event.

    A GET renders the form pre-filled from the event; a valid POST updates
    the event (keeping, replacing or dropping the picture depending on the
    hidden `picture` check field) and redirects to the event page.
    """
    event = get_event_by_id(event_id)
    user = request.user
    initial = get_initial_data(event)
    initial['user_email'] = request.user.email
    event_data = {}
    if request.method == 'POST':
        event_form = AddEventForm(data=request.POST, files=request.FILES)
    else:
        # Unbound form: is_valid() below returns False, so the update
        # branch is skipped on GET.
        event_form = AddEventForm(initial=initial)
    existing_picture = event.picture
    if event_form.is_valid():
        # picture_check works with jasny bootstrap magix
        picture_check = request.POST.get('picture')
        picture = request.FILES.get('picture', None)
        event_data = event_form.cleaned_data
        event_data['creator'] = request.user
        # checking if user entered a different email than in her profile
        if user.email != event_data['user_email']:
            update_user_email(user.id, event_data['user_email'])
        event_data.pop('user_email')
        try:
            if picture:
                # Uploaded pictures are limited to 256 kB.
                if picture.size > (256 * 1024):
                    raise ImageSizeTooLargeException('Image size too large.')
                event_data['picture'] = process_image(picture)
            elif picture_check == "nochange":
                event_data['picture'] = existing_picture
            else:
                del event_data['picture']
            create_or_update_event(event_id, **event_data)
            return HttpResponseRedirect(reverse('web.view_event',
                                        kwargs={'event_id': event.id, 'slug': event.slug}))
        except ImageSizeTooLargeException:
            # Fixed typo in the user-facing message ("agin" -> "again").
            messages.error(request, 'The image is just a bit too big for us (must be up to 256 kb). '
                                    'Please reduce your image size and try again.')
        except UploadImageError as e:
            messages.error(request, e.message)
    return render_to_response(
        'pages/add_event.html', {
            'form': event_form,
            'address': event_data.get('location', None),
            'editing': True,
            'picture_url': event.picture,
        }, context_instance=RequestContext(request))
def view_event_by_country(request, country_code):
    """Render the list of approved events for the given country."""
    context = {
        'event_list': get_approved_events(country_code=country_code),
        'country_code': country_code,
    }
    return render_to_response('pages/list_events.html', context,
                              context_instance=RequestContext(request))
def view_event(request, event_id, slug):
    """Render an event's detail page with the next event and nearby events.

    Raises Http404 when the event does not exist. (Fix: removed the unused
    exception variable `e`.)
    """
    try:
        event = get_event_by_id(event_id)
    except ObjectDoesNotExist:
        raise Http404
    next_event = get_next_or_previous(event, country_code=event.country)
    nearby = get_nearby_events(event, limit=4)
    return render_to_response(
        'pages/view_event.html', {
            'event': event,
            'next_event': next_event,
            'nearby': nearby
        }, context_instance=RequestContext(request))
def view_event_by_id(request, event_id):
    """Redirect an id-only event URL to the canonical id+slug URL.

    Raises Http404 when the event does not exist. (Fix: removed the unused
    exception variable `e`.)
    """
    try:
        event = get_event_by_id(event_id)
    except ObjectDoesNotExist:
        raise Http404
    return redirect(view_event, event_id, event.slug)
@login_required
@is_ambassador
def list_pending_events(request, country_code):
    """Display a list of pending events."""
    active_page = request.GET.get('page', '')
    if request.user.is_staff:
        # staff see pending events from every country, ordered by country code
        event_list = sorted(get_pending_events(past=True),
                            key=lambda ev: ev.country.code)
    else:
        event_list = get_pending_events(country_code=country_code, past=True)
    country_name = unicode(dict(countries)[country_code])
    context = {
        'event_list': event_list,
        'status': 'pending',
        'country_code': country_code,
        'country_name': country_name,
        'active_page': active_page,
    }
    return render_to_response('pages/list_events.html', context,
                              context_instance=RequestContext(request))
@login_required
@is_ambassador
def list_approved_events(request, country_code):
    """Display a list of approved events."""
    context = {
        'event_list': get_approved_events(country_code=country_code, past=True),
        'status': 'approved',
        'country_code': country_code,
        'country_name': unicode(dict(countries)[country_code]),
    }
    return render_to_response('pages/list_events.html', context,
                              context_instance=RequestContext(request))
@login_required
def created_events(request):
    """Display the events created by the logged-in user.

    Fix: the original docstring was copy-pasted from the pending-events view
    and wrongly said "pending events".
    """
    events = get_created_events(creator=request.user, past=True)
    return render_to_response(
        'pages/list_user_events.html',
        {'event_list': events},
        context_instance=RequestContext(request))
def search_events(request):
    """Faceted event search.

    Resolves the country from ?country_code= / ?country=, falling back to
    GeoIP on the client address; filters by free-text query, theme, audience
    and (optionally) past events. AJAX requests get only the results partial.
    """
    country_code = request.GET.get('country_code', None)
    if not country_code:
        country_code = request.GET.get('country', None)
    if not country_code:
        # no country given: geolocate the client's IP address
        user_ip = get_client_ip(forwarded=request.META.get('HTTP_X_FORWARDED_FOR'),
                                remote=request.META.get('REMOTE_ADDR'))
        country = get_country(country_code, user_ip)
        country_code = country['country_code']
    past = request.GET.get('past', 'no')
    past_events = False
    if past == 'yes':
        past_events = True
    search_query = request.GET.get('q', '')
    page = request.GET.get('page', None)
    theme_filter = request.GET.get('theme', None)
    audience_filter = request.GET.get('audience', None)
    template = 'pages/search_events.html'
    page_template = 'pages/ajax_faceted_search_events.html'
    # NOTE(review): the form receives the raw 'yes'/'no' string while the
    # query below uses the boolean -- presumably SearchEventForm expects the
    # string form; confirm against the form's definition.
    form = SearchEventForm(country_code=country_code, past_events=past, search=search_query)
    events = get_filtered_events(search_query, country_code, theme_filter,audience_filter, past_events)
    if request.is_ajax():
        # infinite-scroll requests render only the results partial
        return render_to_response(
            page_template,
            {
                'events':events,
                'page': page
            },
            context_instance=RequestContext(request))
    return render_to_response(
        template,
        {
            'page_template': page_template,
            'events': events,
            'form': form,
            'country': country_code,
        },
        context_instance=RequestContext(request))
def scoreboard(request):
    """Render the per-country counts of approved events."""
    return render_to_response(
        'pages/scoreboard.html',
        {'counts': count_approved_events_for_country()},
        context_instance=RequestContext(request))
@login_required
@can_moderate_event
def change_status(request, event_id):
    """Change the event's status (via change_event_status) and redirect
    back to its detail page."""
    event = change_event_status(event_id)
    detail_url = reverse('web.view_event', args=[event_id, event.slug])
    return HttpResponseRedirect(detail_url)
@login_required
@can_moderate_event
def reject_status(request, event_id):
    """Reject the event (via reject_event_status) and redirect back to its
    detail page."""
    event = reject_event_status(event_id)
    detail_url = reverse('web.view_event', args=[event_id, event.slug])
    return HttpResponseRedirect(detail_url)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import datetime
from frappe import _
import frappe
import frappe.database
import frappe.utils
from frappe.utils import cint
import frappe.utils.user
from frappe import conf
from frappe.sessions import Session, clear_sessions, delete_session
from frappe.modules.patch_handler import check_session_stopped
from frappe.translate import get_lang_code
from frappe.utils.password import check_password
from frappe.core.doctype.authentication_log.authentication_log import add_authentication_log
from urllib import quote
class HTTPRequest:
	"""Per-request bootstrap: resolves the client IP, language, cookies,
	database connection, login session and CSRF validation, in that order."""
	def __init__(self):
		# Get Environment variables
		self.domain = frappe.request.host
		if self.domain and self.domain.startswith('www.'):
			self.domain = self.domain[4:]
		# client IP: first X-Forwarded-For entry wins, then REMOTE_ADDR
		if frappe.get_request_header('X-Forwarded-For'):
			frappe.local.request_ip = (frappe.get_request_header('X-Forwarded-For').split(",")[0]).strip()
		elif frappe.get_request_header('REMOTE_ADDR'):
			frappe.local.request_ip = frappe.get_request_header('REMOTE_ADDR')
		else:
			frappe.local.request_ip = '127.0.0.1'
		# language
		self.set_lang()
		# load cookies
		frappe.local.cookie_manager = CookieManager()
		# set db
		self.connect()
		# login
		frappe.local.login_manager = LoginManager()
		# explicit ?_lang= override applied after login
		if frappe.form_dict._lang:
			lang = get_lang_code(frappe.form_dict._lang)
			if lang:
				frappe.local.lang = lang
		self.validate_csrf_token()
		# write out latest cookies
		frappe.local.cookie_manager.init_cookies()
		# check status
		check_session_stopped()
	def validate_csrf_token(self):
		"""Reject a POST whose CSRF token does not match the session's.

		Skipped when the session has no token, for mobile-device sessions,
		or when `ignore_csrf` is set in site config.
		"""
		if frappe.local.request and frappe.local.request.method=="POST":
			if not frappe.local.session.data.csrf_token \
				or frappe.local.session.data.device=="mobile" \
				or frappe.conf.get('ignore_csrf', None):
				# not via boot
				return
			# token may come from the header or, as a fallback, the form dict
			csrf_token = frappe.get_request_header("X-Frappe-CSRF-Token")
			if not csrf_token and "csrf_token" in frappe.local.form_dict:
				csrf_token = frappe.local.form_dict.csrf_token
				del frappe.local.form_dict["csrf_token"]
			if frappe.local.session.data.csrf_token != csrf_token:
				frappe.local.flags.disable_traceback = True
				frappe.throw(_("Invalid Request"), frappe.CSRFTokenError)
	def set_lang(self):
		"""Guess and set the request language on frappe.local."""
		from frappe.translate import guess_language
		frappe.local.lang = guess_language()
	def get_db_name(self):
		"""get database name from conf"""
		return conf.db_name
	def connect(self, ac_name = None):
		"""connect to db, from ac_name or db_name"""
		frappe.local.db = frappe.database.Database(user = self.get_db_name(), \
			password = getattr(conf,'db_password', ''))
class LoginManager:
	"""Authenticate the request and establish or resume the user session.

	Instantiated once per request by HTTPRequest: a `cmd=login` request (or a
	request to /api/method/login) runs the full login flow; any other request
	resumes the existing session, falling back to Guest when none exists.
	"""
	def __init__(self):
		self.user = None
		self.info = None
		self.full_name = None
		self.user_type = None
		if frappe.local.form_dict.get('cmd')=='login' or frappe.local.request.path=="/api/method/login":
			self.login()
			self.resume = False
			# run login triggers
			self.run_trigger('on_session_creation')
		else:
			try:
				self.resume = True
				self.make_session(resume=True)
				self.set_user_info(resume=True)
			except AttributeError:
				# no resumable session -- treat the request as Guest
				self.user = "Guest"
				self.make_session()
				self.set_user_info()
	def login(self):
		"""Validate credentials from the form dict and set up the session."""
		# clear cache
		frappe.clear_cache(user = frappe.form_dict.get('usr'))
		self.authenticate()
		self.post_login()
	def post_login(self):
		"""Run post-authentication checks and create the session."""
		self.run_trigger('on_login')
		self.validate_ip_address()
		self.validate_hour()
		self.make_session()
		self.set_user_info()
	def set_user_info(self, resume=False):
		"""Load user details and populate response fields and cookies."""
		# set sid again
		frappe.local.cookie_manager.init_cookies()
		self.info = frappe.db.get_value("User", self.user,
			["user_type", "first_name", "last_name", "user_image"], as_dict=1)
		self.full_name = " ".join(filter(None, [self.info.first_name,
			self.info.last_name]))
		self.user_type = self.info.user_type
		if self.info.user_type=="Website User":
			frappe.local.cookie_manager.set_cookie("system_user", "no")
			if not resume:
				frappe.local.response["message"] = "No App"
				frappe.local.response["home_page"] = get_website_user_home_page(self.user)
		else:
			frappe.local.cookie_manager.set_cookie("system_user", "yes")
			if not resume:
				frappe.local.response['message'] = 'Logged In'
				frappe.local.response["home_page"] = "/desk"
		if not resume:
			frappe.response["full_name"] = self.full_name
			# redirect information
			redirect_to = frappe.cache().hget('redirect_after_login', self.user)
			if redirect_to:
				frappe.local.response["redirect_to"] = redirect_to
				frappe.cache().hdel('redirect_after_login', self.user)
		frappe.local.cookie_manager.set_cookie("full_name", self.full_name)
		frappe.local.cookie_manager.set_cookie("user_id", self.user)
		frappe.local.cookie_manager.set_cookie("user_image", self.info.user_image or "")
	def make_session(self, resume=False):
		"""Create (or resume) the Session object and bind it to frappe.local."""
		# start session
		frappe.local.session_obj = Session(user=self.user, resume=resume,
			full_name=self.full_name, user_type=self.user_type)
		# reset user if changed to Guest
		self.user = frappe.local.session_obj.user
		frappe.local.session = frappe.local.session_obj.data
		self.clear_active_sessions()
	def clear_active_sessions(self):
		"""Clear other sessions of the current user when `deny_multiple_sessions`
		is enabled (the original docstring had this inverted)."""
		if not (cint(frappe.conf.get("deny_multiple_sessions")) or cint(frappe.db.get_system_setting('deny_multiple_sessions'))):
			return
		if frappe.session.user != "Guest":
			clear_sessions(frappe.session.user, keep_current=True)
	def authenticate(self, user=None, pwd=None):
		"""Resolve the user (optionally by mobile number) and verify the password."""
		if not (user and pwd):
			user, pwd = frappe.form_dict.get('usr'), frappe.form_dict.get('pwd')
		if not (user and pwd):
			self.fail('Incomplete login details', user=user)
		if cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number")):
			user = frappe.db.get_value("User", filters={"mobile_no": user}, fieldname="name") or user
		self.check_if_enabled(user)
		self.user = self.check_password(user, pwd)
	def check_if_enabled(self, user):
		"""raise exception if user not enabled"""
		if user=='Administrator': return
		if not cint(frappe.db.get_value('User', user, 'enabled')):
			self.fail('User disabled or missing', user=user)
	def check_password(self, user, pwd):
		"""check password; returns the user name in its correct case"""
		try:
			# returns user in correct case
			return check_password(user, pwd)
		except frappe.AuthenticationError:
			self.fail('Incorrect password', user=user)
	def fail(self, message, user="NA"):
		"""Record the failed attempt and abort with AuthenticationError."""
		frappe.local.response['message'] = message
		add_authentication_log(message, user, status="Failed")
		frappe.db.commit()
		raise frappe.AuthenticationError
	def run_trigger(self, event='on_login'):
		"""Call every hook registered for *event*, passing this login manager."""
		for method in frappe.get_hooks().get(event, []):
			frappe.call(frappe.get_attr(method), login_manager=self)
	def validate_ip_address(self):
		"""check if IP Address is valid"""
		ip_list = frappe.db.get_value('User', self.user, 'restrict_ip', ignore=True)
		if not ip_list:
			return
		ip_list = ip_list.replace(",", "\n").split('\n')
		ip_list = [i.strip() for i in ip_list]
		for ip in ip_list:
			# prefix match, so entries may be partial addresses/networks
			if frappe.local.request_ip.startswith(ip):
				return
		frappe.throw(_("Not allowed from this IP Address"), frappe.AuthenticationError)
	def validate_hour(self):
		"""check if user is logging in during restricted hours"""
		login_before = int(frappe.db.get_value('User', self.user, 'login_before', ignore=True) or 0)
		login_after = int(frappe.db.get_value('User', self.user, 'login_after', ignore=True) or 0)
		if not (login_before or login_after):
			return
		from frappe.utils import now_datetime
		current_hour = int(now_datetime().strftime('%H'))
		if login_before and current_hour > login_before:
			frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError)
		if login_after and current_hour < login_after:
			frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError)
	def login_as_guest(self):
		"""login as guest"""
		self.login_as("Guest")
	def login_as(self, user):
		"""Switch the session to *user* and run the post-login flow."""
		self.user = user
		self.post_login()
	def logout(self, arg='', user=None):
		"""End the given (default: current) user's session(s)."""
		if not user: user = frappe.session.user
		self.run_trigger('on_logout')
		if user == frappe.session.user:
			delete_session(frappe.session.sid, user=user, reason="User Manually Logged Out")
			self.clear_cookies()
		else:
			clear_sessions(user)
	def clear_cookies(self):
		# delegates to the module-level helper
		clear_cookies()
class CookieManager:
	"""Accumulates cookies to be written or removed on the outgoing response."""
	def __init__(self):
		self.cookies = {}
		self.to_delete = []
	def init_cookies(self):
		"""Queue the session cookies (sid, country) for the active session."""
		if not frappe.local.session.get('sid'): return
		# sid expires in 3 days
		sid_expiry = datetime.datetime.now() + datetime.timedelta(days=3)
		if frappe.session.sid:
			self.cookies["sid"] = {"value": frappe.session.sid, "expires": sid_expiry}
		if frappe.session.session_country:
			self.cookies["country"] = {"value": frappe.session.get("session_country")}
	def set_cookie(self, key, value, expires=None):
		"""Queue a single cookie to be set on the response."""
		self.cookies[key] = {"value": value, "expires": expires}
	def delete_cookie(self, to_delete):
		"""Queue one cookie name (or a list/tuple of names) for deletion."""
		keys = to_delete if isinstance(to_delete, (list, tuple)) else [to_delete]
		self.to_delete.extend(keys)
	def flush_cookies(self, response):
		"""Write queued cookies onto *response*; expire queued deletions."""
		for name, opts in self.cookies.items():
			encoded = quote((opts.get("value") or "").encode('utf-8'))
			response.set_cookie(name, encoded, expires=opts.get("expires"))
		# deletions are done by setting an already-passed expiry ("yesterday")
		gone = datetime.datetime.now() - datetime.timedelta(days=1)
		for name in set(self.to_delete):
			response.set_cookie(name, "", expires=gone)
@frappe.whitelist()
def get_logged_user():
	"""Return the user id of the current session (whitelisted API endpoint)."""
	return frappe.session.user
def clear_cookies():
	"""Blank the session id and queue deletion of all auth-related cookies."""
	if hasattr(frappe.local, "session"):
		frappe.session.sid = ""
	frappe.local.cookie_manager.delete_cookie(["full_name", "user_id", "sid", "user_image", "system_user"])
def get_website_user_home_page(user):
	"""Return the landing page path for a website user.

	Uses the last `get_website_user_home_page` hook when an app provides
	one, otherwise falls back to '/me'.
	"""
	hooks = frappe.get_hooks('get_website_user_home_page')
	if not hooks:
		return '/me'
	# the last registered hook wins
	home_page = frappe.get_attr(hooks[-1])(user)
	return '/' + home_page.strip('/')
|
|
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import OSIM.Simulation.Utils as u
fig = plt.figure()
Ix = fig.gca(projection='3d')
# NOTE(review): fig.gca() returns the *same* axes object on every call, so
# Ix, dUC, dUE and dUB all alias one 3D axes. Only Ix is plotted into below,
# so this is harmless here -- confirm before plotting into the others.
dUC = fig.gca(projection='3d')
dUE = fig.gca(projection='3d')
dUB = fig.gca(projection='3d')
vbic_is = 1  # TODO: !!
vbic_is_mm = 1  # TODO: !!!
# display parameters
raster = 0.001
BMIN = 0.7
BSHOWMIN = BMIN
BMAX = 0.85
BSHOWMAX = BMAX  # 1*1.2
CMIN = -0.1
CSHOWMIN = CMIN
CSHOWMAX = 0.3
OFFSET = 0  # 0.025
EMIN = -0.05
EMAX = 0.3
PRINT = False
Nx = 1
NF = 1.014
NR = nr = 1.01
# Transport saturation current. Fix: the original wrapped this constant
# expression in eval() of a string for no reason; it is now written directly
# (identical value, no dynamic evaluation).
IS = 3.1E-17 * (Nx * 0.25) ** 1.025 * vbic_is * (1 + (vbic_is_mm - 1) / np.sqrt(Nx))
ISSR = 1  # TODO: parameter from VBIC 1.2
UT = 0.026
VEF = 193
VER = 5.3
IKF = 0.009 * (Nx * 0.25)
IKR = 0.01 * (Nx * 0.25)
NKF = 0.5
diffh = 1e-10  # finite-difference step for the numeric derivatives below
FC = 0.97
AJE = -0.5
AJC = -0.5
AJS = -0.5
PC = 0.62
PS = 0.42
PE = 0.9
ME = 0.105
MC = 0.12
VRT = 0  # TODO: parameter from VBIC 1.2
ART = 0.1  # TODO: parameter from VBIC 1.2
Udlim = 1.4
def qj(V, P, M, FC, A):
    """Junction depletion charge.

    V: junction voltage, P: built-in potential, M: grading coefficient,
    FC: forward-bias depletion coefficient, A: smoothing parameter that
    selects the model -- A <= 0 uses the SPICE regional model, A > 0 the
    single-piece smoothed model (offset so that qj(0) == 0).
    """
    if A <= 0.0:
        # SPICE regional depletion capacitance model
        dvh = V - FC * P
        if dvh > 0.0:
            low = P * (1.0 - (1.0 - FC) ** (1.0 - M)) / (1.0 - M)
            high = dvh * (1.0 - FC + 0.5 * M * dvh / P) / ((1.0 - FC) ** (1.0 + M))
        else:
            low = P * (1.0 - (1.0 - V / P) ** (1.0 - M)) / (1.0 - M)
            high = 0.0
        return low + high
    # Single-piece model based on c = 1/(1-V/P)^M with sqrt limiting for
    # C-inf continuity, made monotonically increasing (physically incorrect
    # but numerically benign near where depletion and diffusion capacitances
    # are comparable), and offset so that qj(V=0) == 0.
    def _limited(v):
        dv = v - P * FC
        return 0.5 * (dv - np.sqrt(dv * dv + A)) + P * FC
    vl0 = _limited(0.0)
    q0 = -P * (1.0 - vl0 / P) ** (1.0 - M) / (1.0 - M)
    vl = _limited(V)
    qlo = -P * (1.0 - vl / P) ** (1.0 - M) / (1.0 - M)
    return qlo + (1.0 - FC) ** (-M) * (V - vl + vl0) - q0
def q1(UBE, UBC):
    """Normalized base-charge term from the B-E and B-C depletion charges."""
    charge_be = qj(UBE, PE, ME, FC, AJE)
    charge_bc = qj(UBC, PC, MC, FC, AJC)  # TODO ei
    return 1 + charge_be / VER + charge_bc / VEF
def q2(Itf, Itr, IKR, IKF):
    """High-level-injection term: transport currents over their knee currents."""
    forward_term = Itf / IKF
    reverse_term = Itr / IKR
    return forward_term + reverse_term
def qb(q1, q2, NKF):  # TODO: there is a second form of this equation (see p. 99)
    """Normalized base charge combining the Early (q1) and injection (q2) terms."""
    return 0.5 * q1 * (1 + (1 + 4 * q2) ** NKF)
def _ITF(BI, EI, CI):
    """Forward transport current from internal node voltages (base, emitter,
    collector).

    Default path: ideal diode law on the B-E junction via the limited
    exponential u.exp. In the saturated region (B above both E and C) with
    C below E an extra linearized term is added -- NOTE(review): mirrors the
    slope construction in _ITR; confirm against the VBIC model equations.
    """
    if ((BI - EI) > 0) and ((BI - CI) > 0):
        if(CI-EI < 0):
            a = IS/(NR*UT) * (u.exp(-EI,1/(NR*UT),Udlim)*BI)
            b = IS * (u.exp((BI - EI),1 /(NF * UT),Udlim) - 1.0)
            return a+b
    #print("BI: "+str(BI)+" CI: "+str(CI)+" EI: "+str(EI))
    #print("hier1 "+str(self.sys.curNewtonIteration))
    return IS * (u.exp((BI - EI),1 /(NF * UT),Udlim) - 1.0)
def _ITR( BI, EI, CI):
    """Reverse transport current from internal node voltages (base, emitter,
    collector).

    Default path: ideal diode law on the B-C junction via the limited
    exponential u.exp. In the saturated region (B above both E and C) with
    C below E a linearization in CI is returned instead -- presumably to aid
    Newton convergence; TODO confirm against the VBIC model equations.
    """
    if ((BI - EI) > 0) and ((BI - CI) > 0):
        if(CI-EI < 0):
            # value at CI=0 plus slope m times CI
            start = IS * (u.exp((BI - 0),1 /(NR * UT),Udlim) - 1.0)
            m = IS/(NR*UT) * (u.exp(-EI,1/(NR*UT),Udlim)
                *BI - u.exp(BI,1/(NR*UT),Udlim))
            return start + m*CI
        if(CI-EI > 0):
            pass
    #print("BI: "+str(BI)+" CI: "+str(CI)+" EI: "+str(EI))
    #print("hier2 "+str(self.sys.curNewtonIteration))
    return IS * (u.exp((BI - CI),1 /(NR * UT),Udlim) - 1.0)
def _ITF_r(B, E):
    """Ideal forward diode current for junction voltage B - E (no region terms)."""
    v_be = B - E
    return IS * (u.exp(v_be, 1 / (NF * UT), Udlim) - 1.0)
def _ITR_r(B, E):
    """Ideal reverse diode current for junction voltage B - E (no region terms)."""
    v_bx = B - E
    return IS * (u.exp(v_bx, 1 / (NR * UT), Udlim) - 1.0)
def tI_T(B,C,E):
    """Total transport current I_T = (Itf - Itr) / qb at node voltages B, C, E.

    In the normally-saturated region (B > E, B > C, C < E) the base charge
    qb is recomputed from symmetric B-E terms; the flag `n` only selects the
    diagnostic message printed when PRINT is enabled (messages are German:
    "nicht uebersteuert" = not saturated, "normal/invers uebersteuert" =
    normally/inversely saturated).
    """
    q_1 = q1(B-E,B-C)
    Itr = _ITR(B,E,C)
    Itf = _ITF(B,E,C)
    q_2 = q2(Itf,Itr,IKR,IKF)
    q_b = qb(q_1,q_2,NKF)
    n = 0
    if ((B > E) and ( B > C)):
        if(C < E):
            # normally saturated: rebuild qb from B-E-only terms
            q1_r = q1(B-E, B-E)
            q2_r = q2(_ITF_r(B,E),_ITR_r(B,E),IKR,IKF)
            q_b = qb(q1_r,q2_r,NKF)
            n = 1
        if(C > E):
            n = 2
    if(PRINT):
        if(n == 0):
            print("nicht uebersteuert")
        if(n == 1):
            print("normal uebersteurt: B:%G, C:%G, E:%G"%(B,C,E))
        if(n == 2):
            print("invers uebersteuert: B:%G, C:%G, E:%G"%(B,C,E))
    return (Itf-Itr)/q_b
# Sweep grid: b in [BSHOWMIN, BMAX), c in [CMIN, CSHOWMAX), step `raster`.
xB = np.arange(BSHOWMIN, BMAX, raster)
yC = np.arange(CMIN, CSHOWMAX, raster)
B, C = np.meshgrid(xB, yC)
# result grids: current I and its numeric partial derivatives
I = np.zeros((len(yC),len(xB)))
dub = np.zeros((len(yC),len(xB)))
duc = np.zeros((len(yC),len(xB)))
due = np.zeros((len(yC),len(xB)))
dI = np.zeros(((len(yC),len(xB))))
for cidx, c in enumerate(yC):
    for bidx,b in enumerate(xB):
        # transport current at (b, c, e=0), plus one-sided finite
        # differences with step diffh for the three node sensitivities
        current = tI_T(b, c, 0)
        I[cidx][bidx] = current
        db_current = tI_T(b+diffh,c,0)
        dc_current = tI_T(b,c+diffh,0)
        de_current = tI_T(b,c,0+diffh)
        dub[cidx][bidx] = (db_current-current)/diffh # = dI_T/dub
        duc[cidx][bidx] = (dc_current-current)/diffh # = dI_T/duc
        due[cidx][bidx] = (de_current-current)/diffh
# ax.plot_surface(B, C, I, rstride=8, cstride=8, alpha=0.3)
Ix.plot_wireframe(B, C, I, rstride=15, cstride=3, alpha=0.3)
#b = dUB.plot_wireframe(B,C,dub,rstride=15, cstride=3, alpha=0.3)
#c = dUC.plot_wireframe(B,C,duc,rstride=15, cstride=3, alpha=0.3)
#e = dUE.plot_wireframe(B,C,due,rstride=15, cstride=3, alpha=0.3)
#ax.plot_wireframe(B, C, dI, rstride=15, cstride=3, alpha=0.3)
#cset = ax.contour(B, C, I, zdir='x', offset=BMAX, cmap=cm.coolwarm)
#cset = ax.contour(bB, bC, bI, zdir='y', offset=0.3, cmap=cm.coolwarm)
#cset = ax.contour(B, C, I, zdir='y', offset=1, cmap=cm.coolwarm)
'''
dUC.set_xlabel('B')
dUC.set_ylabel('C')
dUC.set_zlabel('duc')
dUB.set_xlabel('B')
dUB.set_ylabel('C')
dUB.set_zlabel('dub')
dUE.set_xlabel('B')
dUE.set_ylabel('C')
dUE.set_zlabel('due')
'''
Ix.set_xlabel('B')
Ix.set_xlim(BSHOWMIN, BSHOWMAX)
Ix.set_ylabel('C')
Ix.set_ylim(CMIN,CSHOWMAX)
#Ix.set_ylabel('E')
#Ix.set_ylim(EMIN, EMAX)
Ix.set_zlabel('I')
Ix.set_zlim(np.amin(I), np.amax(I))
plt.show()
|
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for migration / resize operations.
"""
import os
import re
from nova import block_device
import nova.conf
from nova import exception
from nova.virt import configdrive
from nova.virt import driver
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from compute_hyperv.i18n import _
from compute_hyperv.nova import block_device_manager
from compute_hyperv.nova import constants
from compute_hyperv.nova import imagecache
from compute_hyperv.nova import pathutils
from compute_hyperv.nova import vmops
from compute_hyperv.nova import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class MigrationOps(object):
_ADMINISTRATIVE_SHARE_RE = re.compile(r'\\\\.*\\[a-zA-Z]\$\\.*')
    def __init__(self):
        # os-win / compute_hyperv helpers for VM, VHD, path, volume,
        # image-cache, block-device, migration and metrics operations
        self._vmutils = utilsfactory.get_vmutils()
        self._vhdutils = utilsfactory.get_vhdutils()
        self._pathutils = pathutils.PathUtils()
        self._volumeops = volumeops.VolumeOps()
        self._vmops = vmops.VMOps()
        self._imagecache = imagecache.ImageCache()
        self._block_dev_man = block_device_manager.BlockDeviceInfoManager()
        self._migrationutils = utilsfactory.get_migrationutils()
        self._metricsutils = utilsfactory.get_metricsutils()
    def _move_vm_files(self, instance):
        """Back up the instance's files into a fresh *_revert* dir (plus an
        export dir holding the VM config files) and return the revert path."""
        instance_path = self._pathutils.get_instance_dir(instance.name)
        revert_path = self._pathutils.get_instance_migr_revert_dir(
            instance_path, remove_dir=True, create_dir=True)
        export_path = self._pathutils.get_export_dir(
            instance_dir=revert_path, create_dir=True)
        # copy the given instance's files to a _revert folder, as backup.
        LOG.debug("Moving instance files to a revert path: %s",
                  revert_path, instance=instance)
        self._pathutils.move_folder_files(instance_path, revert_path)
        self._pathutils.copy_vm_config_files(instance.name, export_path)
        return revert_path
    def _check_target_flavor(self, instance, flavor, block_device_info):
        """Validate the destination flavor for a resize.

        Raises InstanceFaultRollback(CannotResizeDisk) when the root disk
        would shrink (unless booted from volume) or when the new, non-zero
        ephemeral size is smaller than the current total ephemeral size.
        """
        ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
        eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
                    instance.flavor.ephemeral_gb)
        new_root_gb = flavor.root_gb
        curr_root_gb = instance.flavor.root_gb
        new_eph_size = flavor.ephemeral_gb
        root_down = new_root_gb < curr_root_gb
        ephemeral_down = new_eph_size < eph_size
        booted_from_volume = self._block_dev_man.is_boot_from_volume(
            block_device_info)
        if root_down and not booted_from_volume:
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(
                    reason=_("Cannot resize the root disk to a smaller size. "
                             "Current size: %(curr_root_gb)s GB. Requested "
                             "size: %(new_root_gb)s GB.") % {
                        'curr_root_gb': curr_root_gb,
                        'new_root_gb': new_root_gb}))
        # We allow having a new flavor with no ephemeral storage, in which
        # case we'll just remove all the ephemeral disks.
        elif ephemeral_down and new_eph_size:
            reason = (_("The new flavor ephemeral size (%(flavor_eph)s) is "
                        "smaller than the current total ephemeral disk size: "
                        "%(current_eph)s.") %
                      dict(flavor_eph=flavor.ephemeral_gb,
                           current_eph=eph_size))
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(reason=reason))
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None, timeout=0,
                                   retry_interval=0):
        """Source-side half of a resize / cold migration.

        Validates the target flavor, powers the VM off, moves its files to
        a revert dir (recorded in system_metadata['backup_location']) and
        destroys the VM while keeping the migration files. Returns the
        revert/backup path.
        """
        LOG.debug("migrate_disk_and_power_off called", instance=instance)
        self._check_target_flavor(instance, flavor, block_device_info)
        self._vmops.power_off(instance, timeout, retry_interval)
        instance_path = self._move_vm_files(instance)
        instance.system_metadata['backup_location'] = instance_path
        instance.save()
        self._vmops.destroy(instance, network_info,
                            block_device_info, destroy_disks=True,
                            cleanup_migration_files=False)
        # return the instance's path location.
        return instance_path
def confirm_migration(self, context, migration, instance, network_info):
LOG.debug("confirm_migration called", instance=instance)
revert_path = instance.system_metadata['backup_location']
export_path = self._pathutils.get_export_dir(instance_dir=revert_path)
self._pathutils.check_dir(export_path, remove_dir=True)
self._pathutils.check_dir(revert_path, remove_dir=True)
def _revert_migration_files(self, instance):
revert_path = instance.system_metadata['backup_location']
instance_path = re.sub('_revert$', '', revert_path)
# the instance dir might still exist, if the destination node kept
# the files on the original node.
self._pathutils.check_dir(instance_path, remove_dir=True)
self._pathutils.rename(revert_path, instance_path)
return instance_path
    def _check_and_attach_config_drive(self, instance, vm_gen):
        """Re-attach the config drive if the instance requires one.

        Raises ConfigDriveNotFound when required but not found on disk.
        """
        if configdrive.required_by(instance):
            configdrive_path = self._pathutils.lookup_configdrive_path(
                instance.name)
            if configdrive_path:
                self._vmops.attach_config_drive(instance, configdrive_path,
                                                vm_gen)
            else:
                raise exception.ConfigDriveNotFound(
                    instance_uuid=instance.uuid)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Re-create the VM on the source node from the revert backup and
        optionally power it on."""
        LOG.debug("finish_revert_migration called", instance=instance)
        instance_path = self._revert_migration_files(instance)
        image_meta = self._imagecache.get_image_details(context, instance)
        self._import_and_setup_vm(context, instance, instance_path, image_meta,
                                  block_device_info)
        if power_on:
            self._vmops.power_on(instance, network_info=network_info)
    def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
        """Merge a differential VHD with its base image.

        Copies the base next to the diff, reconnects the diff to the copy,
        merges the diff into it, and renames the result over the diff path.
        The base copy is removed on any failure before re-raising.
        """
        base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
                                          os.path.basename(base_vhd_path))
        try:
            LOG.debug('Copying base disk %(base_vhd_path)s to '
                      '%(base_vhd_copy_path)s',
                      {'base_vhd_path': base_vhd_path,
                       'base_vhd_copy_path': base_vhd_copy_path})
            self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
            LOG.debug("Reconnecting copied base VHD "
                      "%(base_vhd_copy_path)s and diff "
                      "VHD %(diff_vhd_path)s",
                      {'base_vhd_copy_path': base_vhd_copy_path,
                       'diff_vhd_path': diff_vhd_path})
            self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
                                                base_vhd_copy_path)
            LOG.debug("Merging differential disk %s into its parent.",
                      diff_vhd_path)
            self._vhdutils.merge_vhd(diff_vhd_path)
            # Replace the differential VHD with the merged one
            self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
        except Exception:
            with excutils.save_and_reraise_exception():
                if self._pathutils.exists(base_vhd_copy_path):
                    self._pathutils.remove(base_vhd_copy_path)
def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
curr_size = vhd_info['VirtualSize']
if new_size < curr_size:
raise exception.CannotResizeDisk(
reason=_("Cannot resize the root disk to a smaller size. "
"Current size: %(curr_root_gb)s GB. Requested "
"size: %(new_root_gb)s GB.") % {
'curr_root_gb': curr_size / units.Gi,
'new_root_gb': new_size / units.Gi})
elif new_size > curr_size:
self._resize_vhd(vhd_path, new_size)
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug("Getting parent disk info for disk: %s", vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s",
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
    def _check_base_disk(self, context, instance, diff_vhd_path,
                         src_base_disk_path):
        """Reconnect the diff disk to this node's cached base image when the
        base image path differs between source and target hosts."""
        base_vhd_path = self._imagecache.get_cached_image(context, instance)
        # If the location of the base host differs between source
        # and target hosts we need to reconnect the base disk
        if src_base_disk_path.lower() != base_vhd_path.lower():
            LOG.debug("Reconnecting copied base VHD "
                      "%(base_vhd_path)s and diff "
                      "VHD %(diff_vhd_path)s",
                      {'base_vhd_path': base_vhd_path,
                       'diff_vhd_path': diff_vhd_path})
            self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
                                                base_vhd_path)
    def _migrate_disks_from_source(self, migration, instance,
                                   source_inst_dir):
        """Copy the instance's disk and export files from the source node.

        The destination dir depends on config: either this node's configured
        instances path (move_disks_on_cold_migration, or when the source
        path is a remote administrative share), or the source node's own
        location with the *_revert* suffix stripped. Returns the dir used.
        """
        source_inst_dir = self._pathutils.get_remote_path(
            migration.source_compute, source_inst_dir)
        source_export_path = self._pathutils.get_export_dir(
            instance_dir=source_inst_dir)
        if CONF.hyperv.move_disks_on_cold_migration:
            # copy the files from the source node to this node's configured
            # location.
            inst_dir = self._pathutils.get_instance_dir(
                instance.name, create_dir=True, remove_dir=True)
        elif self._ADMINISTRATIVE_SHARE_RE.match(source_inst_dir):
            # make sure that the source is not a remote local path.
            # e.g.: \\win-srv\\C$\OpenStack\Instances\..
            # CSVs, local paths, and shares are fine.
            # NOTE(claudiub): get rid of the final _revert part of the path.
            # rstrip can remove more than _revert, which is not desired.
            inst_dir = re.sub('_revert$', '', source_inst_dir)
            LOG.warning(
                'Host is configured not to copy disks on cold migration, but '
                'the instance will not be able to start with the remote path: '
                '"%s". Only local, share, or CSV paths are acceptable.',
                inst_dir)
            inst_dir = self._pathutils.get_instance_dir(
                instance.name, create_dir=True, remove_dir=True)
        else:
            # make a copy on the source node's configured location.
            # strip the _revert from the source backup dir.
            inst_dir = re.sub('_revert$', '', source_inst_dir)
            self._pathutils.check_dir(inst_dir, create_dir=True)
        export_path = self._pathutils.get_export_dir(
            instance_dir=inst_dir)
        self._pathutils.copy_folder_files(source_inst_dir, inst_dir)
        self._pathutils.copy_dir(source_export_path, export_path)
        return inst_dir
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance=False,
                         block_device_info=None, power_on=True):
        """Destination-side half of a resize / cold migration: copy disks
        from the source, import and set up the VM, optionally power on."""
        LOG.debug("finish_migration called", instance=instance)
        instance_dir = self._migrate_disks_from_source(migration, instance,
                                                       disk_info)
        # NOTE(claudiub): nova compute manager only takes into account disk
        # flavor changes when passing to the driver resize_instance=True.
        # we need to take into account flavor extra_specs as well.
        resize_instance = (
            migration.old_instance_type_id != migration.new_instance_type_id)
        self._import_and_setup_vm(context, instance, instance_dir, image_meta,
                                  block_device_info, resize_instance)
        if power_on:
            self._vmops.power_on(instance, network_info=network_info)
    def _import_and_setup_vm(self, context, instance, instance_dir, image_meta,
                             block_device_info, resize_instance=False):
        """Import the VM definition and bring it up to date: resources,
        volumes, disk paths/sizes, ephemeral disks, RemoteFX and metrics."""
        vm_gen = self._vmops.get_image_vm_generation(instance.uuid, image_meta)
        self._import_vm(instance_dir)
        self._vmops.update_vm_resources(instance, vm_gen, image_meta,
                                        instance_dir, resize_instance)
        self._volumeops.connect_volumes(block_device_info)
        self._update_disk_image_paths(instance, instance_dir)
        self._check_and_update_disks(context, instance, vm_gen, image_meta,
                                     block_device_info,
                                     resize_instance=resize_instance)
        self._volumeops.fix_instance_volume_disk_paths(
            instance.name, block_device_info)
        self._migrationutils.realize_vm(instance.name)
        # During a resize, ephemeral disks may be removed. We cannot remove
        # disks from a planned vm, for which reason we have to do this after
        # *realizing* it. At the same time, we cannot realize a VM before
        # updating disks to use the destination paths.
        ephemerals = block_device_info['ephemerals']
        self._check_ephemeral_disks(instance, ephemerals, resize_instance)
        self._vmops.configure_remotefx(instance, vm_gen, resize_instance)
        self._vmops.configure_instance_metrics(instance.name)
    def _import_vm(self, instance_dir):
        """Import the VM definition from the export dir, then delete the
        export dir (the config files are no longer needed)."""
        snapshot_dir = self._pathutils.get_instance_snapshot_dir(
            instance_dir=instance_dir)
        export_dir = self._pathutils.get_export_dir(instance_dir=instance_dir)
        vm_config_file_path = self._pathutils.get_vm_config_file(export_dir)
        self._migrationutils.import_vm_definition(vm_config_file_path,
                                                  snapshot_dir)
        # NOTE(claudiub): after the VM was imported, the VM config files are
        # not necessary anymore.
        self._pathutils.get_export_dir(instance_dir=instance_dir,
                                       remove_dir=True)
    def _update_disk_image_paths(self, instance, instance_path):
        """Checks if disk images have the correct path and updates them if not.
        When resizing an instance, the vm is imported on the destination node
        and the disk files are copied from source node. If the hosts have
        different instance_path config options set, the disks are migrated to
        the correct paths, but vm disk resources are not updated to point to
        the new location.

        Raises DiskNotFound if an expected disk file is missing.
        """
        (disk_files, volume_drives) = self._vmutils.get_vm_storage_paths(
            instance.name)
        # only instance-owned disks (configdrive / ephemeral / root) are
        # relocated; anything else attached to the VM is left alone
        pattern = re.compile('configdrive|eph|root', re.IGNORECASE)
        for disk_file in disk_files:
            disk_name = os.path.basename(disk_file)
            if not pattern.match(disk_name):
                # skip files that do not match the pattern.
                continue
            expected_disk_path = os.path.join(instance_path, disk_name)
            if not os.path.exists(expected_disk_path):
                raise exception.DiskNotFound(location=expected_disk_path)
            if expected_disk_path.lower() != disk_file.lower():
                LOG.debug("Updating VM disk location from %(src)s to %(dest)s",
                          {'src': disk_file, 'dest': expected_disk_path,
                           'instance': instance})
                self._vmutils.update_vm_disk_path(disk_file,
                                                  expected_disk_path,
                                                  is_physical=False)
def _check_and_update_disks(self, context, instance, vm_gen, image_meta,
                            block_device_info, resize_instance=False):
    """Validate the instance's block device info and its root disk.

    Ensures the root image exists on this host, reconnects it to its base
    image when it is a differencing disk, and (when ``resize_instance``)
    verifies the root disk can be resized to the new flavor's size.

    :raises exception.DiskNotFound: if no root VHD is found for the
        instance.
    """
    self._block_dev_man.validate_and_update_bdi(instance, image_meta,
                                                vm_gen, block_device_info)
    root_device = block_device_info['root_disk']

    if root_device['type'] == constants.DISK:
        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        root_device['path'] = root_vhd_path
        if not root_vhd_path:
            # Report the instance dir where the root disk was expected.
            base_vhd_path = self._pathutils.get_instance_dir(instance.name)
            raise exception.DiskNotFound(location=base_vhd_path)

        root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
        src_base_disk_path = root_vhd_info.get("ParentPath")
        if src_base_disk_path:
            # Differencing disk: make sure the base image is present
            # locally and the child points at it.
            self._check_base_disk(context, instance, root_vhd_path,
                                  src_base_disk_path)

        if resize_instance:
            new_size = instance.flavor.root_gb * units.Gi
            self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
def _check_ephemeral_disks(self, instance, ephemerals,
                           resize_instance=False):
    """Create, resize or remove ephemeral disks to match the new flavor.

    :param instance: instance whose flavor provides ``ephemeral_gb``.
    :param ephemerals: list of ephemeral disk dicts from block_device_info;
        entries are mutated in place (format/path/ctrl addressing added).
    :param resize_instance: whether this is a resize (new ephemerals may
        legitimately have to be created).
    :raises exception.DiskNotFound: if an expected ephemeral image is
        missing and this is not a resize.
    """
    instance_name = instance.name
    new_eph_gb = instance.get('ephemeral_gb', 0)
    # paths of ephemeral images that must be detached and deleted
    ephemerals_to_remove = set()

    if not ephemerals and new_eph_gb:
        # No explicit ephemeral disk bdm was retrieved, yet the flavor
        # provides ephemeral storage, for which reason we're adding a
        # default ephemeral disk.
        eph = dict(device_type='disk',
                   drive_addr=0,
                   size=new_eph_gb)
        ephemerals.append(eph)

    if len(ephemerals) == 1:
        # NOTE(claudiub): Resize only if there is one ephemeral. If there
        # are more than 1, resizing them can be problematic. This behaviour
        # also exists in the libvirt driver and it has to be addressed in
        # the future.
        ephemerals[0]['size'] = new_eph_gb
    elif new_eph_gb and sum(
            eph['size'] for eph in ephemerals) != new_eph_gb:
        # New ephemeral size is different from the original ephemeral size
        # and there are multiple ephemerals.
        LOG.warning("Cannot resize multiple ephemeral disks for instance.",
                    instance=instance)

    for index, eph in enumerate(ephemerals):
        eph_name = "eph%s" % index
        existing_eph_path = self._pathutils.lookup_ephemeral_vhd_path(
            instance_name, eph_name)

        if not existing_eph_path and eph['size']:
            # The image does not exist yet: create and attach it.
            eph['format'] = self._vhdutils.get_best_supported_vhd_format()
            eph['path'] = self._pathutils.get_ephemeral_vhd_path(
                instance_name, eph['format'], eph_name)
            if not resize_instance:
                # ephemerals should have existed.
                raise exception.DiskNotFound(location=eph['path'])

            # We cannot rely on the BlockDeviceInfoManager class to
            # provide us a disk slot as it's only usable when creating
            # new instances (it's not aware of the current disk address
            # layout).
            # There's no way in which IDE may be requested for new
            # ephemeral disks (after a resize), so we'll just enforce
            # SCSI for now. os-win does not currently allow retrieving
            # free IDE slots.
            ctrller_path = self._vmutils.get_vm_scsi_controller(
                instance.name)
            ctrl_addr = self._vmutils.get_free_controller_slot(
                ctrller_path)
            eph['disk_bus'] = constants.CTRL_TYPE_SCSI
            eph['ctrl_disk_addr'] = ctrl_addr

            # create ephemerals
            self._vmops.create_ephemeral_disk(instance.name, eph)
            self._vmops.attach_ephemerals(instance_name, [eph])
        elif eph['size'] > 0:
            # ephemerals exist. resize them.
            eph['path'] = existing_eph_path
            eph_vhd_info = self._vhdutils.get_vhd_info(eph['path'])
            self._check_resize_vhd(
                eph['path'], eph_vhd_info, eph['size'] * units.Gi)
        else:
            eph['path'] = None
            # ephemeral new size is 0, remove it.
            ephemerals_to_remove.add(existing_eph_path)

    if not new_eph_gb:
        # The new flavor does not provide any ephemeral storage. We'll
        # remove any existing ephemeral disk (default ones included).
        attached_ephemerals = self._vmops.get_attached_ephemeral_disks(
            instance.name)
        ephemerals_to_remove |= set(attached_ephemerals)

    for eph_path in ephemerals_to_remove:
        self._vmutils.detach_vm_disk(instance_name, eph_path,
                                     is_physical=False)
        self._pathutils.remove(eph_path)
|
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pylab import *
import Image
from tifffile import imsave
#%matplotlib inline
# <codecell>
def addBead(r, xf, yf, zf):
    """Return (x, y, z) coordinate grids tracing a sphere surface.

    r  - sphere radius
    xf - x offset of the center
    yf - y offset of the center
    zf - z offset of the center
    """
    theta = np.linspace(0, 2 * np.pi, 100)
    phi = np.linspace(0, np.pi, 100)
    # Outer products of the two parameter vectors sweep out the full
    # parametric sphere surface, shifted by the requested offsets.
    x = xf + r * np.outer(np.cos(theta), np.sin(phi))
    y = yf + r * np.outer(np.sin(theta), np.sin(phi))
    z = zf + r * np.outer(np.ones(theta.size), np.cos(phi))
    return x, y, z
# <codecell>
def make_plane(xoff, yoff, zoff, normal, grid_range, ref_plane='z'):
    """Return (xx, yy, zz) grids of a plane through the origin, offset.

    xoff - x offset
    yoff - y offset
    zoff - z offset
    normal - plane normal, e.g. np.array([0, 0, 1])
    grid_range - 1D array used for both base-grid axes
    ref_plane - which coordinate is solved for: 'x', 'y' or 'z'

    Raises:
        ValueError: for an unrecognized ``ref_plane`` (the original code
        silently fell through and crashed with NameError on return).
    """
    point = np.array([0, 0, 0])
    # a plane is a*x+b*y+c*z+d=0
    # [a,b,c] is the normal. Thus, we have to calculate
    # d and we're set
    d = -point.dot(normal)

    if ref_plane == 'x':
        # yz are the base grid; solve for x
        z2, yy = np.meshgrid(grid_range, grid_range)
        xx = (-normal[2]*z2 - normal[1]*yy - d) * 1. / normal[0]
    elif ref_plane == 'y':
        # xz are the base grid; solve for y
        xx, z2 = np.meshgrid(grid_range, grid_range)
        yy = (-normal[0]*xx - normal[2]*z2 - d) * 1. / normal[1]
    elif ref_plane == 'z':
        # xy are the base grid; solve for z
        xx, yy = np.meshgrid(grid_range, grid_range)
        z2 = (-normal[0]*xx - normal[1]*yy - d) * 1. / normal[2]
    else:
        raise ValueError(
            "ref_plane must be one of 'x', 'y', 'z'; got %r" % (ref_plane,))

    # apply offset
    xx = xx + xoff
    yy = yy + yoff
    zz = z2 + zoff
    return xx, yy, zz
# <markdowncell>
# ### Save multiplane tiff file to be loaded into Fiji ###
# <codecell>
wkdir = '/Users/hogstrom/Dropbox (MIT)/Neuron_data/SPIM_simulation/tiff'
beadSize = .05
nBeads = 40
# Random integer bead centers in [-3, 3]. np.random.randint's upper bound is
# exclusive, so (-3, 4) reproduces the deprecated (and since removed)
# np.random.random_integers(-3, 3, ...) behavior.
bcMtrx = np.random.randint(-3, 4, size=(nBeads, 3))  # bead coordinate matrix

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# accumulate the surface grids of every bead (reused by later cells)
x = np.zeros((0, 100))
y = np.zeros((0, 100))
z = np.zeros((0, 100))
for i in range(nBeads):
    x1, y1, z1 = addBead(beadSize, bcMtrx[i, 0], bcMtrx[i, 1], bcMtrx[i, 2])
    x = np.concatenate((x, x1))
    y = np.concatenate((y, y1))
    z = np.concatenate((z, z1))
    ax.plot_surface(x1, y1, z1, rstride=4, cstride=4, color='b')
# one large reference sphere at the origin
x1, y1, z1 = addBead(2, 0, 0, 0)
x = np.concatenate((x, x1))
y = np.concatenate((y, y1))
z = np.concatenate((z, z1))
ax.plot_surface(x1, y1, z1, rstride=4, cstride=4, color='b')
show()
# print() works on both Python 2 and 3 (the bare 'print bcMtrx' statement
# was a Python-3 syntax error)
print(bcMtrx)
# <codecell>
#slice_thickness = .05
slice_thickness = 2
# Render one 2D scatter image per z-slice: beads whose z coordinate lies in
# [j, j + slice_thickness] are drawn as white dots on black, then saved as
# a per-slice tif under wkdir.
for j in [-3,-2,-1,0,1,2,3]:
    igr = bcMtrx[:,2] >= j                      # at or above the slice floor
    ilt = bcMtrx[:,2] <= j + slice_thickness    # at or below the slice ceiling
    irange = igr * ilt                          # elementwise AND of both masks
    #z[irange]
    fig = plt.figure(facecolor="k")
    # NOTE(review): axisbg / set_axis_bgcolor were removed in matplotlib 2.x
    # (replaced by facecolor) -- this cell targets an older matplotlib.
    ax = fig.add_subplot(1,1,1, axisbg="k")
    ax.plot(bcMtrx[irange,0],bcMtrx[irange,1],'o',c='w') #, backgroundcolor='b'
    ax.axis([-4, 4, -4, 4])
    ax.set_axis_bgcolor('r')
    plt.axis('off')
    fname = wkdir + '/test_' + str(j) + '.tif'
    fig.savefig(fname,facecolor='k',dpi=200)
    show()
    plt.close()
# <markdowncell>
# ### create tif images from matrix coordinates ###
# <codecell>
def circle_around_coord(x, y, r, zero_grid):
    """Return a copy of ``zero_grid`` with a filled circle of 1s at (x, y).

    x - center position along the first (row) axis
    y - center position along the second (column) axis
    r - radius of circle around center
    zero_grid - 2D template matrix (not modified)
    """
    #(x-center_x)**2 + (y-center_y)**2 <= radius**2
    mtrx = zero_grid.copy()
    xAx = np.arange(zero_grid.shape[0])
    yAx = np.arange(zero_grid.shape[1])
    dx = np.abs(xAx - x)
    # BUG FIX: distances along the second axis must use yAx. The original
    # used xAx here (leaving yAx unused), which is wrong for non-square
    # grids.
    dy = np.abs(yAx - y)
    # Vectorized Euclidean distance from (x, y). Replaces the O(n^2) Python
    # double loop, which additionally iterated shape[0] over both axes.
    eucMtrx = np.sqrt(dx[:, None] ** 2 + dy[None, :] ** 2)
    bCircle = eucMtrx < r  # boolean mask of the circle
    mtrx[bCircle] = 1
    return mtrx
# <codecell>
def circles_around_coord(xy, r, zero_grid):
    """Return a copy of ``zero_grid`` with filled circles of 1s.

    xy - numpy matrix of n bead coordinates (n x 2), one circle per row
    r - radius of the circle around each center
    zero_grid - 2D template matrix (not modified)
    """
    #(x-center_x)**2 + (y-center_y)**2 <= radius**2
    mtrx = zero_grid.copy()
    xAx = np.arange(zero_grid.shape[0])
    yAx = np.arange(zero_grid.shape[1])
    for i in range(xy.shape[0]):
        dx = np.abs(xAx - xy[i, 0])
        # BUG FIX: column distances must use yAx (the original used xAx,
        # which is wrong for non-square grids and left yAx unused).
        dy = np.abs(yAx - xy[i, 1])
        # Vectorized distance matrix; replaces the O(n^2) double loop that
        # also indexed shape[0] over both axes.
        eucMtrx = np.sqrt(dx[:, None] ** 2 + dy[None, :] ** 2)
        mtrx[eucMtrx < r] = 1  # stamp this circle into the output
    return mtrx
# <codecell>
### run circle coord
# Demo: single filled circle centered in a 100x100 grid.
x=50
y=50
r=20
zero_grid = np.zeros((100,100))
crcMtrx = circle_around_coord(x,y,r,zero_grid)
plt.imshow(crcMtrx)
### run circles coord
# Demo: two circles on a larger grid.
# NOTE(review): zero_grid is rebound to 994x994 here and is also read by the
# later multiframe cell -- verify that cross-cell dependency is intended.
frmLarge = np.array([[710, 300], [284, 300]])
zero_grid = np.zeros((994,994))
r=10
crcMtrx = circles_around_coord(frmLarge,r,zero_grid)
plt.imshow(crcMtrx)
# <markdowncell>
# ### write many tif images along multiple planes - multiframe tiff file ###
# <codecell>
slice_thickness = .05
nFrames = 7
boxMin = -3
boxMax = 3
# NOTE(review): this cell assumes Python 2 integer division. Under Python 3
# ``spacing`` becomes 1.0, np.arange then yields floats, and the float frame
# index / np.zeros shape below raise -- confirm before porting.
spacing = (boxMax - boxMin + 1)/nFrames
steps = np.arange(boxMin,boxMax+1,spacing)
r=2 # pixel radius around bead center
mag = 1000/nFrames
npix = mag*nFrames
mtrx = np.zeros((nFrames,npix,npix))
for j in steps:
    igr = bcMtrx[:,2] >= j                      # beads above the slice floor
    ilt = bcMtrx[:,2] <= j + slice_thickness    # beads below the slice ceiling
    irange = igr * ilt
    bcMtrx[irange,0]  # no-op expression left over from notebook inspection
    ### turn cooordinates into a matrix form
    frm = bcMtrx[irange,:2]
    frmLarge = frm*mag + (-boxMin * mag)        # scale/shift coords to pixels
    # NOTE(review): negative j values index from the END of mtrx here;
    # presumably a 0-based frame counter was intended -- verify.
    mtrx[j,frmLarge[:,0],frmLarge[:,1]] = 1
    ### create a circles around the center of beads
    # (zero_grid comes from the previous cell; this overwrites the single
    # pixels set above)
    crcMtrx = circles_around_coord(frmLarge,r,zero_grid)
    mtrx[j,:,:] = crcMtrx
    plt.imshow(crcMtrx)
### write matrix to multiframe tiff
#image = np.zeros((32, 256, 256), 'uint16')
mtrx = np.array(mtrx, 'uint16')
fname = wkdir + '/test_multiframe.tif'
imsave(fname, mtrx)
# <markdowncell>
# ### store metadata for saved tiff images ###
# <codecell>
### write metadata
# Save the frame stack with an ImageJ-style description string, then read the
# file back and recover the description.
mtrx = np.array(mtrx, 'uint16')
fname = wkdir + '/test1_multiframe1.tif'
# BUG FIX: the variable was misspelled 'meatadata', so the imsave() call
# below crashed with NameError before anything was written.
metadata = 'ImageJ=1.49m\nimages=81\nslices=81\nunit=um\nspacing=2.0\nloop=false\n'
imsave(fname, mtrx, description=metadata)
### load metadata back in
import tifffile
with tifffile.TiffFile(fname) as tif:
    data = tif.asarray()
    metadata = tif[0].image_description
# <markdowncell>
# ### take 45 degree plane of 3D matrix ###
# <codecell>
# Random voxel matrix with values in [-3, 3]. randint's upper bound is
# exclusive, so (-3, 4) matches the deprecated (and since removed)
# np.random.random_integers(-3, 3, ...) behavior.
mtrx = np.random.randint(-3, 4, size=(10, 10, 10))
#45 degree angle hits the
mtrx[:,0,-1]  # no-op inspection expression left over from the notebook
# <markdowncell>
# ###
|
|
import logging
import cffi
import cle
from sortedcontainers import SortedDict
from ..analysis import Analysis
_l = logging.getLogger(name=__name__)
class CFBlanketView:
    """
    A view into the control-flow blanket.
    """
    def __init__(self, cfb):
        # The CFBlanket instance this view is backed by.
        self._cfb = cfb

    def __getitem__(self, item):
        # NOTE(review): this method is explicitly unfinished (see the TODO
        # and the NotImplementedError below). The while loop never
        # terminates on its own, and ``addr_ += obj`` adds the object itself
        # to an address -- presumably ``obj.size`` was intended; confirm
        # before relying on slice access.
        if isinstance(item, slice):
            addr = item.start
            start_addr = self._cfb.floor_addr(addr)

            addr_ = start_addr
            while True:
                obj = self._cfb[addr_]
                yield obj
                addr_ += obj
                # Find gaps
                # TODO: finish it
        raise NotImplementedError()
#
# Memory region
#
class MemoryRegion:
    """A contiguous region of mapped memory.

    :ivar addr:       Start address of the region.
    :ivar size:       Size of the region in bytes.
    :ivar type:       Region type tag (call sites currently pass 'TODO').
    :ivar object:     The loader backend object this region belongs to.
    :ivar cle_region: The CLE section/segment backing it, or None.
    """
    def __init__(self, addr, size, type_, object_, cle_region):
        self.addr = addr
        self.size = size
        self.type = type_
        self.object = object_
        self.cle_region = cle_region

    def __repr__(self):
        # Debug-friendly representation, in the same address-range style as
        # Unknown.__repr__ elsewhere in this module.
        return "<MemoryRegion %#x-%#x, type %s>" % (
            self.addr, self.addr + self.size, self.type)
#
# An address can be mapped to one of the following types of object
# - Block
# - MemoryData
# - Unknown
#
class Unknown:
    """An address range that is not mapped to any known object.

    :ivar addr:    Start address of the range.
    :ivar size:    Size of the range in bytes (must be non-zero).
    :ivar bytes:   Raw bytes backing the range, if available.
    :ivar object:  The loader backend object the range belongs to, if any.
    :ivar segment: The CLE segment containing the range, if any.
    :ivar section: The CLE section containing the range, if any.
    """
    def __init__(self, addr, size, bytes_=None, object_=None, segment=None, section=None):
        # Validate before assigning anything so a failed construction does
        # not leave a partially initialized instance. ValueError is more
        # precise than the bare Exception previously raised, and remains
        # backward compatible for callers catching Exception.
        if size == 0:
            raise ValueError("You cannot create an unknown region of size 0.")

        self.addr = addr
        self.size = size

        # Optional
        self.bytes = bytes_
        self.object = object_
        self.segment = segment
        self.section = section

    def __repr__(self):
        s = "<Unknown %#x-%#x>" % (self.addr, self.addr + self.size)
        return s
class CFBlanket(Analysis):
    """
    A Control-Flow Blanket is a representation for storing all instructions, data entries, and bytes of a full program.
    """
    def __init__(self, cfg=None):
        # addr -> object (Block / MemoryData / Unknown), kept sorted by
        # address so floor/ceiling lookups are O(log n).
        self._blanket = SortedDict()

        self._regions = [ ]

        self._init_regions()

        if cfg is not None:
            self._from_cfg(cfg)
        else:
            _l.debug("CFG is not specified. Initialize CFBlanket from the knowledge base.")
            for func in self.kb.functions.values():
                self.add_function(func)

    def _init_regions(self):
        # Build a MemoryRegion entry for every mapped section (or for the
        # whole object when it has no usable sections).
        for obj in self.project.loader.all_objects:
            if isinstance(obj, cle.MetaELF):
                if obj.sections:
                    # Enumerate sections in an ELF file
                    for section in obj.sections:
                        if section.occupies_memory:
                            mr = MemoryRegion(section.vaddr, section.memsize, 'TODO', obj, section)
                            self._regions.append(mr)
                else:
                    raise NotImplementedError("Currently ELFs without sections are not supported. Please implement or "
                                              "complain on GitHub.")
            elif isinstance(obj, cle.PE):
                if obj.sections:
                    for section in obj.sections:
                        mr = MemoryRegion(section.vaddr, section.memsize, 'TODO', obj, section)
                        self._regions.append(mr)
                else:
                    raise NotImplementedError("Currently PEs without sections are not supported. Please report to "
                                              "GitHub and provide an example binary.")
            else:
                # Generic backend: one region spanning the whole object.
                if hasattr(obj, "size"):
                    size = obj.size
                else:
                    size = obj.max_addr - obj.min_addr
                mr = MemoryRegion(obj.min_addr, size, 'TODO', obj, None)
                self._regions.append(mr)

        # Sort them just in case
        self._regions = list(sorted(self._regions, key=lambda x: x.addr))

    @property
    def regions(self):
        """
        Return all memory regions.
        """
        return self._regions

    def floor_addr(self, addr):
        """Return the largest mapped address <= ``addr``.

        :raises KeyError: if no such address exists.
        """
        try:
            return next(self._blanket.irange(maximum=addr, reverse=True))
        except StopIteration:
            raise KeyError(addr)

    def floor_item(self, addr):
        # (address, object) pair for the floor of ``addr``.
        key = self.floor_addr(addr)
        return key, self._blanket[key]

    def floor_items(self, addr=None, reverse=False):
        # Yield (address, object) pairs starting at the floor of ``addr``
        # (or from the beginning when addr is None).
        if addr is None:
            start_addr = None
        else:
            try:
                start_addr = next(self._blanket.irange(maximum=addr, reverse=True))
            except StopIteration:
                start_addr = addr

        for key in self._blanket.irange(minimum=start_addr, reverse=reverse):
            yield key, self._blanket[key]

    def ceiling_addr(self, addr):
        """Return the smallest mapped address >= ``addr``.

        :raises KeyError: if no such address exists.
        """
        try:
            return next(self._blanket.irange(minimum=addr))
        except StopIteration:
            raise KeyError(addr)

    def ceiling_item(self, addr):
        # (address, object) pair for the ceiling of ``addr``.
        key = self.ceiling_addr(addr)
        return key, self._blanket[key]

    def ceiling_items(self, addr=None, reverse=False, include_first=True):
        # NOTE(review): despite the name, this iterates keys bounded by
        # ``maximum=start_addr`` (i.e. from the low end up TO the ceiling of
        # ``addr``), not upward from it -- confirm the intent against
        # callers before changing.
        if addr is None:
            start_addr = None
        else:
            try:
                start_addr = next(self._blanket.irange(minimum=addr))
            except StopIteration:
                start_addr = addr

        for key in self._blanket.irange(maximum=start_addr if include_first else start_addr - 1, reverse=reverse):
            yield key, self._blanket[key]

    def __getitem__(self, addr):
        # Exact-address lookup only; raises KeyError for unmapped addresses.
        return self._blanket[addr]

    def add_obj(self, addr, obj):
        """
        Adds an object `obj` to the blanket at the specified address `addr`
        """
        self._blanket[addr] = obj

    def add_function(self, func):
        """
        Add a function `func` and all blocks of this function to the blanket.
        """
        for block in func.blocks:
            self.add_obj(block.addr, block)

    def dbg_repr(self):
        """
        The debugging representation of this CFBlanket.

        :return:    The debugging representation of this CFBlanket.
        :rtype:     str
        """
        output = [ ]

        for obj in self.project.loader.all_objects:
            for section in obj.sections:
                if section.memsize == 0:
                    continue
                min_addr, max_addr = section.min_addr, section.max_addr
                output.append("### Object %s" % repr(section))
                output.append("### Range %#x-%#x" % (min_addr, max_addr))

                pos = min_addr
                while pos < max_addr:
                    try:
                        addr, thing = self.floor_item(pos)
                        output.append("%#x: %s" % (addr, repr(thing)))

                        # Advance at least one byte so zero-sized objects
                        # cannot stall the walk.
                        if thing.size == 0: pos += 1
                        else: pos += thing.size
                    except KeyError:
                        pos += 1

                output.append("")

        return "\n".join(output)

    def _from_cfg(self, cfg):
        """
        Initialize CFBlanket from a CFG instance.

        :param cfg: A CFG instance.
        :return:    None
        """

        # Let's first add all functions first
        for func in cfg.kb.functions.values():
            self.add_function(func)

        self._mark_unknowns()

    def _mark_unknowns(self):
        """
        Mark all unmapped regions.

        :return: None
        """
        for obj in self.project.loader.all_objects:
            if isinstance(obj, cle.ELF):
                # sections?
                if obj.sections:
                    for section in obj.sections:
                        if not section.memsize or not section.vaddr:
                            continue
                        min_addr, max_addr = section.min_addr, section.max_addr
                        self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, section=section)
                elif obj.segments:
                    for segment in obj.segments:
                        if not segment.memsize:
                            continue
                        min_addr, max_addr = segment.min_addr, segment.max_addr
                        self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, segment=segment)
                else:
                    # is it empty?
                    _l.warning("Empty ELF object %s.", repr(obj))
            elif isinstance(obj, cle.PE):
                if obj.sections:
                    for section in obj.sections:
                        if not section.memsize:
                            continue
                        min_addr, max_addr = section.min_addr, section.max_addr
                        self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj, section=section)
                else:
                    # is it empty?
                    _l.warning("Empty PE object %s.", repr(obj))
            else:
                min_addr, max_addr = obj.min_addr, obj.max_addr
                self._mark_unknowns_core(min_addr, max_addr + 1, obj=obj)

    def _mark_unknowns_core(self, min_addr, max_addr, obj=None, segment=None, section=None):
        # Fill every gap in [min_addr, max_addr) with an Unknown object so
        # the blanket has no holes inside this region.
        # The region should be [min_addr, max_addr)

        try:
            addr = self.floor_addr(min_addr)
            if addr < min_addr:
                raise KeyError
        except KeyError:
            # there is no other lower address
            try:
                next_addr = self.ceiling_addr(min_addr)
                if next_addr >= max_addr:
                    raise KeyError
            except KeyError:
                next_addr = max_addr

            # The region starts with a gap: cover [min_addr, next_addr).
            size = next_addr - min_addr
            if obj is None or isinstance(obj, cle.ExternObject):
                bytes_ = None
            else:
                try:
                    _l.debug("Loading bytes from object %s, section %s, segmeng %s, addresss %#x.",
                             obj, section, segment, min_addr)
                    bytes_ = self.project.loader.memory.load(min_addr, size)
                except KeyError:
                    # The address does not exist
                    bytes_ = None
            self.add_obj(min_addr,
                         Unknown(min_addr, size, bytes_=bytes_, object_=obj, segment=segment, section=section)
                         )

        # Walk the region, covering any gap between consecutive objects.
        addr = min_addr
        while addr < max_addr:
            last_addr, last_item = self.floor_item(addr)
            if last_addr < min_addr:
                # impossible
                raise Exception('Impossible')

            if last_item.size == 0:
                # Make sure everything has a non-zero size
                last_item_size = 1
            else:
                last_item_size = last_item.size
            end_addr = last_addr + last_item_size
            if end_addr < max_addr:
                try:
                    next_addr = self.ceiling_addr(end_addr)
                except KeyError:
                    next_addr = max_addr
                if next_addr > end_addr:
                    # there is a gap
                    size = next_addr - end_addr
                    if obj is None or isinstance(obj, cle.ExternObject):
                        bytes_ = None
                    else:
                        try:
                            _l.debug("Loading bytes from object %s, section %s, segmeng %s, addresss %#x.",
                                     obj, section, segment, next_addr)
                            bytes_ = self.project.loader.memory.load(next_addr, size)
                        except KeyError:
                            # The address does not exist
                            bytes_ = None
                    self.add_obj(end_addr,
                                 Unknown(end_addr, size, bytes_=bytes_, object_=obj, segment=segment, section=section)
                                 )
                addr = next_addr
            else:
                addr = max_addr
from angr.analyses import AnalysesHub
# Register the analysis under both its short and its long name.
AnalysesHub.register_default('CFB', CFBlanket)
AnalysesHub.register_default('CFBlanket', CFBlanket)
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Factory functions to prepare useful data.
"""
import pytz
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource)
from zipline.finance.trading import SimulationParameters, TradingEnvironment
from zipline.sources.test_source import create_trade
# For backwards compatibility
from zipline.data.loader import (load_from_yahoo,
load_bars_from_yahoo)
__all__ = ['load_from_yahoo', 'load_bars_from_yahoo']
def create_simulation_parameters(year=2006, start=None, end=None,
                                 capital_base=float("1.0e5"),
                                 num_days=None, load=None,
                                 data_frequency='daily',
                                 emission_rate='daily',
                                 env=None):
    """Construct a complete environment with reasonable defaults"""
    if env is None:
        env = TradingEnvironment(load=load)

    # Default the period to the requested calendar year.
    if start is None:
        start = datetime(year, 1, 1, tzinfo=pytz.utc)
    if end is None:
        if num_days:
            # End exactly num_days trading days after the start.
            start_index = env.trading_days.searchsorted(start)
            end = env.trading_days[start_index + num_days - 1]
        else:
            end = datetime(year, 12, 31, tzinfo=pytz.utc)

    return SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=capital_base,
        data_frequency=data_frequency,
        emission_rate=emission_rate,
        env=env,
    )
def get_next_trading_dt(current, interval, env):
    """Return the next UTC timestamp, stepping ``interval`` at a time from
    ``current``, that falls within the exchange's market hours.
    """
    next_dt = pd.Timestamp(current).tz_convert(env.exchange_tz)

    while True:
        # Convert timestamp to naive before adding day, otherwise the when
        # stepping over EDT an hour is added.
        next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
        next_dt = next_dt + interval
        # Re-localize in exchange time, then compare in UTC.
        next_dt = pd.Timestamp(next_dt, tz=env.exchange_tz)
        next_dt_utc = next_dt.tz_convert('UTC')
        if env.is_market_hours(next_dt_utc):
            break
        next_dt = next_dt_utc.tz_convert(env.exchange_tz)

    return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params, env,
                         source_id="test_factory"):
    """Create one trade event per (price, amount) pair, spaced ``interval``
    apart starting at the simulation's first open."""
    current = sim_params.first_open
    # At daily (or coarser) intervals, stamp trades at midnight.
    use_midnight = interval >= timedelta(days=1)

    trades = []
    for price, amount in zip(prices, amounts):
        trade_dt = current.replace(hour=0, minute=0) if use_midnight else current
        trades.append(create_trade(sid, price, amount, trade_dt, source_id))
        current = get_next_trading_dt(current, interval, env)

    assert len(trades) == len(prices)
    return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
    """Build a mock cash-dividend event (gross == net == payment).

    NOTE(review): unlike create_stock_dividend, this sets 'declared_date'
    rather than 'dt' -- verify that asymmetry is intended by consumers.
    """
    normalize = pd.tslib.normalize_date
    return Event({
        'sid': sid,
        'gross_amount': payment,
        'net_amount': payment,
        'payment_sid': None,
        'ratio': None,
        'declared_date': normalize(declared_date),
        'ex_date': normalize(ex_date),
        'pay_date': normalize(pay_date),
        'type': DATASOURCE_TYPE.DIVIDEND,
        'source_id': 'MockDividendSource'
    })
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
                          ex_date, pay_date):
    """Build a mock stock-dividend event (payment in shares, not cash)."""
    normalize = pd.tslib.normalize_date
    return Event({
        'sid': sid,
        'payment_sid': payment_sid,
        'ratio': ratio,
        'net_amount': None,
        'gross_amount': None,
        'dt': normalize(declared_date),
        'ex_date': normalize(ex_date),
        'pay_date': normalize(pay_date),
        'type': DATASOURCE_TYPE.DIVIDEND,
        'source_id': 'MockDividendSource'
    })
def create_split(sid, ratio, date):
    """Build a mock split event stamped at midnight of ``date``."""
    midnight = date.replace(hour=0, minute=0, second=0, microsecond=0)
    return Event({
        'sid': sid,
        'ratio': ratio,
        'dt': midnight,
        'type': DATASOURCE_TYPE.SPLIT,
        'source_id': 'MockSplitSource'
    })
def create_txn(sid, price, amount, datetime):
    """Build a mock transaction event.

    NOTE: the parameter name 'datetime' shadows the datetime module inside
    this function; kept for interface compatibility with callers.
    """
    return Event({
        'sid': sid,
        'amount': amount,
        'dt': datetime,
        'price': price,
        'type': DATASOURCE_TYPE.TRANSACTION,
        'source_id': 'MockTransactionSource'
    })
def create_commission(sid, value, datetime):
    """Build a mock commission event of cost ``value`` at ``datetime``."""
    return Event({
        'dt': datetime,
        'type': DATASOURCE_TYPE.COMMISSION,
        'cost': value,
        'sid': sid,
        'source_id': 'MockCommissionSource'
    })
def create_txn_history(sid, priceList, amtList, interval, sim_params, env):
    """Create one mock transaction per (price, amount) pair.

    NOTE(review): unlike create_trade_history, this advances ``current``
    twice per iteration (get_next_trading_dt plus ``+ interval``) --
    presumably intentional spacing, but verify against callers.
    """
    txns = []
    current = sim_params.first_open
    for price, amount in zip(priceList, amtList):
        # snap to the next in-market timestamp before stamping the txn
        current = get_next_trading_dt(current, interval, env)
        txns.append(create_txn(sid, price, amount, current))
        current = current + interval
    return txns
def create_returns_from_range(sim_params):
    """Return a Series of uniform-random returns indexed by the simulation's
    trading days."""
    days = sim_params.trading_days
    return pd.Series(index=days, data=np.random.rand(len(days)))
def create_returns_from_list(returns, sim_params):
    """Return a Series of the given returns, indexed by the first
    len(returns) trading days of the simulation."""
    idx = sim_params.trading_days[:len(returns)]
    return pd.Series(index=idx, data=returns)
def create_daily_trade_source(sids, sim_params, env, concurrent=False):
    """Create a trade source emitting one trade per sid per trading day.

    The first trade is on sim_params.period_start; two sids therefore yield
    two trades per day.
    """
    return create_trade_source(
        sids, timedelta(days=1), sim_params, env=env, concurrent=concurrent)
def create_minutely_trade_source(sids, sim_params, env, concurrent=False):
    """Create a trade source emitting one trade per sid per market minute.

    The first trade is on sim_params.period_start; two sids therefore yield
    two trades per minute.
    """
    return create_trade_source(
        sids, timedelta(minutes=1), sim_params, env=env, concurrent=concurrent)
def create_trade_source(sids, trade_time_increment, sim_params, env,
                        concurrent=False):
    """Build a SpecificEquityTrades source covering the simulation period,
    emitting one trade per sid every ``trade_time_increment``."""
    # If the sim_params end falls within market hours it bounds the source
    # directly; otherwise extend to the following last_close.
    if env.is_market_hours(sim_params.period_end):
        end = sim_params.period_end
    else:
        end = sim_params.last_close

    return SpecificEquityTrades(
        sids=sids,
        start=sim_params.first_open,
        end=end,
        delta=trade_time_increment,
        filter=sids,
        concurrent=concurrent,
        env=env,
    )
def create_test_df_source(sim_params=None, env=None, bars='daily'):
    """Build a (DataFrameSource, DataFrame) pair of sequential test data.

    The frame holds the integers 1..N in a single column (0), indexed by
    trading days or market minutes depending on ``bars``.

    :raises ValueError: for an unrecognized ``bars`` value.
    """
    if bars == 'daily':
        freq = pd.datetools.BDay()
    elif bars == 'minute':
        freq = pd.datetools.Minute()
    else:
        raise ValueError('%s bars not understood.' % bars)

    if sim_params and bars == 'daily':
        index = sim_params.trading_days
    else:
        if env is None:
            env = TradingEnvironment()

        # Fixed early-1990 window used by the test fixtures.
        start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
        end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)

        days = env.days_in_range(start, end)

        if bars == 'daily':
            index = days
        if bars == 'minute':
            # Concatenate each day's market minutes into one index.
            index = pd.DatetimeIndex([], freq=freq)

            for day in days:
                day_index = env.market_minutes_for_day(day)
                index = index.append(day_index)

    x = np.arange(1, len(index) + 1)

    df = pd.DataFrame(x, index=index, columns=[0])

    return DataFrameSource(df), df
def create_test_panel_source(sim_params=None, env=None, source_type=None):
    """Build a (DataPanelSource, Panel) pair of synthetic price/volume data
    over the simulation period (or a fixed early-1990 window)."""
    if sim_params:
        start, end = sim_params.first_open, sim_params.last_close
    else:
        start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
        end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)

    if env is None:
        env = TradingEnvironment()

    index = env.days_in_range(start, end)
    n = len(index)

    df = pd.DataFrame({'price': np.arange(0, n),
                       'volume': np.ones(n) * 1000,
                       'arbitrary': np.ones(n)},
                      index=index)

    if source_type:
        # Tag every row with the requested source type.
        df['type'] = np.full(n, source_type)

    panel = pd.Panel.from_dict({0: df})
    return DataPanelSource(panel), panel
def create_test_panel_ohlc_source(sim_params, env):
    """Build a (DataPanelSource, Panel) pair of synthetic OHLC data over the
    simulation period (or a fixed early-1990 window)."""
    if sim_params:
        start, end = sim_params.first_open, sim_params.last_close
    else:
        start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
        end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)

    index = env.days_in_range(start, end)
    n = len(index)

    # High/low bracket the price; open wiggles around it by parity.
    price = np.arange(0, n) + 100
    df = pd.DataFrame({'price': price,
                       'high': price * 1.05,
                       'low': price * 0.95,
                       'open': price + .1 * (price % 2 - .5),
                       'volume': np.ones(n) * 1000,
                       'arbitrary': np.ones(n)},
                      index=index)

    panel = pd.Panel.from_dict({0: df})
    return DataPanelSource(panel), panel
|
|
"""
Utility functions.
Includes:
- Logging decorators
- Logging functions log_start() and log_end()
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '8/4/15'
from collections import deque
from datetime import datetime
import logging
import logging.config
import os
import six
import time
# Message templates used by log_start() / log_end() / log_event() below.
ENTRY_MESSAGE = '{timestamp} {func_name}.begin {kvp}'
EXIT_MESSAGE = '{timestamp} {func_name}.end {dur} {kvp}'
EVENT_MESSAGE = '{timestamp} {func_name} {kvp}'

# Level used when callers do not pass one explicitly.
DEFAULT_LEVEL = logging.INFO

logformat = '%(levelname)s %(message)s'

# Cached result of "are we running under nose?" (None = not checked yet).
g_running_nosetests = None
def get_logger(name=''):
    """Return a logger nested under the 'doekbase' namespace.

    When running under nose, loggers are additionally nested under 'nose.'
    so that nose's verbosity options apply to them.
    """
    global g_running_nosetests

    if not name.startswith('doekbase'):
        name = 'doekbase' if name == '' else 'doekbase.' + name

    # If we are running in nose, nest under there so nose -v options
    # can apply to us. Cache the result of checking for nose in a global var.
    if g_running_nosetests is None:  # haven't checked yet
        g_running_nosetests = 'nose' in logging.root.manager.loggerDict
    if g_running_nosetests:
        name = 'nose.' + name

    # create logger
    logger = logging.getLogger(name)
    logger.propagate = 1
    return logger
def basic_config(level=logging.INFO):
    """Attach a stream handler using the module's format and return the root
    'doekbase' logger set to ``level``."""
    log = get_logger()
    log.setLevel(level)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(logformat))
    log.addHandler(handler)
    return log
class Timer(object):
    """Context manager that records wall-clock durations.

    Each ``with`` block appends its duration (in seconds) to an internal
    list; pop() retrieves and removes the most recent one.
    """
    def __init__(self):
        self._timings = []
        self._start = time.time()

    def __enter__(self):
        self._start = time.time()
        # Fix: return self so "with Timer() as t:" binds the timer.
        # Previously __enter__ implicitly returned None, making the
        # as-clause useless.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        now = time.time()
        # Record the elapsed time and reset the start for the next block.
        self._timings.append(now - self._start)
        self._start = now
        return False  # never suppress exceptions (matches prior behavior)

    def pop(self):
        """Return and remove the most recently recorded duration."""
        return self._timings.pop()
#TODO turn example below into a test
def logged(logger, log_level=logging.INFO, log_name=None, **kw):
    """Wrap a method/function in a log start/end message.

    Example usage. Given the following code::

        @logged(g_log, log_name='hello')
        def hello_world(self, what):
            sys.stderr.write("hello {} world\n". format(what))

    Then the statement ``hello_world("dumb")`` should print output like::

        2015-08-26T01:02:30.123456 hello.begin
        hello dumb world
        2015-08-26T01:02:30.654321 hello.end
    """
    import functools  # local import keeps this fix self-contained

    def real_decorator(method, logger_name=logger.name):
        # choose name for logged event
        func_name = log_name or method.__name__
        full_name = logger_name + '.' + func_name
        # format any key/value pairs once, at decoration time
        kvp_str = format_kvp(kw, ',') if kw else ''

        # Fix: functools.wraps preserves the wrapped callable's __name__,
        # __doc__ and other metadata, which the bare wrapper previously
        # discarded.
        @functools.wraps(method)
        def method_wrapper(self, *args, **kwds):
            t0 = log_start(logger, full_name, level=log_level, kvp=kvp_str)
            returnval = method(self, *args, **kwds)
            log_end(logger, t0, full_name, level=log_level, kvp=kvp_str)
            return returnval

        # return wrapper
        return method_wrapper
    return real_decorator
def log_start(logger, func_name, level=None, fmt=None, kvp=None):
    """Log a '<func_name>.begin' message and return the start timestamp.

    ``kvp`` may be a dict of key/value pairs or an already-formatted string.
    (The ``logged`` decorator passes a preformatted string; previously that
    crashed inside format_kvp, which expects a dict.)
    """
    t0 = time.time()
    if not kvp:
        kvp_str = ''
    elif isinstance(kvp, str):
        kvp_str = kvp  # already formatted by the caller
    else:
        kvp_str = format_kvp(kvp, ',')
    d = dict(timestamp=format_timestamp(t0),
             func_name=func_name, kvp=kvp_str)
    fmt = fmt or ENTRY_MESSAGE
    msg = fmt.format(**d)
    if level is None:
        level = DEFAULT_LEVEL
    logger.log(level, msg)
    return t0
def log_event(logger, func_name, level=None, fmt=None, kvp=None):
    """Log a one-shot '<func_name>' event message and return its timestamp.

    ``kvp`` may be a dict of key/value pairs or an already-formatted string.
    (Previously a string ``kvp`` crashed inside format_kvp, which expects a
    dict.)
    """
    t0 = time.time()
    if not kvp:
        kvp_str = ''
    elif isinstance(kvp, str):
        kvp_str = kvp  # already formatted by the caller
    else:
        kvp_str = format_kvp(kvp, ',')
    d = dict(timestamp=format_timestamp(t0),
             func_name=func_name, kvp=kvp_str)
    fmt = fmt or EVENT_MESSAGE
    msg = fmt.format(**d)
    if level is None:
        level = DEFAULT_LEVEL
    logger.log(level, msg)
    return t0
def log_end(logger, t0, func_name, level=None, fmt=None, status_code=0, kvp=None):
    """Log a '<func_name>.end' message with the duration since ``t0``.

    ``kvp`` may be a dict of key/value pairs or an already-formatted string.
    (Previously a string ``kvp`` crashed inside format_kvp, which expects a
    dict.)

    NOTE: EXIT_MESSAGE has no {status} placeholder; ``status_code`` is only
    usable through a custom ``fmt``.
    """
    t1 = time.time()
    if not kvp:
        kvp_str = ''
    elif isinstance(kvp, str):
        kvp_str = kvp  # already formatted by the caller
    else:
        kvp_str = format_kvp(kvp, ',')
    d = dict(timestamp=format_timestamp(t1),
             func_name=func_name,
             kvp=kvp_str,
             dur=(t1 - t0),
             status=status_code)
    fmt = fmt or EXIT_MESSAGE
    if level is None:
        level = DEFAULT_LEVEL
    logger.log(level, fmt.format(**d))
def format_kvp(d, sep):
    """Format a dict of key-value pairs as 'k=v<sep>k=v...'.

    String values containing the separator get it backslash-escaped.
    """
    pairs = []
    for k, v in d.items():
        if isinstance(v, six.string_types) and sep in v:
            # Fix: escape the actual separator. The old code always replaced
            # ',' regardless of which ``sep`` was requested.
            v = v.replace(sep, '\\' + sep)
        # Fix: append every pair. Previously the append was nested inside
        # the isinstance check, so non-string values were silently dropped
        # from the output.
        pairs.append((k, v))
    return sep.join('{}={}'.format(k, v) for k, v in pairs)
# Format a timestamp as an ISO8601 string.
def format_timestamp(t):
    """Return ``t`` (seconds since the epoch) as an ISO8601 string."""
    # A def instead of a lambda bound to a name (PEP 8); behavior unchanged.
    return datetime.fromtimestamp(t).isoformat()
def get_auth_token():
    """Return the KBase auth token from the environment.

    Raises:
        Exception: if the KB_AUTH_TOKEN environment variable is not set.
    """
    token = os.environ.get("KB_AUTH_TOKEN")
    if token is None:
        raise Exception(
            "Missing authentication token! "
            "Set KB_AUTH_TOKEN environment variable.")
    return token
# Simple performance classes
class PerfCollector(object):
    """Collector of multiple performance events.
    """
    MAX_SIZE = 1000  # max number events in history
    EVENT_WILDCARD = '*'

    def __init__(self, namespace):
        self._ns = namespace
        # Bounded history of completed PerfEvent objects (oldest dropped).
        self._history = deque(maxlen=self.MAX_SIZE)
        # In-flight events: '<event>::<key>' -> start timestamp.
        self._cur = {}
        self._make_key = lambda e, k: '{e}::{k}'.format(e=e, k=k)
        self._observers = {}
        self._meta = {}

    def add_observer(self, event, start_fn, end_fn):
        """Add observer functions for an event.

        Args:
            event (str): Event name or EVENT_WILDCARD for all events.
            start_fn: Function taking (event, key, timestamp) or None
            end_fn: Function taking (event, PerfEvent) or None
        """
        self._observers.setdefault(event, []).append((start_fn, end_fn))

    def _broadcast(self, event, idx, *args):
        # Notify observers registered for this event, then wildcard ones.
        # idx selects the callback slot: 0 = start_fn, 1 = end_fn.
        if event in self._observers:
            for obs in self._observers[event]:
                if obs[idx]:
                    obs[idx](event, *args)
        if self.EVENT_WILDCARD in self._observers:
            for obs in self._observers[self.EVENT_WILDCARD]:
                if obs[idx]:
                    obs[idx](event, *args)

    def set_metadata(self, meta):
        """Set metadata attached to every subsequently completed event."""
        self._meta = meta

    def start_event(self, event, key):
        """Record the start of an (event, key) pair and notify observers."""
        timestamp = time.time()
        ekey = self._make_key(event, key)
        self._cur[ekey] = timestamp
        self._broadcast(event, 0, key, timestamp)

    def end_event(self, event, key, **meta):
        """Complete a started event, record it, and notify observers.

        Raises:
            KeyError: if no matching start_event was recorded.
        """
        timestamp = time.time()
        ekey = self._make_key(event, key)
        if ekey not in self._cur:  # idiom fix: 'not in' over 'not ... in'
            raise KeyError('No current event found for key "{}"'
                           .format(ekey))
        t0 = self._cur.pop(ekey)
        full_event = '{}.{}'.format(self._ns, event)
        pevent = PerfEvent(full_event, key, t0, timestamp, meta)
        for k in self._meta:
            pevent.add_metadata(k, self._meta[k])
        self._history.append(pevent)
        self._broadcast(event, 1, pevent)

    def get_last(self):
        """Return the most recently completed event, or None."""
        if not self._history:
            return None
        return self._history[-1]

    def get_event(self, event, limit=0):
        """Get all performance events matching name `event`, newest first,
        up to `limit` number of entries (0=all).

        Passing EVENT_WILDCARD returns every recorded event regardless of
        ``limit``.
        """
        if not self._history:
            return []
        wildcard = (event == self.EVENT_WILDCARD)
        if wildcard:
            limit = len(self._history) + 1  # effectively unlimited
        n, result = 0, []
        # Fix: the stop value must be -1 so index 0 (the oldest event) is
        # included; the original range(..., 0, -1) silently skipped it.
        for i in range(len(self._history) - 1, -1, -1):
            # Fix: the wildcard is a marker, not a literal event name; the
            # original compared it against each event name and always
            # returned an empty list for '*'.
            if wildcard or self._history[i].event == event:
                result.append(self._history[i])
                n += 1
                if n == limit:
                    break
        return result

    def dump(self, stream):
        """Write a human-readable table of all recorded events to stream."""
        by_event = {}
        for item in self._history:
            by_event.setdefault(item.event, []).append(item)
        stream.write("Event Duration Metadata\n")
        for event in sorted(by_event.keys()):
            for item in by_event[event]:
                meta_str = ' '.join(['{k}={v}'.format(k=k, v=v)
                                     for k, v in item.metadata.items()])
                stream.write("{e:30s} {d:8.3f} {m}\n".format(
                    e=event[:30], d=item.duration, m=meta_str))
class PerfEvent(object):
    """Single timed event.

    Values can be extracted using dictionary syntax,
    e.g. my_event['<key>'], for any key in the metadata.
    This will also work for any of the attributes, in case it's
    more convenient to get at them that way.

    Attributes:
        event (str): Full name of event <namespace>.<event-name>
        key (str): Identifying key
        start (float): Start timestamp, in seconds since 1/1/1970
        end (float): End timestamp, in seconds since 1/1/1970
        duration (float): Duration in seconds
    """

    def __init__(self, event, key, start_time, end_time, meta):
        """Ctor.

        Args:
            event (str): Full name of event <namespace>.<event-name>
            key (str): Identifying key
            start_time (float): Unix epoch seconds for start
            end_time (float): Floating point time in seconds for end
            meta (dict): Additional key/value pairs
        """
        self.event = event
        self.key = key
        self.start = start_time
        self.end = end_time
        self.duration = end_time - start_time
        self._meta = meta

    def add_metadata(self, key, value):
        """Modify the metadata by setting `value` for `key`."""
        self._meta[key] = value

    @property
    def metadata(self):
        """Return a *copy* of the metadata."""
        return self._meta.copy()

    def __getitem__(self, key):
        # Attributes shadow metadata keys of the same name.
        if key in ('event', 'key', 'start', 'end', 'duration'):
            return getattr(self, key)
        if key in self._meta:
            return self._meta[key]
        raise KeyError(key)

    def as_dict(self):
        """Return event plus metadata as a single flat dict."""
        d = self._meta.copy()
        # Fixed: the old code referenced self.start_time, which does
        # not exist (the attribute is self.start) -> AttributeError.
        d.update({'event': self.event,
                  'key': self.key,
                  'timestamp': self.start,
                  'dur': self.duration})
        return d
def collect_performance(perf_collector, prefix='', suffix=''):
    """Decorator factory that times every call of the wrapped method.

    Each invocation is reported to `perf_collector` as an event named
    "<prefix><method-name><suffix>"; the call's positional arguments
    are added to the end-event metadata as _0, _1, ...

    Args:
        perf_collector (PerfCollector): Collector receiving the events.
        prefix (str): Prepended to the event name.
        suffix (str): Appended to the event name.

    Returns:
        Decorator suitable for methods (first argument is `self`).
    """
    def real_decorator(method):
        event = prefix + method.__name__ + suffix
        # Per-call sequence number. The previous version computed a
        # single key at decoration time, so nested or re-entrant calls
        # of the same method collided inside the collector.
        seq = [0]

        def method_wrapper(self, *args, **kwds):
            seq[0] += 1
            key = '{}-{}'.format(time.time(), seq[0])
            perf_collector.start_event(event, key)
            returnval = method(self, *args, **kwds)
            for i, a in enumerate(args):
                kwds['_{:d}'.format(i)] = str(a)
            perf_collector.end_event(event, key, **kwds)
            return returnval
        return method_wrapper
    return real_decorator
def get_msgpack_object_ref(path):
    """Get object-id ref for object in messagepack-encoded file.

    Args:
        (str) path: Full path to file.

    Returns:
        (str) reference, in form A/B/C e.g. '93/111124/2'

    Raises:
        IOError if the file cannot be opened.
        ValueError if the data in the file cannot be decoded.
        KeyError if the reference field is not found in the data.
    """
    import msgpack
    try:
        # msgpack data is binary: open in 'rb' (the old text-mode open
        # breaks decoding on Python 3) and guarantee the handle is
        # closed via the `with` below (the old code leaked it).
        f = open(path, 'rb')
    except IOError:
        raise IOError('Cannot open file for reading: {}'.format(path))
    with f:
        try:
            t = msgpack.load(f)
        except Exception as err:
            raise ValueError('Cannot decode messagepack data in path "{}": {}'
                             ''.format(path, err))
    try:
        ref = t['ref']
    except KeyError:
        raise KeyError('Field "ref" not found in object at "{}"'.format(path))
    return ref
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.v1 import images
from openstack_dashboard import api
from openstack_dashboard.test.test_data import utils
class Namespace(dict):
    """Dict-like test double for a metadata-definition namespace.

    The supplied ``info`` mapping is exposed three ways: as dict items,
    as instance attributes, and verbatim through ``self._info``.
    """

    def __init__(self, info):
        super(Namespace, self).__init__()
        self.__dict__.update(info)
        self.update(info)
        self._info = info

    def __repr__(self):
        return "<Namespace %s>" % self._info

    def as_json(self, indent=4):
        # `indent` is accepted for signature compatibility but unused;
        # callers receive the attribute dict directly.
        return self.__dict__
class APIResourceV2(dict):
    """Dict-backed stand-in for a Glance v2 API image resource.

    Attribute access falls through to the dict contents (missing keys
    yield None); the ``schema`` attribute is always synthesized from
    the known base properties.
    """

    _base_props = [
        'id', 'name', 'status', 'visibility', 'protected', 'checksum', 'owner',
        'size', 'virtual_size', 'container_format', 'disk_format',
        'created_at', 'updated_at', 'tags', 'direct_url', 'min_ram',
        'min_disk', 'self', 'file', 'schema', 'locations']

    def __getattr__(self, item):
        # 'schema' is synthesized even when a key of that name exists.
        if item != 'schema':
            return self.get(item)
        return {'properties': {prop: '' for prop in self._base_props}}
def data(TEST):
    """Populate the TEST data container with Glance fixtures.

    Fills TEST.images / TEST.images_api (v1 images), TEST.snapshots /
    TEST.snapshotsV2, TEST.imagesV2 (v2 API resources) and
    TEST.metadata_defs (metadata-definition namespaces) with the static
    objects used by the image-related dashboard tests.
    """
    TEST.images = utils.TestDataContainer()
    TEST.images_api = utils.TestDataContainer()
    TEST.snapshots = utils.TestDataContainer()
    TEST.metadata_defs = utils.TestDataContainer()
    TEST.imagesV2 = utils.TestDataContainer()
    TEST.snapshotsV2 = utils.TestDataContainer()
    # Snapshots
    snapshot_dict = {'name': u'snapshot',
                     'container_format': u'ami',
                     'id': 3,
                     'status': "active",
                     'owner': TEST.tenant.id,
                     'properties': {'image_type': u'snapshot'},
                     'is_public': False,
                     'protected': False}
    snapshot_dict_no_owner = {'name': u'snapshot 2',
                              'container_format': u'ami',
                              'id': 4,
                              'status': "active",
                              'owner': None,
                              'properties': {'image_type': u'snapshot'},
                              'is_public': False,
                              'protected': False}
    snapshot_dict_queued = {'name': u'snapshot 2',
                            'container_format': u'ami',
                            'id': 5,
                            'status': "queued",
                            'owner': TEST.tenant.id,
                            'properties': {'image_type': u'snapshot'},
                            'is_public': False,
                            'protected': False}
    snapshot_dict_with_volume = {'name': u'snapshot 2',
                                 'container_format': u'ami',
                                 'id': 6,
                                 'status': "queued",
                                 'owner': TEST.tenant.id,
                                 'properties': {
                                     'block_device_mapping':
                                         '[{"source_type": "snapshot"}]'},
                                 'is_public': False,
                                 'protected': False}
    snapshot = images.Image(images.ImageManager(None), snapshot_dict)
    TEST.snapshots.add(api.glance.Image(snapshot))
    snapshot = images.Image(images.ImageManager(None), snapshot_dict_no_owner)
    TEST.snapshots.add(api.glance.Image(snapshot))
    snapshot = images.Image(images.ImageManager(None), snapshot_dict_queued)
    TEST.snapshots.add(api.glance.Image(snapshot))
    snapshot = images.Image(images.ImageManager(None),
                            snapshot_dict_with_volume)
    TEST.snapshots.add(api.glance.Image(snapshot))
    # Images
    image_dict = {'id': '007e7d55-fe1e-4c5c-bf08-44b4a4964822',
                  'name': 'public_image',
                  'disk_format': u'qcow2',
                  'status': "active",
                  'size': 20 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': TEST.tenant.id,
                  'container_format': 'novaImage',
                  'properties': {'image_type': u'image'},
                  'is_public': True,
                  'protected': False,
                  'min_ram': 0,
                  'created_at': '2014-02-14T20:56:53'}
    public_image = images.Image(images.ImageManager(None), image_dict)
    image_dict = {'id': 'a001c047-22f8-47d0-80a1-8ec94a9524fe',
                  'name': 'private_image',
                  'status': "active",
                  'size': 10 * 1024 ** 2,
                  'virtual_size': 20 * 1024 ** 2,
                  'min_disk': 0,
                  'owner': TEST.tenant.id,
                  'container_format': 'aki',
                  'is_public': False,
                  'protected': False,
                  'min_ram': 0,
                  'created_at': '2014-03-14T12:56:53'}
    private_image = images.Image(images.ImageManager(None), image_dict)
    image_dict = {'id': 'd6936c86-7fec-474a-85c5-5e467b371c3c',
                  'name': 'protected_images',
                  'status': "active",
                  'owner': TEST.tenant.id,
                  'size': 2 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 30,
                  'container_format': 'novaImage',
                  'properties': {'image_type': u'image'},
                  'is_public': True,
                  'protected': True,
                  'min_ram': 0,
                  'created_at': '2014-03-16T06:22:14'}
    protected_image = images.Image(images.ImageManager(None), image_dict)
    image_dict = {'id': '278905a6-4b52-4d1e-98f9-8c57bb25ba32',
                  'name': None,
                  'status': "active",
                  'size': 5 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': TEST.tenant.id,
                  'container_format': 'novaImage',
                  'properties': {'image_type': u'image'},
                  'is_public': True,
                  'protected': False,
                  'min_ram': 0}
    public_image2 = images.Image(images.ImageManager(None), image_dict)
    image_dict = {'id': '710a1acf-a3e3-41dd-a32d-5d6b6c86ea10',
                  'name': 'private_image 2',
                  'status': "active",
                  'size': 30 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': TEST.tenant.id,
                  'container_format': 'aki',
                  'is_public': False,
                  'protected': False,
                  'min_ram': 0}
    private_image2 = images.Image(images.ImageManager(None), image_dict)
    image_dict = {'id': '7cd892fd-5652-40f3-a450-547615680132',
                  'name': 'private_image 3',
                  'status': "active",
                  'size': 2 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': TEST.tenant.id,
                  'container_format': 'aki',
                  'is_public': False,
                  'protected': False,
                  'min_ram': 0}
    private_image3 = images.Image(images.ImageManager(None), image_dict)
    # A shared image. Not public and not local tenant.
    image_dict = {'id': 'c8756975-7a3b-4e43-b7f7-433576112849',
                  'name': 'shared_image 1',
                  'status': "active",
                  'size': 8 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': 'someothertenant',
                  'container_format': 'aki',
                  'is_public': False,
                  'protected': False,
                  'min_ram': 0}
    shared_image1 = images.Image(images.ImageManager(None), image_dict)
    # "Official" image. Public and tenant matches an entry
    # in IMAGES_LIST_FILTER_TENANTS.
    image_dict = {'id': 'f448704f-0ce5-4d34-8441-11b6581c6619',
                  'name': 'official_image 1',
                  'status': "active",
                  'size': 2 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': 'officialtenant',
                  'container_format': 'aki',
                  'is_public': True,
                  'protected': False,
                  'min_ram': 0}
    official_image1 = images.Image(images.ImageManager(None), image_dict)
    image_dict = {'id': 'a67e7d45-fe1e-4c5c-bf08-44b4a4964822',
                  'name': 'multi_prop_image',
                  'status': "active",
                  'size': 20 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': TEST.tenant.id,
                  'container_format': 'novaImage',
                  'properties': {'description': u'a multi prop image',
                                 'foo': u'foo val',
                                 'bar': u'bar val'},
                  'is_public': True,
                  'protected': False}
    multi_prop_image = images.Image(images.ImageManager(None), image_dict)
    # An image without name being returned based on current api
    image_dict = {'id': 'c8756975-7a3b-4e43-b7f7-433576112849',
                  'status': "active",
                  'size': 8 * 1024 ** 3,
                  'virtual_size': None,
                  'min_disk': 0,
                  'owner': 'someothertenant',
                  'container_format': 'aki',
                  'is_public': False,
                  'protected': False}
    no_name_image = images.Image(images.ImageManager(None), image_dict)
    TEST.images_api.add(public_image, private_image, protected_image,
                        public_image2, private_image2, private_image3,
                        shared_image1, official_image1, multi_prop_image)
    TEST.images.add(api.glance.Image(public_image),
                    api.glance.Image(private_image),
                    api.glance.Image(protected_image),
                    api.glance.Image(public_image2),
                    api.glance.Image(private_image2),
                    api.glance.Image(private_image3),
                    api.glance.Image(shared_image1),
                    api.glance.Image(official_image1),
                    api.glance.Image(multi_prop_image))
    TEST.empty_name_image = api.glance.Image(no_name_image)
    # v2 API image fixtures (flat dicts, no 'properties' nesting).
    image_v2_dicts = [{
        'checksum': 'eb9139e4942121f22bbc2afc0400b2a4',
        'container_format': 'novaImage',
        'created_at': '2014-02-14T20:56:53',
        'direct_url': 'swift+config://ref1/glance/'
                      'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5',
        'disk_format': u'qcow2',
        'file': '/v2/images/'
                'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5/file',
        'id': '007e7d55-fe1e-4c5c-bf08-44b4a4964822',
        'kernel_id': 'f6ebd5f0-b110-4406-8c1e-67b28d4e85e7',
        'locations': [
            {'metadata': {},
             'url': 'swift+config://ref1/glance/'
                    'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5'}],
        'min_ram': 0,
        'name': 'public_image',
        'image_type': u'image',
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'protected': False,
        'ramdisk_id': '868efefc-4f2d-4ed8-82b1-7e35576a7a47',
        'size': 20 * 1024 ** 3,
        'status': 'active',
        'tags': ['active_image'],
        'updated_at': '2015-08-31T19:37:45Z',
        'virtual_size': None,
        'visibility': 'public'
    }, {
        'checksum': None,
        'container_format': 'novaImage',
        'created_at': '2014-03-16T06:22:14',
        'disk_format': None,
        'image_type': u'image',
        'file': '/v2/images/885d1cb0-9f5c-4677-9d03-175be7f9f984/file',
        'id': 'd6936c86-7fec-474a-85c5-5e467b371c3c',
        'locations': [],
        'min_disk': 30,
        'min_ram': 0,
        'name': 'protected_images',
        'owner': TEST.tenant.id,
        'protected': True,
        'size': 2 * 1024 ** 3,
        'status': "active",
        'tags': ['empty_image'],
        'updated_at': '2015-09-01T22:37:32Z',
        'virtual_size': None,
        'visibility': 'public'
    }, {
        'checksum': 'e533283e6aac072533d1d091a7d2e413',
        'container_format': 'novaImage',
        'created_at': '2015-09-02T00:31:16Z',
        'disk_format': 'qcow2',
        'file': '/v2/images/10ca6b6b-48f4-43ac-8159-aa9e9353f5e4/file',
        'id': 'a67e7d45-fe1e-4c5c-bf08-44b4a4964822',
        'image_type': 'an image type',
        'min_disk': 0,
        'min_ram': 0,
        'name': 'multi_prop_image',
        'owner': TEST.tenant.id,
        'protected': False,
        'size': 20 * 1024 ** 3,
        'status': 'active',
        'tags': ['custom_property_image'],
        'updated_at': '2015-09-02T00:31:17Z',
        'virtual_size': None,
        'visibility': 'public',
        'description': u'a multi prop image',
        'foo': u'foo val',
        'bar': u'bar val'
    }]
    for fixture in image_v2_dicts:
        apiresource = APIResourceV2(fixture)
        TEST.imagesV2.add(api.glance.Image(apiresource))
    snapshot_v2_dict = {
        'checksum': None,
        'container_format': 'novaImage',
        'created_at': '2018-02-26T22:50:56Z',
        'disk_format': None,
        'block_device_mapping': '[{"source_type": "snapshot"}]',
        'file': '/v2/images/c701226a-aa32-4064-bd36-e85a3dcc61aa/file',
        'id': 'c701226a-aa32-4064-bd36-e85a3dcc61aa',
        'locations': [],
        'min_disk': 30,
        'min_ram': 0,
        'name': 'snpashot_with_volume',
        'owner': TEST.tenant.id,
        'protected': True,
        'size': 2 * 1024 ** 3,
        'status': "active",
        'tags': ['empty_image'],
        'updated_at': '2018-02-26T22:50:56Z',
        'virtual_size': None,
        'visibility': 'public'
    }
    TEST.snapshotsV2.add(api.glance.Image(APIResourceV2(snapshot_v2_dict)))
    # Metadata-definition namespace fixtures.
    metadef_dict = {
        'namespace': 'namespace_1',
        'display_name': 'Namespace 1',
        'description': 'Mock desc 1',
        'resource_type_associations': [
            {
                'created_at': '2014-08-21T08:39:43Z',
                'prefix': 'mock',
                'name': 'mock name'
            }
        ],
        'visibility': 'public',
        'protected': True,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'cpu_mock:mock': {
                'default': '1',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)
    metadef_dict = {
        'namespace': 'namespace_2',
        'display_name': 'Namespace 2',
        'description': 'Mock desc 2',
        'resource_type_associations': [
            {
                'created_at': '2014-08-21T08:39:43Z',
                'prefix': 'mock',
                'name': 'mock name'
            }
        ],
        'visibility': 'private',
        'protected': False,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'hdd_mock:mock': {
                'default': '2',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)
    metadef_dict = {
        'namespace': 'namespace_3',
        'display_name': 'Namespace 3',
        'description': 'Mock desc 3',
        'resource_type_associations': [
            {
                'created_at': '2014-08-21T08:39:43Z',
                'prefix': 'mock',
                'name': 'mock name'
            }
        ],
        'visibility': 'public',
        'protected': False,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'gpu_mock:mock': {
                'default': '2',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)
    metadef_dict = {
        'namespace': 'namespace_4',
        'display_name': 'Namespace 4',
        'description': 'Mock desc 4',
        'resource_type_associations': [
            {
                'created_at': '2014-08-21T08:39:43Z',
                'prefix': 'mock',
                'name': 'OS::Cinder::Volume',
                'properties_target': 'user'
            }
        ],
        'visibility': 'public',
        'protected': True,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'ram_mock:mock': {
                'default': '2',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of SQLAlchemy backend."""
import copy
import sys
import threading
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
import six
import sqlalchemy as sa
from sahara.db.sqlalchemy import models as m
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service.validations import acl as validate
from sahara.utils import types
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Process-wide EngineFacade singleton, created lazily by
# _create_facade_lazily(); _LOCK guards its one-time construction.
_FACADE = None
_LOCK = threading.Lock()
def _create_facade_lazily():
    """Return the process-wide EngineFacade, creating it on first use.

    Double-checked locking: the unlocked check avoids lock overhead on
    the common path; the locked re-check prevents two threads from both
    creating the facade.
    """
    global _LOCK, _FACADE
    if _FACADE is None:
        with _LOCK:
            if _FACADE is None:
                _FACADE = db_session.EngineFacade.from_config(CONF,
                                                              sqlite_fk=True)
    return _FACADE
def get_engine():
    """Return the shared SQLAlchemy engine from the lazily-built facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a new SQLAlchemy session from the lazily-built facade."""
    return _create_facade_lazily().get_session(**kwargs)
def _parse_sorting_args(sort_by):
if sort_by is None:
sort_by = "id"
if sort_by[0] == "-":
return sort_by[1:], "desc"
return sort_by, "asc"
def _get_prev_and_next_objects(objects, limit, marker, order=None):
if order == 'desc':
objects.reverse()
position = None
if limit is None:
return None, None
if marker:
for pos, obj in enumerate(objects):
if obj.id == marker.id:
position = pos
break
if position - limit >= 0:
prev_marker = objects[position - limit].id
else:
prev_marker = None
if position + limit < len(objects):
next_marker = objects[position + limit].id
else:
next_marker = None
else:
if limit < len(objects):
next_marker = objects[limit - 1].id
else:
next_marker = None
prev_marker = None
return prev_marker, next_marker
def cleanup():
    """Drop the cached engine facade so the next use recreates it."""
    global _FACADE
    _FACADE = None
def get_backend():
    """The backend is implemented by this very module; return it."""
    this_module = sys.modules[__name__]
    return this_module
def model_query(model, context, session=None, project_only=True):
    """Query helper.

    :param model: base model to query
    :param context: context to query under
    :param session: optional session; a new one is created when falsy
    :param project_only: if present and context is user-type, then restrict
            query to match the context's tenant_id.
    """
    if not session:
        session = get_session()
    query = session.query(model)
    restrict = project_only and not context.is_admin
    if restrict:
        # A row is visible when it belongs to the tenant or is public.
        owned = model.tenant_id == context.tenant_id
        query = query.filter(owned | getattr(model, 'is_public', False))
    return query
def count_query(model, context, session=None, project_only=None):
    """Count query helper.

    :param model: base model to query
    :param context: context to query under
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
    """
    counted = sa.func.count(model.id)
    return model_query(counted, context, session, project_only)
def in_filter(query, cls, search_opts):
    """Add 'in' filters for specified columns.

    For every entry in `search_opts` whose key names a column of `cls`
    and whose value is a tuple, add a sqlalchemy 'in' filter matching
    the column against any of the tuple's values (OR).

    :param query: a non-null query object
    :param cls: the database model class that filters will apply to
    :param search_opts: a dictionary whose key/value entries are interpreted as
                        column names and search values
    :returns: a tuple of the modified query and a dict of the entries
              that did not match a column or were not tuples
    """
    if not search_opts:
        return query, search_opts
    remaining = {}
    for key, value in six.iteritems(search_opts):
        # Exact tuple type check is deliberate (matches prior behavior).
        if type(value) == tuple and key in cls.__table__.columns:
            column = cls.__table__.columns[key]
            query = query.filter(column.in_(value))
        else:
            remaining[key] = value
    return query, remaining
def like_filter(query, cls, search_opts):
    """Add 'like' filters for specified columns.

    For every entry in `search_opts` whose key names a column of `cls`
    and whose value is a string containing '%', add a sqlalchemy LIKE
    filter so the column is matched against the sql string pattern.

    :param query: a non-null query object
    :param cls: the database model class the filters will apply to
    :param search_opts: a dictionary whose key/value entries are interpreted as
                        column names and search patterns
    :returns: a tuple of the modified query and a dict of the entries
              that did not match a column or lacked a '%' pattern
    """
    if not search_opts:
        return query, search_opts
    remaining = {}
    for key, value in six.iteritems(search_opts):
        is_pattern = (isinstance(value, six.string_types)
                      and '%' in value
                      and key in cls.__table__.columns)
        if is_pattern:
            column = cls.__table__.columns[key]
            query = query.filter(column.like(value))
        else:
            remaining[key] = value
    return query, remaining
def _get_regex_op(connection):
db = connection.split(':')[0].split('+')[0]
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP'
}
return regexp_op_map.get(db, None)
def regex_filter(query, cls, regex_cols, search_opts):
    """Add regex filters for specified columns.

    For every entry in `search_opts` whose key names a column of `cls`
    listed in `regex_cols` and whose value is a string, add a regex
    filter matching the column against the pattern. Only mysql and
    postgres support this; for other databases the query is unchanged.

    :param query: a non-null query object
    :param cls: the database model class the filters will apply to
    :param regex_cols: a list of columns for which regex is supported
    :param search_opts: a dictionary whose key/value entries are interpreted as
                        column names and search patterns
    :returns: a tuple of the modified query and a dict of the unused
              search_opts entries
    """
    regex_op = _get_regex_op(CONF.database.connection)
    if not regex_op:
        return query, copy.copy(search_opts)
    remaining = {}
    for key, value in six.iteritems(search_opts):
        supported = (isinstance(value, six.string_types)
                     and key in cls.__table__.columns
                     and key in regex_cols)
        if supported:
            column = cls.__table__.columns[key]
            query = query.filter(column.op(regex_op)(value))
        else:
            remaining[key] = value
    return query, remaining
def setup_db():
    """Create all Sahara tables; return True on success, False on error."""
    try:
        engine = get_engine()
        m.Cluster.metadata.create_all(engine)
    except sa.exc.OperationalError as e:
        LOG.warning(_LW("Database registration exception: {exc}")
                    .format(exc=e))
        return False
    return True
def drop_db():
    """Drop all Sahara tables; return True on success, False on error."""
    try:
        m.Cluster.metadata.drop_all(get_engine())
    except Exception as e:
        # Best effort: log and report failure instead of raising.
        LOG.warning(_LW("Database shutdown exception: {exc}").format(exc=e))
        return False
    return True
# Cluster ops
def _cluster_get(context, session, cluster_id):
    """Return the Cluster with `cluster_id` visible in `context`, or None."""
    return model_query(m.Cluster, context, session).filter_by(
        id=cluster_id).first()
def cluster_get(context, cluster_id):
    """Return the Cluster with `cluster_id`, or None."""
    session = get_session()
    return _cluster_get(context, session, cluster_id)
def cluster_get_all(context, regex_search=False,
                    limit=None, marker=None, sort_by=None, **kwargs):
    """Return a page of clusters matching the given filters.

    :param regex_search: if True, apply kwargs on regex_cols as regex
        patterns rather than exact-match filters.
    :param limit: page size (None for all).
    :param marker: id of the cluster the page starts after.
    :param sort_by: sort field, optionally "-"-prefixed for descending.
    :param kwargs: column filters.
    """
    sort_by, order = _parse_sorting_args(sort_by)
    regex_cols = ['name', 'description', 'plugin_name', 'tenant_id']
    query = model_query(m.Cluster, context)
    if regex_search:
        query, kwargs = regex_filter(query,
                                     m.Cluster, regex_cols, kwargs)
    limit = int(limit) if limit else None
    # Resolve the marker id into the actual cluster object.
    marker = cluster_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(query.filter_by(**kwargs), m.Cluster,
                                  limit, [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def cluster_create(context, values):
    """Create a cluster plus its node groups in one transaction.

    "node_groups" entries in `values` become NodeGroup rows linked to
    the new cluster.

    :raises ex.DBDuplicateEntry: if unique constraints are violated.
    """
    values = values.copy()
    cluster = m.Cluster()
    node_groups = values.pop("node_groups", [])
    cluster.update(values)
    session = get_session()
    try:
        with session.begin():
            session.add(cluster)
            # Flush so cluster.id is assigned before linking node groups.
            session.flush(objects=[cluster])
            for ng in node_groups:
                node_group = m.NodeGroup()
                node_group.update(ng)
                node_group.update({"cluster_id": cluster.id})
                session.add(node_group)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for object %(object)s. Failed on columns: "
              "%(columns)s") % {"object": e.value, "columns": e.columns})
    return cluster_get(context, cluster.id)
def cluster_update(context, cluster_id, values):
    """Update cluster `cluster_id` with `values` and return it.

    :raises ex.NotFoundException: if the cluster does not exist.
    :raises ex.DBDuplicateEntry: if unique constraints are violated.
    """
    session = get_session()
    try:
        with session.begin():
            cluster = _cluster_get(context, session, cluster_id)
            if cluster is None:
                raise ex.NotFoundException(cluster_id,
                                           _("Cluster id '%s' not found!"))
            cluster.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Cluster: %s") % e.columns)
    return cluster
def cluster_destroy(context, cluster_id):
    """Delete the cluster with `cluster_id`.

    :raises ex.NotFoundException: if the cluster does not exist.
    """
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if cluster is None:
            raise ex.NotFoundException(cluster_id,
                                       _("Cluster id '%s' not found!"))
        session.delete(cluster)
# Node Group ops
def _node_group_get(context, session, node_group_id):
    """Return the NodeGroup with `node_group_id`, or None."""
    return model_query(m.NodeGroup, context, session).filter_by(
        id=node_group_id).first()
def node_group_add(context, cluster_id, values):
    """Create a node group in cluster `cluster_id`; return its id.

    :raises ex.NotFoundException: if the cluster does not exist.
    """
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if cluster is None:
            raise ex.NotFoundException(cluster_id,
                                       _("Cluster id '%s' not found!"))
        node_group = m.NodeGroup()
        # The link is set first so `values` may deliberately override it.
        node_group.update({"cluster_id": cluster_id})
        node_group.update(values)
        session.add(node_group)
    return node_group.id
def node_group_update(context, node_group_id, values):
    """Apply `values` to node group `node_group_id`.

    :raises ex.NotFoundException: if the node group does not exist.
    """
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if node_group is None:
            raise ex.NotFoundException(node_group_id,
                                       _("Node Group id '%s' not found!"))
        node_group.update(values)
def node_group_remove(context, node_group_id):
    """Delete node group `node_group_id`.

    :raises ex.NotFoundException: if the node group does not exist.
    """
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if node_group is None:
            raise ex.NotFoundException(node_group_id,
                                       _("Node Group id '%s' not found!"))
        session.delete(node_group)
# Instance ops
def _instance_get(context, session, instance_id):
    """Return the Instance with `instance_id`, or None."""
    return model_query(m.Instance, context, session).filter_by(
        id=instance_id).first()
def instance_add(context, node_group_id, values):
    """Create an instance in node group `node_group_id` and bump the
    group's instance count; return the new instance id.

    :raises ex.NotFoundException: if the node group does not exist.
    """
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if not node_group:
            raise ex.NotFoundException(node_group_id,
                                       _("Node Group id '%s' not found!"))
        instance = m.Instance()
        instance.update({"node_group_id": node_group_id})
        instance.update(values)
        session.add(instance)
        # The node group was already loaded above; the old code issued
        # a redundant second _node_group_get query (identity map would
        # return the same object anyway) just to bump the counter.
        node_group.count += 1
    return instance.id
def instance_update(context, instance_id, values):
    """Apply `values` to instance `instance_id`.

    :raises ex.NotFoundException: if the instance does not exist.
    """
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if instance is None:
            raise ex.NotFoundException(instance_id,
                                       _("Instance id '%s' not found!"))
        instance.update(values)
def instance_remove(context, instance_id):
    """Delete instance `instance_id` and decrement its node group count.

    :raises ex.NotFoundException: if the instance does not exist.
    """
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if not instance:
            raise ex.NotFoundException(instance_id,
                                       _("Instance id '%s' not found!"))
        session.delete(instance)
        # Reading node_group_id after session.delete() is fine: the
        # attribute stays loaded until the session flushes the delete.
        node_group_id = instance.node_group_id
        node_group = _node_group_get(context, session, node_group_id)
        node_group.count -= 1
# Volumes ops
def append_volume(context, instance_id, volume_id):
    """Attach `volume_id` to the volume list of instance `instance_id`.

    :raises ex.NotFoundException: if the instance does not exist.
    """
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if instance is None:
            raise ex.NotFoundException(instance_id,
                                       _("Instance id '%s' not found!"))
        instance.volumes.append(volume_id)
def remove_volume(context, instance_id, volume_id):
    """Detach `volume_id` from the volume list of instance `instance_id`.

    :raises ex.NotFoundException: if the instance does not exist.
    """
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if instance is None:
            raise ex.NotFoundException(instance_id,
                                       _("Instance id '%s' not found!"))
        instance.volumes.remove(volume_id)
# Cluster Template ops
def _cluster_template_get(context, session, cluster_template_id):
    """Return the ClusterTemplate with `cluster_template_id`, or None."""
    return model_query(m.ClusterTemplate, context, session).filter_by(
        id=cluster_template_id).first()
def cluster_template_get(context, cluster_template_id):
    """Return the ClusterTemplate with `cluster_template_id`, or None."""
    session = get_session()
    return _cluster_template_get(context, session, cluster_template_id)
def cluster_template_get_all(context, regex_search=False,
                             marker=None, limit=None, sort_by=None, **kwargs):
    """Return a page of cluster templates matching the given filters.

    :param regex_search: if True, apply kwargs on regex_cols as regex
        patterns rather than exact-match filters.
    :param marker: id of the template the page starts after.
    :param limit: page size (None for all).
    :param sort_by: sort field, optionally "-"-prefixed for descending.
    :param kwargs: column filters.
    """
    regex_cols = ['name', 'description', 'plugin_name', 'tenant_id']
    sort_by, order = _parse_sorting_args(sort_by)
    query = model_query(m.ClusterTemplate, context)
    if regex_search:
        query, kwargs = regex_filter(query,
                                     m.ClusterTemplate, regex_cols, kwargs)
    limit = int(limit) if limit else None
    # Resolve the marker id into the actual template object.
    marker = cluster_template_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(query.filter_by(**kwargs),
                                  m.ClusterTemplate,
                                  limit, [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def cluster_template_create(context, values):
    """Create a cluster template plus its node groups in one transaction.

    "node_groups" entries in `values` become TemplatesRelation rows
    linked to the new template.

    :raises ex.DBDuplicateEntry: if unique constraints are violated.
    """
    values = values.copy()
    cluster_template = m.ClusterTemplate()
    # Tolerate a missing "node_groups" key (the old .pop() without a
    # default raised KeyError), matching cluster_create's behavior;
    # an explicit None also yields an empty list.
    node_groups = values.pop("node_groups", None) or []
    cluster_template.update(values)
    session = get_session()
    try:
        with session.begin():
            session.add(cluster_template)
            # Flush so cluster_template.id is assigned before linking.
            session.flush(objects=[cluster_template])
            for ng in node_groups:
                node_group = m.TemplatesRelation()
                node_group.update({"cluster_template_id": cluster_template.id})
                node_group.update(ng)
                session.add(node_group)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for object %(object)s. Failed on columns: "
              "%(columns)s") % {"object": e.value, "columns": e.columns})
    return cluster_template_get(context, cluster_template.id)
def cluster_template_destroy(context, cluster_template_id,
                             ignore_prot_on_def=False):
    """Delete cluster template `cluster_template_id`.

    :param ignore_prot_on_def: if True, skip the protected-resource
        check for default (system-defined) templates.
    :raises ex.NotFoundException: if the template does not exist.
    """
    session = get_session()
    with session.begin():
        cluster_template = _cluster_template_get(context, session,
                                                 cluster_template_id)
        if not cluster_template:
            raise ex.NotFoundException(
                cluster_template_id,
                _("Cluster Template id '%s' not found!"))
        validate.check_tenant_for_delete(context, cluster_template)
        # Protection is enforced unless this is a default template AND
        # the caller explicitly asked to ignore protection on defaults.
        if not (cluster_template.is_default and ignore_prot_on_def):
            validate.check_protected_from_delete(cluster_template)
        session.delete(cluster_template)
def cluster_template_update(context, values, ignore_prot_on_def=False):
    """Update a ClusterTemplate and optionally replace its node groups.

    If "node_groups" is present in values, the existing TemplatesRelation
    rows are deleted and recreated from the new list; if absent, current
    node groups are kept unchanged.

    :param values: fields to update; must contain 'id'.
    :param ignore_prot_on_def: skip the protected-resource check for
                               default (is_default) templates.
    :raises ex.NotFoundException: if values['id'] does not exist.
    :raises ex.UpdateFailedException: if the template is referenced by a
                                      cluster.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    """
    explicit_node_groups = "node_groups" in values
    if explicit_node_groups:
        node_groups = values.pop("node_groups")
        if node_groups is None:
            node_groups = []

    session = get_session()
    cluster_template_id = values['id']
    try:
        with session.begin():
            cluster_template = (_cluster_template_get(
                context, session, cluster_template_id))
            if not cluster_template:
                raise ex.NotFoundException(
                    cluster_template_id,
                    _("Cluster Template id '%s' not found!"))

            validate.check_tenant_for_update(context, cluster_template)
            if not (cluster_template.is_default and ignore_prot_on_def):
                validate.check_protected_from_update(cluster_template, values)

            if len(cluster_template.clusters) > 0:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("Cluster Template id '%s' can not be updated. "
                      "It is referenced by at least one cluster.")
                )
            cluster_template.update(values)
            # The flush here will cause a duplicate entry exception if
            # unique constraints are violated, before we go ahead and delete
            # the node group templates
            session.flush(objects=[cluster_template])

            # If node_groups has not been specified, then we are
            # keeping the old ones so don't delete!
            if explicit_node_groups:
                model_query(m.TemplatesRelation,
                            context, session=session).filter_by(
                    cluster_template_id=cluster_template_id).delete()
                for ng in node_groups:
                    node_group = m.TemplatesRelation()
                    node_group.update(ng)
                    node_group.update({"cluster_template_id":
                                       cluster_template_id})
                    session.add(node_group)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for ClusterTemplate: %s") % e.columns)

    return cluster_template_get(context, cluster_template_id)
# Node Group Template ops
def _node_group_template_get(context, session, node_group_template_id):
    """Return the NodeGroupTemplate with the given id, or None (uses an
    existing session)."""
    query = model_query(m.NodeGroupTemplate, context, session)
    return query.filter_by(id=node_group_template_id).first()
def node_group_template_get(context, node_group_template_id):
    """Return the NodeGroupTemplate with the given id, or None."""
    return _node_group_template_get(context, get_session(),
                                    node_group_template_id)
def node_group_template_get_all(context, regex_search=False, marker=None,
                                limit=None, sort_by=None, **kwargs):
    """Return a Page of NodeGroupTemplates matching the filters in kwargs.

    :param regex_search: if True, apply regex (not exact) matching to the
                         columns listed in regex_cols.
    :param marker: id of the last object on the previous page, or None.
    :param limit: maximum page size; falsy means no limit.
    :param sort_by: field name to sort by; '-' prefix means descending.
    """
    sort_by, order = _parse_sorting_args(sort_by)
    regex_cols = ['name', 'description', 'plugin_name', 'tenant_id']
    limit = int(limit) if limit else None
    query = model_query(m.NodeGroupTemplate, context)
    if regex_search:
        # Regex-matched keys are consumed here; the rest use exact matching.
        query, kwargs = regex_filter(query,
                                     m.NodeGroupTemplate, regex_cols, kwargs)
    # Resolve the marker id to the actual object (None when no marker given).
    marker = node_group_template_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(
        query.filter_by(**kwargs), m.NodeGroupTemplate,
        limit, [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def node_group_template_create(context, values):
    """Create a NodeGroupTemplate from the given field values.

    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    :returns: the new NodeGroupTemplate object.
    """
    node_group_template = m.NodeGroupTemplate()
    node_group_template.update(values)

    session = get_session()
    try:
        with session.begin():
            session.add(node_group_template)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return node_group_template
def node_group_template_destroy(context, node_group_template_id,
                                ignore_prot_on_def=False):
    """Delete a NodeGroupTemplate after tenant/protection validation.

    :param ignore_prot_on_def: skip the protected-resource check for
                               default (is_default) templates.
    :raises ex.NotFoundException: if the id does not exist.
    """
    session = get_session()
    with session.begin():
        node_group_template = _node_group_template_get(context, session,
                                                       node_group_template_id)
        if not node_group_template:
            raise ex.NotFoundException(
                node_group_template_id,
                _("Node Group Template id '%s' not found!"))

        validate.check_tenant_for_delete(context, node_group_template)
        if not (node_group_template.is_default and ignore_prot_on_def):
            validate.check_protected_from_delete(node_group_template)

        session.delete(node_group_template)
def node_group_template_update(context, values, ignore_prot_on_def=False):
    """Update a NodeGroupTemplate and propagate it to referencing templates.

    After the template itself is updated, every ClusterTemplate that
    references it has its node_groups list rebuilt (via
    cluster_template_update) so the embedded copies stay in sync.

    :param values: fields to update; must contain 'id'.
    :param ignore_prot_on_def: skip the protected-resource check for
                               default (is_default) templates.
    :raises ex.NotFoundException: if values['id'] does not exist.
    :raises ex.UpdateFailedException: if the template is referenced by an
                                      existing cluster.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))

            validate.check_tenant_for_update(context, ngt)
            if not (ngt.is_default and ignore_prot_on_def):
                validate.check_protected_from_update(ngt, values)

            # Check to see that the node group template to be updated is not in
            # use by an existing cluster.
            for template_relationship in ngt.templates_relations:
                if len(template_relationship.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster.")
                    )

            ngt.update(values)

            # Here we update any cluster templates that reference the
            # updated node group template
            for template_relationship in ngt.templates_relations:
                ct_id = template_relationship.cluster_template_id
                ct = cluster_template_get(
                    context, template_relationship.cluster_template_id)
                node_groups = ct.node_groups
                ct_node_groups = []
                for ng in node_groups:
                    # Need to fill in all node groups, not just
                    # the modified group
                    ng_to_add = ng
                    if ng.node_group_template_id == ngt_id:
                        # use the updated node group template
                        ng_to_add = ngt
                    ng_to_add = ng_to_add.to_dict()
                    # Preserve the per-template count and template link,
                    # and drop fields that must not be copied into a new
                    # TemplatesRelation row.
                    ng_to_add.update(
                        {"count": ng["count"],
                         "node_group_template_id": ng.node_group_template_id})
                    ng_to_add.pop("updated_at", None)
                    ng_to_add.pop("created_at", None)
                    ng_to_add.pop("id", None)
                    ct_node_groups.append(ng_to_add)
                ct_update = {"id": ct_id,
                             "node_groups": ct_node_groups}
                cluster_template_update(context, ct_update, ignore_prot_on_def)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
# Data Source ops
def _data_source_get(context, session, data_source_id):
    """Return the DataSource with the given id, or None (uses an existing
    session)."""
    query = model_query(m.DataSource, context, session)
    return query.filter_by(id=data_source_id).first()
def data_source_get(context, data_source_id):
    """Return the DataSource with the given id, or None."""
    return _data_source_get(context, get_session(), data_source_id)
def data_source_count(context, **kwargs):
    """Count DataSource objects filtered by search criteria in kwargs.

    Entries in kwargs indicate column names and search values.

    'in' filters will be used to search for any entries in kwargs
    that name DataSource columns and have values of type tuple. This
    allows column values to match multiple values (OR)

    'like' filters will be used for any entries in kwargs that
    name DataSource columns and have string values containing '%'.
    This allows column values to match simple wildcards.

    Any other entries in kwargs will be searched for using filter_by()

    :returns: the number of matching DataSource rows (int).
    """
    query = model_query(m.DataSource, context)
    # Each helper consumes the kwargs it handled and returns the rest.
    query, kwargs = in_filter(query, m.DataSource, kwargs)
    query, kwargs = like_filter(query, m.DataSource, kwargs)

    # Use normal filter_by for remaining keys
    return query.filter_by(**kwargs).count()
def data_source_get_all(context, regex_search=False,
                        limit=None, marker=None, sort_by=None, **kwargs):
    """Return a Page of DataSources matching the filters in kwargs.

    :param regex_search: if True, apply regex (not exact) matching to the
                         columns listed in regex_cols.
    :param limit: maximum page size; falsy means no limit.
    :param marker: id of the last object on the previous page, or None.
    :param sort_by: field name to sort by; '-' prefix means descending.
    """
    regex_cols = ['name', 'description', 'url']
    sort_by, order = _parse_sorting_args(sort_by)
    query = model_query(m.DataSource, context)
    if regex_search:
        # Regex-matched keys are consumed here; the rest use exact matching.
        query, kwargs = regex_filter(query,
                                     m.DataSource, regex_cols, kwargs)
    limit = int(limit) if limit else None
    # Resolve the marker id to the actual object (None when no marker given).
    marker = data_source_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(query.filter_by(**kwargs), m.DataSource,
                                  limit, [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def data_source_create(context, values):
    """Create a DataSource from the given field values.

    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    :returns: the new DataSource object.
    """
    data_source = m.DataSource()
    data_source.update(values)

    session = get_session()
    try:
        with session.begin():
            session.add(data_source)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return data_source
def data_source_destroy(context, data_source_id):
    """Delete a DataSource after tenant/protection validation.

    :raises ex.NotFoundException: if the id does not exist.
    :raises ex.DeletionFailed: on a DB error, e.g. a foreign key
                               constraint (the source is referenced).
    """
    session = get_session()
    try:
        with session.begin():
            data_source = _data_source_get(context, session, data_source_id)
            if not data_source:
                raise ex.NotFoundException(
                    data_source_id,
                    _("Data Source id '%s' not found!"))
            validate.check_tenant_for_delete(context, data_source)
            validate.check_protected_from_delete(data_source)
            session.delete(data_source)
    except db_exc.DBError as e:
        # Distinguish FK violations in the message so the caller knows the
        # source is still referenced.
        msg = ("foreign key constraint" in six.text_type(e) and
               _(" on foreign key constraint") or "")
        raise ex.DeletionFailed(_("Data Source deletion failed%s") % msg)
def data_source_update(context, values):
    """Update a DataSource with the given field values.

    :param values: fields to update; must contain 'id'.
    :raises ex.NotFoundException: if values['id'] does not exist.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    :returns: the updated DataSource object.
    """
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            data_source = _data_source_get(context, session, ds_id)
            if not data_source:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))
            validate.check_tenant_for_update(context, data_source)
            validate.check_protected_from_update(data_source, values)

            data_source.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return data_source
# JobExecution ops
def _job_execution_get(context, session, job_execution_id):
    """Return the JobExecution with the given id, or None (uses an existing
    session)."""
    query = model_query(m.JobExecution, context, session)
    return query.filter_by(id=job_execution_id).first()
def job_execution_get(context, job_execution_id):
    """Return the JobExecution with the given id, or None."""
    return _job_execution_get(context, get_session(), job_execution_id)
def job_execution_get_all(context, regex_search=False,
                          limit=None, marker=None, sort_by=None, **kwargs):
    """Get all JobExecutions filtered by **kwargs.

    kwargs key values may be the names of fields in a JobExecution
    plus the following special values with the indicated meaning:

    'cluster.name' -- name of the Cluster referenced by the JobExecution
    'job.name' -- name of the Job referenced by the JobExecution
    'status' -- JobExecution['info']['status']

    e.g. job_execution_get_all(cluster_id=12, input_id=123)
         job_execution_get_all(**{'cluster.name': 'test',
                                  'job.name': 'wordcount'})

    :param regex_search: if True, apply regex (not exact) matching to the
                         columns listed in regex_cols.
    :param limit: maximum page size; falsy means no limit.
    :param marker: id of the last object on the previous page, or None.
    :param sort_by: field name to sort by; '-' prefix means descending.
    :returns: a types.Page of JobExecutions with prev/next markers.
    """
    sort_by, order = _parse_sorting_args(sort_by)

    regex_cols = ['job.name', 'cluster.name']

    # Remove the external fields if present, they'll
    # be handled with a join and filter
    externals = {k: kwargs.pop(k) for k in ['cluster.name',
                                            'job.name',
                                            'status'] if k in kwargs}

    # At this time, none of the fields in m.JobExecution itself
    # are candidates for regex search, however this code fragment
    # should remain in case that changes. This is the correct place
    # to insert regex filters on the m.JobExecution class
    query = model_query(m.JobExecution, context)
    if regex_search:
        query, kwargs = regex_filter(query,
                                     m.JobExecution, regex_cols, kwargs)

    # Filter JobExecution by the remaining kwargs. This has to be done
    # before application of the joins and filters because those
    # change the class that query.filter_by will apply to
    query = query.filter_by(**kwargs)

    # Now add the joins and filters for the externals.
    # Bug fix: the guards below previously tested "regex_filter" (a
    # function object, always truthy) instead of the regex_search flag,
    # so regex matching was applied even when regex_search was False.
    if 'cluster.name' in externals:
        search_opts = {'name': externals['cluster.name']}
        query = query.join(m.Cluster)
        if regex_search and 'cluster.name' in regex_cols:
            query, search_opts = regex_filter(query,
                                              m.Cluster, ['name'], search_opts)
        query = query.filter_by(**search_opts)

    if 'job.name' in externals:
        search_opts = {'name': externals['job.name']}
        query = query.join(m.Job)
        if regex_search and 'job.name' in regex_cols:
            query, search_opts = regex_filter(query,
                                              m.Job, ['name'], search_opts)
        query = query.filter_by(**search_opts)

    res = query.order_by(sort_by).all()

    if order == 'desc':
        res.reverse()

    # 'info' is a JsonDictType which is stored as a string.
    # It would be possible to search for the substring containing
    # the value of 'status' in 'info', but 'info' also contains
    # data returned from a client and not managed by Sahara.
    # In the case of Oozie jobs, for example, other fields (actions)
    # also contain 'status'. Therefore we can't filter on it reliably
    # by a substring search in the query.
    if 'status' in externals:
        status = externals['status'].lower()
        res = [je for je in res if (
            je['info'] and je['info'].get('status', '').lower() == status)]

    res_page = res
    if marker:
        n = None
        for i, je in enumerate(res):
            if je['id'] == marker:
                n = i
                break  # ids are unique; no need to scan further
        # Compare against None explicitly so a marker found at index 0
        # is handled like any other position.
        if n is not None:
            res_page = res[n:]

    if limit:
        limit = int(limit)
        res_page = res_page[:limit] if limit < len(res_page) else res_page

    # Resolve the marker id to the object; prev/next markers are computed
    # against the full (unpaged) result list.
    marker = job_execution_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        res, limit, marker)
    return types.Page(res_page, prev_marker, next_marker)
def job_execution_count(context, **kwargs):
    """Return the number of JobExecutions matching the exact filters in
    kwargs."""
    query = count_query(m.JobExecution, context)
    return query.filter_by(**kwargs).first()[0]
def _get_config_section(configs, mapping_type):
if mapping_type not in configs:
configs[mapping_type] = [] if mapping_type == "args" else {}
return configs[mapping_type]
def _merge_execution_interface(job_ex, job, execution_interface):
    """Merges the interface for a job execution with that of its job.

    For every argument declared on the job's interface, pick a value in
    priority order: explicit execution_interface entry, then a value in
    job_configs at the same location, then the argument's default.
    Chosen values are written into the appropriate config section
    ("args" entries are collected positionally).
    """
    configs = job_ex.job_configs or {}
    # Sentinel object distinguishes "no value found" from legitimate
    # falsy values like 0 or "".
    nonexistent = object()
    positional_args = {}
    for arg in job.interface:
        value = nonexistent
        typed_configs = _get_config_section(configs, arg.mapping_type)
        # Interface args are our first choice for the value.
        if arg.name in execution_interface:
            value = execution_interface[arg.name]
        else:
            # If a default exists, we can use that, but...
            if arg.default is not None:
                value = arg.default
            # We should prefer an argument passed through the
            # job_configs that maps to the same location.
            if arg.mapping_type != "args":
                value = typed_configs.get(arg.location, value)
        if value is not nonexistent:
            if arg.mapping_type != "args":
                typed_configs[arg.location] = value
            else:
                positional_args[int(arg.location)] = value
    if positional_args:
        # NOTE(review): assumes positional locations are contiguous
        # 0..n-1; a gap would raise KeyError here — confirm upstream
        # validation guarantees this.
        positional_args = [positional_args[i] for i
                           in range(len(positional_args))]
        configs["args"] = positional_args + configs["args"]
    if configs and not job_ex.job_configs:
        job_ex.job_configs = configs
def job_execution_create(context, values):
    """Create a JobExecution, merging its interface with the parent Job's.

    :param values: execution fields; the optional "interface" entry maps
                   interface argument names to values.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    :returns: the new JobExecution object.
    """
    session = get_session()
    execution_interface = values.pop('interface', {})
    job_ex = m.JobExecution()
    job_ex.update(values)
    try:
        with session.begin():
            job_ex.interface = []
            job = _job_get(context, session, job_ex.job_id)
            if job.interface:
                # Fold interface values and defaults into job_configs.
                _merge_execution_interface(job_ex, job, execution_interface)
            session.add(job_ex)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobExecution: %s") % e.columns)

    return job_ex
def job_execution_update(context, job_execution_id, values):
    """Update a JobExecution with the given field values.

    :raises ex.NotFoundException: if the id does not exist.
    :returns: the updated JobExecution object.
    """
    session = get_session()

    with session.begin():
        job_ex = _job_execution_get(context, session, job_execution_id)
        if not job_ex:
            raise ex.NotFoundException(job_execution_id,
                                       _("JobExecution id '%s' not found!"))
        job_ex.update(values)
        session.add(job_ex)

    return job_ex
def job_execution_destroy(context, job_execution_id):
    """Delete a JobExecution.

    :raises ex.NotFoundException: if the id does not exist.
    """
    session = get_session()
    with session.begin():
        job_ex = _job_execution_get(context, session, job_execution_id)
        if not job_ex:
            raise ex.NotFoundException(job_execution_id,
                                       _("JobExecution id '%s' not found!"))

        session.delete(job_ex)
# Job ops
def _job_get(context, session, job_id):
    """Return the Job with the given id, or None (uses an existing
    session)."""
    query = model_query(m.Job, context, session)
    return query.filter_by(id=job_id).first()
def job_get(context, job_id):
    """Return the Job with the given id, or None."""
    return _job_get(context, get_session(), job_id)
def job_get_all(context, regex_search=False,
                limit=None, marker=None, sort_by=None, **kwargs):
    """Return a Page of Jobs matching the filters in kwargs.

    :param regex_search: if True, apply regex (not exact) matching to the
                         columns listed in regex_cols.
    :param limit: maximum page size; falsy means no limit.
    :param marker: id of the last object on the previous page, or None.
    :param sort_by: field name to sort by; '-' prefix means descending.
    """
    regex_cols = ['name', 'description']
    sort_by, order = _parse_sorting_args(sort_by)
    query = model_query(m.Job, context)
    if regex_search:
        # Regex-matched keys are consumed here; the rest use exact matching.
        query, kwargs = regex_filter(query,
                                     m.Job, regex_cols, kwargs)
    limit = int(limit) if limit else None
    # Resolve the marker id to the actual object (None when no marker given).
    marker = job_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(query.filter_by(**kwargs),
                                  m.Job, limit, [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def _append_job_binaries(context, session, from_list, to_list):
    """Resolve JobBinary ids in from_list and append the objects to to_list.

    Ids that do not resolve to an existing JobBinary are silently skipped.
    """
    for job_binary_id in from_list:
        job_binary = model_query(
            m.JobBinary, context, session).filter_by(id=job_binary_id).first()
        if job_binary is not None:
            to_list.append(job_binary)
def _append_interface(context, from_list, to_list):
    """Build JobInterfaceArgument objects from dicts and append to to_list.

    Each argument is stamped with the caller's tenant and its position in
    from_list (the 'order' field).
    """
    for order, argument_values in enumerate(from_list):
        argument_values['tenant_id'] = context.tenant_id
        argument_values['order'] = order
        argument = m.JobInterfaceArgument()
        argument.update(argument_values)
        to_list.append(argument)
def job_create(context, values):
    """Create a Job plus its mains, libs and interface arguments.

    :param values: job fields; optional "mains"/"libs" are lists of
                   JobBinary ids, optional "interface" is a list of
                   interface-argument dicts.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    :returns: the new Job object.
    """
    mains = values.pop("mains", [])
    libs = values.pop("libs", [])
    interface = values.pop("interface", [])

    session = get_session()
    try:
        with session.begin():
            job = m.Job()
            job.update(values)
            # These are 'lazy' objects. The initialization below
            # is needed here because it provides libs, mains, and
            # interface to be initialized within a session even if
            # the lists are empty
            job.mains = []
            job.libs = []
            job.interface = []

            _append_job_binaries(context, session, mains, job.mains)
            _append_job_binaries(context, session, libs, job.libs)
            _append_interface(context, interface, job.interface)

            session.add(job)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Job: %s") % e.columns)

    return job
def job_update(context, job_id, values):
    """Update a Job with the given field values.

    :raises ex.NotFoundException: if the id does not exist.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    :returns: the updated Job object.
    """
    session = get_session()

    try:
        with session.begin():
            job = _job_get(context, session, job_id)
            if not job:
                raise ex.NotFoundException(job_id,
                                           _("Job id '%s' not found!"))

            validate.check_tenant_for_update(context, job)
            validate.check_protected_from_update(job, values)

            job.update(values)
            session.add(job)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Job: %s") % e.columns)

    return job
def job_destroy(context, job_id):
    """Delete a Job after tenant/protection validation.

    :raises ex.NotFoundException: if the id does not exist.
    :raises ex.DeletionFailed: on a DB error, e.g. a foreign key
                               constraint (the job is referenced).
    """
    session = get_session()
    try:
        with session.begin():
            job = _job_get(context, session, job_id)
            if not job:
                raise ex.NotFoundException(job_id,
                                           _("Job id '%s' not found!"))

            validate.check_tenant_for_delete(context, job)
            validate.check_protected_from_delete(job)

            session.delete(job)
    except db_exc.DBError as e:
        # Distinguish FK violations in the message so the caller knows the
        # job is still referenced.
        msg = ("foreign key constraint" in six.text_type(e) and
               _(" on foreign key constraint") or "")
        raise ex.DeletionFailed(_("Job deletion failed%s") % msg)
# JobBinary ops
def _job_binary_get(context, session, job_binary_id):
    """Return the JobBinary with the given id, or None (uses an existing
    session)."""
    query = model_query(m.JobBinary, context, session)
    return query.filter_by(id=job_binary_id).first()
def job_binary_get_all(context, regex_search=False,
                       limit=None, marker=None, sort_by=None, **kwargs):
    """Return a Page of JobBinaries matching the filters in kwargs.

    :param regex_search: if True, apply regex (not exact) matching to the
                         columns listed in regex_cols.
    :param limit: maximum page size; falsy means no limit.
    :param marker: id of the last object on the previous page, or None.
    :param sort_by: field name to sort by; '-' prefix means descending.
    """
    sort_by, order = _parse_sorting_args(sort_by)
    regex_cols = ['name', 'description', 'url']
    query = model_query(m.JobBinary, context)
    if regex_search:
        # Regex-matched keys are consumed here; the rest use exact matching.
        query, kwargs = regex_filter(query,
                                     m.JobBinary, regex_cols, kwargs)
    limit = int(limit) if limit else None
    # Resolve the marker id to the actual object (None when no marker given).
    marker = job_binary_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(query.filter_by(**kwargs),
                                  m.JobBinary,
                                  limit, [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def job_binary_get(context, job_binary_id):
    """Returns a JobBinary object that does not contain a data field

    The data column uses deferred loading.
    """
    return _job_binary_get(context, get_session(), job_binary_id)
def job_binary_create(context, values):
    """Returns a JobBinary that does not contain a data field

    The data column uses deferred loading.

    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    """
    job_binary = m.JobBinary()
    job_binary.update(values)

    session = get_session()
    try:
        with session.begin():
            session.add(job_binary)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinary: %s") % e.columns)

    return job_binary
def job_binary_update(context, values):
    """Returns a JobBinary updated with the provided values.

    Rejects the update if it would change the url of an internal-db
    binary, or if the binary is referenced by a PENDING job execution.

    :param values: fields to update; must contain 'id'.
    :raises ex.NotFoundException: if values['id'] does not exist.
    :raises ex.UpdateFailedException: if the url of an internal-db binary
                                      would change, or the binary is used
                                      by a PENDING job.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    """
    jb_id = values["id"]
    session = get_session()
    try:
        with session.begin():
            jb = _job_binary_get(context, session, jb_id)
            if not jb:
                raise ex.NotFoundException(
                    jb_id, _("JobBinary id '%s' not found"))

            validate.check_tenant_for_update(context, jb)
            validate.check_protected_from_update(jb, values)

            # We do not want to update the url for internal binaries
            new_url = values.get("url", None)
            if new_url and "internal-db://" in jb["url"]:
                if jb["url"] != new_url:
                    raise ex.UpdateFailedException(
                        jb_id,
                        _("The url for JobBinary Id '%s' can not "
                          "be updated because it is an internal-db url."))
            # Refuse the update if any PENDING job execution's job still
            # references this binary (mains or libs).
            jobs = job_execution_get_all(context)
            pending_jobs = [job for job in jobs if
                            job.info["status"] == "PENDING"]
            if len(pending_jobs) > 0:
                for job in pending_jobs:
                    if _check_job_binary_referenced(
                            context, session, jb_id, job.job_id):
                        raise ex.UpdateFailedException(
                            jb_id,
                            _("JobBinary Id '%s' is used in a PENDING job "
                              "and can not be updated."))
            jb.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinary: %s") % e.columns)

    return jb
def _check_job_binary_referenced(ctx, session, job_binary_id, job_id=None):
    """Return True if a Job's mains or libs reference the given JobBinary.

    :param job_id: if given, restrict the check to that single Job;
                   otherwise any job counts.
    """
    args = {"JobBinary_id": job_binary_id}
    if job_id:
        args["Job_id"] = job_id
    # Association tables carry no tenant column, so skip project scoping.
    mains = model_query(m.mains_association, ctx, session,
                        project_only=False).filter_by(**args)
    libs = model_query(m.libs_association, ctx, session,
                       project_only=False).filter_by(**args)

    return mains.first() is not None or libs.first() is not None
def job_binary_destroy(context, job_binary_id):
    """Delete a JobBinary unless it is still referenced by a Job.

    :raises ex.NotFoundException: if the id does not exist.
    :raises ex.DeletionFailed: if the binary is referenced.
    """
    session = get_session()
    with session.begin():
        job_binary = _job_binary_get(context, session, job_binary_id)
        if not job_binary:
            raise ex.NotFoundException(job_binary_id,
                                       _("JobBinary id '%s' not found!"))

        validate.check_tenant_for_delete(context, job_binary)
        validate.check_protected_from_delete(job_binary)

        if _check_job_binary_referenced(context, session, job_binary_id):
            raise ex.DeletionFailed(
                _("JobBinary is referenced and cannot be deleted"))

        session.delete(job_binary)
# JobBinaryInternal ops
def _job_binary_internal_get(context, session, job_binary_internal_id):
    """Return the JobBinaryInternal with the given id, or None (uses an
    existing session)."""
    query = model_query(m.JobBinaryInternal, context, session)
    return query.filter_by(id=job_binary_internal_id).first()
def job_binary_internal_get_all(context, regex_search=False, limit=None,
                                marker=None, sort_by=None, **kwargs):
    """Returns JobBinaryInternal objects that do not contain a data field

    The data column uses deferred loading.

    :param regex_search: if True, apply regex (not exact) matching to the
                         columns listed in regex_cols.
    :param limit: maximum page size; falsy means no limit.
    :param marker: id of the last object on the previous page, or None.
    :param sort_by: field name to sort by; '-' prefix means descending.
    """
    sort_by, order = _parse_sorting_args(sort_by)
    regex_cols = ['name']
    query = model_query(m.JobBinaryInternal, context)
    if regex_search:
        # Regex-matched keys are consumed here; the rest use exact matching.
        query, kwargs = regex_filter(query,
                                     m.JobBinaryInternal, regex_cols, kwargs)
    limit = int(limit) if limit else None
    # Resolve the marker id to the actual object (None when no marker given).
    marker = job_binary_internal_get(context, marker)
    prev_marker, next_marker = _get_prev_and_next_objects(
        query.filter_by(**kwargs).order_by(sort_by).all(),
        limit, marker, order=order)
    result = utils.paginate_query(query.filter_by(**kwargs),
                                  m.JobBinaryInternal, limit,
                                  [sort_by], marker, order)
    return types.Page(result, prev_marker, next_marker)
def job_binary_internal_get(context, job_binary_internal_id):
    """Returns a JobBinaryInternal object that does not contain a data field

    The data column uses deferred loading.
    """
    return _job_binary_internal_get(context, get_session(),
                                    job_binary_internal_id)
def job_binary_internal_get_raw_data(context, job_binary_internal_id):
    """Returns only the data field for the specified JobBinaryInternal.

    Returns None if the id does not exist.

    :raises ex.DataTooBigException: if the stored data exceeds the
                                    job_binary_max_KB config limit.
    """
    query = model_query(m.JobBinaryInternal, context)
    res = query.filter_by(id=job_binary_internal_id).first()

    if res is not None:
        datasize_KB = res.datasize / 1024.0
        if datasize_KB > CONF.job_binary_max_KB:
            raise ex.DataTooBigException(
                round(datasize_KB, 1), CONF.job_binary_max_KB,
                _("Size of internal binary (%(size)sKB) is greater than the "
                  "maximum (%(maximum)sKB)"))

        # This assignment is sufficient to load the deferred column
        res = res.data
    return res
def job_binary_internal_create(context, values):
    """Returns a JobBinaryInternal that does not contain a data field

    The data column uses deferred loading.

    :param values: fields for the new object; must contain 'data', whose
                   length is recorded as 'datasize'.
    :raises ex.DataTooBigException: if the data exceeds the
                                    job_binary_max_KB config limit.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    """
    values["datasize"] = len(values["data"])
    datasize_KB = values["datasize"] / 1024.0
    if datasize_KB > CONF.job_binary_max_KB:
        raise ex.DataTooBigException(
            round(datasize_KB, 1), CONF.job_binary_max_KB,
            _("Size of internal binary (%(size)sKB) is greater "
              "than the maximum (%(maximum)sKB)"))

    job_binary_int = m.JobBinaryInternal()
    job_binary_int.update(values)

    session = get_session()
    try:
        with session.begin():
            session.add(job_binary_int)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinaryInternal: %s") % e.columns)

    # Re-read through the deferred-loading getter so the returned object
    # carries no data payload.
    return job_binary_internal_get(context, job_binary_int.id)
def job_binary_internal_destroy(context, job_binary_internal_id):
    """Delete a JobBinaryInternal after tenant/protection validation.

    :raises ex.NotFoundException: if the id does not exist.
    """
    session = get_session()
    with session.begin():
        job_binary_internal = _job_binary_internal_get(context, session,
                                                       job_binary_internal_id)
        if not job_binary_internal:
            raise ex.NotFoundException(
                job_binary_internal_id,
                _("JobBinaryInternal id '%s' not found!"))

        validate.check_tenant_for_delete(context, job_binary_internal)
        validate.check_protected_from_delete(job_binary_internal)

        session.delete(job_binary_internal)
def job_binary_internal_update(context, job_binary_internal_id, values):
    """Returns a JobBinaryInternal updated with the provided values.

    :raises ex.NotFoundException: if the id does not exist.
    :raises ex.DBDuplicateEntry: if a unique constraint is violated.
    """
    session = get_session()
    try:
        with session.begin():
            j_b_i = _job_binary_internal_get(
                context, session, job_binary_internal_id)
            if not j_b_i:
                raise ex.NotFoundException(
                    job_binary_internal_id,
                    _("JobBinaryInternal id '%s' not found!"))

            validate.check_tenant_for_update(context, j_b_i)
            validate.check_protected_from_update(j_b_i, values)

            j_b_i.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinaryInternal: %s") % e.columns)

    return j_b_i
# Events ops
def _cluster_provision_step_get(context, session, provision_step_id):
    """Return the ClusterProvisionStep with the given id, or None (uses an
    existing session)."""
    query = model_query(m.ClusterProvisionStep, context, session)
    return query.filter_by(id=provision_step_id).first()
def _cluster_provision_step_update(context, session, step_id):
    """Mark a provision step successful once all its events have arrived.

    When the number of recorded events equals the step's expected total,
    the events are deleted and the step is flagged successful. Steps
    already resolved (successful is not None) are left untouched.

    :raises ex.NotFoundException: if the step id does not exist.
    """
    step = _cluster_provision_step_get(context, session, step_id)

    if step is None:
        raise ex.NotFoundException(
            step_id,
            _("Cluster Provision Step id '%s' not found!"))

    if step.successful is not None:
        return

    if len(step.events) == step.total:
        # All events arrived; they are no longer needed once the step is
        # marked successful.
        for event in step.events:
            session.delete(event)
        step.update({'successful': True})
def cluster_provision_step_add(context, cluster_id, values):
    """Create a ClusterProvisionStep for a cluster and return its id.

    :raises ex.NotFoundException: if the cluster id does not exist.
    """
    session = get_session()

    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if not cluster:
            raise ex.NotFoundException(cluster_id,
                                       _("Cluster id '%s' not found!"))

        provision_step = m.ClusterProvisionStep()
        values['cluster_id'] = cluster_id
        values['tenant_id'] = context.tenant_id
        provision_step.update(values)
        session.add(provision_step)

        return provision_step.id
def cluster_provision_step_update(context, step_id):
    """Resolve a provision step's success state; no-op when event logging
    is disabled."""
    if CONF.disable_event_log:
        return
    session = get_session()

    with session.begin():
        _cluster_provision_step_update(context, session, step_id)
def cluster_provision_progress_update(context, cluster_id):
    """Resolve all unresolved provision steps and return the fresh cluster.

    :raises ex.NotFoundException: if the cluster id does not exist.
    :returns: the re-read Cluster object.
    """
    if CONF.disable_event_log:
        # Event logging off: nothing to resolve, just return the cluster.
        return _cluster_get(context, get_session(), cluster_id)
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if cluster is None:
            raise ex.NotFoundException(cluster_id,
                                       _("Cluster id '%s' not found!"))
        for step in cluster.provision_progress:
            if step.successful is None:
                _cluster_provision_step_update(context, session, step.id)
        # Re-read so the returned object reflects the step updates above.
        result_cluster = _cluster_get(context, session, cluster_id)
    return result_cluster
def cluster_event_add(context, step_id, values):
    """Record a ClusterEvent on a provision step and return the event id.

    A failed event (values['successful'] falsy) also marks the whole step
    as unsuccessful.

    :raises ex.NotFoundException: if the step id does not exist.
    """
    session = get_session()

    with session.begin():
        provision_step = _cluster_provision_step_get(
            context, session, step_id)

        if not provision_step:
            raise ex.NotFoundException(
                step_id,
                _("Cluster Provision Step id '%s' not found!"))

        event = m.ClusterEvent()
        values['step_id'] = step_id
        if not values['successful']:
            provision_step.update({'successful': False})
        event.update(values)
        session.add(event)

        return event.id
# Cluster verifications / health check ops
def _cluster_verification_get(context, session, verification_id):
    """Return the ClusterVerification with the given id, or None.

    Verifications carry no tenant column, so project scoping is disabled.
    """
    # tenant id is not presented
    query = model_query(m.ClusterVerification, context, session,
                        project_only=False)
    return query.filter_by(id=verification_id).first()
def cluster_verification_get(context, verification_id):
    """Return the ClusterVerification with the given id, or None."""
    return _cluster_verification_get(context, get_session(), verification_id)
def cluster_verification_add(context, cluster_id, values):
    """Create a ClusterVerification for a cluster.

    :raises ex.NotFoundException: if the cluster id does not exist.
    :returns: the new ClusterVerification object.
    """
    session = get_session()

    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)

        if not cluster:
            raise ex.NotFoundException(
                cluster_id, _("Cluster id '%s' not found!"))

        verification = m.ClusterVerification()
        values['cluster_id'] = cluster_id
        verification.update(values)
        session.add(verification)

    return verification
def cluster_verification_update(context, verification_id, values):
    """Update a ClusterVerification with the given field values.

    :raises ex.NotFoundException: if the id does not exist.
    :returns: the updated ClusterVerification object.
    """
    session = get_session()

    with session.begin():
        verification = _cluster_verification_get(
            context, session, verification_id)

        if not verification:
            raise ex.NotFoundException(
                verification_id, _("Verification id '%s' not found!"))

        verification.update(values)

    return verification
def cluster_verification_delete(context, verification_id):
    """Delete a ClusterVerification and all of its health checks.

    :raises ex.NotFoundException: if the id does not exist.
    """
    session = get_session()

    with session.begin():
        verification = _cluster_verification_get(
            context, session, verification_id)

        if not verification:
            raise ex.NotFoundException(
                verification_id, _("Verification id '%s' not found!"))

        # Remove dependent health checks before the verification itself.
        for check in verification.checks:
            session.delete(check)

        session.delete(verification)
def _cluster_health_check_get(context, session, health_check_id):
    """Return the ClusterHealthCheck with the given id, or None.

    Health checks carry no tenant column, so project scoping is disabled.
    """
    # tenant id is not presented
    query = model_query(m.ClusterHealthCheck, context, session,
                        project_only=False)
    return query.filter_by(id=health_check_id).first()
def cluster_health_check_get(context, health_check_id):
    """Return the ClusterHealthCheck with the given id, or None."""
    return _cluster_health_check_get(context, get_session(), health_check_id)
def cluster_health_check_add(context, verification_id, values):
    """Create a ClusterHealthCheck attached to a verification.

    :raises ex.NotFoundException: if the verification id does not exist.
    :returns: the new ClusterHealthCheck object.
    """
    session = get_session()

    with session.begin():
        verification = _cluster_verification_get(
            context, session, verification_id)

        if not verification:
            raise ex.NotFoundException(
                verification_id, _("Verification id '%s' not found!"))

        health_check = m.ClusterHealthCheck()
        values['verification_id'] = verification_id
        values['tenant_id'] = context.tenant_id
        health_check.update(values)
        session.add(health_check)

    return health_check
def cluster_health_check_update(context, health_check_id, values):
    """Update a ClusterHealthCheck with the given field values.

    :raises ex.NotFoundException: if the id does not exist.
    :returns: the updated ClusterHealthCheck object.
    """
    session = get_session()

    with session.begin():
        health_check = _cluster_health_check_get(
            context, session, health_check_id)

        if not health_check:
            raise ex.NotFoundException(
                health_check_id, _("Health check id '%s' not found!"))

        health_check.update(values)

    return health_check
def _plugin_get(context, session, name):
    """Return the PluginData row with the given name, or None (uses an
    existing session)."""
    query = model_query(m.PluginData, context, session)
    return query.filter_by(name=name).first()
def plugin_get(context, name):
    """Return the PluginData row with the given name, or None."""
    session = get_session()
    with session.begin():
        data = _plugin_get(context, session, name)
    return data
def plugin_create(context, values):
    """Create a PluginData row stamped with the caller's tenant.

    :returns: the new PluginData object.
    """
    session = get_session()
    with session.begin():
        plugin = m.PluginData()
        values['tenant_id'] = context.tenant_id
        plugin.update(values)
        session.add(plugin)
    return plugin
def plugin_get_all(context):
    """Return all PluginData rows visible to the caller."""
    query = model_query(m.PluginData, context)
    return query.all()
def plugin_update(context, name, values):
    """Update the PluginData row with the given name.

    :raises ex.NotFoundException: if no plugin with that name exists.
    :returns: the updated PluginData object.
    """
    session = get_session()
    with session.begin():
        plugin = _plugin_get(context, session, name)
        if not plugin:
            raise ex.NotFoundException(name, _("Plugin name '%s' not found!"))
        plugin.update(values)
    return plugin
def plugin_remove(context, name):
    """Delete the PluginData row with the given name.

    :raises ex.NotFoundException: if no plugin with that name exists.
    """
    session = get_session()
    with session.begin():
        plugin = _plugin_get(context, session, name)
        if not plugin:
            raise ex.NotFoundException(name, _("Plugin name '%s' not found!"))
        session.delete(plugin)
|
|
import ast
import datetime
import json
import logging
import copy
from django.http import HttpResponse
from multiprocessing import Process
from threading import Thread, local
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from multiprocessing.pool import Pool, ThreadPool
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
import crits.services
from crits.core.class_mapper import class_from_type, class_from_id
from crits.core.crits_mongoengine import json_handler
from crits.core.handlers import build_jtable, csv_export
from crits.core.handlers import jtable_ajax_list, jtable_ajax_delete
from crits.core.user_tools import user_sources
from crits.services.analysis_result import AnalysisResult, AnalysisConfig
from crits.services.analysis_result import EmbeddedAnalysisResultLog
from crits.services.core import ServiceConfigError, AnalysisTask
from crits.services.service import CRITsService
logger = logging.getLogger(__name__)
def generate_analysis_results_csv(request):
    """
    Generate a CSV file of the Analysis Results information.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Delegate directly to the generic CSV exporter for this collection.
    return csv_export(request, AnalysisResult)
def generate_analysis_results_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = AnalysisResult
    type_ = "analysis_result"
    opts = obj_type._meta['jtable_opts']
    if option == "jtlist":
        # AJAX listing: return the rows as JSON for the jtable widget.
        payload = jtable_ajax_list(obj_type,
                                   opts['details_url'],
                                   opts['details_url_key'],
                                   request,
                                   includes=opts['fields'])
        return HttpResponse(json.dumps(payload, default=json_handler),
                            content_type="application/json")
    if option == "jtdelete":
        # AJAX delete: report OK only when the delete actually happened.
        if jtable_ajax_delete(obj_type, request):
            payload = {"Result": "OK"}
        else:
            payload = {"Result": "ERROR"}
        return HttpResponse(json.dumps(payload, default=json_handler),
                            content_type="application/json")
    # Full-page (or inline) render: build the jtable configuration.
    listing_view = 'crits.services.views.%ss_listing' % type_
    jtopts = {
        'title': "Analysis Results",
        'default_sort': opts['default_sort'],
        'listurl': reverse(listing_view, args=('jtlist',)),
        'deleteurl': reverse(listing_view, args=('jtdelete',)),
        'searchurl': reverse(opts['searchurl']),
        'fields': opts['jtopts_fields'],
        'hidden_fields': opts['hidden_fields'],
        'linked_fields': opts['linked_fields'],
        'details_link': opts['details_link'],
        'no_sort': opts['no_sort']
    }
    jtable = build_jtable(jtopts, request)
    jtable['toolbar'] = []
    if option == "inline":
        template = "jtable.html"
        context = {'jtable': jtable,
                   'jtid': '%s_listing' % type_,
                   'button': '%ss_tab' % type_}
    else:
        template = "%s_listing.html" % type_
        context = {'jtable': jtable,
                   'jtid': '%s_listing' % type_}
    return render_to_response(template, context, RequestContext(request))
def service_work_handler(service_instance, final_config):
    """
    Handles a unit of work for a service by calling the service's "execute"
    method. This function is generally called by processes/threads. Also
    this function is needed because it is picklable and passing in the
    service_instance.execute method is not picklable because it is an
    instance method.

    :param service_instance: The service instance that the work will be performed in
    :type service_instance: crits.services.core.Service
    :param final_config: The service's configuration settings
    :type final_config: dict
    """
    service_instance.execute(final_config)
def run_service(name, type_, id_, user, obj=None,
                execute='local', custom_config=None, **kwargs):
    """
    Run a service.

    :param name: The name of the service to run.
    :type name: str
    :param type_: The type of the object.
    :type type_: str
    :param id_: The identifier of the object.
    :type id_: str
    :param user: The user running the service.
    :type user: str
    :param obj: The CRITs object, if given this overrides crits_type and identifier.
    :type obj: CRITs object.
    :param execute: The execution type.
    :type execute: str
    :param custom_config: Use a custom configuration for this run.
    :type custom_config: dict
    :returns: dict with key "success" (boolean) and, on failure, "html" (str).
    """
    # Bug fix: the default used to be a shared mutable dict ({}), which is
    # retained across calls; use None as the sentinel and build a fresh
    # dict per invocation.
    if custom_config is None:
        custom_config = {}
    result = {'success': False}
    if type_ not in settings.CRITS_TYPES:
        result['html'] = "Unknown CRITs type."
        return result
    if name not in enabled_services():
        result['html'] = "Service %s is unknown or not enabled." % name
        return result
    service_class = crits.services.manager.get_service_class(name)
    if not service_class:
        result['html'] = "Unable to get service class."
        return result
    if not obj:
        obj = class_from_id(type_, id_)
        if not obj:
            result['html'] = 'Could not find object.'
            return result
    service = CRITsService.objects(name=name).first()
    if not service:
        result['html'] = "Unable to find service in database."
        return result
    # See if the object is a supported type for the service.
    if not service_class.supported_for_type(type_):
        result['html'] = "Service not supported for type '%s'" % type_
        return result
    # When running in threaded mode, each thread needs to have its own copy of
    # the object. If we do not do this then one thread may read() from the
    # object (to get the binary) and then the second would would read() without
    # knowing and get undefined behavior as the file pointer would be who knows
    # where. By giving each thread a local copy they can operate independently.
    #
    # When not running in thread mode this has no effect except wasted memory.
    local_obj = local()
    local_obj.obj = copy.deepcopy(obj)
    # Give the service a chance to check for required fields.
    try:
        service_class.valid_for(local_obj.obj)
        if hasattr(local_obj.obj, 'filedata') and local_obj.obj.filedata:
            # Reset back to the start so the service gets the full file.
            local_obj.obj.filedata.seek(0)
    except ServiceConfigError as e:
        result['html'] = str(e)
        return result
    # Get the config from the database and validate the submitted options
    # exist.
    db_config = service.config.to_dict()
    try:
        service_class.validate_runtime(custom_config, db_config)
    except ServiceConfigError as e:
        result['html'] = str(e)
        return result
    final_config = db_config
    # Merge the submitted config with the one from the database.
    # This is because not all config options may be submitted.
    final_config.update(custom_config)
    form = service_class.bind_runtime_form(user, final_config)
    if form:
        if not form.is_valid():
            # TODO: return corrected form via AJAX
            result['html'] = str(form.errors)
            return result
        # If the form is valid, create the config using the cleaned data.
        final_config = db_config
        final_config.update(form.cleaned_data)
    logger.info("Running %s on %s, execute=%s" % (name, local_obj.obj.id, execute))
    service_instance = service_class(notify=update_analysis_results,
                                     complete=finish_task)
    # Give the service a chance to modify the config that gets saved to the DB.
    saved_config = dict(final_config)
    service_class.save_runtime_config(saved_config)
    task = AnalysisTask(local_obj.obj, service_instance, user)
    task.config = AnalysisConfig(**saved_config)
    task.start()
    add_task(task)
    service_instance.set_task(task)
    if execute == 'process':
        p = Process(target=service_instance.execute, args=(final_config,))
        p.start()
    elif execute == 'thread':
        t = Thread(target=service_instance.execute, args=(final_config,))
        t.start()
    elif execute == 'process_pool':
        if __service_process_pool__ is not None and service.compatability_mode != True:
            __service_process_pool__.apply_async(func=service_work_handler,
                                                 args=(service_instance, final_config,))
        else:
            # No pool available (or service opted out): fall back to a
            # dedicated process.
            logger.warning("Could not run %s on %s, execute=%s, running in process mode" % (name, local_obj.obj.id, execute))
            p = Process(target=service_instance.execute, args=(final_config,))
            p.start()
    elif execute == 'thread_pool':
        if __service_thread_pool__ is not None and service.compatability_mode != True:
            __service_thread_pool__.apply_async(func=service_work_handler,
                                                args=(service_instance, final_config,))
        else:
            # No pool available (or service opted out): fall back to a
            # dedicated thread.
            logger.warning("Could not run %s on %s, execute=%s, running in thread mode" % (name, local_obj.obj.id, execute))
            t = Thread(target=service_instance.execute, args=(final_config,))
            t.start()
    elif execute == 'local':
        service_instance.execute(final_config)
    # Return after starting thread so web request can complete.
    result['success'] = True
    return result
def add_task(task):
    """
    Add a new task.

    Persists the task as an AnalysisResult document.
    """
    logger.debug("Adding task %s", task)
    insert_analysis_results(task)
def run_triage(obj, user):
    """
    Run all services marked as triage against this top-level object.

    :param obj: The CRITs top-level object class.
    :type obj: Class which inherits from
               :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param user: The user requesting the services to be run.
    :type user: str
    """
    for service_name in triage_services():
        try:
            run_service(service_name,
                        obj._meta['crits_type'],
                        obj.id,
                        user,
                        obj=obj,
                        execute=settings.SERVICE_MODEL)
        except Exception:
            # Triage is best-effort: one failing service must not block the
            # rest. The previous bare 'except: pass' also swallowed
            # SystemExit/KeyboardInterrupt and left no trace; catch only
            # Exception and log the failure instead.
            logger.exception("Triage service %s failed", service_name)
    return
def add_result(object_type, object_id, analysis_id, result, type_, subtype,
               analyst):
    """
    add_results wrapper for a single result.

    Wraps each scalar argument in a one-element list and delegates to
    :func:`add_results`.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param result: The result to append.
    :type result: str
    :param type_: The result type.
    :type type_: str
    :param subtype: The result subtype.
    :type subtype: str
    :param analyst: The user updating the results.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    return add_results(object_type, object_id, analysis_id, [result], [type_],
                       [subtype], analyst)
def add_results(object_type, object_id, analysis_id, result, type_, subtype,
                analyst):
    """
    Add multiple results to an analysis task.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param result: The list of result to append.
    :type result: list of str
    :param type_: The list of result types.
    :type type_: list of str
    :param subtype: The list of result subtypes.
    :type subtype: list of str
    :param analyst: The user updating the results.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    res = {'success': False}
    if not object_type or not object_id or not analysis_id:
        res['message'] = "Must supply object id/type and analysis id."
        return res
    # Validate user can add service results to this TLO.
    klass = class_from_type(object_type)
    sources = user_sources(analyst)
    obj = klass.objects(id=object_id, source__name__in=sources).first()
    if not obj:
        res['message'] = "Could not find object to add results to."
        return res
    if not (result and type_ and subtype):
        res['message'] = "Need a result, type, and subtype to add a result."
        return res
    if not (len(result) == len(type_) == len(subtype)):
        res['message'] = "result, type, and subtype need to be the same length."
        return res
    # Build the list of result documents to push onto the task.
    final_list = []
    for i, r in enumerate(result):
        final = {'subtype': subtype[i], 'result': r}
        # Each type_ entry is a string-encoded dict of extra fields;
        # literal_eval parses it safely (it never executes code).
        extra = ast.literal_eval(type_[i])
        for k in extra:
            final[k] = extra[k]
        final_list.append(final)
    ar = AnalysisResult.objects(analysis_id=analysis_id).first()
    if ar:
        AnalysisResult.objects(id=ar.id).update_one(push_all__results=final_list)
        res['success'] = True
    else:
        # Bug fix: previously this fell through with success=False and no
        # explanation; report the failure the same way add_log() does.
        res['message'] = "Could not find task to add results to."
    return res
def add_log(object_type, object_id, analysis_id, log_message, level, analyst):
    """
    Add a log entry to an analysis task.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param log_message: The log entry to append.
    :type log_message: dict
    :param level: The log level.
    :type level: str
    :param analyst: The user updating the log.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    results = {'success': False}
    if not (object_type and object_id and analysis_id):
        results['message'] = "Must supply object id/type and analysis id."
        return results
    # Validate user can add service results to this TLO.
    klass = class_from_type(object_type)
    obj = klass.objects(id=object_id,
                        source__name__in=user_sources(analyst)).first()
    if not obj:
        results['message'] = "Could not find object to add results to."
        return results
    # Build the embedded log entry and push it onto the task's log.
    entry = EmbeddedAnalysisResultLog()
    entry.message = log_message
    entry.level = level
    entry.datetime = str(datetime.datetime.now())
    task = AnalysisResult.objects(analysis_id=analysis_id).first()
    if task:
        AnalysisResult.objects(id=task.id).update_one(push__log=entry)
        results['success'] = True
    else:
        results['message'] = "Could not find task to add log to."
    return results
def finish_task(object_type, object_id, analysis_id, status, analyst):
    """
    Finish a task by setting its status to "completed" and setting the finish
    date.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param status: The status of the task.
    :type status: str ("error", "completed")
    :param analyst: The user updating the log.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    results = {'success': False}
    # Normalize: None, empty, or any unknown state collapses to "completed"
    # (the original performed this in two redundant steps).
    if status not in ('error', 'completed'):
        status = "completed"
    if not object_type or not object_id or not analysis_id:
        results['message'] = "Must supply object id/type and analysis id."
        return results
    # Validate user can add service results to this TLO.
    klass = class_from_type(object_type)
    sources = user_sources(analyst)
    obj = klass.objects(id=object_id, source__name__in=sources).first()
    if not obj:
        results['message'] = "Could not find object to add results to."
        return results
    # Stamp the task with its final status and finish time.
    date = str(datetime.datetime.now())
    ar = AnalysisResult.objects(analysis_id=analysis_id).first()
    if ar:
        AnalysisResult.objects(id=ar.id).update_one(set__status=status,
                                                    set__finish_date=date)
        results['success'] = True
    else:
        # Bug fix: mirror add_log() and report when the task cannot be
        # located instead of silently returning success=False.
        results['message'] = "Could not find task to finish."
    return results
def update_config(service_name, config, analyst):
    """
    Update the configuration for a service.

    :param service_name: The service whose configuration is replaced.
    :type service_name: str
    :param config: The cleaned configuration values.
    :type config: dict
    :param analyst: The user saving the configuration.
    :type analyst: str
    :returns: dict with key "success" (boolean) and "message" on failure.
    """
    service = CRITsService.objects(name=service_name).first()
    service.config = AnalysisConfig(**config)
    try:
        #TODO: get/validate the config from service author to set status
        #update_status(service_name)
        service.save(username=analyst)
        return {'success': True}
    except ValidationError as e:
        # Fix: 'except X, e' is Python-2-only syntax; 'as' works on 2.6+
        # and 3.x and matches the style used elsewhere in this module.
        return {'success': False, 'message': e}
def get_service_config(name):
    """
    Collect configuration and status details for a single service.

    :param name: The name of the service.
    :type name: str
    :returns: dict with key "success" (boolean); on success also "config",
              "config_error", and "service", otherwise "error".
    """
    status = {'success': False}
    service = CRITsService.objects(name=name, status__ne="unavailable").first()
    if not service:
        status['error'] = 'Service "%s" is unavailable. Please review error logs.' % name
        return status
    config = service.config.to_dict()
    service_class = crits.services.manager.get_service_class(name)
    if not service_class:
        # Fix: this message previously contained the typo "unavilable".
        status['error'] = 'Service "%s" is unavailable. Please review error logs.' % name
        return status
    display_config = service_class.get_config_details(config)
    status['config'] = display_config
    status['config_error'] = _get_config_error(service)
    # TODO: fix code so we don't have to do this
    status['service'] = service.to_dict()
    status['success'] = True
    return status
def _get_config_error(service):
"""
Return a string describing the error in the service configuration.
Returns None if there are no errors.
"""
error = None
name = service['name']
config = service['config']
if service['status'] == 'misconfigured':
service_class = crits.services.manager.get_service_class(name)
try:
service_class.parse_config(config.to_dict())
except Exception as e:
error = str(e)
return error
def do_edit_config(name, analyst, post_data=None):
    """
    Render (and optionally save) the configuration form for a service.

    :param name: The name of the service.
    :type name: str
    :param analyst: The user editing the configuration.
    :type analyst: str
    :param post_data: Submitted form data, if this is a save request.
    :returns: dict with keys "success", "form", "service" and possibly
              "config_error".
    """
    status = {'success': False}
    service = CRITsService.objects(name=name, status__ne="unavailable").first()
    if not service:
        status['config_error'] = 'Service "%s" is unavailable. Please review error logs.' % name
        status['form'] = ''
        status['service'] = ''
        return status
    # Get the class that implements this service.
    service_class = crits.services.manager.get_service_class(name)
    current_config = service.config.to_dict()
    cfg_form, html = service_class.generate_config_form(current_config)
    # This isn't a form object. It's the HTML.
    status['form'] = html
    status['service'] = service
    if post_data:
        # Populate the form with values from the POST request.
        form = cfg_form(post_data)
        if not form.is_valid():
            status['config_error'] = form.errors
            return status
        try:
            service_class.parse_config(form.cleaned_data)
        except ServiceConfigError as e:
            # The submitted values fail the service's own validation;
            # remember that so the UI can flag the service.
            service.status = 'misconfigured'
            service.save()
            status['config_error'] = str(e)
            return status
        if not update_config(name, form.cleaned_data, analyst)['success']:
            return status
        service.status = 'available'
        service.save()
    status['success'] = True
    return status
def get_config(service_name):
    """
    Get the configuration for a service.

    Returns None when the service does not exist.
    """
    service = CRITsService.objects(name=service_name).first()
    return service.config if service else None
def set_enabled(service_name, enabled=True, analyst=None):
    """
    Enable/disable a service in CRITs.

    :param service_name: The service to modify.
    :type service_name: str
    :param enabled: True to enable, False to disable.
    :type enabled: bool
    :param analyst: The user making the change.
    :type analyst: str
    :returns: dict with key "success" (boolean); on success also "url",
              the toggle URL for the opposite action, else "message".
    """
    if enabled:
        logger.info("Enabling: %s" % service_name)
    else:
        logger.info("Disabling: %s" % service_name)
    service = CRITsService.objects(name=service_name).first()
    service.enabled = enabled
    try:
        service.save(username=analyst)
        # Hand back the URL for the opposite action so the UI can flip
        # the toggle in place.
        if enabled:
            url = reverse('crits.services.views.disable', args=(service_name,))
        else:
            url = reverse('crits.services.views.enable', args=(service_name,))
        return {'success': True, 'url': url}
    except ValidationError as e:
        # Fix: Python-2-only 'except X, e' replaced with portable 'as'.
        return {'success': False, 'message': e}
def set_triage(service_name, enabled=True, analyst=None):
    """
    Enable/disable a service for running on triage (upload).

    :param service_name: The service to modify.
    :type service_name: str
    :param enabled: True to run on triage, False otherwise.
    :type enabled: bool
    :param analyst: The user making the change.
    :type analyst: str
    :returns: dict with key "success" (boolean); on success also "url",
              the toggle URL for the opposite action, else "message".
    """
    if enabled:
        logger.info("Enabling triage: %s" % service_name)
    else:
        logger.info("Disabling triage: %s" % service_name)
    service = CRITsService.objects(name=service_name).first()
    service.run_on_triage = enabled
    try:
        service.save(username=analyst)
        # Hand back the URL for the opposite action so the UI can flip
        # the toggle in place.
        if enabled:
            url = reverse('crits.services.views.disable_triage',
                          args=(service_name,))
        else:
            url = reverse('crits.services.views.enable_triage',
                          args=(service_name,))
        return {'success': True, 'url': url}
    except ValidationError as e:
        # Fix: Python-2-only 'except X, e' replaced with portable 'as'.
        return {'success': False,
                'message': e}
def enabled_services(status=True):
    """
    Return names of services which are enabled.

    When *status* is True, only services whose status is "available"
    are included.
    """
    if status:
        qs = CRITsService.objects(enabled=True, status="available")
    else:
        qs = CRITsService.objects(enabled=True)
    return [svc.name for svc in qs]
def get_supported_services(crits_type):
    """
    Get the supported services for a type.

    Generator yielding the names of enabled services that either support
    all types or explicitly list *crits_type*.
    """
    for svc in CRITsService.objects(enabled=True):
        supported = svc.supported_types
        if supported == 'all' or crits_type in supported:
            yield svc.name
def triage_services(status=True):
    """
    Return names of services set to run on triage.

    When *status* is True, only services whose status is "available"
    are included.
    """
    query = {'run_on_triage': True}
    if status:
        query['status'] = "available"
    return [svc.name for svc in CRITsService.objects(**query)]
def delete_analysis(task_id, analyst):
    """
    Delete analysis results.

    No-op when no result with the given id exists.
    """
    result = AnalysisResult.objects(id=task_id).first()
    if result:
        result.delete(username=analyst)
def insert_analysis_results(task):
    """
    Insert analysis results for this task.
    """
    tdict = task.to_dict()
    # The task's own id becomes analysis_id; the AnalysisResult document
    # gets a fresh ObjectId of its own.
    tdict['analysis_id'] = tdict.pop('id')
    ar = AnalysisResult()
    ar.merge(arg_dict=tdict)
    ar.save()
def update_analysis_results(task):
    """
    Update analysis results for this task.

    If no result document exists yet for the task, one is inserted instead.
    """
    ar = AnalysisResult.objects(analysis_id=task.task_id).first()
    if ar is None:
        # If the task does not currently exist for the given sample in the
        # database, add it.
        logger.warning("Tried to update a task that didn't exist.")
        insert_analysis_results(task)
        return
    # Otherwise, update it in place.
    tdict = task.to_dict()
    tdict['analysis_id'] = tdict.pop('id')
    # Build one mongoengine atomic-update kwarg per field
    # (set__<field>=value). Fix: iterkeys() is Python-2-only; items()
    # works on both 2.x and 3.x. The redundant 'found' flag is gone too.
    updates = dict(('set__%s' % k, v) for k, v in tdict.items())
    AnalysisResult.objects(id=ar.id).update_one(**updates)
# The service pools need to be defined down here because the functions
# that are used by the services must already be defined.
__service_thread_pool__ = None
__service_process_pool__ = None
if settings.SERVICE_MODEL == 'thread_pool':
    __service_thread_pool__ = ThreadPool(processes=settings.SERVICE_POOL_SIZE)
elif settings.SERVICE_MODEL == 'process_pool':
    __service_process_pool__ = Pool(processes=settings.SERVICE_POOL_SIZE)
|
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp C-mode block storage systems.
"""
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
QOS_CLEANUP_INTERVAL_SECONDS = 60
SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly
@six.add_metaclass(utils.TraceWrapperMetaclass)
class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
    """NetApp block storage library for Data ONTAP (Cluster-mode).

    Extends the protocol-agnostic base library with cluster-mode (cDOT)
    specifics: a vserver-scoped ZAPI client, a storage service catalog
    (SSC) of flexvol capabilities, performance metrics, and QoS policy
    group management.
    """

    # Config options that must be set for cluster-mode operation.
    REQUIRED_CMODE_FLAGS = ['netapp_vserver']

    def __init__(self, driver_name, driver_protocol, **kwargs):
        """Register cluster-mode config options on top of the base setup."""
        super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name,
                                                             driver_protocol,
                                                             **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.driver_mode = 'cluster'

    def do_setup(self, context):
        """Build the ZAPI client plus SSC and performance helper libraries."""
        super(NetAppBlockStorageCmodeLibrary, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
        self.vserver = self.configuration.netapp_vserver
        self.zapi_client = client_cmode.Client(
            transport_type=self.configuration.netapp_transport_type,
            username=self.configuration.netapp_login,
            password=self.configuration.netapp_password,
            hostname=self.configuration.netapp_server_hostname,
            port=self.configuration.netapp_server_port,
            vserver=self.vserver)
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)
        self.ssc_library = capabilities.CapabilitiesLibrary(
            self.driver_protocol, self.vserver, self.zapi_client,
            self.configuration)

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        self.ssc_library.check_api_permissions()
        # Fail fast if the configured pool-name pattern matches no flexvols.
        if not self._get_flexvol_to_pool_map():
            msg = _('No pools are available for provisioning volumes. '
                    'Ensure that the configuration option '
                    'netapp_pool_name_search_pattern is set correctly.')
            raise exception.NetAppDriverException(msg)
        super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
        self._start_periodic_tasks()

    def _start_periodic_tasks(self):
        """Kick off the recurring SSC-refresh and QoS-harvest loops."""
        # Note(cknight): Run the task once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.
        self._update_ssc()
        # Start the task that updates the slow-changing storage service catalog
        ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._update_ssc)
        ssc_periodic_task.start(
            interval=SSC_UPDATE_INTERVAL_SECONDS,
            initial_delay=SSC_UPDATE_INTERVAL_SECONDS)
        # Start the task that harvests soft-deleted QoS policy groups.
        harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self.zapi_client.remove_unused_qos_policy_groups)
        harvest_qos_periodic_task.start(
            interval=QOS_CLEANUP_INTERVAL_SECONDS,
            initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)

    def _create_lun(self, volume_name, lun_name, size,
                    metadata, qos_policy_group_name=None):
        """Creates a LUN, handling Data ONTAP differences as needed."""
        self.zapi_client.create_lun(
            volume_name, lun_name, size, metadata, qos_policy_group_name)

    def _create_lun_handle(self, metadata):
        """Returns LUN handle based on filer type."""
        # Handle format is "<vserver>:<path>".
        return '%s:%s' % (self.vserver, metadata['Path'])

    def _find_mapped_lun_igroup(self, path, initiator_list):
        """Find an igroup for a LUN mapped to the given initiator(s).

        Returns an (igroup_name, lun_id) pair, or (None, None) when no
        OpenStack-managed igroup maps the LUN to any of the initiators.
        """
        initiator_igroups = self.zapi_client.get_igroup_by_initiators(
            initiator_list)
        lun_maps = self.zapi_client.get_lun_map(path)
        if initiator_igroups and lun_maps:
            for igroup in initiator_igroups:
                igroup_name = igroup['initiator-group-name']
                # Only consider igroups created by this driver.
                if igroup_name.startswith(na_utils.OPENSTACK_PREFIX):
                    for lun_map in lun_maps:
                        if lun_map['initiator-group'] == igroup_name:
                            return igroup_name, lun_map['lun-id']
        return None, None

    def _clone_lun(self, name, new_name, space_reserved=None,
                   qos_policy_group_name=None, src_block=0, dest_block=0,
                   block_count=0, source_snapshot=None, is_snapshot=False):
        """Clone LUN with the given handle to the new name.

        :raises VolumeBackendAPIException: if the clone cannot be found on
            the filer after the clone request completes.
        """
        if not space_reserved:
            space_reserved = self.lun_space_reservation
        metadata = self._get_lun_attr(name, 'metadata')
        volume = metadata['Volume']
        self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
                                   qos_policy_group_name=qos_policy_group_name,
                                   src_block=src_block, dest_block=dest_block,
                                   block_count=block_count,
                                   source_snapshot=source_snapshot,
                                   is_snapshot=is_snapshot)
        LOG.debug("Cloned LUN with new name %s", new_name)
        # Verify the clone exists and register it in the driver's LUN table.
        lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
                                               path='/vol/%s/%s'
                                               % (volume, new_name))
        if len(lun) == 0:
            msg = _("No cloned LUN named %s found on the filer")
            raise exception.VolumeBackendAPIException(data=msg % new_name)
        clone_meta = self._create_lun_meta(lun[0])
        self._add_lun_to_table(
            block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
                                            clone_meta['Path']),
                                 new_name,
                                 lun[0].get_child_content('size'),
                                 clone_meta))

    def _create_lun_meta(self, lun):
        """Creates LUN metadata dictionary."""
        self.zapi_client.check_is_naelement(lun)
        meta_dict = {}
        meta_dict['Vserver'] = lun.get_child_content('vserver')
        meta_dict['Volume'] = lun.get_child_content('volume')
        meta_dict['Qtree'] = lun.get_child_content('qtree')
        meta_dict['Path'] = lun.get_child_content('path')
        meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
        meta_dict['SpaceReserved'] = \
            lun.get_child_content('is-space-reservation-enabled')
        meta_dict['UUID'] = lun.get_child_content('uuid')
        return meta_dict

    def _get_fc_target_wwpns(self, include_partner=True):
        """Return FC target WWPNs from the backend.

        NOTE(review): include_partner is accepted for interface parity with
        the base class but is not passed through here — confirm intended.
        """
        return self.zapi_client.get_fc_target_wwpns()

    def _update_volume_stats(self, filter_function=None,
                             goodness_function=None):
        """Retrieve stats info from vserver."""
        LOG.debug('Updating volume stats')
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.driver_name
        data['vendor_name'] = 'NetApp'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = self.driver_protocol
        data['pools'] = self._get_pool_stats(
            filter_function=filter_function,
            goodness_function=goodness_function)
        data['sparse_copy_volume'] = True
        # presumably provide_ems's first argument is the requesting object
        # (itself) for EMS autosupport logging — confirm against the
        # zapi_client API.
        self.zapi_client.provide_ems(self, self.driver_name, self.app_version)
        self._stats = data

    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.

        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics. The pool name is the flexvol name.
        """
        pools = []
        ssc = self.ssc_library.get_ssc()
        if not ssc:
            return pools
        # Get up-to-date node utilization metrics just once
        self.perf_library.update_performance_cache(ssc)
        # Get up-to-date aggregate capacities just once
        aggregates = self.ssc_library.get_ssc_aggregates()
        aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates)
        for ssc_vol_name, ssc_vol_info in ssc.items():
            pool = dict()
            # Add storage service catalog data
            pool.update(ssc_vol_info)
            # Add driver capabilities and config info
            pool['QoS_support'] = True
            pool['consistencygroup_support'] = True
            pool['reserved_percentage'] = self.reserved_percentage
            pool['max_over_subscription_ratio'] = (
                self.max_over_subscription_ratio)
            # Add up-to-date capacity info
            capacity = self.zapi_client.get_flexvol_capacity(
                flexvol_name=ssc_vol_name)
            size_total_gb = capacity['size-total'] / units.Gi
            pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)
            size_available_gb = capacity['size-available'] / units.Gi
            pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)
            pool['provisioned_capacity_gb'] = round(
                pool['total_capacity_gb'] - pool['free_capacity_gb'], 2)
            aggregate_name = ssc_vol_info.get('aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)
            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function
            pools.append(pool)
        return pools

    def _update_ssc(self):
        """Refresh the storage service catalog with the latest set of pools."""
        self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())

    def _get_flexvol_to_pool_map(self):
        """Get the flexvols that match the pool name search pattern.

        The map is of the format suitable for seeding the storage service
        catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}}
        """
        pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)
        pools = {}
        flexvol_names = self.zapi_client.list_flexvols()
        for flexvol_name in flexvol_names:
            msg_args = {
                'flexvol': flexvol_name,
                'vol_pattern': pool_regex.pattern,
            }
            if pool_regex.match(flexvol_name):
                msg = "Volume '%(flexvol)s' matches %(vol_pattern)s"
                LOG.debug(msg, msg_args)
                pools[flexvol_name] = {'pool_name': flexvol_name}
            else:
                msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s"
                LOG.debug(msg, msg_args)
        return pools

    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
        except exception.Invalid:
            # Delete even if there was invalid qos policy specified for the
            # volume.
            qos_policy_group_info = None
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
        LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info})

    def _get_preferred_target_from_list(self, target_details_list,
                                        filter=None):
        """Pick an iSCSI target whose LIF address is operationally UP."""
        # cDOT iSCSI LIFs do not migrate from controller to controller
        # in failover. Rather, an iSCSI LIF must be configured on each
        # controller and the initiator has to take responsibility for
        # using a LIF that is UP. In failover, the iSCSI LIF on the
        # downed controller goes DOWN until the controller comes back up.
        #
        # Currently Nova only accepts a single target when obtaining
        # target details from Cinder, so we pass back the first portal
        # with an UP iSCSI LIF. There are plans to have Nova accept
        # and try multiple targets. When that happens, we can and should
        # remove this filter and return all targets since their operational
        # state could change between the time we test here and the time
        # Nova uses the target.
        operational_addresses = (
            self.zapi_client.get_operational_lif_addresses())
        return (super(NetAppBlockStorageCmodeLibrary, self)
                ._get_preferred_target_from_list(target_details_list,
                                                 filter=operational_addresses))

    def _setup_qos_for_volume(self, volume, extra_specs):
        """Provision the QoS policy group for a volume from its extra specs.

        :raises VolumeBackendAPIException: when the QoS spec is invalid.
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume, extra_specs)
        except exception.Invalid:
            msg = _('Invalid QoS specification detected while getting QoS '
                    'policy for volume %s') % volume['id']
            raise exception.VolumeBackendAPIException(data=msg)
        self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
        return qos_policy_group_info

    def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
        """Soft-delete the QoS policy group; the periodic task harvests it."""
        self.zapi_client.mark_qos_policy_group_for_deletion(
            qos_policy_group_info)

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
        except exception.Invalid:
            # Unmanage even if there was invalid qos policy specified for the
            # volume.
            qos_policy_group_info = None
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
# Configuration options owned by this module.
server_opts = [
    cfg.BoolOpt('enable_instance_password',
                default=True,
                help='Enables returning of the instance password by the'
                     ' relevant server API calls such as create, rebuild'
                     ' or rescue, If the hypervisor does not support'
                     ' password injection then the password returned will'
                     ' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
# Options defined elsewhere that this module reads.
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
# Module-level flag so make_server() logs the XML deprecation warning
# only once per process.
XML_WARNING = False
def make_fault(elem):
    """Attach a 'fault' sub-element (code, created, message, details)."""
    fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
    for attr in ('code', 'created'):
        fault.set(attr)
    message = xmlutil.SubTemplateElement(fault, 'message')
    message.text = 'message'
    details = xmlutil.SubTemplateElement(fault, 'details')
    details.text = 'details'
def make_server(elem, detailed=False):
    """Populate a server XML template element.

    Always emits name/id and links; with ``detailed=True`` also emits the
    full representation (status fields, image, flavor, fault, metadata
    and addresses).
    """
    elem.set('name')
    elem.set('id')
    global XML_WARNING
    if not XML_WARNING:
        LOG.warn(_LW('XML support has been deprecated and may be removed '
                     'as early as the Juno release.'))
        XML_WARNING = True
    if detailed:
        elem.set('userId', 'user_id')
        elem.set('tenantId', 'tenant_id')
        for attr in ('updated', 'created', 'hostId', 'accessIPv4',
                     'accessIPv6', 'status', 'progress', 'reservation_id'):
            elem.set(attr)
        # Attach image node
        image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
        image.set('id')
        xmlutil.make_links(image, 'links')
        # Attach flavor node
        flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
        flavor.set('id')
        xmlutil.make_links(flavor, 'links')
        # Attach fault node
        make_fault(elem)
        # Attach metadata node
        elem.append(common.MetadataTemplate())
        # Attach addresses node
        elem.append(ips.AddressesTemplate())
    xmlutil.make_links(elem, 'links')
# XML namespaces (default v1.1 plus atom) shared by all templates below.
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
    """Master template for a single, detailed server."""
    def construct(self):
        server_elem = xmlutil.TemplateElement('server', selector='server')
        make_server(server_elem, detailed=True)
        return xmlutil.MasterTemplate(server_elem, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
    """Master template for a minimal (name/id) server listing."""
    def construct(self):
        root = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(root, 'server',
                                                 selector='servers')
        make_server(server_elem)
        xmlutil.make_links(root, 'servers_links')
        return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
    """Master template for a detailed server listing."""
    def construct(self):
        root = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(root, 'server',
                                                 selector='servers')
        make_server(server_elem, detailed=True)
        return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
    """Slave template exposing the adminPass attribute."""
    def construct(self):
        elem = xmlutil.TemplateElement('server')
        elem.set('adminPass')
        return xmlutil.SlaveTemplate(elem, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
    """Master template for a multiple-create (reservation id) response."""
    def construct(self):
        elem = xmlutil.TemplateElement('server')
        elem.set('reservation_id')
        return xmlutil.MasterTemplate(elem, 1, nsmap=server_nsmap)
def FullServerTemplate():
    """Return a ServerTemplate with the adminPass slave attached."""
    template = ServerTemplate()
    template.attach(ServerAdminPassTemplate())
    return template
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
    """Common deserializer to handle xml-formatted server create requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes.
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def _extract_personality(self, server_node):
        """Marshal the personality attribute of a parsed request.

        :returns: a list of {"path", "contents"} dicts, or None when the
            request carries no <personality> node.
        """
        node = self.find_first_child_named(server_node, "personality")
        if node is not None:
            personality = []
            for file_node in self.find_children_named(node, "file"):
                item = {}
                if file_node.hasAttribute("path"):
                    item["path"] = file_node.getAttribute("path")
                # The file body is the text content of the <file> element.
                item["contents"] = self.extract_text(file_node)
                personality.append(item)
            return personality
        else:
            return None

    def _extract_server(self, node):
        """Marshal the server attribute of a parsed request.

        Collects the simple string attributes first, then each optional
        nested section (scheduler hints, metadata, user_data, personality,
        networks, security groups and both block-device-mapping formats).
        """
        server = {}
        server_node = self.find_first_child_named(node, 'server')
        attributes = ["name", "imageRef", "flavorRef", "adminPass",
                      "accessIPv4", "accessIPv6", "key_name",
                      "availability_zone", "min_count", "max_count"]
        for attr in attributes:
            if server_node.getAttribute(attr):
                server[attr] = server_node.getAttribute(attr)
        res_id = server_node.getAttribute('return_reservation_id')
        if res_id:
            server['return_reservation_id'] = \
                strutils.bool_from_string(res_id)
        scheduler_hints = self._extract_scheduler_hints(server_node)
        if scheduler_hints:
            server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints
        metadata_node = self.find_first_child_named(server_node, "metadata")
        if metadata_node is not None:
            server["metadata"] = self.extract_metadata(metadata_node)
        user_data_node = self.find_first_child_named(server_node, "user_data")
        if user_data_node is not None:
            server["user_data"] = self.extract_text(user_data_node)
        personality = self._extract_personality(server_node)
        if personality is not None:
            server["personality"] = personality
        networks = self._extract_networks(server_node)
        if networks is not None:
            server["networks"] = networks
        security_groups = self._extract_security_groups(server_node)
        if security_groups is not None:
            server["security_groups"] = security_groups
        # NOTE(vish): this is not namespaced in json, so leave it without a
        #             namespace for now
        block_device_mapping = self._extract_block_device_mapping(server_node)
        if block_device_mapping is not None:
            server["block_device_mapping"] = block_device_mapping
        block_device_mapping_v2 = self._extract_block_device_mapping_v2(
            server_node)
        if block_device_mapping_v2 is not None:
            server["block_device_mapping_v2"] = block_device_mapping_v2
        # NOTE(vish): Support this incorrect version because it was in the
        #             code base for a while and we don't want to accidentally
        #             break anyone that might be using it.
        auto_disk_config = server_node.getAttribute('auto_disk_config')
        if auto_disk_config:
            server['OS-DCF:diskConfig'] = auto_disk_config
        # The namespaced attribute wins when both spellings are present.
        auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
        if auto_disk_config:
            server['OS-DCF:diskConfig'] = auto_disk_config
        config_drive = server_node.getAttribute('config_drive')
        if config_drive:
            server['config_drive'] = config_drive
        return server

    def _extract_block_device_mapping(self, server_node):
        """Marshal the (legacy) block_device_mapping node of a request.

        :returns: a list of mapping dicts, or None when the request has
            no <block_device_mapping> node.
        """
        node = self.find_first_child_named(server_node, "block_device_mapping")
        if node:
            block_device_mapping = []
            for child in self.extract_elements(node):
                if child.nodeName != "mapping":
                    continue
                mapping = {}
                attributes = ["volume_id", "snapshot_id", "device_name",
                              "virtual_name", "volume_size"]
                for attr in attributes:
                    value = child.getAttribute(attr)
                    if value:
                        mapping[attr] = value
                # Boolean-valued attributes are parsed, not copied verbatim.
                attributes = ["delete_on_termination", "no_device"]
                for attr in attributes:
                    value = child.getAttribute(attr)
                    if value:
                        mapping[attr] = strutils.bool_from_string(value)
                block_device_mapping.append(mapping)
            return block_device_mapping
        else:
            return None

    def _extract_block_device_mapping_v2(self, server_node):
        """Marshal the new block_device_mappings.

        :returns: a list of mapping dicts, or None when the request has
            no <block_device_mapping_v2> node.
        """
        node = self.find_first_child_named(server_node,
                                           "block_device_mapping_v2")
        if node:
            block_device_mapping = []
            for child in self.extract_elements(node):
                if child.nodeName != "mapping":
                    continue
                block_device_mapping.append(
                    dict((attr, child.getAttribute(attr))
                         for attr in block_device.bdm_new_api_fields
                         if child.getAttribute(attr)))
            return block_device_mapping
        else:
            # Previously this path fell off the end of the function; return
            # None explicitly for consistency with the sibling extractors.
            return None

    def _extract_scheduler_hints(self, server_node):
        """Marshal the scheduler hints attribute of a parsed request.

        :returns: a dict mapping hint name to a list of string values, or
            None when the namespaced node is absent.
        """
        node = self.find_first_child_named_in_namespace(server_node,
            "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
            "scheduler_hints")
        if node:
            scheduler_hints = {}
            for child in self.extract_elements(node):
                scheduler_hints.setdefault(child.nodeName, [])
                value = self.extract_text(child).strip()
                scheduler_hints[child.nodeName].append(value)
            return scheduler_hints
        else:
            return None

    def _extract_networks(self, server_node):
        """Marshal the networks attribute of a parsed request.

        :returns: a list of {"uuid", "fixed_ip", "port"} dicts (each key
            optional), or None when no <networks> node is present.
        """
        node = self.find_first_child_named(server_node, "networks")
        if node is not None:
            networks = []
            for network_node in self.find_children_named(node,
                                                         "network"):
                item = {}
                if network_node.hasAttribute("uuid"):
                    item["uuid"] = network_node.getAttribute("uuid")
                if network_node.hasAttribute("fixed_ip"):
                    item["fixed_ip"] = network_node.getAttribute("fixed_ip")
                if network_node.hasAttribute("port"):
                    item["port"] = network_node.getAttribute("port")
                networks.append(item)
            return networks
        else:
            return None

    def _extract_security_groups(self, server_node):
        """Marshal the security_groups attribute of a parsed request.

        :returns: a list of {"name"} dicts, or None when no
            <security_groups> node is present.
        """
        node = self.find_first_child_named(server_node, "security_groups")
        if node is not None:
            security_groups = []
            for sg_node in self.find_children_named(node, "security_group"):
                item = {}
                name = self.find_attribute_or_element(sg_node, 'name')
                if name:
                    item["name"] = name
                security_groups.append(item)
            return security_groups
        else:
            return None
class ActionDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted server action requests.

    Dispatches on the root element's tag name to a per-action extractor;
    unknown actions fall back to the generic deserializer behaviour.
    """
    def default(self, string):
        # Parse the action document and dispatch on the root tag name.
        dom = xmlutil.safe_minidom_parse_string(string)
        action_node = dom.childNodes[0]
        action_name = action_node.tagName
        action_deserializer = {
            'createImage': self._action_create_image,
            'changePassword': self._action_change_password,
            'reboot': self._action_reboot,
            'rebuild': self._action_rebuild,
            'resize': self._action_resize,
            'confirmResize': self._action_confirm_resize,
            'revertResize': self._action_revert_resize,
        }.get(action_name, super(ActionDeserializer, self).default)
        action_data = action_deserializer(action_node)
        return {'body': {action_name: action_data}}
    def _action_create_image(self, node):
        # createImage accepts only a 'name' attribute (plus metadata).
        return self._deserialize_image_action(node, ('name',))
    def _action_change_password(self, node):
        """Extract the mandatory adminPass attribute."""
        if not node.hasAttribute("adminPass"):
            raise AttributeError("No adminPass was specified in request")
        return {"adminPass": node.getAttribute("adminPass")}
    def _action_reboot(self, node):
        """Extract the mandatory reboot type attribute."""
        if not node.hasAttribute("type"):
            raise AttributeError("No reboot type was specified in request")
        return {"type": node.getAttribute("type")}
    def _action_rebuild(self, node):
        """Collect rebuild parameters; imageRef is mandatory."""
        rebuild = {}
        if node.hasAttribute("name"):
            name = node.getAttribute("name")
            if not name:
                raise AttributeError("Name cannot be blank")
            rebuild['name'] = name
        # Both the legacy and the namespaced disk-config attribute are
        # accepted; the namespaced one wins when both are present.
        if node.hasAttribute("auto_disk_config"):
            rebuild['OS-DCF:diskConfig'] = node.getAttribute(
                "auto_disk_config")
        if node.hasAttribute("OS-DCF:diskConfig"):
            rebuild['OS-DCF:diskConfig'] = node.getAttribute(
                "OS-DCF:diskConfig")
        metadata_node = self.find_first_child_named(node, "metadata")
        if metadata_node is not None:
            rebuild["metadata"] = self.extract_metadata(metadata_node)
        personality = self._extract_personality(node)
        if personality is not None:
            rebuild["personality"] = personality
        if not node.hasAttribute("imageRef"):
            raise AttributeError("No imageRef was specified in request")
        rebuild["imageRef"] = node.getAttribute("imageRef")
        if node.hasAttribute("adminPass"):
            rebuild["adminPass"] = node.getAttribute("adminPass")
        if node.hasAttribute("accessIPv4"):
            rebuild["accessIPv4"] = node.getAttribute("accessIPv4")
        if node.hasAttribute("accessIPv6"):
            rebuild["accessIPv6"] = node.getAttribute("accessIPv6")
        if node.hasAttribute("preserve_ephemeral"):
            # strict=True: reject anything that is not a recognized boolean.
            rebuild["preserve_ephemeral"] = strutils.bool_from_string(
                node.getAttribute("preserve_ephemeral"), strict=True)
        return rebuild
    def _action_resize(self, node):
        """Collect resize parameters; flavorRef is mandatory."""
        resize = {}
        if node.hasAttribute("flavorRef"):
            resize["flavorRef"] = node.getAttribute("flavorRef")
        else:
            raise AttributeError("No flavorRef was specified in request")
        # Same dual-spelling handling as rebuild; namespaced wins.
        if node.hasAttribute("auto_disk_config"):
            resize['OS-DCF:diskConfig'] = node.getAttribute("auto_disk_config")
        if node.hasAttribute("OS-DCF:diskConfig"):
            resize['OS-DCF:diskConfig'] = node.getAttribute(
                "OS-DCF:diskConfig")
        return resize
    def _action_confirm_resize(self, node):
        # confirmResize carries no parameters.
        return None
    def _action_revert_resize(self, node):
        # revertResize carries no parameters.
        return None
    def _deserialize_image_action(self, node, allowed_attributes):
        """Extract the allowed attributes plus optional metadata."""
        data = {}
        for attribute in allowed_attributes:
            value = node.getAttribute(attribute)
            if value:
                data[attribute] = value
        metadata_node = self.find_first_child_named(node, 'metadata')
        if metadata_node is not None:
            metadata = self.metadata_deserializer.extract_metadata(
                metadata_node)
            data['metadata'] = metadata
        return data
class CreateDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted server create requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes.
    """
    def default(self, string):
        """Deserialize an xml-formatted server create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        return {'body': {'server': self._extract_server(dom)}}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified.

        Normalizes the query-string search options (status, changes-since,
        deleted, all_tenants), scopes the query to the caller's project or
        user, then fetches and renders the instance list.
        """
        search_opts = {}
        search_opts.update(req.GET)
        context = req.environ['nova.context']
        remove_invalid_options(context, search_opts,
                               self._get_server_search_options())
        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        # Re-read the raw query (not search_opts) so repeated ?status=
        # values are all collected.
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                # No known status matched: the result set is empty.
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state
        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed
        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted images according to the API spec.
        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False
        # Searching for vm_state 'deleted' is admin-only.
        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)
        # If all tenants is passed with 0 or false as the value
        # then remove it from the search options. Nothing passed as
        # the value for all_tenants is considered to enable the feature
        all_tenants = search_opts.get('all_tenants')
        if all_tenants:
            try:
                if not strutils.bool_from_string(all_tenants, True):
                    del search_opts['all_tenants']
            except ValueError as err:
                raise exception.InvalidInput(str(err))
        if 'all_tenants' in search_opts:
            # Policy-gate the cross-tenant listing, then drop the option
            # so it is not forwarded as a DB filter.
            policy.enforce(context, 'compute:get_all_tenants',
                           {'project_id': context.project_id,
                            'user_id': context.user_id})
            del search_opts['all_tenants']
        else:
            # Scope the query to the caller's project (or user, when no
            # project is set on the context).
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id
        limit, marker = common.get_limit_and_marker(req)
        try:
            instance_list = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     limit=limit,
                                                     marker=marker,
                                                     want_objects=True)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            # An unknown flavor filter yields an empty list, not an error.
            LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
            # TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
            instance_list = objects.InstanceList(objects=[])
        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
    def _validate_server_name(self, value):
        """Reject empty or over-255-character server names with a 400."""
        self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute.

        Returns (network_uuid, fixed_ip, port_id) tuples under neutron,
        or (network_uuid, fixed_ip) tuples under nova-network. Raises a
        400 for any malformed entry.
        """
        networks = []
        for network in requested_networks:
            try:
                port_id = network.get('port', None)
                if port_id:
                    network_uuid = None
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument : port")
                        raise exc.HTTPBadRequest(explanation=msg)
                    if not uuidutils.is_uuid_like(port_id):
                        msg = _("Bad port format: port uuid is "
                                "not in proper format "
                                "(%s)") % port_id
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    network_uuid = network['uuid']
                # NOTE(review): the 'br-' prefix strip below appears to
                # accept bridge-style ids — confirm against callers.
                if not port_id and not uuidutils.is_uuid_like(network_uuid):
                    br_uuid = network_uuid.split('-', 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = _("Bad networks format: network uuid is "
                                "not in proper format "
                                "(%s)") % network_uuid
                        raise exc.HTTPBadRequest(explanation=msg)
                # fixed IP address is optional
                # if the fixed IP address is not provided then
                # it will use one of the available IP address from the network
                address = network.get('fixed_ip', None)
                if address is not None and not utils.is_valid_ip_address(
                        address):
                    msg = _("Invalid fixed IP address (%s)") % address
                    raise exc.HTTPBadRequest(explanation=msg)
                # For neutronv2, requested_networks
                # should be tuple of (network_uuid, fixed_ip, port_id)
                if utils.is_neutron():
                    networks.append((network_uuid, address, port_id))
                else:
                    # check if the network id is already present in the list,
                    # we don't want duplicate networks to be passed
                    # at the boot time
                    # (NOTE(review): 'id' here shadows the builtin)
                    for id, ip in networks:
                        if id == network_uuid:
                            expl = (_("Duplicate networks"
                                      " (%s) are not allowed") %
                                    network_uuid)
                            raise exc.HTTPBadRequest(explanation=expl)
                    networks.append((network_uuid, address))
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)
        return networks
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
if not utils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not utils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Creates a new server for a given user.

        Validates the request body, gathers options that depend on loaded
        API extensions, calls compute_api.create(), and maps domain
        exceptions onto HTTP errors. Returns either a reservation id (for
        multiple-create requests) or the created server representation.
        """
        if not self.is_valid_body(body, 'server'):
            raise exc.HTTPUnprocessableEntity()
        context = req.environ['nova.context']
        server_dict = body['server']
        password = self._get_server_admin_password(server_dict)
        if 'name' not in server_dict:
            msg = _("Server name is not defined")
            raise exc.HTTPBadRequest(explanation=msg)
        name = server_dict['name']
        self._validate_server_name(name)
        name = name.strip()
        image_uuid = self._image_from_req_data(body)
        personality = server_dict.get('personality')
        # Extension-dependent options: each is only honored when the
        # corresponding extension is loaded.
        config_drive = None
        if self.ext_mgr.is_loaded('os-config-drive'):
            config_drive = server_dict.get('config_drive')
        injected_files = []
        if personality:
            injected_files = self._get_injected_files(personality)
        sg_names = []
        if self.ext_mgr.is_loaded('os-security-groups'):
            security_groups = server_dict.get('security_groups')
            if security_groups is not None:
                sg_names = [sg['name'] for sg in security_groups
                            if sg.get('name')]
        # Fall back to the 'default' group and de-duplicate.
        if not sg_names:
            sg_names.append('default')
        sg_names = list(set(sg_names))
        requested_networks = None
        if (self.ext_mgr.is_loaded('os-networks')
                or utils.is_neutron()):
            requested_networks = server_dict.get('networks')
        if requested_networks is not None:
            if not isinstance(requested_networks, list):
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)
            requested_networks = self._get_requested_networks(
                requested_networks)
        # NOTE(review): the trailing comma makes the right-hand side a
        # 1-tuple, which is then unpacked — equivalent to a plain
        # assignment of server_dict.get('accessIPv4').
        (access_ip_v4, ) = server_dict.get('accessIPv4'),
        if access_ip_v4 is not None:
            self._validate_access_ipv4(access_ip_v4)
        (access_ip_v6, ) = server_dict.get('accessIPv6'),
        if access_ip_v6 is not None:
            self._validate_access_ipv6(access_ip_v6)
        try:
            flavor_id = self._flavor_id_from_req_data(body)
        except ValueError as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        # optional openstack extensions:
        key_name = None
        if self.ext_mgr.is_loaded('os-keypairs'):
            key_name = server_dict.get('key_name')
        user_data = None
        if self.ext_mgr.is_loaded('os-user-data'):
            user_data = server_dict.get('user_data')
            self._validate_user_data(user_data)
        availability_zone = None
        if self.ext_mgr.is_loaded('os-availability-zone'):
            availability_zone = server_dict.get('availability_zone')
        block_device_mapping = None
        block_device_mapping_v2 = None
        legacy_bdm = True
        if self.ext_mgr.is_loaded('os-volumes'):
            block_device_mapping = server_dict.get('block_device_mapping', [])
            for bdm in block_device_mapping:
                try:
                    block_device.validate_device_name(bdm.get("device_name"))
                    block_device.validate_and_default_volume_size(bdm)
                except exception.InvalidBDMFormat as e:
                    raise exc.HTTPBadRequest(explanation=e.format_message())
                if 'delete_on_termination' in bdm:
                    bdm['delete_on_termination'] = strutils.bool_from_string(
                        bdm['delete_on_termination'])
            if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
                # Consider the new data format for block device mapping
                block_device_mapping_v2 = server_dict.get(
                    'block_device_mapping_v2', [])
                # NOTE (ndipanov): Disable usage of both legacy and new
                # block device format in the same request
                if block_device_mapping and block_device_mapping_v2:
                    expl = _('Using different block_device_mapping syntaxes '
                             'is not allowed in the same request.')
                    raise exc.HTTPBadRequest(explanation=expl)
                # Assume legacy format
                legacy_bdm = not bool(block_device_mapping_v2)
                try:
                    block_device_mapping_v2 = [
                        block_device.BlockDeviceDict.from_api(bdm_dict)
                        for bdm_dict in block_device_mapping_v2]
                except exception.InvalidBDMFormat as e:
                    raise exc.HTTPBadRequest(explanation=e.format_message())
        # At most one of the two formats is non-empty at this point.
        block_device_mapping = (block_device_mapping or
                                block_device_mapping_v2)
        ret_resv_id = False
        # min_count and max_count are optional. If they exist, they may come
        # in as strings. Verify that they are valid integers and > 0.
        # Also, we want to default 'min_count' to 1, and default
        # 'max_count' to be 'min_count'.
        min_count = 1
        max_count = 1
        if self.ext_mgr.is_loaded('os-multiple-create'):
            ret_resv_id = server_dict.get('return_reservation_id', False)
            min_count = server_dict.get('min_count', 1)
            max_count = server_dict.get('max_count', min_count)
        try:
            min_count = utils.validate_integer(
                min_count, "min_count", min_value=1)
            max_count = utils.validate_integer(
                max_count, "max_count", min_value=1)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        if min_count > max_count:
            msg = _('min_count must be <= max_count')
            raise exc.HTTPBadRequest(explanation=msg)
        auto_disk_config = False
        if self.ext_mgr.is_loaded('OS-DCF'):
            auto_disk_config = server_dict.get('auto_disk_config')
        scheduler_hints = {}
        if self.ext_mgr.is_loaded('OS-SCH-HNT'):
            scheduler_hints = server_dict.get('scheduler_hints', {})
        try:
            _get_inst_type = flavors.get_flavor_by_flavor_id
            inst_type = _get_inst_type(flavor_id, ctxt=context,
                                       read_deleted="no")
            (instances, resv_id) = self.compute_api.create(context,
                            inst_type,
                            image_uuid,
                            display_name=name,
                            display_description=name,
                            key_name=key_name,
                            metadata=server_dict.get('metadata', {}),
                            access_ip_v4=access_ip_v4,
                            access_ip_v6=access_ip_v6,
                            injected_files=injected_files,
                            admin_password=password,
                            min_count=min_count,
                            max_count=max_count,
                            requested_networks=requested_networks,
                            security_group=sg_names,
                            user_data=user_data,
                            availability_zone=availability_zone,
                            config_drive=config_drive,
                            block_device_mapping=block_device_mapping,
                            auto_disk_config=auto_disk_config,
                            scheduler_hints=scheduler_hints,
                            legacy_bdm=legacy_bdm)
        # Map compute-layer exceptions onto the appropriate HTTP errors.
        except (exception.QuotaError,
                exception.PortLimitExceeded) as error:
            raise exc.HTTPForbidden(
                explanation=error.format_message(),
                headers={'Retry-After': 0})
        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message())
        except exception.ImageNotFound as error:
            msg = _("Can not find requested image")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.KeypairNotFound as error:
            msg = _("Invalid key_name provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.ConfigDriveInvalidValue:
            msg = _("Invalid config_drive provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except messaging.RemoteError as err:
            msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
                                                 'err_msg': err.value}
            raise exc.HTTPBadRequest(explanation=msg)
        except UnicodeDecodeError as error:
            msg = "UnicodeError: %s" % unicode(error)
            raise exc.HTTPBadRequest(explanation=msg)
        except (exception.ImageNotActive,
                exception.FlavorDiskTooSmall,
                exception.FlavorMemoryTooSmall,
                exception.NetworkNotFound,
                exception.PortNotFound,
                exception.FixedIpAlreadyInUse,
                exception.SecurityGroupNotFound,
                exception.InstanceUserDataTooLarge,
                exception.InstanceUserDataMalformed) as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())
        except (exception.PortInUse,
                exception.NoUniqueMatch) as error:
            raise exc.HTTPConflict(explanation=error.format_message())
        except exception.Invalid as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())
        # If the caller wanted a reservation_id, return it
        if ret_resv_id:
            return wsgi.ResponseObject({'reservation_id': resv_id},
                                       xml=ServerMultipleCreateTemplate)
        req.cache_db_instances(instances)
        server = self._view_builder.create(req, instances[0])
        if CONF.enable_instance_password:
            server['server']['adminPass'] = password
        robj = wsgi.ResponseObject(server)
        return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
    """Delete a server, soft-deleting when reclaim is enabled."""
    server = self._get_server(context, req, instance_uuid)
    if not CONF.reclaim_instance_interval:
        self.compute_api.delete(context, server)
        return
    try:
        self.compute_api.soft_delete(context, server)
    except exception.InstanceInvalidState:
        # Note(yufang521247): an instance that has never been active is
        # not allowed to be soft_deleted, so fall back to delete() to
        # clean the instance up.
        self.compute_api.delete(context, server)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
    """Update server then pass on to version-specific controller.

    Accepts name, accessIPv4, accessIPv6 and auto_disk_config updates;
    hostId and personality are explicitly rejected with 400.
    """
    if not self.is_valid_body(body, 'server'):
        raise exc.HTTPUnprocessableEntity()
    ctxt = req.environ['nova.context']
    # Collect only the attributes the caller actually supplied.
    update_dict = {}
    if 'name' in body['server']:
        name = body['server']['name']
        self._validate_server_name(name)
        update_dict['display_name'] = name.strip()
    if 'accessIPv4' in body['server']:
        access_ipv4 = body['server']['accessIPv4']
        if access_ipv4:
            self._validate_access_ipv4(access_ipv4)
        # An empty/blank value clears the stored address (None).
        update_dict['access_ip_v4'] = (
            access_ipv4 and access_ipv4.strip() or None)
    if 'accessIPv6' in body['server']:
        access_ipv6 = body['server']['accessIPv6']
        if access_ipv6:
            self._validate_access_ipv6(access_ipv6)
        update_dict['access_ip_v6'] = (
            access_ipv6 and access_ipv6.strip() or None)
    if 'auto_disk_config' in body['server']:
        auto_disk_config = strutils.bool_from_string(
            body['server']['auto_disk_config'])
        update_dict['auto_disk_config'] = auto_disk_config
    if 'hostId' in body['server']:
        msg = _("HostId cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)
    if 'personality' in body['server']:
        msg = _("Personality cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)
    try:
        instance = self.compute_api.get(ctxt, id,
                                        want_objects=True)
        req.cache_db_instance(instance)
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        instance.save()
    except exception.NotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
    return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
    """Confirm a pending resize, mapping API failures to HTTP errors."""
    ctxt = req.environ['nova.context']
    server = self._get_server(ctxt, req, id)
    try:
        self.compute_api.confirm_resize(ctxt, server)
    except exception.MigrationNotFound:
        raise exc.HTTPBadRequest(
            explanation=_("Instance has not been resized."))
    except exception.InstanceIsLocked as locked:
        raise exc.HTTPConflict(explanation=locked.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'confirmResize')
    return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
    """Revert a pending resize.

    MigrationNotFound and FlavorNotFound map to 400; a locked or
    invalid-state instance maps to 409.
    """
    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)
    try:
        self.compute_api.revert_resize(context, instance)
    except exception.MigrationNotFound:
        msg = _("Instance has not been resized.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        # The flavor the instance previously used can no longer be
        # looked up, so there is nothing to revert to.
        msg = _("Flavor used by the instance could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                              'revertResize')
    return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
    """Reboot a server.

    The request body must carry a 'reboot' entity whose 'type' is a
    string equal (case-insensitively) to HARD or SOFT; anything else is
    rejected with 400.
    """
    if 'reboot' in body and 'type' in body['reboot']:
        if not isinstance(body['reboot']['type'], six.string_types):
            msg = _("Argument 'type' for reboot must be a string")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        valid_reboot_types = ['HARD', 'SOFT']
        reboot_type = body['reboot']['type'].upper()
        # Idiomatic membership test instead of list.count().
        if reboot_type not in valid_reboot_types:
            msg = _("Argument 'type' for reboot is not HARD or SOFT")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    else:
        msg = _("Missing argument 'type' for reboot")
        LOG.error(msg)
        raise exc.HTTPBadRequest(explanation=msg)
    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)
    try:
        self.compute_api.reboot(context, instance, reboot_type)
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                              'reboot')
    return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
    """Begin the resize process with given instance/flavor.

    Extra keyword arguments (e.g. auto_disk_config) are passed straight
    through to the compute API.  Returns 202 on success.
    """
    context = req.environ["nova.context"]
    instance = self._get_server(context, req, instance_id)
    try:
        self.compute_api.resize(context, instance, flavor_id, **kwargs)
    except exception.QuotaError as error:
        # Over quota: 403 with a Retry-After hint, matching create().
        raise exc.HTTPForbidden(
            explanation=error.format_message(),
            headers={'Retry-After': 0})
    except exception.FlavorNotFound:
        msg = _("Unable to locate requested flavor.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.CannotResizeToSameFlavor:
        msg = _("Resize requires a flavor change.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.CannotResizeDisk as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                              'resize')
    except exception.ImageNotAuthorized:
        msg = _("You are not authorized to access the image "
                "the instance was started with.")
        raise exc.HTTPUnauthorized(explanation=msg)
    except exception.ImageNotFound:
        msg = _("Image that the instance was started "
                "with could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.Invalid:
        # Generic handler; the more specific exception classes above
        # must be listed first so they win.
        msg = _("Invalid instance image.")
        raise exc.HTTPBadRequest(explanation=msg)
    except (exception.NoValidHost,
            exception.AutoDiskConfigDisabledByImage) as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
    return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
    """Destroys a server."""
    ctxt = req.environ['nova.context']
    try:
        self._delete(ctxt, req, id)
    except exception.NotFound:
        raise exc.HTTPNotFound(
            explanation=_("Instance could not be found"))
    except exception.InstanceIsLocked as locked:
        raise exc.HTTPConflict(explanation=locked.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'delete')
def _image_ref_from_req_data(self, data):
    """Return the imageRef from the request body as a unicode string."""
    try:
        # Conversion stays inside the try so a malformed value is also
        # reported as a missing/unusable imageRef.
        return unicode(data['server']['imageRef'])
    except (TypeError, KeyError):
        raise exc.HTTPBadRequest(
            explanation=_("Missing imageRef attribute"))
def _image_uuid_from_href(self, image_href):
    """Extract and validate the image UUID from an image href."""
    if not image_href:
        raise exc.HTTPBadRequest(
            explanation=_("Invalid imageRef provided."))
    # If the image href was generated by the nova API, the id is the
    # final path segment; strip it down and use the default glance
    # connection params.
    candidate = image_href.split('/')[-1]
    if not uuidutils.is_uuid_like(candidate):
        raise exc.HTTPBadRequest(
            explanation=_("Invalid imageRef provided."))
    return candidate
def _image_from_req_data(self, data):
    """Get image data from the request or raise appropriate exceptions.

    Returns the empty string for boot-from-volume requests: no imageRef
    was supplied, a block-device mapping is present, and the matching
    volumes extension is loaded.
    """
    server = data['server']
    image_ref = server.get('imageRef')
    if not image_ref:
        bdm = server.get('block_device_mapping')
        bdm_v2 = server.get('block_device_mapping_v2')
        boot_from_volume = (
            (bdm and self.ext_mgr.is_loaded('os-volumes')) or
            (bdm_v2 and self.ext_mgr.is_loaded(
                'os-block-device-mapping-v2-boot')))
        if boot_from_volume:
            return ''
    href = self._image_ref_from_req_data(data)
    return self._image_uuid_from_href(href)
def _flavor_id_from_req_data(self, data):
    """Return the flavor id referenced by the request body."""
    try:
        ref = data['server']['flavorRef']
    except (TypeError, KeyError):
        raise exc.HTTPBadRequest(
            explanation=_("Missing flavorRef attribute"))
    return common.get_id_from_href(ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
    """Set the admin password on a server."""
    ctxt = req.environ['nova.context']
    if ('changePassword' not in body
            or 'adminPass' not in body['changePassword']):
        raise exc.HTTPBadRequest(
            explanation=_("No adminPass was specified"))
    new_password = self._get_server_admin_password(body['changePassword'])
    instance = self._get_server(ctxt, req, id)
    try:
        self.compute_api.set_admin_password(ctxt, instance, new_password)
    except NotImplementedError:
        raise exc.HTTPNotImplemented(
            explanation=_("Unable to set password on instance"))
    return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
    """Ensure that we can work with the metadata given.

    Raises HTTPBadRequest if *metadata* is not dict-like.
    """
    try:
        # Probe for the (Python 2) dict protocol; lists, strings and
        # other non-mappings lack iteritems() and are rejected.
        metadata.iteritems()
    except AttributeError:
        msg = _("Unable to parse metadata key/value pairs.")
        LOG.debug(msg)
        raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
    """Resizes a given instance to the flavor size requested."""
    try:
        resize_body = body["resize"]
        flavor_ref = str(resize_body["flavorRef"])
    except (KeyError, TypeError):
        raise exc.HTTPBadRequest(
            explanation=_("Resize requests require 'flavorRef' attribute."))
    if not flavor_ref:
        raise exc.HTTPBadRequest(
            explanation=_("Resize request has invalid 'flavorRef' attribute."))
    kwargs = {}
    if 'auto_disk_config' in resize_body:
        kwargs['auto_disk_config'] = resize_body['auto_disk_config']
    return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
    """Rebuild an instance with the given attributes.

    The 'rebuild' entity must name an imageRef; optional attributes
    (name, access IPs, metadata, personality, auto_disk_config,
    preserve_ephemeral) are validated and forwarded to the compute API.
    """
    body = body['rebuild']
    try:
        image_href = body["imageRef"]
    except (KeyError, TypeError):
        msg = _("Could not parse imageRef from request.")
        raise exc.HTTPBadRequest(explanation=msg)
    image_href = self._image_uuid_from_href(image_href)
    password = self._get_server_admin_password(body)
    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)
    # Maps request-body attribute names to compute-API keyword names.
    attr_map = {
        'personality': 'files_to_inject',
        'name': 'display_name',
        'accessIPv4': 'access_ip_v4',
        'accessIPv6': 'access_ip_v6',
        'metadata': 'metadata',
        'auto_disk_config': 'auto_disk_config',
    }
    kwargs = {}
    # take the preserve_ephemeral value into account only when the
    # corresponding extension is active
    if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
            and 'preserve_ephemeral' in body):
        kwargs['preserve_ephemeral'] = strutils.bool_from_string(
            body['preserve_ephemeral'], strict=True)
    if 'accessIPv4' in body:
        self._validate_access_ipv4(body['accessIPv4'])
    if 'accessIPv6' in body:
        self._validate_access_ipv6(body['accessIPv6'])
    if 'name' in body:
        self._validate_server_name(body['name'])
    # Copy supplied attributes through under their compute-API names;
    # absent ones are simply skipped.
    for request_attribute, instance_attribute in attr_map.items():
        try:
            kwargs[instance_attribute] = body[request_attribute]
        except (KeyError, TypeError):
            pass
    self._validate_metadata(kwargs.get('metadata', {}))
    if 'files_to_inject' in kwargs:
        personality = kwargs.pop('files_to_inject')
        files_to_inject = self._get_injected_files(personality)
    else:
        files_to_inject = None
    try:
        self.compute_api.rebuild(context,
                                 instance,
                                 image_href,
                                 password,
                                 files_to_inject=files_to_inject,
                                 **kwargs)
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                              'rebuild')
    except exception.InstanceNotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
    except exception.InvalidMetadataSize as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.format_message())
    except exception.ImageNotFound:
        msg = _("Cannot find image for rebuild")
        raise exc.HTTPBadRequest(explanation=msg)
    except (exception.ImageNotActive,
            exception.FlavorDiskTooSmall,
            exception.FlavorMemoryTooSmall,
            exception.InvalidMetadata,
            exception.AutoDiskConfigDisabledByImage) as error:
        raise exc.HTTPBadRequest(explanation=error.format_message())
    # Re-fetch so the response reflects the rebuilt state.
    instance = self._get_server(context, req, id)
    view = self._view_builder.show(req, instance)
    # Add on the adminPass attribute since the view doesn't do it
    # unless instance passwords are disabled
    if CONF.enable_instance_password:
        view['server']['adminPass'] = password
    robj = wsgi.ResponseObject(view)
    return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
    """Snapshot a server instance.

    Responds 202 with a Location header pointing at the new image.
    """
    context = req.environ['nova.context']
    entity = body.get("createImage", {})
    image_name = entity.get("name")
    if not image_name:
        msg = _("createImage entity requires name attribute")
        raise exc.HTTPBadRequest(explanation=msg)
    props = {}
    metadata = entity.get('metadata', {})
    common.check_img_metadata_properties_quota(context, metadata)
    try:
        props.update(metadata)
    except ValueError:
        msg = _("Invalid metadata")
        raise exc.HTTPBadRequest(explanation=msg)
    instance = self._get_server(context, req, id)
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    try:
        # Volume-backed instances are snapshotted through the volume
        # API; everything else takes the regular image snapshot path.
        if self.compute_api.is_volume_backed_instance(context, instance,
                                                      bdms):
            img = instance['image_ref']
            if not img:
                # No image ref: derive the image properties from the
                # root volume's block-device metadata instead.
                properties = bdms.root_metadata(
                    context, self.compute_api.image_api,
                    self.compute_api.volume_api)
                image_meta = {'properties': properties}
            else:
                image_meta = self.compute_api.image_api.get(context, img)
            image = self.compute_api.snapshot_volume_backed(
                context,
                instance,
                image_meta,
                image_name,
                extra_properties=props)
        else:
            image = self.compute_api.snapshot(context,
                                              instance,
                                              image_name,
                                              extra_properties=props)
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                              'createImage')
    except exception.Invalid as err:
        raise exc.HTTPBadRequest(explanation=err.format_message())
    # build location of newly-created image entity
    image_id = str(image['id'])
    url_prefix = self._view_builder._update_glance_link_prefix(
        req.application_url)
    image_ref = os.path.join(url_prefix,
                             context.project_id,
                             'images',
                             image_id)
    resp = webob.Response(status_int=202)
    resp.headers['Location'] = image_ref
    return resp
def _get_server_admin_password(self, server):
    """Determine the admin password for a server on creation."""
    try:
        password = server['adminPass']
    except KeyError:
        # No password supplied; generate one.
        return utils.generate_password()
    try:
        self._validate_admin_password(password)
    except ValueError:
        raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
    return password
def _validate_admin_password(self, password):
    """Raise ValueError unless *password* is a string type."""
    if isinstance(password, six.string_types):
        return
    raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
    """Build the WSGI resource wrapping the servers controller."""
    controller = Controller(ext_mgr)
    return wsgi.Resource(controller)
def remove_invalid_options(context, search_options, allowed_search_options):
    """Strip search options a non-admin caller may not use.

    Admin contexts are left untouched; for everyone else, any key of
    *search_options* not listed in *allowed_search_options* is removed
    in place.
    """
    if context.is_admin:
        # Admins may filter on anything.
        return
    unknown_options = [opt for opt in search_options
                       if opt not in allowed_search_options]
    LOG.debug("Removing options '%s' from query",
              ", ".join(unknown_options))
    for opt in unknown_options:
        search_options.pop(opt, None)
|
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is the main AppEngine content handler for serving Ghilbert.
import cgi
import urllib
import logging
import StringIO
import os
import verify
import babygit.web
import app.edit
import app.recent
import app.showthm
import app.users
import app.wiki
import app.workspace
import webapp2
from webapp2_extras import json
from google.appengine.api import users
from google.appengine.ext import db
# Return a stream over all proofs in the repository, including the base peano set.
# The proofs are ordered by number. Optionally, you may provide one proof to be replaced.
# TODO: this probably doesn't have to all be in memory at once.
# TODO: this also shouldn't query the entire DB and sort.
def get_all_proofs(replacing=None, below=None):
    """Return a StringIO stream of the base axioms plus all stored proofs.

    The stream starts with the contents of peano/peano_thms.gh followed
    by every Proof entity in ascending ``number`` order.

    Args:
        replacing: optional Proof whose content replaces the stored proof
            of the same name, spliced in at its ``number`` position.
        below: optional float; proofs with number >= below are excluded.

    Returns:
        A StringIO rewound to the start of the combined text.
    """
    pipe = StringIO.StringIO()
    for line in open('peano/peano_thms.gh', 'r'):
        pipe.write(str(line))
    q = Proof.all()
    q.order('number')
    # Tracks whether the replacement has been emitted; trivially true
    # when there is nothing to replace.
    written = (replacing is None)
    for proof in q:
        if below is not None and proof.number >= below:
            break
        # Emit the replacement just before the first stored proof that
        # sorts after it.
        if not written and proof.number > replacing.number:
            pipe.write(str(replacing.content))
            written = True
        # Skip the stored copy of the proof being replaced.
        if replacing is None or proof.name != replacing.name:
            pipe.write("# number %s\n" % str(proof.number))
            if proof.content is None:
                pipe.write("# proof.content is None\n")
            else:
                pipe.write(str(proof.content))
            pipe.write("\n")
    if not written:
        # The replacement sorts after every stored proof (or the query
        # returned nothing).
        pipe.write(str(replacing.content))
    pipe.seek(0)
    return pipe
class Proof(db.Model):
    """Datastore entity holding one named Ghilbert proof."""
    author = db.UserProperty()                 # user who last saved the proof
    name = db.StringProperty()                 # theorem name (also used as entity key)
    content = db.TextProperty()                # raw .gh proof text
    date = db.DateTimeProperty(auto_now=True)  # refreshed on every put()
    number = db.FloatProperty()                # ordering key within the corpus
class Greeting(db.Model):
    """Datastore entity for a short user note (not referenced elsewhere
    in this part of the file)."""
    author = db.UserProperty()
    content = db.StringProperty(multiline=True)
    date = db.DateTimeProperty(auto_now_add=True)  # set once at creation
class RecentPage(webapp2.RequestHandler):
    """Renders the ten most recently saved proofs as simple HTML."""

    def get(self):
        out = self.response.out
        out.write("""<html><body>
<p>Recent saves:</p>""")
        recent = db.GqlQuery("SELECT * FROM Proof ORDER BY date DESC LIMIT 10")
        for proof in recent:
            if proof.author:
                out.write('<b>%s</b> wrote ' % proof.author.nickname())
            else:
                out.write('An anonymous person wrote ')
            out.write('<a href="/edit/%s">%s</a>:<br />' %
                      (urllib.quote(proof.name),
                       cgi.escape(proof.name)))
            escaped = "" if proof.content is None else cgi.escape(proof.content)
            # Rebuild each line so its leading-space count survives the
            # transformation, then join with <br /> tags.
            rendered = []
            for line in escaped.rstrip().split('\n'):
                stripped = line.lstrip()
                rendered.append(' ' * (len(line) - len(stripped)) + stripped)
            out.write('<blockquote>%s</blockquote>' % '<br />'.join(rendered))
class SaveHandler(webapp2.RequestHandler):
    """Verifies a posted proof and stores it in the datastore on success."""

    def post(self):
        # Note, the following line gets the un-url-encoded name.
        name = self.request.get('name')
        proof = Proof.get_or_insert(name)
        proof.name = name
        proof.content = self.request.get('content')
        proof.number = float(self.request.get('number'))
        if users.get_current_user():
            proof.author = users.get_current_user()
        self.response.out.write("Verifying:\n")
        # Run the verifier over the whole corpus with this proof spliced
        # in; the proof is only persisted if no errors are reported.
        pipe = get_all_proofs(replacing=proof)
        url = '-'
        urlctx = verify.UrlCtx('', 'peano/peano_thms.gh', pipe)
        ctx = verify.VerifyCtx(urlctx, verify.run, False)
        ctx.run(urlctx, url, ctx, self.response.out)
        # Accept or reject
        if ctx.error_count > 0:
            self.response.out.write("\nCannot save; %d errors\n" % ctx.error_count)
        else:
            proof.put()
            self.response.out.write("\nsave ok\n")
class EditPage(webapp2.RequestHandler):
    """Serves the interactive proof-editor page for a named theorem."""

    def get(self, name):
        if name == '':
            name = 'new%20theorem'
        # name here is URL-encoded, we want to display the unencoded version
        # as text, and avoid the possibility of injection attack.
        name = urllib.unquote(name);
        q = Proof.all()
        q.filter('name =', name)
        q.order('-date')
        proof = q.get()
        # Use the stored number, or one past the current maximum for a
        # brand-new proof (1 when the datastore is empty).
        if proof and proof.number is not None:
            number = float(proof.number)
        else:
            proofs = db.GqlQuery("SELECT * FROM Proof ORDER BY number DESC LIMIT 1")
            last_proof = proofs.get()
            if (last_proof and last_proof.number is not None):
                number = float(last_proof.number + 1)
            else:
                number = float(1)
        # Template interpolations: number (the input's value), the
        # Python 2 backtick-repr of name (a quoted JS string literal),
        # and number again in the proofs_upto URL.
        self.response.out.write("""<head>
<title>Ghilbert</title>
<style type="text/css">
#panel tr.headerRow {background-color: #ccc}
#panel tr.clickableRow {cursor: pointer}
#panel tr.clickableRow:hover {background-color: #eee}
#panel tr.clickableRow:active {background-color: #ddd}
table#panel { border: 1px solid black; border-collapse:collapse;}
#panel tr { border: 1px solid black; }
#panel td {padding: .3em; }
</style>
</head>
<body>
<a href="/">Home</a> <a href="/recent">Recent</a>
<h1>Ghilbert - editing <em id="thmname"></em></h1>
<script src="/js/verify.js" type="text/javascript"></script>
<script src="/js/sandbox.js" type="text/javascript"></script>
<script src="/js/inputlayer.js" type="text/javascript"></script>
<script src="/js/edit.js" type="text/javascript"></script>
<script src="/js/direct.js" type="text/javascript"></script>
<script src="/js/panel.js" type="text/javascript"></script>
<script src="/js/typeset.js" type="text/javascript"></script>
<p>
<div style="display:block;float:left">
<label for="number">number: </label><input type="text" id="number" value="%s"/> <a href="#" id="small" onclick="GH.setSize(0)">small</a> <a href="#" id="medium" onclick="GH.setSize(1)">medium</a> <a href="#" id="large" onclick="GH.setSize(2)">large</a>
<br/>
<textarea id="canvas" cols="80" rows="20" width="640" height="480" tabindex="0"></textarea><br/>
<input type="button" id="save" onclick="GH.save(document.getElementById('canvas').value)" name="save" value="save"/>
<input type="button" id="saveDraft" onclick="GH.saveDraft(document.getElementById('canvas').value)" name="save draft" value="save draft"/>
<span id="saving"></span>
<br/>
<a href="#" id="autounify" style="display:none">autounify</a><br/>
<div id="stack">...</div>
</div>
<div width="400" height="800" style="display:block;float:left">
<button id="inferences">Inference</button>
<button id="deductions">Deduction</button>
<button id="unified">Unified</button>
<label for="filter">filter: </label><input type="text" id="filter"/>
<br/>
<table id="panel" border="1" style="border:1px solid;">
</table>
</div>
<div id="output" style="clear:left;"></div>
<script type="text/javascript">
name = %s;
GH.Direct.replace_thmname(name);
GH.updatemultiline([], document.getElementById('stack'));
url = '/peano/peano_thms.gh';
uc = new GH.XhrUrlCtx('/', url);
v = new GH.VerifyCtx(uc, run);
run(uc, '/proofs_upto/%f', v);
var mainpanel = new GH.TextareaEdit(document.getElementById('canvas'));
//var mainpanel = GH.CanvasEdit.init();
window.direct = new GH.Direct(mainpanel, document.getElementById('stack'));
window.direct.vg = v;
var number = document.getElementById('number');
number.onchange = function() {
var url = '../peano/peano_thms.gh';
var uc = new GH.XhrUrlCtx('../', url);
var v = new GH.VerifyCtx(uc, run);
run(uc, '../proofs_upto/' + number.value, v);
window.direct.vg = v;
text.dirty();
};
var panel = new GH.Panel(window.direct.vg);
""" % (number, `name`, number));
        # Pre-populate the editor with the stored proof, if any.
        if proof:
            result = json.encode(proof.content.split('\n'))
            self.response.out.write('mainpanel.setLines(%s);\n' % result)
        self.response.out.write('</script>\n')
class PrintEnvironmentHandler(webapp2.RequestHandler):
    """Debug handler: dumps the WSGI environment as HTML lines."""

    def get(self, arg):
        self.response.out.write(json.encode([1, 2]) + "\n")
        if arg is not None:
            # Python 2 print statement: this goes to stdout/server log,
            # not to the HTTP response.
            print 'arg = ' + arg + '<br />\n'
        environ = self.request.environ
        for name in environ.keys():
            self.response.out.write("%s = %s<br />\n" % (name, environ[name]))
class AllProofsPage(webapp2.RequestHandler):
    """Serves the concatenated proof corpus below a given number."""

    def get(self, number):
        self.response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
        corpus = get_all_proofs(below=float(number))
        self.response.out.write(corpus.getvalue())
class StaticPage(webapp2.RequestHandler):
    """Serves files from the peano/ directory as plain text."""

    def get(self, filename):
        """Stream peano/<filename>; respond 404 if it cannot be opened."""
        # NOTE(review): filename comes from the URL; confirm the route
        # regex prevents '../' traversal outside peano/.
        try:
            # Drop the unused exception binding and the Python-2-only
            # "except IOError, x" comma syntax.
            lines = open('peano/%s' % filename)
        except IOError:
            self.error(404)
            return
        self.response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
        for line in lines:
            self.response.out.write(line)
class MainPage(webapp2.RequestHandler):
    """Front page: intro text plus login status."""

    def get(self):
        self.response.out.write("""<title>Ghilbert web app</title>
<body>
<h1>Ghilbert web app</h1>
<p>This is an early prototype of a web app for developing
<a href="http://sites.ghilbert.org/">Ghilbert</a>
proofs.</p>
<p>See above link for basic documentation. Source code for this site
is hosted at <a href="http://ghilbert.googlecode.com/">Google Code</a>.</p>
<p><a href="/recent">Recent saves</a></p>
<p><a href="/listthms">List of all theorems</a></p>
""")
        user = users.get_current_user()
        if user:
            self.response.out.write('<p>Logged in as ' + user.nickname() + '\n')
        # NOTE(review): the login link is emitted even when a user is
        # already logged in -- confirm whether that is intentional.
        self.response.out.write('<p><a href="%s">login</a>' %
                                users.create_login_url('/'))
# URL routing: order matters -- the specific patterns must precede the
# catch-all theorem-browser patterns at the end.
urlmap = [
    ('/', app.users.FrontPageHandler),
    ('/proofs_upto/(.*)', app.edit.UptoHandler),
    ('/edit/(.*)', app.edit.EditHandler),
    ('/env(/.*)?', PrintEnvironmentHandler),
    ('/git/(.*)', babygit.web.handler),
    ('/wiki(/.*)?', app.wiki.Handler),
    ('/workspace(/.*)?', app.workspace.Handler),
    ('/save', app.edit.SaveHandler),
    ('/account/(.*)', app.users.AccountHandler),
    ('/recent', app.recent.RecentChangesPage),
    ('/(.*\.gh/.*)', app.showthm.ShowThmPage),
    ('/(.*\.gh)', app.showthm.ListThmsPage),
    ('/(.*)', app.showthm.ThmBrowser),
]

config = {}
config['webapp2_extras.sessions'] = {
    # we use datastore session store, so doesn't need to be protected
    'secret_key': 'not very secret',
}
config['webapp2_extras.sessions']['cookie_args'] = {'max_age': 365 * 86400}

# Mark the session cookie Secure unless running under the local dev
# server.  Bug fix: default to '' so a missing SERVER_SOFTWARE variable
# (e.g. running outside App Engine) no longer raises AttributeError on
# None; the secure flag is then applied, which is the safe default.
if not os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
    config['webapp2_extras.sessions']['cookie_args']['secure'] = True

application = webapp2.WSGIApplication(urlmap, debug=True, config=config)


def main():
    """Run the WSGI application (CGI-style entry point)."""
    application.run()

if __name__ == "__main__":
    main()
|
|
import os.path
import shutil
import string
import tempfile
from characteristic import Attribute, attributes
from .exceptions import InvalidRegistrationVariable, NousagiTestError
@attributes([
    Attribute("variables", instance_of=dict),
    Attribute("environ", instance_of=dict),
])
class State(object):
    """Mutable state threaded through a test run.

    variables: values registered by earlier steps, consumed via
        string.Template substitution in later ones.
    environ: environment variables accumulated from Env steps.
    """
    pass
@attributes([
    Attribute("name", instance_of=str),
    Attribute("attribute", instance_of=str),
])
class RegisterVariable(object):
    """Placeholder for registering variables from command output.

    ``name`` is the state-variable name to bind; ``attribute`` is the
    ReturnState attribute the value is read from.
    """

    @classmethod
    def from_json_dict(cls, data):
        """Build an instance from a {"name": ..., "attribute": ...} dict."""
        return cls(name=data["name"], attribute=data["attribute"])
class Run(object):
    """Marker base class for runnable steps (Command, Env, MultiSteps)."""
    pass
class Command(Run):
    """Adapts a callable step so its output can register state variables."""

    def __init__(self, func, return_state_klass, registers=None):
        self._func = func
        self._return_state_klass = return_state_klass
        self._registers = registers or []
        # Fail fast on registrations the return state cannot satisfy.
        for reg in self._registers:
            self._return_state_klass.validate(reg)

    def register(self, return_state):
        assert isinstance(return_state, self._return_state_klass)
        collected = {}
        for reg in self._registers:
            collected.update(return_state.register(reg))
        return collected

    def run(self, state):
        return self._func(state)

    def update(self, state, return_state):
        state.variables.update(self.register(return_state))
@attributes(["name", "value"])
class Env(Run):
    """Step contributing one environment variable to the run state."""

    @attributes(["env"])
    class ReturnState(object):
        # Carries the {name: value} mapping produced by run().

        @classmethod
        def validate(cls, variable):
            # Env steps expose nothing to register; accept anything.
            pass

        def cleanup(self):
            pass

    @classmethod
    def from_json_dict(cls, json_data):
        """Build from a {"name": ..., "value": ...} JSON dict."""
        return cls(name=json_data["name"], value=json_data["value"])

    def run(self, state):
        # The value may reference previously registered variables via
        # string.Template $-placeholders.
        value = string.Template(self.value).substitute(**state.variables)
        return_state = self.ReturnState(env={self.name: value})
        return return_state

    def update(self, state, return_state):
        state.environ.update(return_state.env)
@attributes([Attribute("steps", instance_of=list)])
class MultiSteps(Run):
    """Composite step that runs a list of sub-steps in order."""

    @attributes(["states"])
    class ReturnState(object):
        # Holds one ReturnState per executed sub-step, in order.

        @classmethod
        def validate(cls, variable):
            pass

        def cleanup(self):
            pass

    def run(self, state):
        return_states = []
        for step in self.steps:
            return_state = step.run(state)
            # Apply each step's effects immediately so later steps can
            # see variables/environment produced by earlier ones.
            step.update(state, return_state)
            return_states.append(return_state)
        return_state = self.ReturnState(states=return_states)
        return return_state

    def update(self, state, return_state):
        # NOTE(review): the sub-steps were already updated during run();
        # this re-applies them -- confirm that is intended.
        for i, step in enumerate(self.steps):
            step.update(state, return_state.states[i])
class Mkdtemp(object):
    """Step that creates a temp directory and can register its path."""

    @attributes(["path"])
    class ReturnState(object):
        # path: the directory created by tempfile.mkdtemp().

        @classmethod
        def validate(cls, variable):
            """Raise unless *variable.attribute* names a real attribute."""
            attribute_names = tuple(
                attribute.name for attribute in cls.characteristic_attributes
            )
            if variable.attribute not in attribute_names:
                msg = "Invalid value name {0!r} (must be one of {1})"
                raise InvalidRegistrationVariable(
                    msg.format(variable.attribute, "|".join(attribute_names))
                )

        def cleanup(self):
            # Remove the directory (and contents) created by __call__.
            shutil.rmtree(self.path)

        def register(self, register):
            # Map the requested attribute value to the variable name.
            return {register.name: getattr(self, register.attribute)}

    @classmethod
    def from_json_dict(cls, json_data):
        # No configuration is used; the JSON payload is ignored.
        return cls()

    def __call__(self, state):
        path = tempfile.mkdtemp()
        return_state = self.ReturnState(path=path)
        return return_state
@attributes(["target", "template"])
class WriteFileFromTemplate(object):
    """Step that renders a template file to *target* using run variables."""

    @attributes([])
    class ReturnState(object):
        # Nothing to clean up or register for this step.

        @classmethod
        def validate(cls, variable):
            pass

        def cleanup(self):
            pass

        def register(self, register):
            return {}

    @classmethod
    def from_json_dict(cls, json_data):
        """Build from JSON; the source template file must already exist."""
        source_data = json_data["source"]
        kind = source_data.get("type", "template")
        if kind == "template":
            source = source_data["file"]
            if not os.path.exists(source):
                msg = "file {!r} not found".format(source)
                raise NousagiTestError(msg)
        # NOTE(review): for any *kind* other than "template", `source`
        # is unbound here and the call below raises NameError -- confirm
        # whether other kinds are expected.
        return cls(target=json_data["target"], template=source)

    def __call__(self, state):
        # Both paths and the file body undergo Template substitution
        # against the accumulated state variables.
        target = string.Template(self.target).substitute(**state.variables)
        source = string.Template(self.template).substitute(**state.variables)
        with open(source, "rt") as fp:
            template = string.Template(fp.read())
        with open(target, "wt") as fp:
            fp.write(template.substitute(**state.variables))
        return_state = self.ReturnState()
        return return_state
def pre_run_factory_from_json_dict(data):
    """Build a pre-run step (Command or Env) from its JSON description.

    Args:
        data: dict with a "type" of "command" or "env".  Command entries
            carry a "command" name and an optional "register" list; env
            entries carry "name" and "value".

    Returns:
        A Command or Env instance.

    Raises:
        NotImplementedError: for an unknown command name or type.
    """
    commands = {
        "mkdtemp": Mkdtemp,
        "write_file": WriteFileFromTemplate,
    }
    if data["type"] == "command":
        command_klass = commands.get(data["command"])
        if command_klass is None:
            # Bug fix: report the unknown command *name* from the input;
            # the previous code formatted command_klass, which is always
            # None on this path.
            msg = "Unknown command {!r}".format(data["command"])
            raise NotImplementedError(msg)
        command = command_klass.from_json_dict(data)
        registers_data = data.get("register", [])
        registers = tuple(
            RegisterVariable.from_json_dict(register_data)
            for register_data in registers_data
        )
        pre_run = Command(command, command.ReturnState, registers)
    elif data["type"] == "env":
        pre_run = Env.from_json_dict(data)
    else:
        msg = ("Unsupported type of pre run: {!r}".format(data["type"]))
        raise NotImplementedError(msg)
    return pre_run
|
|
"""Chebyshev Rational Approximation Method module
Implements two different forms of CRAM for use in openmc.deplete.
"""
from itertools import repeat
from multiprocessing import Pool
import time
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla
from .. import comm
def deplete(chain, x, op_result, dt, print_out):
    """Deplete materials using given reaction rates for a specified time

    Parameters
    ----------
    chain : openmc.deplete.Chain
        Depletion chain
    x : list of numpy.ndarray
        Atom number vectors for each material
    op_result : openmc.deplete.OperatorResult
        Result of applying transport operator (contains reaction rates)
    dt : float
        Time in [s] to deplete for
    print_out : bool
        Whether to show elapsed time

    Returns
    -------
    x_result : list of numpy.ndarray
        Updated atom number vectors for each material
    """
    start = time.time()

    # One (chain, vector, rates, dt) work item per material.
    n_mats = len(x)
    rates = (op_result.rates[i, :, :] for i in range(n_mats))
    work_items = zip(repeat(chain, n_mats), x, rates, repeat(dt, n_mats))

    # Fan the per-material matrix exponentials out over a process pool.
    with Pool() as pool:
        x_result = list(pool.starmap(_cram_wrapper, work_items))

    if comm.rank == 0 and print_out:
        print("Time to matexp: ", time.time() - start)

    return x_result
def _cram_wrapper(chain, n0, rates, dt):
    """Wraps depletion matrix creation / CRAM solve for multiprocess execution

    Parameters
    ----------
    chain : DepletionChain
        Depletion chain used to construct the burnup matrix
    n0 : numpy.array
        Vector to operate a matrix exponent on.
    rates : numpy.ndarray
        2D array indexed by nuclide then by cell.
    dt : float
        Time to integrate to.

    Returns
    -------
    numpy.array
        Results of the matrix exponent.
    """
    # Build this material's burnup matrix, then advance it with the
    # 48th-order CRAM solver.
    burnup_matrix = chain.form_matrix(rates)
    return CRAM48(burnup_matrix, n0, dt)
def CRAM16(A, n0, dt):
    """Chebyshev Rational Approximation Method, order 16

    Applies the 16th order Chebyshev Rational Approximation of the matrix
    exponential to the vector `n0`, using the numerically stable
    `incomplete partial fraction (IPF)
    <https://doi.org/10.13182/NSE15-26>`_ form.

    Parameters
    ----------
    A : scipy.linalg.csr_matrix
        Matrix to take exponent of.
    n0 : numpy.array
        Vector to operate a matrix exponent on.
    dt : float
        Time to integrate to.

    Returns
    -------
    numpy.array
        Results of the matrix exponent.
    """
    # Residues (alpha) and poles (theta) of the order-16 IPF expansion.
    # Index 0 carries the limit residue alpha0; indices 1-8 are the
    # conjugate pole pairs iterated over below.
    alpha = np.array([+2.124853710495224e-16,
                      +5.464930576870210e+3 - 3.797983575308356e+4j,
                      +9.045112476907548e+1 - 1.115537522430261e+3j,
                      +2.344818070467641e+2 - 4.228020157070496e+2j,
                      +9.453304067358312e+1 - 2.951294291446048e+2j,
                      +7.283792954673409e+2 - 1.205646080220011e+5j,
                      +3.648229059594851e+1 - 1.155509621409682e+2j,
                      +2.547321630156819e+1 - 2.639500283021502e+1j,
                      +2.394538338734709e+1 - 5.650522971778156e+0j],
                     dtype=np.complex128)
    theta = np.array([+0.0,
                      +3.509103608414918 + 8.436198985884374j,
                      +5.948152268951177 + 3.587457362018322j,
                      -5.264971343442647 + 16.22022147316793j,
                      +1.419375897185666 + 10.92536348449672j,
                      +6.416177699099435 + 1.194122393370139j,
                      +4.993174737717997 + 5.996881713603942j,
                      -1.413928462488886 + 13.49772569889275j,
                      -10.84391707869699 + 19.27744616718165j],
                     dtype=np.complex128)

    alpha0 = 2.124853710495224e-16
    dim = A.shape[0]
    ident = sp.eye(dim)
    At = A * dt

    y = np.array(n0, dtype=np.float64)
    # IPF form: fold in one conjugate pole pair per sparse solve.
    for res, pole in zip(alpha[1:], theta[1:]):
        y += 2.0 * np.real(res * sla.spsolve(At - pole * ident, y))
    y *= alpha0
    return y
def CRAM48(A, n0, dt):
    """Chebyshev Rational Approximation Method, order 48

    Applies the 48th order Chebyshev Rational Approximation of the matrix
    exponential to the vector `n0`, using the numerically stable
    `incomplete partial fraction (IPF)
    <https://doi.org/10.13182/NSE15-26>`_ form.

    Parameters
    ----------
    A : scipy.linalg.csr_matrix
        Matrix to take exponent of.
    n0 : numpy.array
        Vector to operate a matrix exponent on.
    dt : float
        Time to integrate to.

    Returns
    -------
    numpy.array
        Results of the matrix exponent.
    """
    # Poles of the order-48 IPF expansion, stored as separate real and
    # imaginary parts and combined below.
    theta_r = np.array([-4.465731934165702e+1, -5.284616241568964e+0,
                        -8.867715667624458e+0, +3.493013124279215e+0,
                        +1.564102508858634e+1, +1.742097597385893e+1,
                        -2.834466755180654e+1, +1.661569367939544e+1,
                        +8.011836167974721e+0, -2.056267541998229e+0,
                        +1.449208170441839e+1, +1.853807176907916e+1,
                        +9.932562704505182e+0, -2.244223871767187e+1,
                        +8.590014121680897e-1, -1.286192925744479e+1,
                        +1.164596909542055e+1, +1.806076684783089e+1,
                        +5.870672154659249e+0, -3.542938819659747e+1,
                        +1.901323489060250e+1, +1.885508331552577e+1,
                        -1.734689708174982e+1, +1.316284237125190e+1])
    theta_i = np.array([+6.233225190695437e+1, +4.057499381311059e+1,
                        +4.325515754166724e+1, +3.281615453173585e+1,
                        +1.558061616372237e+1, +1.076629305714420e+1,
                        +5.492841024648724e+1, +1.316994930024688e+1,
                        +2.780232111309410e+1, +3.794824788914354e+1,
                        +1.799988210051809e+1, +5.974332563100539e+0,
                        +2.532823409972962e+1, +5.179633600312162e+1,
                        +3.536456194294350e+1, +4.600304902833652e+1,
                        +2.287153304140217e+1, +8.368200580099821e+0,
                        +3.029700159040121e+1, +5.834381701800013e+1,
                        +1.194282058271408e+0, +3.583428564427879e+0,
                        +4.883941101108207e+1, +2.042951874827759e+1])
    theta = np.array(theta_r + theta_i * 1j, dtype=np.complex128)
    # Matching residues of the expansion.
    alpha_r = np.array([+6.387380733878774e+2, +1.909896179065730e+2,
                        +4.236195226571914e+2, +4.645770595258726e+2,
                        +7.765163276752433e+2, +1.907115136768522e+3,
                        +2.909892685603256e+3, +1.944772206620450e+2,
                        +1.382799786972332e+5, +5.628442079602433e+3,
                        +2.151681283794220e+2, +1.324720240514420e+3,
                        +1.617548476343347e+4, +1.112729040439685e+2,
                        +1.074624783191125e+2, +8.835727765158191e+1,
                        +9.354078136054179e+1, +9.418142823531573e+1,
                        +1.040012390717851e+2, +6.861882624343235e+1,
                        +8.766654491283722e+1, +1.056007619389650e+2,
                        +7.738987569039419e+1, +1.041366366475571e+2])
    alpha_i = np.array([-6.743912502859256e+2, -3.973203432721332e+2,
                        -2.041233768918671e+3, -1.652917287299683e+3,
                        -1.783617639907328e+4, -5.887068595142284e+4,
                        -9.953255345514560e+3, -1.427131226068449e+3,
                        -3.256885197214938e+6, -2.924284515884309e+4,
                        -1.121774011188224e+3, -6.370088443140973e+4,
                        -1.008798413156542e+6, -8.837109731680418e+1,
                        -1.457246116408180e+2, -6.388286188419360e+1,
                        -2.195424319460237e+2, -6.719055740098035e+2,
                        -1.693747595553868e+2, -1.177598523430493e+1,
                        -4.596464999363902e+3, -1.738294585524067e+3,
                        -4.311715386228984e+1, -2.777743732451969e+2])
    alpha = np.array(alpha_r + alpha_i * 1j, dtype=np.complex128)

    alpha0 = 2.258038182743983e-47
    dim = A.shape[0]
    ident = sp.eye(dim)
    At = A * dt

    y = np.array(n0, dtype=np.float64)
    # IPF form: fold in all 24 conjugate pole pairs sequentially.
    for res, pole in zip(alpha, theta):
        y += 2.0 * np.real(res * sla.spsolve(At - pole * ident, y))
    y *= alpha0
    return y
|
|
# EPlusInterface (EPI) - An interface for EnergyPlus
# Copyright (C) 2004 Santosh Philip
# This file is part of EPlusInterface.
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
# VERSION: 0.2
import mylib1,mylib2,mylib3,string
class eplusdict:
    """
    Ordered multi-value dictionary used by the E+ parsers (Python 2).

    Values are stored as lists, so repeated `add` calls for the same key
    accumulate entries instead of replacing them. `alist` records the
    order in which keys were added.
    """
    # class-level defaults; __init__ rebinds per-instance containers
    adict={}
    alist=[]
    def __init__(self,dct=None,lst=None):
        # Fresh containers unless the caller supplies existing ones.
        if dct==None:
            self.adict={}
        else:
            self.adict=dct
        if lst==None:
            self.alist=[]
        else:
            self.alist=lst
    def add(self,name,content):
        """
        Append `content` under key `name`.

        Dictionary values are entered as lists:
        {key1:[val1,val2,val3],key2:[val4,val8]}
        If a key already exists, the value is added to its list rather
        than replacing the existing list.
        """
        if not self.adict.has_key(name):
            self.adict[name]=[]
        self.adict[name].append(content)
        # NOTE(review): `name` is appended to alist even when the key
        # already exists, so alist may contain duplicates - confirm this
        # is intended by the callers.
        self.alist.append(name)
    def remove(self,name):
        # Drop the key and its (first) order entry.
        del self.adict[name]
        self.alist.remove(name)
    # def update(self,name,content):
    #     if not(self.adict.has_key(name)):
    #         raise ValueError,'%s does not exists in the ordereddict' %(`name`,)
    #     self.adict[name]=content
class ordereddict:
"""
use this when you want to use a dictionary
but want to maintain the order of the items.
There is a alist that keeps the order
"""
adict={}
alist=[]
def __init__(self,dct=None,lst=None):
if dct==None:
self.adict={}
else:
self.adict=dct
if lst==None:
self.alist=[]
else:
self.alist=lst
def add(self,name,content,warn=1):
"""
you can replace an existing value with this
just keep warn=0
"""
if warn:
if self.adict.has_key(name):
raise ValueError,'%s already exists in the ordereddict' %(`name`,)
self.adict[name]=content
try:
#in case you are replacing an existing value
self.alist.remove(name)
except:
pass
self.alist.append(name)
def remove(self,name):
del self.adict[name]
self.alist.remove(name)
def update(self,name,content):
if not(self.adict.has_key(name)):
raise ValueError,'%s does not exists in the ordereddict' %(`name`,)
self.adict[name]=content
def reverse(self):
self.alist.reverse()
class eplusdict1(ordereddict):
    """
    First cycle of extracting the dictionary from the .idd file.

    Strips comments, escapes separators inside `\\` comment tails, then
    repeatedly peels off the last `command` block of the text and stores
    it under the command name.
    """
    # line separator of the source text, detected in __init__
    linesep=''
    def __init__(self,st):
        linesep=mylib3.getlinesep(st)
        self.linesep=linesep
        # remove '!' comments before parsing
        nocom=nocomment(st)
        iddst=nocom.nocom.strip()
        # protect , and ; that appear inside backslash-comment tails
        st=self.removecommasemi(iddst)
        # Peel commands off the end until no ';' terminator remains.
        while st.find(';')!=-1:
            (command,this,rest)=self.commandthisrest(st)
            st=rest
            self.add(command,this)
        # commands were collected back-to-front; restore file order
        self.reverse()
    def removecommasemi(self,iddst):
        """
        need to change , and ; from the comment(\) line
        replace , with . and ; with :
        """
        slashcomment='\\'
        comma=','
        dot='.'
        semi=';'
        colon=':'
        linesep=self.linesep
        ls=iddst.split(linesep)
        for i in range(len(ls)):
            a=ls[i]
            if a.find(slashcomment)!=-1:
                # Only the text after the LAST backslash is rewritten.
                als=a.split(slashcomment)
                # NOTE(review): `tmp` is never used afterwards.
                tmp=als[-1]
                als[-1]=als[-1].replace(comma,dot)
                als[-1]=als[-1].replace(semi,colon)
                ls[i]=string.join(als,slashcomment)
        iddst=string.join(ls,linesep)
        return iddst
    def commandthisrest(self,st):
        """
        Split `st` into (command, this, rest), working backwards from the
        end of the text.

        st =
        \gumby
        Lead Input;
        Simulation Data;
        \group Simulation Parameters
        VERSION,
        \unique-object
        A1 ; \field Version Identifier
        \required-field
        BUILDING,
        \unique-object
        \required-object
        \min-fields 6
        A1 , \field Building Name
        \required-field
        \default NONE
        N1 , \field North Axis
        \note degrees from true North
        \units deg
        \type real
        \default 0.0
        ...
        ...
        ============================
        commandthisrest works backwords thru st
        In this case
        command = BUILDING
        this =
        BUILDING,
        \unique-object
        \required-object
        \min-fields 6
        A1 , \field Building Name
        \required-field
        \default NONE
        N1 , \field North Axis
        \note degrees from true North
        \units deg
        \type real
        \default 0.0
        ...
        ...
        rest =
        \gumby
        Lead Input;
        Simulation Data;
        \group Simulation Parameters
        VERSION,
        \unique-object
        A1 ; \field Version Identifier
        \required-field
        """
        linesep=self.linesep
        # the block between the last two ';' is the last command
        ls1=st.split(';')
        ls2=ls1[-2].split(',')
        ls3=ls2[0].split(linesep)
        # command name is the last line before the first ',' of the block
        command=ls3[-1].strip()
        st=string.join(ls3[:-1],linesep)
        # everything before the command block becomes `rest`
        ls=ls1[:-2]
        ls.append(st)
        rest=string.join(ls,';')
        # rebuild the command's own text as `this`
        ls2[0]=ls3[-1]
        ls2.append(ls1[-1])
        this=string.join(ls2,',')
        return command,this,rest
class eplusdict2(eplusdict1):
    """
    Second cycle of extracting the dictionary from the .idd file.

    Adds just-in-time parsing of a single command's raw text into an
    ordereddict of field name -> comment lines.
    """
    def __init__(self,st):
        eplusdict1.__init__(self,st)
    def parsecommand1(self,key):
        """
        Parse the raw text stored under `key` into an ordereddict mapping
        each field name (first line of a comma-separated chunk) to the
        remaining lines of that chunk.
        """
        linesep=self.linesep
        st=self.adict[key]
        st=st+linesep
        ls=st.split(',')
        for i in range(len(ls)):
            ls[i]=ls[i].split(linesep)
        for i in range(len(ls)-1):
            # move the trailing lines of the next chunk into this one,
            # leaving only the next chunk's field name behind
            ls[i]=ls[i]+(ls[i+1][:-1])
            ls[i+1]=[ls[i+1][-1]]
        ls.pop()
        bb=ordereddict()
        for el in ls:
            # BUG FIX (cleanup): removed a dead `key=el[0]` assignment
            # that shadowed the parameter and was never read.
            bb.add(el[0].strip(),el[1:])
        return bb
class nocomment:
    """
    Strip '!' comments from an EnergyPlus file (Python 2).

    The stripped text is available as `self.nocom` and via `repr()`.
    """
    def __init__(self,st):
        self.nocom=self.stripcomments(st)
    def __repr__(self):
        return self.nocom
    def sp(self,a):
        # split a line on the '!' comment marker
        return string.split(a,'!')
    def striplinecomments(self,aline):
        """Remove comment segments from a single line."""
        ls=self.sp(aline)
        # After splitting on '!', the odd-indexed segments are treated as
        # comments; pop from the end so earlier indices stay valid.
        # NOTE(review): assumes code/comment segments strictly alternate;
        # a line with several '!' inside one comment keeps every second
        # segment - confirm against real input files.
        #range(8)=[0, 1, 2, 3, 4, 5, 6, 7]
        #range(1,8,2)=[1, 3, 5, 7]
        popthis=range(1,len(ls),2)
        popthis.reverse()
        for i in popthis:
            ls.pop(i)
        return string.join(ls,'')
    def stripcomments(self,text):
        """Apply `striplinecomments` to every line of `text`."""
        lsep=mylib3.getlinesep(text)
        ls=string.split(text,lsep)
        for i in range(len(ls)):
            ls[i]=self.striplinecomments(ls[i])
        return string.join(ls,lsep)
    def tofile(self,filename):
        """Write the stripped text to `filename`."""
        mylib2.str2file(filename,`self`)
class nocomment1:
    """
    Strip comments from an EnergyPlus file (Python 2).

    Generalised variant of `nocomment`: the comment marker character is
    configurable and defaults to '!'.
    """
    def __init__(self,st,c='!'):
        # comment marker character used by `sp`
        self.cm=c
        self.nocom=self.stripcomments(st)
    def __repr__(self):
        return self.nocom
    def sp(self,a):
        # split a line on the configured comment marker
        return string.split(a,self.cm)
    def striplinecomments(self,aline):
        """Remove comment segments from a single line."""
        ls=self.sp(aline)
        # Odd-indexed segments after the split are treated as comments;
        # pop from the end so earlier indices stay valid.
        # NOTE(review): same alternation assumption as in `nocomment`.
        #range(8)=[0, 1, 2, 3, 4, 5, 6, 7]
        #range(1,8,2)=[1, 3, 5, 7]
        popthis=range(1,len(ls),2)
        popthis.reverse()
        for i in popthis:
            ls.pop(i)
        return string.join(ls,'')
    def stripcomments(self,text):
        """Apply `striplinecomments` to every line of `text`."""
        lsep=mylib3.getlinesep(text)
        ls=string.split(text,lsep)
        for i in range(len(ls)):
            ls[i]=self.striplinecomments(ls[i])
        return string.join(ls,lsep)
    def tofile(self,filename):
        """Write the stripped text to `filename`."""
        mylib2.str2file(filename,`self`)
class parseidd:
    """
    Complete parser for the .idd file (Python 2).

    Works, but is far too slow (about 30 minutes on a G3), so it is kept
    only for reference; prefer just-in-time parsing of each command as
    needed (see eplusdict2).
    """
    # class-level defaults; __init__ rebinds per-instance containers
    finallst=[]
    commentbin=[]
    wordbin=[]
    def __init__(self,st):
        self.finallst=[]
        self.commentbin=[]
        self.wordbin=[]
        sep=';'
        linesep=mylib3.getlinesep(st)
        # prepend a throwaway command so the real first command parses
        st='dummy;'+linesep+st#the first command is skipped
        (comment,word,rest)=self.commentwordrest(st,sep,linesep)
        self.commentbin.append(comment)
        self.wordbin.append(word)
        # Consume the text back-to-front until commasemifinish signals
        # completion by returning None.
        while rest!=None:
            rest=self.commasemifinish(rest,linesep)
    def commentwordrest(self,st,sep,linesep):
        """Split off the trailing comment and word at the last `sep`."""
        ls=st.split(sep)
        comment=ls[-1]
        rest=string.join(ls[:-1],sep)
        ls1=rest.split(linesep)
        lastline=ls1[-1]
        sentence=lastline
        if sentence.find(',')!=-1:
            ls2=sentence.split(',')
            word=ls2[-1].strip()
            #remove the word
            ls2.pop()
            lastline=string.join(ls2,',')
            ls1[-1]=lastline
        else:
            word=sentence.strip()
            ls1.pop()#remove the sentence/lastline
        rest=string.join(ls1,linesep)
        return (comment,word,rest)
    def commasemifinish(self,st,linesep):
        """Dispatch on whichever separator (',' or ';') occurs last."""
        #no comma no semis = finished
        if st.find(',')==-1 and st.find(';')==-1:
            self.finallst.reverse()
            return None
        else:
            # shorter trailing remainder => that separator occurs later
            ls1=st.split(',')
            ls2=st.split(';')
            comma=len(ls1[-1])
            semi=len(ls2[-1])
            if semi>comma:
                return self.docommastuff(st,linesep)
            else:
                return self.dosemistuff(st,linesep)
    def dofinish(self):
        print 'finished'
    def docommastuff(self,st,linesep):
        """Consume one ','-terminated field; prints progress."""
        sep=','
        (comment,word,rest)=self.commentwordrest(st,sep,linesep)
        self.commentbin.append(comment)
        self.wordbin.append(word)
        print word
        return rest
    def dosemistuff(self,st,linesep):
        """Close the current block, then consume one ';' field."""
        self.doblock()
        sep=';'
        (comment,word,rest)=self.commentwordrest(st,sep,linesep)
        self.commentbin.append(comment)
        self.wordbin.append(word)
        print word
        return rest
    def doblock(self):
        """Flush the accumulated words/comments into one ordereddict."""
        # entries were collected back-to-front; restore original order
        self.commentbin.reverse()
        self.wordbin.reverse()
        d=ordereddict()
        for i in range(len(self.wordbin)):
            d.add(self.wordbin[i],self.commentbin[i])
        self.commentbin=[]
        self.wordbin=[]
        self.finallst.append(d)
        del d
|
|
import tensorflow as tf
from tfsnippet.ops import assert_shape_equal
from tfsnippet.utils import (add_name_and_scope_arg_doc, get_static_shape,
validate_enum_arg, assert_deps,
get_shape)
from .base import FeatureMappingFlow
from .utils import SigmoidScale, ExpScale, LinearScale, ZeroLogDet
__all__ = ['CouplingLayer']
class CouplingLayer(FeatureMappingFlow):
    """
    A general implementation of the coupling layer (Dinh et al., 2016).

    Basically, a :class:`CouplingLayer` does the following transformation::

        x1, x2 = split(x)
        if secondary:
            x1, x2 = x2, x1

        y1 = x1
        shift, scale = shift_and_scale_fn(x1, x2.shape[axis])
        if scale_type == 'exp':
            y2 = (x2 + shift) * exp(scale)
        elif scale_type == 'sigmoid':
            y2 = (x2 + shift) * sigmoid(scale + sigmoid_scale_bias)
        elif scale_type == 'linear':
            y2 = (x2 + shift) * scale
        else:
            y2 = x2 + shift

        if secondary:
            y1, y2 = y2, y1
        y = tf.concat([y1, y2], axis=axis)

    The inverse transformation, and the log-determinants are computed
    according to the above transformation, respectively.
    """

    @add_name_and_scope_arg_doc
    def __init__(self,
                 shift_and_scale_fn,
                 axis=-1,
                 value_ndims=1,
                 secondary=False,
                 scale_type='linear',
                 sigmoid_scale_bias=2.,
                 epsilon=1e-6,
                 name=None,
                 scope=None):
        """
        Construct a new :class:`BaseCouplingLayer`.

        Args:
            shift_and_scale_fn ((tf.Tensor, int) -> (tf.Tensor, tf.Tensor or None)):
                A function to which maps ``(x1, x2.shape[axis])`` to
                ``(shift, scale)`` (see above). If `scale_type == None`,
                it should return `scale == None`. It should be a function
                that reuses a fixed variable scope, e.g., a template function
                derived by :func:`tf.make_template`, or an instance of
                :class:`tfsnippet.layers.BaseLayer`.
            axis (int): The feature axis, to apply the transformation.
            value_ndims (int): Number of dimensions to be considered as the
                value dimensions. `x.ndims - value_ndims == log_det.ndims`.
            secondary (bool): Whether or not this layer is a secondary layer?
                See :class:`tfsnippet.layers.CouplingLayer`.
            scale_type: One of {"exp", "sigmoid", "linear", None}.
                See :class:`tfsnippet.layers.CouplingLayer`.
            sigmoid_scale_bias (float or Tensor): Add this bias to the `scale`
                if ``scale_type == 'sigmoid'``. See the reason of adopting
                this in :class:`tfsnippet.layers.CouplingLayer`.
            epsilon: Small float number to avoid dividing by zero or taking
                logarithm of zero.
        """
        self._shift_and_scale_fn = shift_and_scale_fn
        self._secondary = bool(secondary)
        self._scale_type = validate_enum_arg(
            'scale_type', scale_type, ['exp', 'sigmoid', 'linear', None])
        self._sigmoid_scale_bias = sigmoid_scale_bias
        self._epsilon = epsilon
        # Size of the feature axis; populated in `_build` from the first
        # input tensor, used by `_split` / `_unsplit`.
        self._n_features = None  # type: int
        super(CouplingLayer, self).__init__(
            axis=int(axis), value_ndims=value_ndims, name=name, scope=scope)

    @property
    def explicitly_invertible(self):
        # Coupling layers have an exact analytic inverse.
        return True

    def _build(self, input=None):
        # The feature axis must be splittable into two non-empty halves.
        n_features = get_static_shape(input)[self.axis]
        if n_features < 2:
            raise ValueError('The feature axis of `input` must be at least 2: '
                             'got {}, input {}, axis {}.'.
                             format(n_features, input, self.axis))
        self._n_features = n_features

    def _split(self, x):
        # Split `x` along the feature axis into (kept, transformed) halves.
        # Returns (x1, x2, n2) where n2 is the size of the transformed half.
        n_features = get_static_shape(x)[self.axis]
        assert(self._n_features == n_features)
        n1 = n_features // 2
        n2 = n_features - n1
        x1, x2 = tf.split(x, [n1, n2], self.axis)
        if self._secondary:
            # secondary layers transform the first half instead
            return x2, x1, n1
        else:
            return x1, x2, n2

    def _unsplit(self, x1, x2):
        # Inverse of `_split`: undo the secondary swap and re-concatenate.
        n1 = self._n_features // 2
        n2 = self._n_features - n1
        if self._secondary:
            x1, x2 = x2, x1
        assert(get_static_shape(x1)[self.axis] == n1)
        assert(get_static_shape(x2)[self.axis] == n2)
        return tf.concat([x1, x2], axis=self.axis)

    def _check_scale_or_shift_shape(self, name, tensor, x2):
        # Runtime assertion that `tensor` (shift or scale) matches x2's shape.
        assert_op = assert_shape_equal(
            tensor, x2,
            message='`{}.shape` expected to be {}, but got {}'.format(
                name,
                get_static_shape(x2),
                get_static_shape(tensor)
            )
        )
        with assert_deps([assert_op]) as asserted:
            if asserted:  # pragma: no cover
                tensor = tf.identity(tensor)
        return tensor

    def _transform_or_inverse_transform(self, x, compute_y, compute_log_det,
                                        reverse=False):
        # Since the transform and inverse_transform are too similar, we
        # just implement these two methods by one super method, controlled
        # by `reverse == True/False`.

        # check the argument
        shape = get_static_shape(x)
        assert (len(shape) >= self.value_ndims)  # checked in `BaseFlow`

        # split the tensor
        x1, x2, n2 = self._split(x)

        # compute the scale and shift
        shift, pre_scale = self._shift_and_scale_fn(x1, n2)
        if self._scale_type is not None and pre_scale is None:
            raise RuntimeError('`scale_type` != None, but no scale is '
                               'computed.')
        elif self._scale_type is None and pre_scale is not None:
            raise RuntimeError('`scale_type` == None, but scale is computed.')
        if pre_scale is not None:
            pre_scale = self._check_scale_or_shift_shape('scale', pre_scale, x2)
        shift = self._check_scale_or_shift_shape('shift', shift, x2)

        # derive the scale class
        if self._scale_type == 'sigmoid':
            scale = SigmoidScale(
                pre_scale + self._sigmoid_scale_bias, self._epsilon)
        elif self._scale_type == 'exp':
            scale = ExpScale(pre_scale, self._epsilon)
        elif self._scale_type == 'linear':
            scale = LinearScale(pre_scale, self._epsilon)
        else:
            assert (self._scale_type is None)
            scale = None

        # compute y
        y = None
        if compute_y:
            y1 = x1
            if reverse:
                # inverse: divide out the scale, then subtract the shift
                y2 = x2
                if scale is not None:
                    y2 = y2 / scale
                y2 -= shift
            else:
                # forward: add the shift, then multiply by the scale
                y2 = x2 + shift
                if scale is not None:
                    y2 = y2 * scale
            y = self._unsplit(y1, y2)

        # compute log_det
        log_det = None
        if compute_log_det:
            assert (self.value_ndims >= 0)  # checked in `_build`
            if scale is not None:
                # sum the (negated, for inverse) log-scales over the value dims
                log_det = tf.reduce_sum(
                    scale.neg_log_scale() if reverse else scale.log_scale(),
                    axis=list(range(-self.value_ndims, 0))
                )
            else:
                # pure shift is volume preserving: log-det is zero
                log_det = ZeroLogDet(get_shape(x)[:-self.value_ndims],
                                     x.dtype.base_dtype)

        return y, log_det

    def _transform(self, x, compute_y, compute_log_det):
        return self._transform_or_inverse_transform(
            x=x, compute_y=compute_y, compute_log_det=compute_log_det,
            reverse=False
        )

    def _inverse_transform(self, y, compute_x, compute_log_det):
        return self._transform_or_inverse_transform(
            x=y, compute_y=compute_x, compute_log_det=compute_log_det,
            reverse=True
        )
|
|
"""AVM FRITZ!Box binary sensors."""
from __future__ import annotations
import datetime
import logging
from typing import Callable, TypedDict
from fritzconnection.core.exceptions import (
FritzActionError,
FritzActionFailedError,
FritzConnectionException,
FritzServiceError,
)
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DATA_RATE_KILOBITS_PER_SECOND,
DATA_RATE_KILOBYTES_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
SIGNAL_STRENGTH_DECIBELS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utcnow
from .common import FritzBoxBaseEntity, FritzBoxTools
from .const import DOMAIN, DSL_CONNECTION, UPTIME_DEVIATION
_LOGGER = logging.getLogger(__name__)
def _uptime_calculation(seconds_uptime: float, last_value: str | None) -> str:
    """Calculate uptime with deviation."""
    boot_time = utcnow() - datetime.timedelta(seconds=seconds_uptime)
    if last_value:
        drift = boot_time - datetime.datetime.fromisoformat(last_value)
        if abs(drift.total_seconds()) <= UPTIME_DEVIATION:
            # Still within tolerance: keep the previously reported value to
            # avoid flapping the timestamp on small measurement jitter.
            return last_value
    return boot_time.replace(microsecond=0).isoformat()
def _retrieve_device_uptime_state(status: FritzStatus, last_value: str) -> str:
    """Return uptime from device."""
    return _uptime_calculation(status.device_uptime, last_value)


def _retrieve_connection_uptime_state(
    status: FritzStatus, last_value: str | None
) -> str:
    """Return uptime from connection."""
    return _uptime_calculation(status.connection_uptime, last_value)


def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str:
    """Return external ip from device."""
    external_ip: str = status.external_ip
    return external_ip


def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
    """Return upload transmission rate."""
    rates = status.transmission_rate
    return round(rates[0] / 1000, 1)


def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
    """Return download transmission rate."""
    rates = status.transmission_rate
    return round(rates[1] / 1000, 1)


def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
    """Return upload max transmission rate."""
    max_rates = status.max_bit_rate
    return round(max_rates[0] / 1000, 1)


def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
    """Return download max transmission rate."""
    max_rates = status.max_bit_rate
    return round(max_rates[1] / 1000, 1)


def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float:
    """Return upload total data."""
    gigabytes = status.bytes_sent / 1000 / 1000 / 1000
    return round(gigabytes, 1)


def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float:
    """Return download total data."""
    gigabytes = status.bytes_received / 1000 / 1000 / 1000
    return round(gigabytes, 1)


def _retrieve_link_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
    """Return upload link rate."""
    link_rates = status.max_linked_bit_rate
    return round(link_rates[0] / 1000, 1)


def _retrieve_link_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
    """Return download link rate."""
    link_rates = status.max_linked_bit_rate
    return round(link_rates[1] / 1000, 1)


def _retrieve_link_noise_margin_sent_state(
    status: FritzStatus, last_value: str
) -> float:
    """Return upload noise margin."""
    margins = status.noise_margin
    return margins[0] / 10


def _retrieve_link_noise_margin_received_state(
    status: FritzStatus, last_value: str
) -> float:
    """Return download noise margin."""
    margins = status.noise_margin
    return margins[1] / 10


def _retrieve_link_attenuation_sent_state(
    status: FritzStatus, last_value: str
) -> float:
    """Return upload line attenuation."""
    attenuations = status.attenuation
    return attenuations[0] / 10


def _retrieve_link_attenuation_received_state(
    status: FritzStatus, last_value: str
) -> float:
    """Return download line attenuation."""
    attenuations = status.attenuation
    return attenuations[1] / 10
class SensorData(TypedDict, total=False):
    """Static description of one FRITZ!Box sensor type."""

    # Human readable sensor name (device name is prefixed at runtime).
    name: str
    # Home Assistant device class, e.g. DEVICE_CLASS_TIMESTAMP.
    device_class: str | None
    # Home Assistant state class (measurement / total increasing).
    state_class: str | None
    # Unit of the native value.
    unit_of_measurement: str | None
    # Material Design icon identifier.
    icon: str | None
    # Callable mapping (FritzStatus, last_value) to the new native value.
    state_provider: Callable
    # Set to DSL_CONNECTION for sensors only created on DSL lines.
    connection_type: str | None
# Static descriptions of all supported sensors, keyed by sensor type.
# Entries with connection_type=DSL_CONNECTION are only instantiated when
# the box reports an enabled DSL interface (see async_setup_entry).
SENSOR_DATA = {
    "external_ip": SensorData(
        name="External IP",
        icon="mdi:earth",
        state_provider=_retrieve_external_ip_state,
    ),
    "device_uptime": SensorData(
        name="Device Uptime",
        device_class=DEVICE_CLASS_TIMESTAMP,
        state_provider=_retrieve_device_uptime_state,
    ),
    "connection_uptime": SensorData(
        name="Connection Uptime",
        device_class=DEVICE_CLASS_TIMESTAMP,
        state_provider=_retrieve_connection_uptime_state,
    ),
    "kb_s_sent": SensorData(
        name="Upload Throughput",
        state_class=STATE_CLASS_MEASUREMENT,
        unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
        icon="mdi:upload",
        state_provider=_retrieve_kb_s_sent_state,
    ),
    "kb_s_received": SensorData(
        name="Download Throughput",
        state_class=STATE_CLASS_MEASUREMENT,
        unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
        icon="mdi:download",
        state_provider=_retrieve_kb_s_received_state,
    ),
    "max_kb_s_sent": SensorData(
        name="Max Connection Upload Throughput",
        unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
        icon="mdi:upload",
        state_provider=_retrieve_max_kb_s_sent_state,
    ),
    "max_kb_s_received": SensorData(
        name="Max Connection Download Throughput",
        unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
        icon="mdi:download",
        state_provider=_retrieve_max_kb_s_received_state,
    ),
    "gb_sent": SensorData(
        name="GB sent",
        state_class=STATE_CLASS_TOTAL_INCREASING,
        unit_of_measurement=DATA_GIGABYTES,
        icon="mdi:upload",
        state_provider=_retrieve_gb_sent_state,
    ),
    "gb_received": SensorData(
        name="GB received",
        state_class=STATE_CLASS_TOTAL_INCREASING,
        unit_of_measurement=DATA_GIGABYTES,
        icon="mdi:download",
        state_provider=_retrieve_gb_received_state,
    ),
    # DSL-only sensors below.
    "link_kb_s_sent": SensorData(
        name="Link Upload Throughput",
        unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
        icon="mdi:upload",
        state_provider=_retrieve_link_kb_s_sent_state,
        connection_type=DSL_CONNECTION,
    ),
    "link_kb_s_received": SensorData(
        name="Link Download Throughput",
        unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND,
        icon="mdi:download",
        state_provider=_retrieve_link_kb_s_received_state,
        connection_type=DSL_CONNECTION,
    ),
    "link_noise_margin_sent": SensorData(
        name="Link Upload Noise Margin",
        unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
        icon="mdi:upload",
        state_provider=_retrieve_link_noise_margin_sent_state,
        connection_type=DSL_CONNECTION,
    ),
    "link_noise_margin_received": SensorData(
        name="Link Download Noise Margin",
        unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
        icon="mdi:download",
        state_provider=_retrieve_link_noise_margin_received_state,
        connection_type=DSL_CONNECTION,
    ),
    "link_attenuation_sent": SensorData(
        name="Link Upload Power Attenuation",
        unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
        icon="mdi:upload",
        state_provider=_retrieve_link_attenuation_sent_state,
        connection_type=DSL_CONNECTION,
    ),
    "link_attenuation_received": SensorData(
        name="Link Download Power Attenuation",
        unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
        icon="mdi:download",
        state_provider=_retrieve_link_attenuation_received_state,
        connection_type=DSL_CONNECTION,
    ),
}
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the FRITZ!Box sensor entities for a config entry."""
    _LOGGER.debug("Setting up FRITZ!Box sensors")
    fritzbox_tools: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
    if (
        not fritzbox_tools.connection
        or "WANIPConn1" not in fritzbox_tools.connection.services
    ):
        # Only routers are supported at the moment
        return
    entities: list[FritzBoxSensor] = []
    dsl: bool = False
    try:
        # Ask the box whether a DSL interface is configured; call_action is
        # blocking, so run it in the executor.
        dslinterface = await hass.async_add_executor_job(
            fritzbox_tools.connection.call_action,
            "WANDSLInterfaceConfig:1",
            "GetInfo",
        )
        dsl = dslinterface["NewEnable"]
    except (FritzActionError, FritzActionFailedError, FritzServiceError):
        # No DSL service available - keep dsl=False and skip DSL sensors.
        pass
    for sensor_type, sensor_data in SENSOR_DATA.items():
        # Skip DSL-only sensors on non-DSL connections.
        if not dsl and sensor_data.get("connection_type") == DSL_CONNECTION:
            continue
        entities.append(FritzBoxSensor(fritzbox_tools, entry.title, sensor_type))
    if entities:
        async_add_entities(entities, True)
class FritzBoxSensor(FritzBoxBaseEntity, SensorEntity):
    """Define FRITZ!Box connectivity class."""

    def __init__(
        self, fritzbox_tools: FritzBoxTools, device_friendly_name: str, sensor_type: str
    ) -> None:
        """Init FRITZ!Box connectivity class."""
        # Static description for this sensor type (name, icon, provider, ...).
        self._sensor_data: SensorData = SENSOR_DATA[sensor_type]
        # Last value produced by the state provider; fed back to the
        # provider on the next poll (used for uptime jitter suppression).
        self._last_device_value: str | None = None
        self._attr_available = True
        self._attr_device_class = self._sensor_data.get("device_class")
        self._attr_icon = self._sensor_data.get("icon")
        self._attr_name = f"{device_friendly_name} {self._sensor_data['name']}"
        self._attr_state_class = self._sensor_data.get("state_class")
        self._attr_native_unit_of_measurement = self._sensor_data.get(
            "unit_of_measurement"
        )
        self._attr_unique_id = f"{fritzbox_tools.unique_id}-{sensor_type}"
        super().__init__(fritzbox_tools, device_friendly_name)

    @property
    def _state_provider(self) -> Callable:
        """Return the state provider for the binary sensor."""
        return self._sensor_data["state_provider"]

    def update(self) -> None:
        """Update data."""
        _LOGGER.debug("Updating FRITZ!Box sensors")
        try:
            status: FritzStatus = self._fritzbox_tools.fritz_status
            self._attr_available = True
        except FritzConnectionException:
            _LOGGER.error("Error getting the state from the FRITZ!Box", exc_info=True)
            self._attr_available = False
            return
        # Provider computes the new value; remember it for the next poll.
        self._attr_native_value = self._last_device_value = self._state_provider(
            status, self._last_device_value
        )
|
|
import requests
import json
from urllib.parse import urljoin
from robot.api import logger
from .version import VERSION
__version__ = VERSION
class WireMockLibrary(object):
"""Robot Framework library for interacting with [http://wiremock.org|WireMock]
The purpose of this library is to provide a keyword-based API
towards WireMock to be used in robot tests. The project is hosted in
[https://github.com/tyrjola/robotframework-wiremock|GitHub],
and packages are released to PyPI.
= Installation =
| pip install robotframework-wiremock
= Importing =
The library does not currently support any import arguments, so use the
following setting to take the library into use:
| Library | WireMockLibrary |
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = __version__
def create_mock_session(self, base_url):
"""Creates an HTTP session towards wiremock.
`base_url` is the full url (including port, if applicable) of the WireMock
server, e.g. http://localhost:8080.
"""
logger.debug("robotframework-wiremock libary version: {}".format(__version__))
self.base_url = base_url
self.session = requests.Session()
def create_mock_request_matcher(self, method, url, url_match_type='urlPath',
query_parameters=None, headers=None, cookies=None,
json_body=None, regex_matching=False):
"""Creates a mock request matcher to be used by wiremock.
Returns the request matcher in a dictionary format.
`method` is the HTTP method of the mocked endpoint
`url` is the url or url pattern of the mocked endpoint, e.g. /api or /api.*
`url_match_type` is the wiremock url match pattern to use. Applicable values
are:
- `url` (match url and query params)
- `urlPattern` (match url and query params with regex)
- `urlPath` (match url)
- `urlPathPattern` (match url with regex)
`query_parameters` is a dictionary of query parameters to match
`headers` is a dictionary containing headers to match (case-insensitive matching)
`cookies` is a dictionary containing cookies to match
`json_body` is a dictionary of the json attribute(s) to match
`regex_matching` is a boolean value which, if enabled, uses regex to match
query parameter and header values
"""
req = {}
req['method'] = method
req[url_match_type] = url
match_type = 'matches' if regex_matching else 'equalTo'
if query_parameters:
req['queryParameters'] = {key: {match_type: value}
for (key, value) in query_parameters.items()}
if headers:
req['headers'] = {key: {match_type: value, 'caseInsensitive': True}
for (key, value) in headers.items()}
if cookies:
req['cookies'] = {key: {match_type: value}
for (key, value) in cookies.items()}
if json_body:
req['bodyPatterns'] = [{'equalToJson': json.dumps(json_body),
'ignoreArrayOrder': True,
'ignoreExtraElements': True}]
return req
def create_mock_response(self, status, status_message=None,
                         headers=None, json_body=None, template=False):
    """Creates a mock response to be used by wiremock.

    Returns the response in a dictionary format.

    `status` is the HTTP status code of the response.
    `status_message` is the HTTP status message of the response.
    `headers` is a dictionary of headers to be added to the response.
    `json_body` is a dictionary of JSON attribute(s) to be added to the response body.
    `template` is a boolean value which specifies whether to use templating in
    the response, e.g. for copying a parameter, header or body value from the
    request to the response.
    """
    rsp = {}
    # Accept both int and string status codes (Robot arguments are strings).
    rsp['status'] = int(status)
    # Fix: only include 'headers' when provided, consistent with the other
    # optional fields; previously a literal "headers": null was sent.
    if headers:
        rsp['headers'] = headers
    if status_message:
        rsp['statusMessage'] = status_message
    if json_body:
        rsp['jsonBody'] = json_body
    if template:
        # Enables WireMock's response templating transformer.
        rsp['transformers'] = ['response-template']
    return rsp
def create_mock_mapping(self, request, response):
    """Registers a mapping on the wiremock server.

    `request` is a mock request matcher in dictionary format.
    `response` is a mock response in dictionary format.
    """
    # A mapping is simply the pairing of a matcher with its canned response.
    self.create_mock_mapping_with_data({'request': request, 'response': response})
def create_default_mock_mapping(self, method, url, status=200, status_message=None,
                                response_headers=None, response_body=None, template=False):
    """Registers a simple default expectation on the wiremock server.

    `method` is the HTTP method of the mocked endpoint.
    `url` is the url pattern of the mocked endpoint(s), e.g. /.*api.*
    `status` is the HTTP status code of the response.
    `status_message` is the HTTP status message of the response.
    `response_headers` is a dictionary of headers to be added to the response.
    `response_body` is a dictionary of JSON attribute(s) to be added to the
    response body.
    `template` is a boolean value which specifies whether to use templating in
    the response, e.g. for copying a parameter, header or body value from the
    request to the response.
    """
    # Default mappings always match on a url path pattern (regex).
    matcher = self.create_mock_request_matcher(method, url,
                                               url_match_type='urlPathPattern')
    canned = self.create_mock_response(status, status_message,
                                       response_headers, response_body, template)
    self.create_mock_mapping(matcher, canned)
def create_mock_mapping_with_data(self, data):
    """Registers a raw mapping payload on the wiremock server.

    `data` is a dictionary or JSON string with mapping data. Please see
    [http://wiremock.org/docs/api/|WireMock documentation] for the detailed
    API reference.
    """
    # POSTing to /__admin/mappings creates a new stub mapping.
    self._send_request("/__admin/mappings", data)
def get_requests(self, url, method=None):
    """Returns all requests received by wiremock for a given url pattern.

    `url` is the url pattern of the endpoint(s), e.g. /.*api.*
    `method` is an optional HTTP method filter.
    """
    criteria = {}
    if method:
        criteria['method'] = method
    criteria['urlPathPattern'] = url
    # WireMock's journal search endpoint returns {"requests": [...]}.
    response = self._send_request("/__admin/requests/find", criteria)
    return response.json()['requests']
def get_previous_request(self, url, method=None):
    """Returns the most recent request received by wiremock for a url pattern.

    `url` is the url pattern of the endpoint(s), e.g. /.*api.*
    `method` is an optional HTTP method filter.
    """
    received = self.get_requests(url, method)
    # The journal is chronological, so the last entry is the newest.
    return received[-1]
def get_previous_request_body(self, url, method=None):
    """Returns the parsed JSON body of the most recent matching request.

    `url` is the url pattern of the endpoint(s), e.g. /.*api.*
    `method` is an optional HTTP method filter.
    """
    latest = self.get_requests(url, method)[-1]
    # The journal stores the body as a raw string; decode it to a dict.
    return json.loads(latest['body'])
def reset_mock_mappings(self):
    """Removes every stub mapping registered on the wiremock server."""
    self._send_request("/__admin/mappings/reset")

def reset_request_log(self):
    """Clears the journal of requests recorded by the wiremock server."""
    self._send_request("/__admin/requests/reset")
def _send_request(self, path, data=None):
    """POSTs `data` to the admin endpoint `path`; raises on HTTP errors.

    `data` may be a dict (serialized to JSON here) or a pre-encoded string.
    Returns the `requests` response object.
    """
    payload = json.dumps(data) if isinstance(data, dict) else data
    target = urljoin(self.base_url, path)
    logger.debug("url: {}, data: {}".format(target, payload))
    response = self.session.post(target, data=payload, timeout=5.0)
    # Surface WireMock admin-API failures as test failures.
    if response.status_code >= 400:
        raise AssertionError("Wiremock failed with {}: {}".format(response.status_code, response.text))
    return response
|
|
##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
##########################################################################
# Metadata
##########################################################################
def __drawingSummary( plug ) :
    # Summarise the "Drawing" section for the NodeEditor: one
    # "<label> : <values>" entry per enabled primitive drawing plug.
    info = []
    for name, label in (
        ( "Solid", "Shaded" ),
        ( "Wireframe", "Wireframe" ),
        ( "Outline", "Outline" ),
        ( "Point", "Point" ),
        ( "Bound", "Bound" ),
    ) :
        values = []
        primitive = plug["primitive" + name]
        if primitive["enabled"].getValue() :
            values.append( "On" if primitive["value"].getValue() else "Off" )
        if name != "Solid" :
            # Solid drawing has no colour/width override plugs.
            if plug["primitive" + name + "Color"]["enabled"].getValue() :
                values.append( "Color" )
            if name != "Bound" and plug["primitive" + name + "Width"]["enabled"].getValue() :
                values.append( "%0gpx" % plug["primitive" + name + "Width"]["value"].getValue() )
        if values :
            info.append( label + " : " + "/".join( values ) )
    return ", ".join( info )
def __pointsPrimitivesSummary( plug ) :
    # Summarise the "Points Primitives" section for the NodeEditor.
    info = []
    glPoints = plug["pointsPrimitiveUseGLPoints"]
    if glPoints["enabled"].getValue() :
        info.append( "Points On" if glPoints["value"].getValue() else "Points Off" )
    pointWidth = plug["pointsPrimitiveGLPointWidth"]
    if pointWidth["enabled"].getValue() :
        info.append( "Width %0gpx" % pointWidth["value"].getValue() )
    return ", ".join( info )
def __curvesPrimitivesSummary( plug ) :
    # Summarise the "Curves Primitives" section for the NodeEditor.
    info = []
    glLines = plug["curvesPrimitiveUseGLLines"]
    if glLines["enabled"].getValue() :
        info.append( "Lines On" if glLines["value"].getValue() else "Lines Off" )
    lineWidth = plug["curvesPrimitiveGLLineWidth"]
    if lineWidth["enabled"].getValue() :
        info.append( "Width %0gpx" % lineWidth["value"].getValue() )
    ignoreBasis = plug["curvesPrimitiveIgnoreBasis"]
    if ignoreBasis["enabled"].getValue() :
        info.append( "Basis Ignored" if ignoreBasis["value"].getValue() else "Basis On" )
    return ", ".join( info )
Gaffer.Metadata.registerNode(
GafferScene.OpenGLAttributes,
"description",
"""
Applies attributes to modify the appearance of objects in
the viewport and in renders done by the OpenGLRender node.
""",
plugs = {
# Section summaries
"attributes" : [
"layout:section:Drawing:summary", __drawingSummary,
"layout:section:Points Primitives:summary", __pointsPrimitivesSummary,
"layout:section:Curves Primitives:summary", __curvesPrimitivesSummary,
],
# General drawing plugs
"attributes.primitiveSolid" : [
"description",
"""
Whether or not the object is rendered solid, in which
case the assigned GLSL shader will be used to perform
the shading.
""",
"layout:section", "Drawing",
"label", "Shaded",
],
"attributes.primitiveWireframe" : [
"description",
"""
Whether or not the object is rendered as a wireframe.
Use the primitiveWireframeColor and primitiveWireframeWidth
plugs for finer control of the wireframe appearance.
""",
"layout:section", "Drawing",
"label", "Wireframe",
],
"attributes.primitiveWireframeColor" : [
"description",
"""
The colour to use for the wireframe rendering. Only
meaningful if wireframe rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Wireframe Color",
],
"attributes.primitiveWireframeWidth" : [
"description",
"""
The width in pixels of the wireframe rendering. Only
meaningful if wireframe rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Wireframe Width",
],
"attributes.primitiveOutline" : [
"description",
"""
Whether or not an outline is drawn around the object.
Use the primitiveOutlineColor and primitiveOutlineWidth
plugs for finer control of the outline.
""",
"layout:section", "Drawing",
"label", "Outline",
],
"attributes.primitiveOutlineColor" : [
"description",
"""
The colour to use for the outline. Only
meaningful if outline rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Outline Color",
],
"attributes.primitiveOutlineWidth" : [
"description",
"""
The width in pixels of the outline. Only
meaningful if outline rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Outline Width",
],
"attributes.primitivePoint" : [
"description",
"""
Whether or not the individual points (vertices) of the
object are drawn. Use the primitivePointColor and primitivePointWidth
plugs for finer control of the point rendering.
""",
"layout:section", "Drawing",
"label", "Points",
],
"attributes.primitivePointColor" : [
"description",
"""
The colour to use for the point rendering. Only
meaningful if point rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Point Color",
],
"attributes.primitivePointWidth" : [
"description",
"""
The width in pixels of the points. Only
meaningful if point rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Point Width",
],
"attributes.primitiveBound" : [
"description",
"""
Whether or not the bounding box of the object is drawn.
This is in addition to any drawing of unexpanded bounding
boxes that the viewer performs. Use the primitiveBoundColor
plug to change the colour of the bounding box.
""",
"layout:section", "Drawing",
"label", "Bound",
],
"attributes.primitiveBoundColor" : [
"description",
"""
The colour to use for the bounding box rendering. Only
meaningful if bounding box rendering is turned on.
""",
"layout:section", "Drawing",
"label", "Bound Color",
],
# Points primitive drawing plugs
"attributes.pointsPrimitiveUseGLPoints" : [
"description",
"""
Points primitives have a render type (set by the PointsType
node) which allows them to be rendered as particles, disks,
spheres etc. This attribute overrides that type for OpenGL
only, allowing a much faster rendering as raw OpenGL points.
""",
"layout:section", "Points Primitives",
"label", "Use GL Points",
],
"attributes.pointsPrimitiveUseGLPoints.value" : [
"preset:For GL Points", "forGLPoints",
"preset:For Particles And Disks", "forParticlesAndDisks",
"preset:For All", "forAll",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"attributes.pointsPrimitiveGLPointWidth" : [
"description",
"""
The width in pixels of the GL points rendered when
the pointsPrimitiveUseGLPoints plug has overridden
the point type.
""",
"layout:section", "Points Primitives",
"label", "GL Point Width",
],
# Curves primitive drawing plugs
"attributes.curvesPrimitiveUseGLLines" : [
"description",
"""
Curves primitives are typically rendered as ribbons
and as such have an associated width in object space.
This attribute overrides that for OpenGL only, allowing
a much faster rendering as raw OpenGL lines.
""",
"layout:section", "Curves Primitives",
"label", "Use GL Lines",
],
"attributes.curvesPrimitiveGLLineWidth" : [
"description",
"""
The width in pixels of the GL lines rendered when
the curvesPrimitiveUseGLLines plug has overridden
the drawing to use lines.
""",
"layout:section", "Curves Primitives",
"label", "GL Line Width",
],
"attributes.curvesPrimitiveIgnoreBasis" : [
"description",
"""
Turns off interpolation for cubic curves, just
rendering straight lines between the vertices
instead.
""",
"layout:section", "Curves Primitives",
"label", "Ignore Basis",
],
"attributes.visualiserScale" : [
"description",
"""
Scales non-geometric visualisations in the viewport to make them
easier to work with.
""",
"layout:section", "Visualisers",
"label", "Scale",
],
"attributes.visualiserMaxTextureResolution" : [
"description",
"""
Visualisers that load textures will respect this setting to
limit their resolution.
""",
"layout:section", "Visualisers",
"label", "Max Texture Resolution",
],
"attributes.visualiserFrustum" : [
"description",
"""
Controls whether applicable locations draw a representation of
their projection or frustum.
""",
"layout:section", "Visualisers",
"label", "Frustum",
],
"attributes.visualiserFrustum.value" : [
"preset:Off", "off",
"preset:When Selected", "whenSelected",
"preset:On", "on",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget"
],
"attributes.lightDrawingMode" : [
"description",
"""
Controls how lights are presented in the Viewer.
""",
"layout:section", "Visualisers",
"label", "Light Drawing Mode",
],
"attributes.lightDrawingMode.value" : [
"preset:Wireframe", "wireframe",
"preset:Color", "color",
"preset:Texture", "texture",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget"
],
"attributes.lightFrustumScale" : [
"description",
"""
Allows light projections to be scaled to better suit the scene.
""",
"layout:section", "Visualisers",
],
}
)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import functools
import tempfile
import os
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.image import fake
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def get_fake_cache():
    """Return a canned network info cache for one NIC on a private network.

    The fixed IP 192.168.0.3 carries two floating IPs; an IPv6 subnet is
    appended when FLAGS.use_ipv6 is set.
    """
    def _ip(ip, fixed=True, floats=None):
        # Build a single IP entry; fixed IPs may carry floating IPs.
        entry = {'address': ip, 'type': 'fixed' if fixed else 'floating'}
        if fixed and floats:
            entry['floating_ips'] = [_ip(f, fixed=False) for f in floats]
        return entry

    info = [{'address': 'aa:bb:cc:dd:ee:ff',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'private',
                         'subnets': [{'cidr': '192.168.0.0/24',
                                      'ips': [_ip('192.168.0.3',
                                                  floats=['1.2.3.4',
                                                          '5.6.7.8']),
                                              _ip('192.168.0.4')]}]}}]
    if FLAGS.use_ipv6:
        info[0]['network']['subnets'].append(
            {'cidr': 'fe80:b33f::/64',
             'ips': [_ip('fe80:b33f::a8bb:ccff:fedd:eeff')]})
    return info
def get_instances_with_cached_ips(orig_func, *args, **kwargs):
    """Kludge the cache into instance(s) without having to create DB
    entries
    """
    result = orig_func(*args, **kwargs)
    # orig_func may return either a single instance dict or a list of them;
    # normalise to a list so both shapes get the fake cache attached.
    targets = result if isinstance(result, list) else [result]
    for instance in targets:
        instance['info_cache'] = {'network_info': get_fake_cache()}
    return result
class CloudTestCase(test.TestCase):
    def setUp(self):
        """Build a CloudController plus the nova services it depends on,
        with notifications, images, RPC and networking stubbed so each
        test runs against fake in-process backends."""
        super(CloudTestCase, self).setUp()
        self.flags(connection_type='fake',
                   stub_network=True)

        # Usage notifications are irrelevant to these tests; drop them.
        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(utils, 'usage_from_instance', dumb)
        # set up our cloud
        self.cloud = cloud.CloudController()

        # set up services
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.volume = self.start_service('volume')
        self.image_service = utils.import_object(FLAGS.image_service)

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        # Every image lookup resolves to a registered, runnable AMI.
        def fake_show(meh, context, id):
            return {'id': id,
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
    def _stub_instance_get_with_fixed_ips(self, func_name):
        """Wrap a compute_api getter so returned instances carry the fake
        network info cache (avoids creating real network DB entries)."""
        orig_func = getattr(self.cloud.compute_api, func_name)

        def fake_get(*args, **kwargs):
            return get_instances_with_cached_ips(orig_func, *args, **kwargs)
        self.stubs.Set(self.cloud.compute_api, func_name, fake_get)

    def _create_key(self, name):
        """Create a keypair for the test user, bypassing the public API."""
        # NOTE(vish): create depends on pool, so just call helper directly
        return cloud._gen_key(self.context, self.context.user_id, name)
    def test_describe_regions(self):
        """Makes sure describe regions runs without raising an exception"""
        result = self.cloud.describe_regions(self.context)
        self.assertEqual(len(result['regionInfo']), 1)
        # With two regions configured, both must be reported.
        self.flags(region_list=["one=test_host1", "two=test_host2"])
        result = self.cloud.describe_regions(self.context)
        self.assertEqual(len(result['regionInfo']), 2)

    def test_describe_addresses(self):
        """Makes sure describe addresses runs without raising an exception"""
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.cloud.allocate_address(self.context)
        self.cloud.describe_addresses(self.context)
        # Release and delete the floating IP so later tests start clean.
        self.cloud.release_address(self.context,
                                   public_ip=address)
        db.floating_ip_destroy(self.context, address)

    def test_allocate_address(self):
        """Allocating returns the pooled IP; an empty pool raises."""
        address = "10.10.10.10"
        allocate = self.cloud.allocate_address
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.assertEqual(allocate(self.context)['publicIp'], address)
        db.floating_ip_destroy(self.context, address)
        # The only floating IP is gone, so the pool is now exhausted.
        self.assertRaises(exception.NoMoreFloatingIps,
                          allocate,
                          self.context)

    def test_release_address(self):
        """Releasing an owned floating IP reports EC2-style success."""
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova',
                               'project_id': self.project_id})
        result = self.cloud.release_address(self.context, address)
        # EC2 responses encode success as the string 'true'.
        self.assertEqual(result.get('return', None), 'true')
    def test_associate_disassociate_address(self):
        """Verifies associate runs cleanly without raising an exception"""
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.cloud.allocate_address(self.context)
        # TODO(jkoelker) Probably need to query for instance_type_id and
        #                make sure we get a valid one
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'instance_type_id': 1})
        # Point every network at the running network service host so
        # fixed-IP allocation below is dispatched locally.
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.context,
                                                     instance_id=inst['id'],
                                                     instance_uuid='',
                                                     host=inst['host'],
                                                     vpn=None,
                                                     rxtx_factor=3,
                                                     project_id=project_id)
        fixed_ips = nw_info.fixed_ips()
        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
        self.cloud.associate_address(self.context,
                                     instance_id=ec2_id,
                                     public_ip=address)
        self.cloud.disassociate_address(self.context,
                                        public_ip=address)
        self.cloud.release_address(self.context,
                                   public_ip=address)
        # Tear down everything created above.
        self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'])
        db.instance_destroy(self.context, inst['id'])
        db.floating_ip_destroy(self.context, address)
    def test_describe_security_groups(self):
        """Makes sure describe_security_groups works and filters results."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        result = self.cloud.describe_security_groups(self.context)
        # NOTE(vish): should have the default group as well
        self.assertEqual(len(result['securityGroupInfo']), 2)
        # Filtering by name should return only the created group.
        result = self.cloud.describe_security_groups(self.context,
                      group_name=[sec['name']])
        self.assertEqual(len(result['securityGroupInfo']), 1)
        self.assertEqual(
                result['securityGroupInfo'][0]['groupName'],
                sec['name'])
        db.security_group_destroy(self.context, sec['id'])

    def test_describe_security_groups_by_id(self):
        """Filtering by group_id works for created and default groups."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        result = self.cloud.describe_security_groups(self.context,
                      group_id=[sec['id']])
        self.assertEqual(len(result['securityGroupInfo']), 1)
        self.assertEqual(
                result['securityGroupInfo'][0]['groupName'],
                sec['name'])
        # The implicit 'default' group must also be addressable by id.
        default = db.security_group_get_by_name(self.context,
                                                self.context.project_id,
                                                'default')
        result = self.cloud.describe_security_groups(self.context,
                      group_id=[default['id']])
        self.assertEqual(len(result['securityGroupInfo']), 1)
        self.assertEqual(
                result['securityGroupInfo'][0]['groupName'],
                'default')
        db.security_group_destroy(self.context, sec['id'])

    def test_create_delete_security_group(self):
        """Creating a group echoes its description; deletion succeeds."""
        descript = 'test description'
        create = self.cloud.create_security_group
        result = create(self.context, 'testgrp', descript)
        group_descript = result['securityGroupSet'][0]['groupDescription']
        self.assertEqual(descript, group_descript)
        delete = self.cloud.delete_security_group
        self.assertTrue(delete(self.context, 'testgrp'))

    def test_delete_security_group_by_id(self):
        """Groups can be deleted by numeric id instead of name."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        delete = self.cloud.delete_security_group
        self.assertTrue(delete(self.context, group_id=sec['id']))

    def test_delete_security_group_with_bad_name(self):
        """Deleting an unknown group name raises SecurityGroupNotFound."""
        delete = self.cloud.delete_security_group
        notfound = exception.SecurityGroupNotFound
        self.assertRaises(notfound, delete, self.context, 'badname')

    def test_delete_security_group_with_bad_group_id(self):
        """Deleting an unknown group id raises SecurityGroupNotFound."""
        delete = self.cloud.delete_security_group
        notfound = exception.SecurityGroupNotFound
        self.assertRaises(notfound, delete, self.context, group_id=999)

    def test_delete_security_group_no_params(self):
        """Deleting without a name or id is an EC2 API error."""
        delete = self.cloud.delete_security_group
        self.assertRaises(exception.EC2APIError, delete, self.context)
    def test_authorize_security_group_ingress(self):
        """A simple tcp port rule can be authorized by group name."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))

    def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
        """Rules given via the ip_permissions/ip_ranges structure work."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
                                      'ip_ranges':
                                         {'1': {'cidr_ip': u'0.0.0.0/0'},
                                          '2': {'cidr_ip': u'10.10.10.10/32'}},
                                      'ip_protocol': u'tcp'}]}
        self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))

    def test_authorize_security_group_fail_missing_source_group(self):
        """Referencing a non-existent source group fails the whole request."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
                                      'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
                                                    '2': {'cidr_ip': u'10.10.10.10/32'}},
                                      'groups': {'1': {'user_id': u'someuser',
                                                       'group_name': u'somegroup1'}},
                                      'ip_protocol': u'tcp'}]}
        self.assertRaises(exception.SecurityGroupNotFound, authz,
                          self.context, group_name=sec['name'], **kwargs)

    def test_authorize_security_group_ingress_ip_permissions_groups(self):
        """Rules that reference existing source groups are accepted."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        # Pre-create the referenced source groups under another project.
        sec = db.security_group_create(self.context,
                                       {'project_id': 'someuser',
                                        'name': 'somegroup1'})
        sec = db.security_group_create(self.context,
                                       {'project_id': 'someuser',
                                        'name': 'othergroup2'})
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
                                      'groups': {'1': {'user_id': u'someuser',
                                                       'group_name': u'somegroup1'},
                                                 '2': {'user_id': u'someuser',
                                                       'group_name': u'othergroup2'}},
                                      'ip_protocol': u'tcp'}]}
        self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))

    def test_describe_security_group_ingress_groups(self):
        """Authorized group-sourced rules show up in describe output."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec1 = db.security_group_create(self.context, kwargs)
        sec2 = db.security_group_create(self.context,
                                        {'project_id': 'someuser',
                                         'name': 'somegroup1'})
        sec3 = db.security_group_create(self.context,
                                        {'project_id': 'someuser',
                                         'name': 'othergroup2'})
        authz = self.cloud.authorize_security_group_ingress
        # The first permission omits protocol/ports, so nova expands it to
        # icmp/tcp/udp wildcard rules for the source group.
        kwargs = {'ip_permissions': [
                  {'groups': {'1': {'user_id': u'someuser',
                                    'group_name': u'somegroup1'}}},
                  {'ip_protocol': 'tcp',
                   'from_port': 80,
                   'to_port': 80,
                   'groups': {'1': {'user_id': u'someuser',
                                    'group_name': u'othergroup2'}}}]}
        self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
        describe = self.cloud.describe_security_groups
        groups = describe(self.context, group_name=['test'])
        self.assertEquals(len(groups['securityGroupInfo']), 1)
        actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
        self.assertEquals(len(actual_rules), 4)
        expected_rules = [{'fromPort': -1,
                           'groups': [{'groupName': 'somegroup1',
                                       'userId': 'someuser'}],
                           'ipProtocol': 'icmp',
                           'ipRanges': [],
                           'toPort': -1},
                          {'fromPort': 1,
                           'groups': [{'groupName': u'somegroup1',
                                       'userId': u'someuser'}],
                           'ipProtocol': 'tcp',
                           'ipRanges': [],
                           'toPort': 65535},
                          {'fromPort': 1,
                           'groups': [{'groupName': u'somegroup1',
                                       'userId': u'someuser'}],
                           'ipProtocol': 'udp',
                           'ipRanges': [],
                           'toPort': 65536},
                          {'fromPort': 80,
                           'groups': [{'groupName': u'othergroup2',
                                       'userId': u'someuser'}],
                           'ipProtocol': u'tcp',
                           'ipRanges': [],
                           'toPort': 80}]
        # Order is not guaranteed, so check membership rule by rule.
        for rule in expected_rules:
            self.assertTrue(rule in actual_rules)
        db.security_group_destroy(self.context, sec3['id'])
        db.security_group_destroy(self.context, sec2['id'])
        db.security_group_destroy(self.context, sec1['id'])
    def test_revoke_security_group_ingress(self):
        """A rule authorized by id can be revoked by group name."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        authz(self.context, group_id=sec['id'], **kwargs)
        revoke = self.cloud.revoke_security_group_ingress
        self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))

    def test_authorize_revoke_security_group_ingress_by_id(self):
        """Authorize and revoke both work when addressing the group by id."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        authz(self.context, group_id=sec['id'], **kwargs)
        revoke = self.cloud.revoke_security_group_ingress
        self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))

    def test_authorize_security_group_ingress_missing_protocol_params(self):
        """Authorizing without protocol/port parameters is an API error."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        authz = self.cloud.authorize_security_group_ingress
        self.assertRaises(exception.EC2APIError, authz, self.context, 'test')

    def test_authorize_security_group_ingress_missing_group_name_or_id(self):
        """Authorizing without a group name or id is an API error."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        authz = self.cloud.authorize_security_group_ingress
        self.assertRaises(exception.EC2APIError, authz, self.context, **kwargs)

    def test_authorize_security_group_ingress_already_exists(self):
        """Authorizing an identical rule twice is an API error."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        authz(self.context, group_name=sec['name'], **kwargs)
        self.assertRaises(exception.EC2APIError, authz, self.context,
                          group_name=sec['name'], **kwargs)

    def test_revoke_security_group_ingress_missing_group_name_or_id(self):
        """Revoking without a group name or id is an API error."""
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        revoke = self.cloud.revoke_security_group_ingress
        self.assertRaises(exception.EC2APIError, revoke,
                          self.context, **kwargs)
    def test_describe_volumes(self):
        """Makes sure describe_volumes works and filters results."""
        vol1 = db.volume_create(self.context, {})
        vol2 = db.volume_create(self.context, {})
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 2)
        # Filtering by EC2 volume id should return only that volume.
        volume_id = ec2utils.id_to_ec2_vol_id(vol2['id'])
        result = self.cloud.describe_volumes(self.context,
                                             volume_id=[volume_id])
        self.assertEqual(len(result['volumeSet']), 1)
        self.assertEqual(
                ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
                vol2['id'])
        db.volume_destroy(self.context, vol1['id'])
        db.volume_destroy(self.context, vol2['id'])

    def test_create_volume_in_availability_zone(self):
        """Makes sure create_volume works when we specify an availability
        zone
        """
        availability_zone = 'zone1:host1'

        result = self.cloud.create_volume(self.context,
                                          size=1,
                                          availability_zone=availability_zone)
        volume_id = result['volumeId']
        availabilityZone = result['availabilityZone']
        self.assertEqual(availabilityZone, availability_zone)
        # The created volume must be visible with the requested zone.
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 1)
        self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
        self.assertEqual(result['volumeSet'][0]['availabilityZone'],
                         availabilityZone)

        db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))

    def test_create_volume_from_snapshot(self):
        """Makes sure create_volume works when we specify a snapshot."""
        vol = db.volume_create(self.context, {'size': 1})
        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
                                                 'volume_size': vol['size'],
                                                 'status': "available"})
        snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id'])

        result = self.cloud.create_volume(self.context,
                                          snapshot_id=snapshot_id)
        volume_id = result['volumeId']
        # Both the source volume and the new one should be listed.
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 2)
        self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)

        db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
        db.snapshot_destroy(self.context, snap['id'])
        db.volume_destroy(self.context, vol['id'])
def test_describe_availability_zones(self):
    """Makes sure describe_availability_zones works and filters results."""
    service1 = db.service_create(self.context, {'host': 'host1_zones',
                                                'binary': "nova-compute",
                                                'topic': 'compute',
                                                'report_count': 0,
                                                'availability_zone': "zone1"})
    service2 = db.service_create(self.context, {'host': 'host2_zones',
                                                'binary': "nova-compute",
                                                'topic': 'compute',
                                                'report_count': 0,
                                                'availability_zone': "zone2"})
    result = self.cloud.describe_availability_zones(self.context)
    # Expects 3 entries: the two zones created above plus, presumably,
    # a zone contributed by the test fixture's own services -- TODO
    # confirm where the third comes from.
    self.assertEqual(len(result['availabilityZoneInfo']), 3)
    db.service_destroy(self.context, service1['id'])
    db.service_destroy(self.context, service2['id'])
def test_describe_snapshots(self):
    """Makes sure describe_snapshots works and filters results."""
    vol = db.volume_create(self.context, {})
    snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
    snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
    result = self.cloud.describe_snapshots(self.context)
    self.assertEqual(len(result['snapshotSet']), 2)
    # Filtering by a single snapshot id should narrow the set to one.
    snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id'])
    result = self.cloud.describe_snapshots(self.context,
                                           snapshot_id=[snapshot_id])
    self.assertEqual(len(result['snapshotSet']), 1)
    self.assertEqual(
        ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
        snap2['id'])
    db.snapshot_destroy(self.context, snap1['id'])
    db.snapshot_destroy(self.context, snap2['id'])
    db.volume_destroy(self.context, vol['id'])
def test_create_snapshot(self):
    """Makes sure create_snapshot works."""
    # Snapshots may only be taken from 'available' volumes.
    vol = db.volume_create(self.context,
                           {'status': "available", 'size': 0})
    volume_id = ec2utils.id_to_ec2_vol_id(vol['id'])
    result = self.cloud.create_snapshot(self.context,
                                        volume_id=volume_id)
    snapshot_id = result['snapshotId']
    result = self.cloud.describe_snapshots(self.context)
    # The newly created snapshot must be the only one listed.
    self.assertEqual(len(result['snapshotSet']), 1)
    self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
    db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
    db.volume_destroy(self.context, vol['id'])
def test_delete_snapshot(self):
    """Makes sure delete_snapshot works."""
    vol = db.volume_create(self.context,
                           {'status': "available", 'size': 0})
    snap = db.snapshot_create(self.context,
                              {'volume_id': vol['id'],
                               'status': "available",
                               'volume_size': 0})
    snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id'])
    # Deleting an 'available' snapshot should report success.
    result = self.cloud.delete_snapshot(self.context,
                                        snapshot_id=snapshot_id)
    self.assertTrue(result)
    db.volume_destroy(self.context, vol['id'])
def test_describe_instances(self):
    """Makes sure describe_instances works and filters results."""
    self.flags(use_ipv6=True)
    # Stub fixed-ip lookups so the address fields are deterministic.
    self._stub_instance_get_with_fixed_ips('get_all')
    self._stub_instance_get_with_fixed_ips('get')
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': image_uuid,
                                              'instance_type_id': 1,
                                              'host': 'host1',
                                              'hostname': 'server-1234',
                                              'vm_state': 'active'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': image_uuid,
                                              'instance_type_id': 1,
                                              'host': 'host2',
                                              'hostname': 'server-4321',
                                              'vm_state': 'active'})
    # Compute services map each host to an availability zone.
    comp1 = db.service_create(self.context, {'host': 'host1',
                                             'availability_zone': 'zone1',
                                             'topic': "compute"})
    comp2 = db.service_create(self.context, {'host': 'host2',
                                             'availability_zone': 'zone2',
                                             'topic': "compute"})
    result = self.cloud.describe_instances(self.context)
    result = result['reservationSet'][0]
    self.assertEqual(len(result['instancesSet']), 2)
    # Now try filtering.
    instance_id = ec2utils.id_to_ec2_id(inst2['id'])
    result = self.cloud.describe_instances(self.context,
                                           instance_id=[instance_id])
    result = result['reservationSet'][0]
    self.assertEqual(len(result['instancesSet']), 1)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['instanceId'], instance_id)
    # inst2 runs on host2, which the service above placed in zone2.
    self.assertEqual(instance['placement']['availabilityZone'],
                     'zone2')
    self.assertEqual(instance['publicDnsName'], '1.2.3.4')
    self.assertEqual(instance['ipAddress'], '1.2.3.4')
    self.assertEqual(instance['dnsName'], '1.2.3.4')
    self.assertEqual(instance['privateDnsName'], 'server-4321')
    self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
    # With use_ipv6=True a v6 DNS name must be present.
    self.assertEqual(instance['dnsNameV6'],
                     'fe80:b33f::a8bb:ccff:fedd:eeff')
    db.instance_destroy(self.context, inst1['id'])
    db.instance_destroy(self.context, inst2['id'])
    db.service_destroy(self.context, comp1['id'])
    db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
    """Makes sure describe_instances for instanceState works."""

    def test_instance_state(expected_code, expected_name,
                            power_state_, vm_state_, values=None):
        # Helper: create an instance in the given power/vm state and
        # verify the instanceState that describe_instances reports.
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        values = values or {}
        values.update({'image_ref': image_uuid, 'instance_type_id': 1,
                       'power_state': power_state_, 'vm_state': vm_state_})
        inst = db.instance_create(self.context, values)
        instance_id = ec2utils.id_to_ec2_id(inst['id'])
        result = self.cloud.describe_instances(self.context,
                                               instance_id=[instance_id])
        result = result['reservationSet'][0]
        result = result['instancesSet'][0]['instanceState']
        name = result['name']
        code = result['code']
        self.assertEqual(code, expected_code)
        self.assertEqual(name, expected_name)
        db.instance_destroy(self.context, inst['id'])

    test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
                        power_state.RUNNING, vm_states.ACTIVE)
    test_instance_state(inst_state.TERMINATED_CODE, inst_state.SHUTOFF,
                        power_state.NOSTATE, vm_states.SHUTOFF)
    # shutdown_terminate=False maps the SHUTOFF vm_state to 'stopped'.
    test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
                        power_state.NOSTATE, vm_states.SHUTOFF,
                        {'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
    """Makes sure describe_instances w/ no ipv6 works."""
    self.flags(use_ipv6=False)
    # Stub fixed-ip lookups so the address fields are deterministic.
    self._stub_instance_get_with_fixed_ips('get_all')
    self._stub_instance_get_with_fixed_ips('get')
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': image_uuid,
                                              'instance_type_id': 1,
                                              'hostname': 'server-1234',
                                              'vm_state': 'active'})
    comp1 = db.service_create(self.context, {'host': 'host1',
                                             'topic': "compute"})
    result = self.cloud.describe_instances(self.context)
    result = result['reservationSet'][0]
    self.assertEqual(len(result['instancesSet']), 1)
    instance = result['instancesSet'][0]
    instance_id = ec2utils.id_to_ec2_id(inst1['id'])
    self.assertEqual(instance['instanceId'], instance_id)
    self.assertEqual(instance['publicDnsName'], '1.2.3.4')
    self.assertEqual(instance['ipAddress'], '1.2.3.4')
    self.assertEqual(instance['dnsName'], '1.2.3.4')
    self.assertEqual(instance['privateDnsName'], 'server-1234')
    self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
    # With use_ipv6=False no v6 DNS field may be present at all.
    self.assertNotIn('dnsNameV6', instance)
    db.instance_destroy(self.context, inst1['id'])
    db.service_destroy(self.context, comp1['id'])
def test_describe_instances_deleted(self):
    """Deleted instances must not show up in describe_instances."""
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    args1 = {'reservation_id': 'a',
             'image_ref': image_uuid,
             'instance_type_id': 1,
             'host': 'host1',
             'vm_state': 'active'}
    inst1 = db.instance_create(self.context, args1)
    args2 = {'reservation_id': 'b',
             'image_ref': image_uuid,
             'instance_type_id': 1,
             'host': 'host1',
             'vm_state': 'active'}
    inst2 = db.instance_create(self.context, args2)
    # Delete the first instance; only the second should be reported.
    db.instance_destroy(self.context, inst1.id)
    result = self.cloud.describe_instances(self.context)
    self.assertEqual(len(result['reservationSet']), 1)
    result1 = result['reservationSet'][0]['instancesSet']
    self.assertEqual(result1[0]['instanceId'],
                     ec2utils.id_to_ec2_id(inst2.id))
def _block_device_mapping_create(self, instance_id, mappings):
    """Create bdm rows for *instance_id* from *mappings*.

    For every mapping that carries a 'volume_id', also create the
    corresponding volume and attach it to the instance.  Returns the
    list of created volumes so callers can clean them up.
    """
    volumes = []
    for bdm in mappings:
        db.block_device_mapping_create(self.context, bdm)
        if 'volume_id' in bdm:
            values = {'id': bdm['volume_id']}
            # Copy the optional bdm fields the volume row mirrors.
            for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
                                     ('snapshot_size', 'volume_size'),
                                     ('delete_on_termination',
                                      'delete_on_termination')]:
                if bdm_key in bdm:
                    values[vol_key] = bdm[bdm_key]
            vol = db.volume_create(self.context, values)
            db.volume_attached(self.context, vol['id'],
                               instance_id, bdm['device_name'])
            volumes.append(vol)
    return volumes
def _setUpBlockDeviceMapping(self):
    """Create two instances plus a battery of block device mappings.

    inst1 (root /dev/sdb1) gets mappings covering the snapshot,
    volume, delete_on_termination, no_device and virtual-name cases;
    inst2 (root /dev/sdc1) gets none.  Returns (inst1, inst2, volumes)
    for use with _tearDownBlockDeviceMapping.
    """
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    inst1 = db.instance_create(self.context,
                               {'image_ref': image_uuid,
                                'instance_type_id': 1,
                                'root_device_name': '/dev/sdb1'})
    inst2 = db.instance_create(self.context,
                               {'image_ref': image_uuid,
                                'instance_type_id': 1,
                                'root_device_name': '/dev/sdc1'})
    instance_id = inst1['id']
    mappings0 = [
        {'instance_id': instance_id,
         'device_name': '/dev/sdb1',
         'snapshot_id': '1',
         'volume_id': '2'},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb2',
         'volume_id': '3',
         'volume_size': 1},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb3',
         'delete_on_termination': True,
         'snapshot_id': '4',
         'volume_id': '5'},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb4',
         'delete_on_termination': False,
         'snapshot_id': '6',
         'volume_id': '7'},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb5',
         'snapshot_id': '8',
         'volume_id': '9',
         'volume_size': 0},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb6',
         'snapshot_id': '10',
         'volume_id': '11',
         'volume_size': 1},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb7',
         'no_device': True},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb8',
         'virtual_name': 'swap'},
        {'instance_id': instance_id,
         'device_name': '/dev/sdb9',
         'virtual_name': 'ephemeral3'}]
    volumes = self._block_device_mapping_create(instance_id, mappings0)
    return (inst1, inst2, volumes)
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
    """Remove the fixtures created by _setUpBlockDeviceMapping.

    Destroys the volumes, every block device mapping attached to
    either instance, and finally the instances themselves.
    """
    for vol in volumes:
        db.volume_destroy(self.context, vol['id'])
    # Renamed loop variable from 'id' so the builtin id() is not shadowed.
    for inst_id in (inst1['id'], inst2['id']):
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, inst_id):
            db.block_device_mapping_destroy(self.context, bdm['id'])
    db.instance_destroy(self.context, inst2['id'])
    db.instance_destroy(self.context, inst1['id'])
# Expected describe_instances output for inst1: EBS-backed root device.
_expected_instance_bdm1 = {
    'instanceId': 'i-00000001',
    'rootDeviceName': '/dev/sdb1',
    'rootDeviceType': 'ebs'}

# Expected blockDeviceMapping entries for inst1 (volumes 2-11 created by
# _setUpBlockDeviceMapping); only /dev/sdb3 has deleteOnTermination set.
_expected_block_device_mapping0 = [
    {'deviceName': '/dev/sdb1',
     'ebs': {'status': 'in-use',
             'deleteOnTermination': False,
             'volumeId': 2,
             }},
    {'deviceName': '/dev/sdb2',
     'ebs': {'status': 'in-use',
             'deleteOnTermination': False,
             'volumeId': 3,
             }},
    {'deviceName': '/dev/sdb3',
     'ebs': {'status': 'in-use',
             'deleteOnTermination': True,
             'volumeId': 5,
             }},
    {'deviceName': '/dev/sdb4',
     'ebs': {'status': 'in-use',
             'deleteOnTermination': False,
             'volumeId': 7,
             }},
    {'deviceName': '/dev/sdb5',
     'ebs': {'status': 'in-use',
             'deleteOnTermination': False,
             'volumeId': 9,
             }},
    {'deviceName': '/dev/sdb6',
     'ebs': {'status': 'in-use',
             'deleteOnTermination': False,
             'volumeId': 11, }}]
# NOTE(yamahata): swap/ephemeral device case isn't supported yet.

# Expected output for inst2: instance-store root, no EBS mappings.
_expected_instance_bdm2 = {
    'instanceId': 'i-00000002',
    'rootDeviceName': '/dev/sdc1',
    'rootDeviceType': 'instance-store'}
def test_format_instance_bdm(self):
    """_format_instance_bdm should fill in device type and mappings."""
    (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
    # inst1: EBS root -> mappings must be reported.
    result = {}
    self.cloud._format_instance_bdm(self.context, inst1['id'], '/dev/sdb1',
                                    result)
    self.assertSubDictMatch(
        {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
        result)
    self._assertEqualBlockDeviceMapping(
        self._expected_block_device_mapping0, result['blockDeviceMapping'])
    # inst2: instance-store root.
    result = {}
    self.cloud._format_instance_bdm(self.context, inst2['id'], '/dev/sdc1',
                                    result)
    self.assertSubDictMatch(
        {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
        result)
    self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def _assertInstance(self, instance_id):
    """Describe a single instance by id and return its instance dict."""
    ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
    described = self.cloud.describe_instances(self.context,
                                              instance_id=[ec2_instance_id])
    reservation = described['reservationSet'][0]
    self.assertEqual(len(reservation['instancesSet']), 1)
    instance = reservation['instancesSet'][0]
    self.assertEqual(instance['instanceId'], ec2_instance_id)
    return instance
def _assertEqualBlockDeviceMapping(self, expected, result):
    """Assert two block-device-mapping lists match, ignoring order.

    Each expected entry is paired with a result entry by deviceName and
    then compared with assertSubDictMatch.
    """
    self.assertEqual(len(expected), len(result))
    for expected_entry in expected:
        matches = [entry for entry in result
                   if entry['deviceName'] == expected_entry['deviceName']]
        # Every expected device must appear in the result.
        self.assertTrue(matches)
        self.assertSubDictMatch(expected_entry, matches[0])
def test_describe_instances_bdm(self):
    """Make sure describe_instances works with root_device_name and
    block device mappings
    """
    (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
    # inst1: EBS root plus the full set of expected mappings.
    result = self._assertInstance(inst1['id'])
    self.assertSubDictMatch(self._expected_instance_bdm1, result)
    self._assertEqualBlockDeviceMapping(
        self._expected_block_device_mapping0, result['blockDeviceMapping'])
    # inst2: instance-store root.
    result = self._assertInstance(inst2['id'])
    self.assertSubDictMatch(self._expected_instance_bdm2, result)
    self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def test_describe_images(self):
    """describe_images should list all images and filter by image id."""
    describe_images = self.cloud.describe_images

    def fake_detail(meh, context, **kwargs):
        # Single machine image backing the 'list all' case.
        return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'container_format': 'ami',
                 'properties': {
                     'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                     'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                     'type': 'machine'}}]

    def fake_show_none(meh, context, id):
        raise exception.ImageNotFound(image_id='bad_image_id')

    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
    # list all
    result1 = describe_images(self.context)
    result1 = result1['imagesSet'][0]
    self.assertEqual(result1['imageId'], 'ami-00000001')
    # provided a valid image_id
    result2 = describe_images(self.context, ['ami-00000001'])
    self.assertEqual(1, len(result2['imagesSet']))
    # provide more than 1 valid image_id
    result3 = describe_images(self.context, ['ami-00000001',
                                             'ami-00000002'])
    self.assertEqual(2, len(result3['imagesSet']))
    # provide an non-existing image_id
    self.stubs.UnsetAll()
    self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
    self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
    self.assertRaises(exception.ImageNotFound, describe_images,
                      self.context, ['ami-fake'])
def assertDictListUnorderedMatch(self, L1, L2, key):
    """Assert two lists of dicts match regardless of order, pairing
    elements by *key*.

    NOTE(review): if no element of L2 shares d1[key], only the length
    check above fires -- disjoint key sets can pass silently; confirm
    this is the intended looseness.
    """
    self.assertEqual(len(L1), len(L2))
    for d1 in L1:
        self.assertTrue(key in d1)
        for d2 in L2:
            self.assertTrue(key in d2)
            if d1[key] == d2[key]:
                self.assertDictMatch(d1, d2)
def _setUpImageSet(self, create_volumes_and_snapshots=False):
    """Stub the fake image service with two images for bdm tests.

    image1 ('ami-00000001') carries a full set of mappings and block
    device mappings; image2 ('ami-00000002') has an EBS root device.
    When create_volumes_and_snapshots is True, the volumes/snapshots
    referenced by image1's bdm are also created in the DB; returns
    (volume_ids, snapshot_ids) for cleanup.
    """
    mappings1 = [
        {'device': '/dev/sda1', 'virtual': 'root'},

        {'device': 'sdb0', 'virtual': 'ephemeral0'},
        {'device': 'sdb1', 'virtual': 'ephemeral1'},
        {'device': 'sdb2', 'virtual': 'ephemeral2'},
        {'device': 'sdb3', 'virtual': 'ephemeral3'},
        {'device': 'sdb4', 'virtual': 'ephemeral4'},

        {'device': 'sdc0', 'virtual': 'swap'},
        {'device': 'sdc1', 'virtual': 'swap'},
        {'device': 'sdc2', 'virtual': 'swap'},
        {'device': 'sdc3', 'virtual': 'swap'},
        {'device': 'sdc4', 'virtual': 'swap'}]
    # NOTE: 01234567 is a Python 2 octal literal (== 342391 == 0x53977),
    # which is why the expected ids read 'snap-00053977'/'vol-00053977';
    # 12345678 is decimal (== 0xbc614e -> 'snap-00bc614e').
    block_device_mapping1 = [
        {'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
        {'device_name': '/dev/sdb2', 'volume_id': 01234567},
        {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
        {'device_name': '/dev/sdb4', 'no_device': True},

        {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
        {'device_name': '/dev/sdc2', 'volume_id': 12345678},
        {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
        {'device_name': '/dev/sdc4', 'no_device': True}]
    image1 = {
        'id': 'cedef40a-ed67-4d10-800e-17455edce175',
        'properties': {
            'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'type': 'machine',
            'image_state': 'available',
            'mappings': mappings1,
            'block_device_mapping': block_device_mapping1,
        }
    }

    mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
    block_device_mapping2 = [{'device_name': '/dev/sdb1',
                              'snapshot_id': 01234567}]
    image2 = {
        'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
        'properties': {
            'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            'type': 'machine',
            'root_device_name': '/dev/sdb1',
            'mappings': mappings2,
            'block_device_mapping': block_device_mapping2}}

    def fake_show(meh, context, image_id):
        # Deep copies so tests cannot mutate the shared fixtures.
        _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
        for i in _images:
            if str(i['id']) == str(image_id):
                return i
        raise exception.ImageNotFound(image_id=image_id)

    def fake_detail(meh, context):
        return [copy.deepcopy(image1), copy.deepcopy(image2)]

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
    volumes = []
    snapshots = []
    if create_volumes_and_snapshots:
        for bdm in block_device_mapping1:
            if 'volume_id' in bdm:
                vol = self._volume_create(bdm['volume_id'])
                volumes.append(vol['id'])
            if 'snapshot_id' in bdm:
                snap = db.snapshot_create(self.context,
                                          {'id': bdm['snapshot_id'],
                                           'volume_id': 76543210,
                                           'status': "available",
                                           'volume_size': 1})
                snapshots.append(snap['id'])
    return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
    """Check the single image in *result* for root device fields.

    Returns the image dict so callers can inspect blockDeviceMapping.
    """
    image_set = result['imagesSet']
    self.assertEqual(1, len(image_set))
    image = image_set[0]
    self.assertTrue('rootDeviceType' in image)
    self.assertEqual(image['rootDeviceType'], root_device_type)
    self.assertTrue('rootDeviceName' in image)
    self.assertEqual(image['rootDeviceName'], root_device_name)
    self.assertTrue('blockDeviceMapping' in image)
    return image
# Root device and expected blockDeviceMapping for image1 (_setUpImageSet).
_expected_root_device_name1 = '/dev/sda1'

# NOTE(yamahata): noDevice doesn't make sense when returning mapping
#                 It makes sense only when user overriding existing
#                 mapping.
_expected_bdms1 = [
    {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
    {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
                                        'snap-00053977'}},
    {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
                                        'vol-00053977'}},
    {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
    # {'deviceName': '/dev/sdb4', 'noDevice': True},

    {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
    {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
                                        'snap-00bc614e'}},
    {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
                                        'vol-00bc614e'}},
    {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
    # {'deviceName': '/dev/sdc4', 'noDevice': True}
    ]

# Root device and expected blockDeviceMapping for image2 (EBS root).
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
                    'ebs': {'snapshotId': 'snap-00053977'}}]

# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
# rootDeviceType
# rootDeviceName
# blockDeviceMapping
#  deviceName
#  virtualName
#  ebs
#    snapshotId
#    volumeSize
#    deleteOnTermination
#  noDevice
def test_describe_image_mapping(self):
    """test for rootDeviceName and blockDeviceMapping"""
    describe_images = self.cloud.describe_images
    self._setUpImageSet()
    # image1: instance-store root with the full mapping battery.
    result = describe_images(self.context, ['ami-00000001'])
    result = self._assertImageSet(result, 'instance-store',
                                  self._expected_root_device_name1)
    self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                      self._expected_bdms1, 'deviceName')
    # image2: EBS root.
    result = describe_images(self.context, ['ami-00000002'])
    result = self._assertImageSet(result, 'ebs',
                                  self._expected_root_device_name2)
    self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                      self._expected_bdms2, 'deviceName')
def test_describe_image_attribute(self):
    """launchPermission for a public image should be [{'group': 'all'}]."""
    describe_image_attribute = self.cloud.describe_image_attribute

    def fake_show(meh, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'container_format': 'ami',
                'is_public': True}

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
    result = describe_image_attribute(self.context, 'ami-00000001',
                                      'launchPermission')
    self.assertEqual([{'group': 'all'}], result['launchPermission'])
def test_describe_image_attribute_root_device_name(self):
    """rootDeviceName attribute should match the per-image fixtures."""
    describe_image_attribute = self.cloud.describe_image_attribute
    self._setUpImageSet()
    result = describe_image_attribute(self.context, 'ami-00000001',
                                      'rootDeviceName')
    self.assertEqual(result['rootDeviceName'],
                     self._expected_root_device_name1)
    result = describe_image_attribute(self.context, 'ami-00000002',
                                      'rootDeviceName')
    self.assertEqual(result['rootDeviceName'],
                     self._expected_root_device_name2)
def test_describe_image_attribute_block_device_mapping(self):
    """blockDeviceMapping attribute should match the per-image fixtures."""
    describe_image_attribute = self.cloud.describe_image_attribute
    self._setUpImageSet()
    result = describe_image_attribute(self.context, 'ami-00000001',
                                      'blockDeviceMapping')
    self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                      self._expected_bdms1, 'deviceName')
    result = describe_image_attribute(self.context, 'ami-00000002',
                                      'blockDeviceMapping')
    self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                      self._expected_bdms2, 'deviceName')
def test_modify_image_attribute(self):
    """Adding the 'all' launch permission should make the image public."""
    modify_image_attribute = self.cloud.modify_image_attribute
    fake_metadata = {
        'id': 1,
        'container_format': 'ami',
        'properties': {
            'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'type': 'machine'},
        'is_public': False}

    def fake_show(meh, context, id):
        return fake_metadata

    def fake_update(meh, context, image_id, metadata, data=None):
        # Mutates the shared fake_metadata dict so the final assertion
        # can observe the is_public change.
        fake_metadata.update(metadata)
        return fake_metadata

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
    self.stubs.Set(fake._FakeImageService, 'update', fake_update)
    result = modify_image_attribute(self.context, 'ami-00000001',
                                    'launchPermission', 'add',
                                    user_group=['all'])
    self.assertEqual(True, result['is_public'])
def test_deregister_image(self):
    """deregister_image succeeds for a valid id and raises otherwise."""
    deregister_image = self.cloud.deregister_image

    def fake_delete(self, context, id):
        return None

    self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
    # valid image
    result = deregister_image(self.context, 'ami-00000001')
    self.assertEqual(result['imageId'], 'ami-00000001')
    # invalid image
    self.stubs.UnsetAll()

    def fake_detail_empty(self, context):
        # Empty catalogue -> lookup of any image must fail.
        return []

    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
    self.assertRaises(exception.ImageNotFound, deregister_image,
                      self.context, 'ami-bad001')
def test_deregister_image_wrong_container_type(self):
    """An 'aki-' (kernel) id for an ami image must raise NotFound."""
    deregister_image = self.cloud.deregister_image

    def fake_delete(self, context, id):
        return None

    self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
    self.assertRaises(exception.NotFound, deregister_image, self.context,
                      'aki-00000001')
def _run_instance(self, **kwargs):
    """Run a single instance and return its EC2-style instance id."""
    reservation = self.cloud.run_instances(self.context, **kwargs)
    return reservation['instancesSet'][0]['instanceId']
def test_console_output(self):
    """Console output of a running instance should be retrievable."""
    instance_id = self._run_instance(
        image_id='ami-1',
        instance_type=FLAGS.default_instance_type,
        max_count=1)
    output = self.cloud.get_console_output(context=self.context,
                                           instance_id=[instance_id])
    # The API returns base64-encoded text; decode before comparing.
    self.assertEquals(base64.b64decode(output['output']),
                      'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
    # TODO(soren): We need this until we can stop polling in the rpc code
    # for unit tests.
    # (Dropped the unused 'rv =' binding; the call is made for cleanup.)
    self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
    """A generated private key must correspond to the stored public key."""
    result = self._create_key('test')
    private_key = result['private_key']
    expected = db.key_pair_get(self.context,
                               self.context.user_id,
                               'test')['public_key']
    (fd, fname) = tempfile.mkstemp()
    os.write(fd, private_key)
    # Close the descriptor: it was previously leaked, and closing also
    # guarantees the data is flushed before ssh-keygen reads the file.
    os.close(fd)
    public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
    os.unlink(fname)
    # assert key fields are equal
    self.assertEqual(''.join(public_key.split("\n")[2:-2]),
                     expected.split(" ")[1].strip())
def test_describe_key_pairs(self):
    """describe_key_pairs should list every created keypair."""
    self._create_key('test1')
    self._create_key('test2')
    result = self.cloud.describe_key_pairs(self.context)
    key_names = [key['keyName'] for key in result["keySet"]]
    self.assertTrue('test1' in key_names)
    self.assertTrue('test2' in key_names)
def test_import_key_pair(self):
    """Importing a public key should store both key and fingerprint."""
    pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
    f = open(pubkey_path + '/dummy.pub', 'r')
    dummypub = f.readline().rstrip()
    # Bug fix: this read 'f.close' (no parentheses), which referenced the
    # method without calling it, so the file handle was never closed.
    f.close()
    f = open(pubkey_path + '/dummy.fingerprint', 'r')
    dummyfprint = f.readline().rstrip()
    f.close()
    key_name = 'testimportkey'
    public_key_material = base64.b64encode(dummypub)
    result = self.cloud.import_key_pair(self.context,
                                        key_name,
                                        public_key_material)
    self.assertEqual(result['keyName'], key_name)
    self.assertEqual(result['keyFingerprint'], dummyfprint)
    keydata = db.key_pair_get(self.context,
                              self.context.user_id,
                              key_name)
    self.assertEqual(dummypub, keydata['public_key'])
    self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_delete_key_pair(self):
    """Deleting a freshly created keypair should not raise."""
    key_name = 'test'
    self._create_key(key_name)
    self.cloud.delete_key_pair(self.context, key_name)
def test_run_instances(self):
    """A basic run_instances call should report a running instance."""
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show(self, context, id):
        # Minimal active machine image.
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'container_format': 'ami',
                'status': 'active'}

    self.stubs.UnsetAll()
    self.stubs.Set(fake._FakeImageService, 'show', fake_show)

    def dumb(*args, **kwargs):
        pass

    # Silence usage notifications during the run.
    self.stubs.Set(utils, 'usage_from_instance', dumb)
    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['imageId'], 'ami-00000001')
    self.assertEqual(instance['instanceId'], 'i-00000001')
    self.assertEqual(instance['instanceState']['name'], 'running')
    self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_availability_zone(self):
    """run_instances must forward the requested availability zone."""
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1,
              'placement': {'availability_zone': 'fake'},
              }
    run_instances = self.cloud.run_instances

    def fake_show(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'container_format': 'ami',
                'status': 'active'}

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)

    def fake_format(*args, **kwargs):
        pass

    self.stubs.Set(self.cloud, '_format_run_instances', fake_format)

    def fake_create(*args, **kwargs):
        # The real assertion of this test lives here: compute_api.create
        # must receive the zone that was passed in 'placement'.
        self.assertEqual(kwargs['availability_zone'], 'fake')
        return ({'id': 'fake-instance'}, 'fake-res-id')

    self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
    # NOTE(vish) the assert for this call is in the fake_create method.
    run_instances(self.context, **kwargs)
def test_run_instances_image_state_none(self):
    """An image exposing neither image_state nor status is rejected."""
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show_no_state(self, context, id):
        # Note: deliberately omits 'image_state' and 'status'.
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'}, 'container_format': 'ami'}

    self.stubs.UnsetAll()
    self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
    self.assertRaises(exception.EC2APIError, run_instances,
                      self.context, **kwargs)
def test_run_instances_image_state_invalid(self):
    """An image still in the 'decrypting' state must be rejected."""
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show_decrypt(self, context, id):
        # image_state 'decrypting' != 'available' -> not launchable.
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'container_format': 'ami',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine', 'image_state': 'decrypting'}}

    self.stubs.UnsetAll()
    self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
    self.assertRaises(exception.EC2APIError, run_instances,
                      self.context, **kwargs)
def test_run_instances_image_status_active(self):
    """An image with status 'active' should launch successfully."""
    kwargs = {'image_id': FLAGS.default_image,
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show_stat_active(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'container_format': 'ami',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'status': 'active'}

    self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
    result = run_instances(self.context, **kwargs)
    self.assertEqual(len(result['instancesSet']), 1)
def _restart_compute_service(self, periodic_interval=None):
    """restart compute service. NOTE: fake driver forgets all instances."""
    self.compute.kill()
    service_kwargs = {}
    # Truthiness check preserved: 0/None both mean "use the default".
    if periodic_interval:
        service_kwargs['periodic_interval'] = periodic_interval
    self.compute = self.start_service('compute', **service_kwargs)
def test_stop_start_instance(self):
    """Makes sure stop/start instance works"""
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)
    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1, }
    instance_id = self._run_instance(**kwargs)
    # a running instance can't be started.
    self.assertRaises(exception.InstanceInvalidState,
                      self.cloud.start_instances,
                      self.context, [instance_id])
    # Full stop -> start -> stop cycle.
    result = self.cloud.stop_instances(self.context, [instance_id])
    self.assertTrue(result)
    result = self.cloud.start_instances(self.context, [instance_id])
    self.assertTrue(result)
    result = self.cloud.stop_instances(self.context, [instance_id])
    self.assertTrue(result)
    # Terminating from 'stopped' reports previousState code 80.
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 80,
                                       'name': 'stopped'},
                     'shutdownState': {'code': 48,
                                       'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
def test_start_instances(self):
    """Starting a stopped instance should return it to 'running'."""
    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1, }
    instance_id = self._run_instance(**kwargs)
    result = self.cloud.stop_instances(self.context, [instance_id])
    self.assertTrue(result)
    result = self.cloud.start_instances(self.context, [instance_id])
    self.assertTrue(result)
    # Instance was started again, so previousState is 'running' (16).
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'shutdownState': {'code': 48,
                                       'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_stop_instances(self):
    """A stopped instance should terminate from previousState 'stopped'."""
    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1, }
    instance_id = self._run_instance(**kwargs)
    result = self.cloud.stop_instances(self.context, [instance_id])
    self.assertTrue(result)
    # previousState code 80 == 'stopped'.
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 80,
                                       'name': 'stopped'},
                     'shutdownState': {'code': 48,
                                       'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_terminate_instances(self):
    """Terminating a running instance should report 'terminated'."""
    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1, }
    instance_id = self._run_instance(**kwargs)
    # a running instance can't be started.
    self.assertRaises(exception.InstanceInvalidState,
                      self.cloud.start_instances,
                      self.context, [instance_id])
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'shutdownState': {'code': 48,
                                       'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_terminate_instances_invalid_instance_id(self):
    """Terminating an unknown instance id raises InstanceNotFound."""
    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1, }
    # An instance must exist so the failure below is about the bogus
    # id only.  (Dropped the unused 'instance_id =' binding.)
    self._run_instance(**kwargs)
    self.assertRaises(exception.InstanceNotFound,
                      self.cloud.terminate_instances,
                      self.context, ['i-2'])
    self._restart_compute_service()
def test_terminate_instances_disable_terminate(self):
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
internal_id = ec2utils.ec2_id_to_id(instance_id)
instance = db.instance_update(self.context, internal_id,
{'disable_terminate': True})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'shutdownState': {'code': 16,
'name': 'running'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
instance = db.instance_update(self.context, internal_id,
{'disable_terminate': False})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'shutdownState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
    def test_terminate_instances_two_instances(self):
        """Terminate a stopped and a running instance in one call.

        The response must report each instance's own previous state
        (80 == stopped for the first, 16 == running for the second).
        """
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1, }
        inst1 = self._run_instance(**kwargs)
        inst2 = self._run_instance(**kwargs)
        # Stop only the first instance; the second keeps running.
        result = self.cloud.stop_instances(self.context, [inst1])
        self.assertTrue(result)
        expected = {'instancesSet': [
            {'instanceId': 'i-00000001',
             'previousState': {'code': 80,
                               'name': 'stopped'},
             'shutdownState': {'code': 48,
                               'name': 'terminated'}},
            {'instanceId': 'i-00000002',
             'previousState': {'code': 16,
                               'name': 'running'},
             'shutdownState': {'code': 48,
                               'name': 'terminated'}}]}
        result = self.cloud.terminate_instances(self.context, [inst1, inst2])
        self.assertEqual(result, expected)
        self._restart_compute_service()
    def test_reboot_instances(self):
        """Reboot a running instance.

        Also verifies that start_instances rejects an already-running
        instance with InstanceInvalidState.
        """
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1, }
        instance_id = self._run_instance(**kwargs)
        # a running instance can't be started.
        self.assertRaises(exception.InstanceInvalidState,
                          self.cloud.start_instances,
                          self.context, [instance_id])
        result = self.cloud.reboot_instances(self.context, [instance_id])
        self.assertTrue(result)
def _volume_create(self, volume_id=None):
kwargs = {'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached', }
if volume_id:
kwargs['id'] = volume_id
return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_id, mountpoint):
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertEqual(vol['instance_id'], None)
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
    def test_stop_start_with_volume(self):
        """Make sure run instance with block device mapping works"""
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)
        vol1 = self._volume_create()
        vol2 = self._volume_create()
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'volume_id': vol1['id'],
                                            'delete_on_termination': False},
                                           {'device_name': '/dev/vdc',
                                            'volume_id': vol2['id'],
                                            'delete_on_termination': True},
                                           ]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        # Both mapped volumes should be attached after boot.
        vols = db.volume_get_all_by_instance(self.context, instance_id)
        self.assertEqual(len(vols), 2)
        for vol in vols:
            self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
        vol = db.volume_get(self.context, vol1['id'])
        self._assert_volume_attached(vol, instance_id, '/dev/vdb')
        vol = db.volume_get(self.context, vol2['id'])
        self._assert_volume_attached(vol, instance_id, '/dev/vdc')
        # Stopping the instance detaches both volumes ...
        result = self.cloud.stop_instances(self.context, [ec2_instance_id])
        self.assertTrue(result)
        vol = db.volume_get(self.context, vol1['id'])
        self._assert_volume_detached(vol)
        vol = db.volume_get(self.context, vol2['id'])
        self._assert_volume_detached(vol)
        # ... and starting it re-attaches them at the mapped mountpoints.
        self.cloud.start_instances(self.context, [ec2_instance_id])
        vols = db.volume_get_all_by_instance(self.context, instance_id)
        self.assertEqual(len(vols), 2)
        for vol in vols:
            self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
            self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
                            vol['mountpoint'] == '/dev/vdc')
            self.assertEqual(vol['instance_id'], instance_id)
            self.assertEqual(vol['status'], "in-use")
            self.assertEqual(vol['attach_status'], "attached")
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        # vol1 had delete_on_termination=False and must survive terminate.
        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = db.volume_get(admin_ctxt, vol1['id'])
        self.assertFalse(vol['deleted'])
        db.volume_destroy(self.context, vol1['id'])
        # vol2 had delete_on_termination=True and must be gone.
        admin_ctxt = context.get_admin_context(read_deleted="only")
        vol = db.volume_get(admin_ctxt, vol2['id'])
        self.assertTrue(vol['deleted'])
        self._restart_compute_service()
    def test_stop_with_attached_volume(self):
        """Make sure attach info is reflected to block device mapping"""
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)
        vol1 = self._volume_create()
        vol2 = self._volume_create()
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'volume_id': vol1['id'],
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        # Only vol1 comes from the block device mapping at boot.
        vols = db.volume_get_all_by_instance(self.context, instance_id)
        self.assertEqual(len(vols), 1)
        for vol in vols:
            self.assertEqual(vol['id'], vol1['id'])
            self._assert_volume_attached(vol, instance_id, '/dev/vdb')
        vol = db.volume_get(self.context, vol2['id'])
        self._assert_volume_detached(vol)
        # Attach vol2 and detach vol1 at runtime; the block device mapping
        # must track these changes across a stop/start cycle.
        instance = db.instance_get(self.context, instance_id)
        self.cloud.compute_api.attach_volume(self.context,
                                             instance,
                                             volume_id=vol2['id'],
                                             device='/dev/vdc')
        vol = db.volume_get(self.context, vol2['id'])
        self._assert_volume_attached(vol, instance_id, '/dev/vdc')
        self.cloud.compute_api.detach_volume(self.context,
                                             volume_id=vol1['id'])
        vol = db.volume_get(self.context, vol1['id'])
        self._assert_volume_detached(vol)
        result = self.cloud.stop_instances(self.context, [ec2_instance_id])
        self.assertTrue(result)
        for vol_id in (vol1['id'], vol2['id']):
            vol = db.volume_get(self.context, vol_id)
            self._assert_volume_detached(vol)
        # After restart, only the volume attached at stop time (vol2)
        # should be re-attached.
        self.cloud.start_instances(self.context, [ec2_instance_id])
        vols = db.volume_get_all_by_instance(self.context, instance_id)
        self.assertEqual(len(vols), 1)
        for vol in vols:
            self.assertEqual(vol['id'], vol2['id'])
            self._assert_volume_attached(vol, instance_id, '/dev/vdc')
        vol = db.volume_get(self.context, vol1['id'])
        self._assert_volume_detached(vol)
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        for vol_id in (vol1['id'], vol2['id']):
            vol = db.volume_get(self.context, vol_id)
            self.assertEqual(vol['id'], vol_id)
            self._assert_volume_detached(vol)
            db.volume_destroy(self.context, vol_id)
        self._restart_compute_service()
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
    def test_run_with_snapshot(self):
        """Makes sure run/stop/start instance with snapshot works."""
        vol = self._volume_create()
        ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id'])
        ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
        snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
        ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
        snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'snapshot_id': snapshot1_id,
                                            'delete_on_termination': False, },
                                           {'device_name': '/dev/vdc',
                                            'snapshot_id': snapshot2_id,
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        # Each snapshot mapping should have spawned a new attached volume.
        vols = db.volume_get_all_by_instance(self.context, instance_id)
        self.assertEqual(len(vols), 2)
        vol1_id = None
        vol2_id = None
        for vol in vols:
            snapshot_id = vol['snapshot_id']
            if snapshot_id == snapshot1_id:
                vol1_id = vol['id']
                mountpoint = '/dev/vdb'
            elif snapshot_id == snapshot2_id:
                vol2_id = vol['id']
                mountpoint = '/dev/vdc'
            else:
                self.fail()
            self._assert_volume_attached(vol, instance_id, mountpoint)
        self.assertTrue(vol1_id)
        self.assertTrue(vol2_id)
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        # delete_on_termination=False: volume survives terminate.
        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = db.volume_get(admin_ctxt, vol1_id)
        self._assert_volume_detached(vol)
        self.assertFalse(vol['deleted'])
        db.volume_destroy(self.context, vol1_id)
        # delete_on_termination=True: volume was deleted with the instance.
        admin_ctxt = context.get_admin_context(read_deleted="only")
        vol = db.volume_get(admin_ctxt, vol2_id)
        self.assertTrue(vol['deleted'])
        for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
            self.cloud.delete_snapshot(self.context, snapshot_id)
        db.volume_destroy(self.context, vol['id'])
    def test_create_image(self):
        """Make sure that CreateImage works"""
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)
        (volumes, snapshots) = self._setUpImageSet(
            create_volumes_and_snapshots=True)
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1}
        ec2_instance_id = self._run_instance(**kwargs)
        # TODO(yamahata): s3._s3_create() can't be tested easily by unit test
        #                 as there is no unit test for s3.create()
        ## result = self.cloud.create_image(self.context, ec2_instance_id,
        ##                                  no_reboot=True)
        ## ec2_image_id = result['imageId']
        ## created_image = self.cloud.describe_images(self.context,
        ##                                            [ec2_image_id])
        # Currently this test only exercises setup/teardown around the
        # (disabled) CreateImage call above.
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        for vol in volumes:
            db.volume_destroy(self.context, vol)
        for snap in snapshots:
            db.snapshot_destroy(self.context, snap)
        # TODO(yamahata): clean up snapshot created by CreateImage.
        self._restart_compute_service()
@staticmethod
def _fake_bdm_get(ctxt, id):
return [{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdh'},
{'volume_id': None,
'snapshot_id': 98765432,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdi'},
{'volume_id': None,
'snapshot_id': None,
'no_device': True,
'virtual_name': None,
'delete_on_termination': None,
'device_name': None},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral0',
'delete_on_termination': None,
'device_name': '/dev/sdb'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral1',
'delete_on_termination': None,
'device_name': '/dev/sdd'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral2',
'delete_on_termination': None,
'device_name': '/dev/sd3'},
]
    def test_describe_instance_attribute(self):
        """Make sure that describe_instance_attribute works"""
        # Stub out the DB/compute lookups so no real instance is needed;
        # _fake_bdm_get supplies a canned block device mapping set.
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       self._fake_bdm_get)

        def fake_get(ctxt, instance_id):
            # Minimal instance record covering every queried attribute.
            return {
                'id': 0,
                'root_device_name': '/dev/sdh',
                'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
                'vm_state': vm_states.STOPPED,
                'instance_type': {'name': 'fake_type'},
                'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'ramdisk_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'user_data': 'fake-user data',
                'shutdown_terminate': False,
                'disable_terminate': False,
                }

        self.stubs.Set(self.cloud.compute_api, 'get', fake_get)

        def fake_volume_get(ctxt, volume_id, session=None):
            # Only the volume referenced by the fake BDM exists.
            if volume_id == 87654321:
                return {'id': volume_id,
                        'attach_time': '13:56:24',
                        'status': 'in-use'}
            raise exception.VolumeNotFound(volume_id=volume_id)

        self.stubs.Set(db, 'volume_get', fake_volume_get)
        # Bind context and instance id once; each call queries one attribute.
        get_attribute = functools.partial(
            self.cloud.describe_instance_attribute,
            self.context, 'i-12345678')
        bdm = get_attribute('blockDeviceMapping')
        bdm['blockDeviceMapping'].sort()
        expected_bdm = {'instance_id': 'i-12345678',
                        'rootDeviceType': 'ebs',
                        'blockDeviceMapping': [
                            {'deviceName': '/dev/sdh',
                             'ebs': {'status': 'in-use',
                                     'deleteOnTermination': True,
                                     'volumeId': 87654321,
                                     'attachTime': '13:56:24'}}]}
        expected_bdm['blockDeviceMapping'].sort()
        self.assertEqual(bdm, expected_bdm)
        groupSet = get_attribute('groupSet')
        groupSet['groupSet'].sort()
        expected_groupSet = {'instance_id': 'i-12345678',
                             'groupSet': [{'groupId': 'fake0'},
                                          {'groupId': 'fake1'}]}
        expected_groupSet['groupSet'].sort()
        self.assertEqual(groupSet, expected_groupSet)
        self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
                         {'instance_id': 'i-12345678',
                          'instanceInitiatedShutdownBehavior': 'stop'})
        self.assertEqual(get_attribute('disableApiTermination'),
                         {'instance_id': 'i-12345678',
                          'disableApiTermination': False})
        self.assertEqual(get_attribute('instanceType'),
                         {'instance_id': 'i-12345678',
                          'instanceType': 'fake_type'})
        self.assertEqual(get_attribute('kernel'),
                         {'instance_id': 'i-12345678',
                          'kernel': 'aki-00000001'})
        self.assertEqual(get_attribute('ramdisk'),
                         {'instance_id': 'i-12345678',
                          'ramdisk': 'ari-00000002'})
        self.assertEqual(get_attribute('rootDeviceName'),
                         {'instance_id': 'i-12345678',
                          'rootDeviceName': '/dev/sdh'})
        # NOTE(yamahata): this isn't supported
        # get_attribute('sourceDestCheck')
        # NOTE(review): the expected bytes are presumably the base64-decoded
        # form of the stored 'fake-user data' string — confirm against the
        # API's user_data decoding.
        self.assertEqual(get_attribute('userData'),
                         {'instance_id': 'i-12345678',
                          'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
def test_instance_initiated_shutdown_behavior(self):
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior"""
kwargs.update({'instance_type': FLAGS.default_instance_type,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
result = self.cloud.describe_instance_attribute(self.context,
instance_id, 'instanceInitiatedShutdownBehavior')
self.assertEqual(result['instanceInitiatedShutdownBehavior'],
expected_result)
expected = {'instancesSet': [
{'instanceId': instance_id,
'previousState': {'code': 16,
'name': 'running'},
'shutdownState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context,
[instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
test_dia_iisb('terminate', image_id='ami-1')
block_device_mapping = [{'device_name': '/dev/vdb',
'virtual_name': 'ephemeral0'}]
test_dia_iisb('stop', image_id='ami-2',
block_device_mapping=block_device_mapping)
def fake_show(self, context, id_):
LOG.debug("id_ %s", id_)
print id_
prop = {}
if id_ == 'ami-3':
pass
elif id_ == 'ami-4':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}]}
elif id_ == 'ami-5':
prop = {'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
elif id_ == 'ami-6':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}],
'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}
prop_base.update(prop)
return {
'id': id_,
'properties': prop_base,
'container_format': 'ami',
'status': 'active'}
# NOTE(yamahata): create ami-3 ... ami-6
# ami-1 and ami-2 is already created by setUp()
for i in range(3, 7):
db.api.s3_image_create(self.context, 'ami-%d' % i)
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
test_dia_iisb('terminate', image_id='ami-3')
test_dia_iisb('stop', image_id='ami-4')
test_dia_iisb('stop', image_id='ami-5')
test_dia_iisb('stop', image_id='ami-6')
|
|
"""searchspace.py

This module contains code for specifying the hyperparameter search space.

The search space is specified as a product of bounded intervals. Each dimension
can be either an integer, floating-point or enumeration.

The base measure on the space, e.g. for random sampling (`rvs()`) is a product
of uniform distribution on the bounded intervals. This can, however, be
modified for floating-point dimensions using the `warp` keyword argument in
`add_float`
"""
# The docstring must precede the __future__ import: previously it was placed
# after it, so it was an ordinary string expression and module.__doc__ was
# left unset. (A docstring is the one statement allowed before __future__.)
from __future__ import print_function, absolute_import, division
from collections import namedtuple, Iterable
import numpy as np
from sklearn.utils import check_random_state
try:
from hyperopt import hp, pyll
except ImportError:
from .utils import mock_module
hp = mock_module('hyperopt')
pyll = mock_module('hyperopt')
class SearchSpace(object):
    """A hyperparameter search space: a named product of bounded dimensions.

    Dimensions are registered via `add_int`, `add_float`, `add_enum` or
    `add_jump`, and stored by name in `self.variables`.
    """

    def __init__(self):
        # Maps variable name -> IntVariable | FloatVariable | EnumVariable.
        self.variables = {}

    @property
    def n_dims(self):
        """Number of registered dimensions."""
        return len(self.variables)

    def add_jump(self, name, min, max, step, var_type=float):
        """ An integer/float valued dimension bounded between
        range(min, max+step, step). Note that the right endpoint of the interval
        includes `max`. This is a wrapper around the add_enum. It assumes
        that the jump is float but can also use ints.
        """
        choices = np.arange(min, max + step, step, dtype=var_type)
        self.variables[name] = EnumVariable(name, list(choices))

    def add_int(self, name, min, max, warp=None):
        """An integer-valued dimension bounded between `min` <= x <= `max`.
        Note that the right endpoint of the interval includes `max`.

        When `warp` is None, the base measure associated with this dimension
        is a categorical distribution with each weight on each of the integers
        in [min, max]. With `warp == 'log'`, the base measure is a uniform
        distribution on the log of the variable, with bounds at `log(min)` and
        `log(max)`. This is appropriate for variables that are "naturally" in
        log-space. Other `warp` functions are not supported (yet), but may be
        at a later time. Please note that this functionality is not supported
        for `hyperopt_tpe`.

        Raises ValueError for max < min, unsupported warp, or min <= 0
        with log warping.
        """
        min, max = map(int, (min, max))
        if max < min:
            raise ValueError('variable %s: max < min error' % name)
        if warp not in (None, 'log'):
            raise ValueError('variable %s: warp=%s is not supported. use '
                             'None or "log",' % (name, warp))
        if min <= 0 and warp == 'log':
            # BUG FIX: the '% name' argument was missing, so the raised
            # message contained a literal '%s' instead of the variable name.
            raise ValueError('variable %s: log-warping requires min > 0'
                             % name)
        self.variables[name] = IntVariable(name, min, max, warp)

    def add_float(self, name, min, max, warp=None):
        """A floating point-valued dimension bounded `min` <= x < `max`

        When `warp` is None, the base measure associated with this dimension
        is a uniform distribution on [min, max). With `warp == 'log'`, the
        base measure is a uniform distribution on the log of the variable,
        with bounds at `log(min)` and `log(max)`. This is appropriate for
        variables that are "naturally" in log-space. Other `warp` functions
        are not supported (yet), but may be at a later time.

        Raises ValueError for min >= max, unsupported warp, or min <= 0
        with log warping.
        """
        min, max = map(float, (min, max))
        if not min < max:
            raise ValueError('variable %s: min >= max error' % name)
        if warp not in (None, 'log'):
            raise ValueError('variable %s: warp=%s is not supported. use '
                             'None or "log",' % (name, warp))
        if min <= 0 and warp == 'log':
            # BUG FIX: same missing '% name' as in add_int above.
            raise ValueError('variable %s: log-warping requires min > 0'
                             % name)
        self.variables[name] = FloatVariable(name, min, max, warp)

    def add_enum(self, name, choices):
        """An enumeration-valued dimension.

        The base measure associated with this dimension is a categorical
        distribution with equal weight on each element in `choices`.
        """
        if not isinstance(choices, Iterable):
            raise ValueError('variable %s: choices must be iterable' % name)
        self.variables[name] = EnumVariable(name, choices)

    def __getitem__(self, name):
        return self.variables[name]

    def __iter__(self):
        return iter(self.variables.values())

    def rvs(self, seed=None):
        """Draw one random point as a dict {name: value}."""
        random = check_random_state(seed)
        return dict((param.name, param.rvs(random)) for param in self)

    def to_hyperopt(self):
        """Translate the space into hyperopt parameter expressions."""
        return dict((v.name, v.to_hyperopt()) for v in self)

    def point_to_gp(self, point_dict):
        """Map a point (dict) to the GP's normalized [0, 1] coordinates."""
        return [var.point_to_gp(point_dict[var.name]) for var in self]

    def __repr__(self):
        lines = (['Hyperparameter search space:'] +
                 ['  ' + repr(var) for var in self])
        return '\n'.join(lines)
class IntVariable(namedtuple('IntVariable', ('name', 'min', 'max', 'warp'))):
# this pattern is a simple memory-efficient way to add some methods to
# a namedtuple, demonstrated in sklearn.
# https://github.com/scikit-learn/scikit-learn/blob/a38372998b560d184a195bbd10a16c8f20119aa8/sklearn/grid_search.py#L259-L278
__slots__ = ()
def __repr__(self):
return '{0:<25s}\t(int) {1:8d} <= x <= {2:d}'.format(
self.name, self.min, self.max)
def rvs(self, random):
# extra +1 here because of the _inclusive_ endpoint
if self.warp is None:
return random.randint(self.min, self.max+1)
elif self.warp == 'log':
return np.exp(random.uniform(np.log(self.min), np.log(self.max+1)))
raise ValueError('unknown warp: %s' % self.warp)
def to_hyperopt(self):
if self.warp is None:
return pyll.scope.int(hp.uniform(self.name, self.min, self.max+1))
raise ValueError('warped integers are not supported for hyperopt')
def domain_to_gp(self):
return {'min': 0.0, 'max': 1.0}
def point_to_gp(self, value):
if self.warp is None:
return (value - self.min) / (self.max - self.min)
elif self.warp == 'log':
rng = np.log(self.max) - np.log(self.min)
return (np.log(value) - np.log(self.min)) / rng
raise ValueError('unknown warp: %s' % self.warp)
def point_from_gp(self, gpvalue):
if self.warp is None:
return int(self.min + (gpvalue * (self.max - self.min)))
elif self.warp == 'log':
rng = np.log(self.max) - np.log(self.min)
outvalue = np.exp(np.log(self.min) + gpvalue * rng)
return np.clip(outvalue, self.min, self.max).astype(int)
raise ValueError('unknown warp: %s' % self.warp)
class FloatVariable(namedtuple('FloatVariable',
                               ('name', 'min', 'max', 'warp'))):
    """A floating-point dimension on the half-open interval [min, max)."""
    __slots__ = ()

    def __repr__(self):
        return '{0:<25s}\t(float) {1:8f} <= x < {2:f}'.format(
            self.name, self.min, self.max)

    def rvs(self, random):
        """Sample one value using the numpy RandomState *random*."""
        if self.warp == 'log':
            return np.exp(random.uniform(np.log(self.min), np.log(self.max)))
        if self.warp is None:
            return random.uniform(self.min, self.max)
        raise ValueError('unknown warp: %s' % self.warp)

    def to_hyperopt(self):
        if self.warp is None:
            return hp.uniform(self.name, self.min, self.max)
        if self.warp == 'log':
            return hp.loguniform(self.name, np.log(self.min), np.log(self.max))
        raise ValueError('unknown warp: %s' % self.warp)

    def domain_to_gp(self):
        """The GP always works on the unit interval."""
        return {'min': 0.0, 'max': 1.0}

    def point_to_gp(self, value):
        """Map *value* from [min, max) onto the GP's [0, 1] coordinate."""
        if self.warp == 'log':
            log_min = np.log(self.min)
            return (np.log(value) - log_min) / (np.log(self.max) - log_min)
        if self.warp is None:
            return (value - self.min) / (self.max - self.min)
        raise ValueError('unknown warp: %s' % self.warp)

    def point_from_gp(self, gpvalue):
        """Inverse of point_to_gp, clipped back into [min, max]."""
        if self.warp is None:
            raw = self.min + gpvalue * (self.max - self.min)
        elif self.warp == 'log':
            log_min, log_max = np.log(self.min), np.log(self.max)
            raw = np.exp(log_min + gpvalue * (log_max - log_min))
        else:
            raise ValueError('unknown warp: %s' % self.warp)
        return np.clip(raw, self.min, self.max)
class EnumVariable(namedtuple('EnumVariable', ('name', 'choices'))):
    """A categorical dimension over a fixed list of choices."""
    __slots__ = ()

    def __repr__(self):
        rendered = ', '.join(str(choice) for choice in self.choices)
        return '{0:<25s}\t(enum) choices = ({1:s})'.format(
            self.name, rendered)

    def rvs(self, random):
        """Pick one choice uniformly using RandomState *random*."""
        pick = random.randint(len(self.choices))
        return self.choices[pick]

    def to_hyperopt(self):
        return hp.choice(self.name, self.choices)

    def domain_to_gp(self):
        """The GP always works on the unit interval."""
        return {'min': 0.0, 'max': 1.0}

    def point_to_gp(self, value):
        """Map a choice to its normalized index in [0, 1].

        Raises ValueError when *value* is not among the choices.
        """
        # NOTE(review): a single-choice enum divides by zero here — confirm
        # whether that case is excluded upstream.
        for index, choice in enumerate(self.choices):
            if choice == value:
                return float(index) / (len(self.choices) - 1)
        raise ValueError('%s not in %s' % (value, self.choices))

    def point_from_gp(self, gpvalue):
        """Inverse of point_to_gp: round the coordinate to a choice index."""
        index = int(np.round(gpvalue * (len(self.choices) - 1)))
        return self.choices[index]
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for autotuning performance knobs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops
class AutotuneBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for autotuning performance knobs.

  Each `benchmark_*` method times one pipeline shape three ways — no
  autotuning, parallelism-only autotuning, and parallelism plus buffer-size
  autotuning — and prints the speedup ratios of the latter two over the
  baseline.
  """

  def _run_benchmark(self, dataset, autotune, autotune_buffers,
                     benchmark_iters, benchmark_label):
    # Time `dataset` once through the benchmark harness with only the
    # requested autotuning options enabled; returns the measured wall time.
    options = dataset_ops.Options()
    # Default graph rewrites are disabled so that autotuning is the only
    # optimization affecting the measurement.
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.autotune = autotune
    options.experimental_optimization.autotune_buffers = autotune_buffers
    dataset = dataset.with_options(options)
    # The reported name encodes which autotuning mode was measured.
    autotune_string = "_autotune_{}".format(
        "parallelism_and_buffer_sizes"
        if autotune_buffers else "parallelism_only")

    wall_time = self.run_and_report_benchmark(
        dataset=dataset,
        num_elements=benchmark_iters,
        warmup=True,
        iters=1,
        name=benchmark_label + (autotune_string if autotune else ""))
    return wall_time

  def benchmark_batch(self):
    # Compare the three autotuning modes for a map + parallel-batch pipeline.
    a = self._benchmark_batch(autotune=False)
    b = self._benchmark_batch(autotune=True, autotune_buffers=False)
    c = self._benchmark_batch(autotune=True, autotune_buffers=True)
    print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
    print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
          .format(a / c))

  def _benchmark_batch(self, autotune, autotune_buffers=False):
    batch_size = 128
    k = 1024
    # Each element is a (1, 4k) x (4k, 1) matmul; batching is the
    # parallelized (autotunable) stage here.
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.map(math_ops.matmul)
    dataset = dataset.batch(
        batch_size=batch_size, num_parallel_calls=dataset_ops.AUTOTUNE)
    return self._run_benchmark(
        dataset,
        autotune,
        autotune_buffers,
        benchmark_iters=10000,
        benchmark_label="batch")

  def benchmark_map(self):
    # Compare the three autotuning modes for a parallel-map pipeline.
    a = self._benchmark_map(autotune=False)
    b = self._benchmark_map(autotune=True, autotune_buffers=False)
    c = self._benchmark_map(autotune=True, autotune_buffers=True)
    print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
    print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
          .format(a / c))

  def _benchmark_map(self, autotune, autotune_buffers=False):
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    # The parallel map is the autotunable stage.
    dataset = dataset.map(
        math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
    return self._run_benchmark(
        dataset=dataset,
        autotune=autotune,
        autotune_buffers=autotune_buffers,
        benchmark_iters=10000,
        benchmark_label="map")

  def benchmark_map_and_batch(self):
    # Compare the three autotuning modes for a parallel-map + batch pipeline.
    a = self._benchmark_map_and_batch(autotune=False)
    b = self._benchmark_map_and_batch(autotune=True, autotune_buffers=False)
    c = self._benchmark_map_and_batch(autotune=True, autotune_buffers=True)
    print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
    print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
          .format(a / c))

  def _benchmark_map_and_batch(self, autotune, autotune_buffers=False):
    batch_size = 16
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.map(
        math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
    dataset = dataset.batch(batch_size=batch_size)
    return self._run_benchmark(
        dataset=dataset,
        autotune=autotune,
        autotune_buffers=autotune_buffers,
        benchmark_iters=1000,
        benchmark_label="map_and_batch")

  def benchmark_interleave(self):
    # Compare the three autotuning modes for a parallel-interleave pipeline.
    a = self._benchmark_interleave(autotune=False)
    b = self._benchmark_interleave(autotune=True, autotune_buffers=False)
    c = self._benchmark_interleave(autotune=True, autotune_buffers=True)
    print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
    print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
          .format(a / c))

  def _benchmark_interleave(self, autotune, autotune_buffers=False):
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.map(math_ops.matmul)
    # The parallel interleave over copies of the matmul dataset is the
    # autotunable stage.
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset,
        cycle_length=10,
        num_parallel_calls=dataset_ops.AUTOTUNE)
    return self._run_benchmark(
        dataset=dataset,
        autotune=autotune,
        autotune_buffers=autotune_buffers,
        benchmark_iters=10000,
        benchmark_label="interleave")

  def benchmark_map_and_interleave(self):
    # Compare the autotuning modes for a multi-stage map+interleave pipeline.
    a = self._benchmark_map_and_interleave(autotune=False)
    b = self._benchmark_map_and_interleave(
        autotune=True, autotune_buffers=False)
    c = self._benchmark_map_and_interleave(autotune=True, autotune_buffers=True)
    print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
    print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
          .format(a / c))

  def _benchmark_map_and_interleave(self, autotune, autotune_buffers=False):
    k = 1024 * 1024
    # Three matmul workloads of decreasing size: 8k, 4k, 2k inner dims.
    a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
    b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
    c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
    dataset_a = dataset_ops.Dataset.from_tensors(a).repeat()
    dataset_b = dataset_ops.Dataset.from_tensors(b).repeat()
    dataset_c = dataset_ops.Dataset.from_tensors(c).repeat()

    def f1(x, y):
      return math_ops.matmul(x, y)

    def f2(a, b):
      # Multiply only the second (zipped-in) operand pair; pass `a` through.
      x, y = b
      return a, math_ops.matmul(x, y)

    # Stage 1: parallel map over dataset_a, fanned out via interleave.
    dataset = dataset_a
    dataset = dataset.map(f1, num_parallel_calls=dataset_ops.AUTOTUNE)
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset,
        num_parallel_calls=dataset_ops.AUTOTUNE,
        cycle_length=2)
    # Stage 2: zip with dataset_b, map, interleave again.
    dataset = dataset_ops.Dataset.zip((dataset, dataset_b))
    dataset = dataset.map(f2, num_parallel_calls=dataset_ops.AUTOTUNE)
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset,
        num_parallel_calls=dataset_ops.AUTOTUNE,
        cycle_length=2)
    # Stage 3: zip with dataset_c and a final parallel map.
    dataset = dataset_ops.Dataset.zip((dataset, dataset_c))
    dataset = dataset.map(f2, num_parallel_calls=dataset_ops.AUTOTUNE)
    return self._run_benchmark(
        dataset=dataset,
        autotune=autotune,
        autotune_buffers=autotune_buffers,
        benchmark_iters=10000,
        benchmark_label="map_and_interleave")

  def benchmark_map_batch_and_interleave(self):
    # Compare the autotuning modes for a map+batch+interleave pipeline.
    a = self._benchmark_map_batch_and_interleave(autotune=False)
    b = self._benchmark_map_batch_and_interleave(
        autotune=True, autotune_buffers=False)
    c = self._benchmark_map_batch_and_interleave(
        autotune=True, autotune_buffers=True)
    print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
    print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
          .format(a / c))

  def _benchmark_map_batch_and_interleave(self,
                                          autotune,
                                          autotune_buffers=False):
    batch_size = 16
    k = 1024 * 1024
    # Three matmul workloads of decreasing size: 8k, 4k, 2k inner dims.
    a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
    b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
    c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
    dataset_a = dataset_ops.Dataset.from_tensors(a).repeat()
    dataset_b = dataset_ops.Dataset.from_tensors(b).repeat()
    dataset_c = dataset_ops.Dataset.from_tensors(c).repeat()

    # Branch 1: parallel map + batch over dataset_a, fanned out twice via
    # interleave (dataset_b is zipped in between the two interleaves).
    dataset = dataset_a
    dataset = dataset.map(
        math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
    dataset = dataset.batch(batch_size=batch_size)
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset,
        num_parallel_calls=dataset_ops.AUTOTUNE,
        cycle_length=2)
    dataset = dataset_ops.Dataset.zip((dataset, dataset_b))
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset,
        num_parallel_calls=dataset_ops.AUTOTUNE,
        cycle_length=2)
    # Branch 2: dataset_c gets its own parallel map + batch before zipping.
    dataset_c = dataset_c.map(
        math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
    dataset_c = dataset_c.batch(batch_size=batch_size)
    dataset = dataset_ops.Dataset.zip((dataset, dataset_c))
    return self._run_benchmark(
        dataset=dataset,
        autotune=autotune,
        autotune_buffers=autotune_buffers,
        benchmark_iters=1000,
        benchmark_label="map_batch_and_interleave")
# Entry point: run all benchmark_* methods via TF's benchmark test runner.
if __name__ == "__main__":
  benchmark_base.test.main()
|
|
from pyswagger import SwaggerApp, errs
from ..utils import get_test_data_folder
from pyswagger.primitives import Model, Array
from pyswagger.io import SwaggerRequest
import unittest

# Shared SwaggerApp built once from the bundled Swagger 1.2 "wordnik"
# petstore spec; the test cases below issue operations against it.
app = SwaggerApp._create_(get_test_data_folder(version='1.2', which='wordnik'))
class SwaggerRequest_Pet_TestCase(unittest.TestCase):
    """ test SwaggerRequest from Operation's __call__ """

    def test_updatePet(self):
        """ Pet.updatePet """
        req, _ = app.op['updatePet'](body=dict(id=1, name='Mary', category=dict(id=1, name='dog')))
        req.prepare()

        # updatePet is a PUT of a JSON body against the collection URL.
        self.assertEqual(req.method, 'put')
        self.assertEqual(req.header, {'Content-Type': 'application/json', 'Accept': 'application/json'})
        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet')
        self.assertEqual(req.path, '/api/pet')
        self.assertEqual(req.base_path, '')
        self.assertEqual(req.query, [])

        # The plain dict passed as 'body' is converted into a Model primitive.
        m = req._p['body']['body']
        self.assertTrue(isinstance(m, Model))
        self.assertEqual(m.id, 1)
        self.assertEqual(m.name, 'Mary')
        self.assertTrue(isinstance(m.category, Model))
        self.assertEqual(m.category.id, 1)
        self.assertEqual(m.category.name, 'dog')

    def test_findPetsByStatus(self):
        """ Pet.findPetsByStatus """
        req, _ = app.op['findPetsByStatus'](status=['available', 'sold'])
        req.prepare()

        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet/findByStatus')
        self.assertEqual(req.path, '/api/pet/findByStatus')
        self.assertEqual(req.base_path, '')
        self.assertEqual(req.method, 'get')
        self.assertEqual(req.header, {'Accept': 'application/json'})
        self.assertEqual(req.data, None)
        # Array query parameters are serialized as one comma-joined value.
        self.assertEqual(req.query, [('status', 'available,sold')])

    def test_findPetsByTags(self):
        """ Pet.findPetsByTags """
        req, _ = app.op['findPetsByTags'](tags=['small', 'cute', 'north'])
        req.prepare()

        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet/findByTags')
        self.assertEqual(req.path, '/api/pet/findByTags')
        self.assertEqual(req.base_path, '')
        self.assertEqual(req.method, 'get')
        self.assertEqual(req.header, {'Accept': 'application/json'})
        self.assertEqual(req.data, None)
        self.assertEqual(req.query, [('tags', 'small,cute,north')])

    def test_partialUpdate(self):
        """ Pet.partialUpdate """
        req, _ = app.op['partialUpdate'](petId=0, body=dict(id=2, name='Tom', category=dict(id=2, name='cat'), tags=[dict(id=0, name='cute'), dict(id=1, name='small')]))
        req.prepare()

        # partialUpdate is a PATCH with the pet id substituted into the path.
        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet/0')
        self.assertEqual(req.path, '/api/pet/0')
        self.assertEqual(req.base_path, '')
        self.assertEqual(req.method, 'patch')
        self.assertEqual(req.header, {'Content-Type': 'application/json', 'Accept': 'application/json'})

        m = req._p['body']['body']
        self.assertTrue(isinstance(m, Model))
        self.assertEqual(m.id, 2)
        self.assertEqual(m.name, 'Tom')
        # Fields not supplied by the caller must not appear in the Model.
        self.assertTrue('photoUrls' not in m)
        self.assertTrue('status' not in m)
        self.assertTrue(isinstance(m.category, Model))
        mm = m.category
        self.assertEqual(mm.id, 2)
        self.assertEqual(mm.name, 'cat')
        # Nested lists of dicts become an Array of Model primitives.
        self.assertTrue(isinstance(m.tags, Array))
        self.assertEqual(len(m.tags), 2)
        self.assertTrue(isinstance(m.tags[0], Model))
        self.assertEqual(m.tags[0].id, 0)
        self.assertEqual(m.tags[0].name, 'cute')
        self.assertTrue(isinstance(m.tags[1], Model))
        self.assertEqual(m.tags[1].id, 1)
        self.assertEqual(m.tags[1].name, 'small')
        self.assertEqual(req.query, [])

    def test_updatePetWithForm(self):
        """ Pet.updatePetWithForm """
        req, _ = app.op['updatePetWithForm'](petId=23, name='Gary', status='pending')
        req.prepare()

        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet/23')
        self.assertEqual(req.path, '/api/pet/23')
        self.assertEqual(req.base_path, '')
        self.assertEqual(req.method, 'post')
        self.assertEqual(req.header,{
            'Content-Type': u'application/x-www-form-urlencoded',
            'Accept': 'application/json'
        })
        # Form fields are url-encoded into the request data; their order is
        # unspecified, so only substring presence is checked.
        self.assertTrue(req.data.find('status=pending') != -1)
        self.assertTrue(req.data.find('name=Gary') != -1)
        self.assertEqual(req.query, [])

    def test_addPet(self):
        """ Pet.addPet """
        req, _ = app.op['addPet'](body=dict(id=34, name='Qoo', category=dict(id=2, name='cat'), status='available'))
        req.prepare()

        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet')
        self.assertEqual(req.path, '/api/pet')
        self.assertEqual(req.base_path, '')
        self.assertEqual(req.method, 'post')
        self.assertEqual(req.header, {'Content-Type': 'application/json', 'Accept': 'application/json'})

        m = req._p['body']['body']
        self.assertTrue(isinstance(m, Model))
        self.assertEqual(m.id, 34)
        self.assertEqual(m.name, 'Qoo')
        self.assertEqual(m.status, 'available')
        mm = m.category
        self.assertTrue(isinstance(mm, Model))
        self.assertEqual(mm.id, 2)
        self.assertEqual(mm.name, 'cat')
        self.assertEqual(req.query, [])

    def test_deletePet(self):
        """ Pet.deletePet """
        req, _ = app.op['deletePet'](petId=22)
        req.prepare()

        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet/22')
        self.assertEqual(req.path, '/api/pet/22')
        self.assertEqual(req.method, 'delete')
        self.assertEqual(req.header, {'Accept': 'application/json'})
        self.assertEqual(req.data, None)
        self.assertEqual(req.query, [])

    def test_getPetById(self):
        """ Pet.getPetById """
        req, _ = app.op['getPetById'](petId=100)
        req.prepare()

        self.assertEqual(req.url, 'http://petstore.swagger.wordnik.com/api/pet/100')
        self.assertEqual(req.path, '/api/pet/100')
        self.assertEqual(req.method, 'get')
        self.assertEqual(req.header, {'Accept': 'application/json'})
        self.assertEqual(req.data, None)
        self.assertEqual(req.query, [])

    def test_opt_url_netloc(self):
        """ test the replace of net loc """
        req, _ = app.op['getPetById'](petId=100)
        req.prepare()

        # Patching opt_url_netloc rewrites only the host:port part of the
        # URL; the path is left untouched.
        req._patch({SwaggerRequest.opt_url_netloc: 'localhost:9001'})
        self.assertEqual(req.url, 'http://localhost:9001/api/pet/100')
        self.assertEqual(req.path, '/api/pet/100')

    def test_uploadFile(self):
        """ Pet.uploadFile """
        # TODO: implement File upload
class SwaggerResponse_TestCase(unittest.TestCase):
    """ test SwaggerResponse from Pet's Operation's __call__ """

    def test_updatePet(self):
        """ Pet.updatePet """
        _, resp = app.op['updatePet'](body=dict(id=1, name='Mary'))
        # update raw before status should raise exception
        self.assertRaises(Exception, resp.apply_with, raw={})
        resp.apply_with(status=400)
        self.assertEqual(resp.status, 400)

    def test_findPetsByTags(self):
        """ Pet.findPetsByTags """
        _, resp = app.op['findPetsByTags'](tags=[])
        resp.apply_with(status=200, raw=[
            dict(id=1, name='Tom', category=dict(id=1, name='dog'), tags=[dict(id=1, name='small')]),
            dict(id=2, name='QQ', tags=[dict(id=1, name='small')])
        ])
        # The raw JSON list is converted into Array/Model primitives.
        d = resp.data
        self.assertTrue(isinstance(d, Array))
        d1 = d[0]
        self.assertTrue(isinstance(d1, Model))
        self.assertEqual(d1.id, 1)
        self.assertEqual(d1.name, 'Tom')
        # The converted tags compare equal to plain dicts with the same
        # content, while still exposing attribute access.
        self.assertEqual(d1.tags, [dict(id=1, name='small')])
        self.assertEqual(d1.tags[0].name, 'small')

    def test_updatePetWithForm(self):
        """ Pet.updatePetWithForm, test void """
        _, resp = app.op['updatePetWithForm'](petId=23)
        resp.apply_with(status=200, raw={})
        # A void operation yields no converted data.
        self.assertEqual(resp.data, None)

    def test_invalid_enum(self):
        """ invalid enum value """
        self.assertRaises(errs.ValidationError, app.op['findPetsByStatus'], status=['wrong_enum'])

    def test_default_value(self):
        """ make sure defaultValue works """
        req, _ = app.op['findPetsByStatus']()
        self.assertEqual(req._p['query'], [(u'status', 'available')])

        # when there is no defaultValue, we should not provide a 'None'
        req, _ = app.op['updatePetWithForm'](petId=1, name='Tom')
        self.assertEqual(req._p['formData'], [('name', 'Tom')])
        req, _ = app.op['updatePetWithForm'](petId=1)
        self.assertEqual(req._p['formData'], [])

    def test_min_max(self):
        """ make sure minimum/maximum works """
        # Values outside the declared minimum/maximum must be rejected.
        self.assertRaises(errs.ValidationError, app.op['getPetById'], petId=-100)
        self.assertRaises(errs.ValidationError, app.op['getPetById'], petId=1000000)
|
|
from __future__ import unicode_literals
from base64 import b64encode
import unittest
from flask import Response
from route53_dyndns import app, route53, views
from mock import patch
from .helpers import new_resource_record
class FrontendTestCase(unittest.TestCase):
    """Tests for the dyndns-style HTTP frontend of route53_dyndns."""

    def setUp(self):
        # Flask test client and the endpoint under test.
        self.app = app.test_client()
        self.url = '/nic/update'

    def make_auth_header(self, username, password):
        """Build an HTTP Basic Authorization header for the credentials."""
        value = b64encode("{0}:{1}".format(username, password).encode('ascii'))
        return {
            'Authorization': b'Basic ' + value
        }

    def get_with_auth(self, url, username='admin', password='admin', **kwargs):
        """GET *url* with Basic auth and a non-empty User-Agent header."""
        auth_header = self.make_auth_header(username, password)
        # Always supply a User-Agent (the views reject empty ones) unless the
        # caller already set one explicitly in environ_base.
        if 'environ_base' in kwargs:
            kwargs['environ_base'].setdefault('HTTP_USER_AGENT', "Client")
        else:
            kwargs['environ_base'] = {'HTTP_USER_AGENT': "Client"}
        return self.app.get(url, headers=auth_header, **kwargs)

    def assertResponseEqual(self, expected, rv):
        """Assert a 200 response whose body is *expected* plus CRLF."""
        response = rv.get_data().decode('utf-8')
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(expected + '\r\n', response)

    def test_app_sanity_check_config(self):
        # Test required settings
        for setting in ('USERNAME', 'PASSWORD'):
            with patch.dict(app.config):
                app.config.pop(setting)
                with self.assertRaises(RuntimeError):
                    app.sanity_check_config()
        # Test optional settings
        for setting in ('BAD_USER_AGENTS',):
            with patch.dict(app.config):
                app.config.pop(setting)
                self.assertFalse(setting in app.config)
                # Missing optional settings are filled in with a default.
                app.sanity_check_config()
                self.assertTrue(setting in app.config)

    def test_app_response(self):
        response = "Hello World!"
        # Check that a string response always has correct type and newline
        rv = app.make_response(response)
        self.assertEqual('text/plain', rv.headers['content-type'])
        self.assertResponseEqual(response, rv)
        # Check that a tuple response always has correct type and newline
        rv = app.make_response((response, 200, None))
        self.assertEqual('text/plain', rv.headers['content-type'])
        self.assertResponseEqual(response, rv)
        # Check that a response gets the correct type
        rv = app.make_response(Response(response, 200, None))
        self.assertEqual('text/plain', rv.headers['content-type'])

    def test_verify_auth(self):
        verified = views.verify_auth('admin', 'admin')
        self.assertFalse(verified)
        verified = views.verify_auth('admin', 'secret')
        self.assertTrue(verified)

    def test_nic_update_http(self):
        # Check that the trailing slash on the URL is optional
        rv = self.app.get(self.url + '/?hostname=foo.com')
        self.assertNotEqual(rv.status_code, 404)
        rv = self.app.get(self.url + '?hostname=foo.com')
        self.assertNotEqual(rv.status_code, 404)
        # Test the authentication logic for a view
        with patch('route53_dyndns.views.verify_auth') as mocked:
            auth_url = '/test_auth'

            # Register a minimal throwaway view guarded by the auth decorator.
            @app.route(auth_url)
            @views.api_auth
            def test_view():
                return 'OK'

            # Test no authentication
            rv = self.app.get(auth_url)
            self.assertEqual(rv.status_code, 401)
            # Test incorrect authentication
            mocked.return_value = False
            rv = self.get_with_auth(auth_url)
            self.assertEqual(rv.status_code, 403)
            # Test correct authentication
            mocked.return_value = True
            rv = self.get_with_auth(auth_url)
            self.assertEqual(rv.status_code, 200)

    @patch('route53_dyndns.views.verify_auth', **{'method.return_value': True})
    def test_nic_update_parameters(self, mocked_auth):
        # Offline is an unsupported parameter
        rv = self.get_with_auth(self.url + '?offline=True')
        self.assertResponseEqual(views.NOT_SUPPORTED, rv)

    @patch('route53_dyndns.views.verify_auth', **{'method.return_value': True})
    def test_nic_update_user_agent(self, mocked_auth):
        # Test no user agent
        rv = self.get_with_auth(self.url, environ_base={'HTTP_USER_AGENT': ''})
        self.assertResponseEqual(views.BAD_USER_AGENT, rv)
        # Test no bad user agents
        with patch.dict(app.config, {'BAD_USER_AGENTS': []}):
            rv = self.get_with_auth(self.url,
                                    environ_base={'HTTP_USER_AGENT': "foobar"})
            self.assertResponseEqual(views.NO_HOST, rv)
        # Test bad user agent
        with patch.dict(app.config, {'BAD_USER_AGENTS': ["foobar"]}):
            rv = self.get_with_auth(self.url + '?hostname=foo',
                                    environ_base={'HTTP_USER_AGENT': "foobar"})
            self.assertResponseEqual(views.BAD_USER_AGENT, rv)

    @patch('route53_dyndns.views.verify_auth', **{'method.return_value': True})
    def test_nic_update_hostname(self, mocked_auth):
        # Test no hostname
        rv = self.get_with_auth(self.url)
        self.assertResponseEqual(views.NO_HOST, rv)
        # Test a go wrong case when there's a Route 53 error
        with patch('route53_dyndns.route53.find_resource_record') as mocked:
            mocked.side_effect = route53.Route53Exception("Error")
            rv = self.get_with_auth(self.url + '?hostname=foo')
            self.assertResponseEqual(views.GENERAL_ERROR, rv)
        # Test hostname not found
        with patch('route53_dyndns.route53.find_resource_record') as mocked:
            mocked.return_value = None
            rv = self.get_with_auth(self.url + '?hostname=foo')
            self.assertResponseEqual(views.NO_HOST, rv)

    # Note: decorators apply bottom-up, so mocked_auth is the first mock
    # argument and mocked_find_record the second.
    @patch('route53_dyndns.route53.find_resource_record')
    @patch('route53_dyndns.views.verify_auth', **{'method.return_value': True})
    def test_nic_update_record_exists(self, mocked_auth, mocked_find_record):
        hostname = "www.google.com"
        value = "10.1.10.1"
        new_value = "192.168.1.1"
        mocked_find_record.return_value = new_resource_record(hostname, value)
        # Test IP hasn't changed
        url = self.url + '?hostname=' + hostname + '&myip=' + value
        rv = self.get_with_auth(url)
        self.assertResponseEqual(views.NO_CHANGE % value, rv)
        # Test IP hasn't changed, implicit IP from request
        rv = self.get_with_auth(self.url + '?hostname=' + hostname,
                                environ_base={'REMOTE_ADDR': value})
        self.assertResponseEqual(views.NO_CHANGE % value, rv)
        # Test go wrong case when there's an exception
        with patch('route53_dyndns.route53.update_resource_record') as mocked:
            mocked.side_effect = route53.Route53Exception("Error")
            url = self.url + '?hostname=' + hostname + '&myip=' + new_value
            rv = self.get_with_auth(url)
            self.assertResponseEqual(views.GENERAL_ERROR, rv)
        # Test go wrong case when there's an expected failure
        with patch('route53_dyndns.route53.update_resource_record') as mocked:
            mocked.return_value = False
            url = self.url + '?hostname=' + hostname + '&myip=' + new_value
            rv = self.get_with_auth(url)
            self.assertResponseEqual(views.GENERAL_ERROR, rv)
        with patch('route53_dyndns.route53.update_resource_record') as mocked:
            mocked.return_value = True
            # Go right case with explicit new IP
            url = self.url + '?hostname=' + hostname + '&myip=' + new_value
            rv = self.get_with_auth(url)
            self.assertResponseEqual(views.IP_CHANGED % new_value, rv)
            # Go right case with implict new IP from request
            url = self.url + '?hostname=' + hostname + '&myip=' + new_value
            rv = self.get_with_auth(url,
                                    environ_base={'REMOTE_ADDR': new_value})
            self.assertResponseEqual(views.IP_CHANGED % new_value, rv)
|
|
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.test_SimulationParameters
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Tests for module `SimulationParameters`.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion information for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
import unittest
import logging
import os.path
import copy
# Third party modules.
from nose.plugins.skip import SkipTest
# Local modules.
# Project modules
import pymcxray.FileFormat.SimulationParameters as SimulationParameters
import pymcxray.FileFormat.testUtilities as testUtilities
import pymcxray.FileFormat.Version as Version
# Globals and constants variables.
class TestSimulationParameters(unittest.TestCase):
"""
TestCase class for the module `SimulationParameters`.
"""
def setUp(self):
    """
    Setup method: locate the shared test-data folder and create a
    temporary working copy used by the tests.
    """
    unittest.TestCase.setUp(self)
    dataDirectory = os.path.join(os.path.dirname(__file__), "../../test_data")
    self.testDataPath = os.path.abspath(dataDirectory)
    self.tempDataPath = testUtilities.createTempDataPath(self.testDataPath)
def tearDown(self):
    """
    Teardown method.

    Removes the temporary data directory created in `setUp`.
    """
    unittest.TestCase.tearDown(self)
    testUtilities.removeTempDataPath(self.tempDataPath)
def testSkeleton(self):
    """
    First test to check if the testcase is working with the testing framework.
    """
    # `assert_` is a long-deprecated alias (removed in Python 3.12);
    # use `assertTrue` instead.
    self.assertTrue(True)
def test_read(self):
    """
    Tests for method `read`: every bundled simulation title must parse
    into the expected reference parameters.
    """
    for title in testUtilities.getSimulationTitles():
        simulationParameters = SimulationParameters.SimulationParameters()
        filepath = os.path.abspath(os.path.join(self.testDataPath, "%s/%s.par" % (title, title)))
        simulationParameters.read(filepath)

        simulationParametersRef = self.getSimulationParametersReference(title)
        # `assertEquals` is a deprecated alias (removed in Python 3.12);
        # compare each scalar field with `assertEqual` instead.
        for attributeName in ("baseFilename", "numberElectrons", "numberPhotons",
                              "numberWindows", "numberFilmsX", "numberFilmsY",
                              "numberFilmsZ", "numberChannels",
                              "energyChannelWidth_eV",
                              "spectrumInterpolationModel"):
            self.assertEqual(getattr(simulationParametersRef, attributeName),
                             getattr(simulationParameters, attributeName),
                             attributeName)
        self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
def test_read_1_1_1(self):
    """
    Tests for method `read` with a version 1.1.1 parameters file.
    """
    simulationParameters = SimulationParameters.SimulationParameters()
    title = "AlMgBulk5keV_version_1_1_1"
    filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    simulationParameters.read(filepath)

    # The parsed file must identify itself as version 1.1.1.
    # (`assertEquals` is a deprecated alias removed in Python 3.12;
    # `assertEqual` is used throughout.)
    self.assertEqual(Version.VERSION_1_1_1.major, simulationParameters.version.major)
    self.assertEqual(Version.VERSION_1_1_1.minor, simulationParameters.version.minor)
    self.assertEqual(Version.VERSION_1_1_1.revision, simulationParameters.version.revision)
    self.assertEqual(Version.VERSION_1_1_1, simulationParameters.version)

    simulationParametersRef = self.getSimulationParametersReference(title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)
    # Compare every scalar field against the hand-built reference.
    for attributeName in ("baseFilename", "numberElectrons", "numberPhotons",
                          "numberWindows", "numberFilmsX", "numberFilmsY",
                          "numberFilmsZ", "numberChannels",
                          "energyChannelWidth_eV",
                          "spectrumInterpolationModel"):
        self.assertEqual(getattr(simulationParametersRef, attributeName),
                         getattr(simulationParameters, attributeName),
                         attributeName)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
def test_read_1_2_0(self):
    """
    Tests for method `read` with a version 1.2.0 parameters file.
    """
    simulationParameters = SimulationParameters.SimulationParameters()
    title = "AlMgBulk5keV_version_1_2_0"
    filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    simulationParameters.read(filepath)

    # The parsed file must identify itself as version 1.2.0.
    # (`assertEquals` is a deprecated alias removed in Python 3.12;
    # `assertEqual` is used throughout.)
    self.assertEqual(Version.VERSION_1_2_0.major, simulationParameters.version.major)
    self.assertEqual(Version.VERSION_1_2_0.minor, simulationParameters.version.minor)
    self.assertEqual(Version.VERSION_1_2_0.revision, simulationParameters.version.revision)
    self.assertEqual(Version.VERSION_1_2_0, simulationParameters.version)

    simulationParametersRef = self.getSimulationParametersReference(title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)
    # Compare every scalar field against the hand-built reference.
    for attributeName in ("baseFilename", "numberElectrons", "numberPhotons",
                          "numberWindows", "numberFilmsX", "numberFilmsY",
                          "numberFilmsZ", "numberChannels",
                          "energyChannelWidth_eV",
                          "spectrumInterpolationModel"):
        self.assertEqual(getattr(simulationParametersRef, attributeName),
                         getattr(simulationParameters, attributeName),
                         attributeName)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
def test_read_1_2_1(self):
    """
    Tests for method `read` with a version 1.2.1 parameters file.
    """
    simulationParameters = SimulationParameters.SimulationParameters()
    title = "AlMgBulk5keV_version_1_2_1"
    filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    simulationParameters.read(filepath)

    # The parsed file must identify itself as version 1.2.1.
    # (`assertEquals` is a deprecated alias removed in Python 3.12;
    # `assertEqual` is used throughout.)
    self.assertEqual(Version.VERSION_1_2_1.major, simulationParameters.version.major)
    self.assertEqual(Version.VERSION_1_2_1.minor, simulationParameters.version.minor)
    self.assertEqual(Version.VERSION_1_2_1.revision, simulationParameters.version.revision)
    self.assertEqual(Version.VERSION_1_2_1, simulationParameters.version)

    simulationParametersRef = self.getSimulationParametersReference(title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)
    # Compare every scalar field against the hand-built reference.
    for attributeName in ("baseFilename", "numberElectrons", "numberPhotons",
                          "numberWindows", "numberFilmsX", "numberFilmsY",
                          "numberFilmsZ", "numberChannels",
                          "energyChannelWidth_eV",
                          "spectrumInterpolationModel"):
        self.assertEqual(getattr(simulationParametersRef, attributeName),
                         getattr(simulationParameters, attributeName),
                         attributeName)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
def test_read_1_4_1(self):
    """
    Tests for method `read` with a version 1.4.1 parameters file.
    """
    simulationParameters = SimulationParameters.SimulationParameters()
    title = "AlMgBulk5keV_version_1_4_1"
    filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    simulationParameters.read(filepath)

    # The parsed file must identify itself as version 1.4.1.
    # (`assertEquals` is a deprecated alias removed in Python 3.12;
    # `assertEqual` is used throughout.)
    self.assertEqual(Version.VERSION_1_4_1.major, simulationParameters.version.major)
    self.assertEqual(Version.VERSION_1_4_1.minor, simulationParameters.version.minor)
    self.assertEqual(Version.VERSION_1_4_1.revision, simulationParameters.version.revision)
    self.assertEqual(Version.VERSION_1_4_1, simulationParameters.version)

    simulationParametersRef = self.getSimulationParametersReference(title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)
    # Compare every scalar field against the hand-built reference.
    for attributeName in ("baseFilename", "numberElectrons", "numberPhotons",
                          "numberWindows", "numberFilmsX", "numberFilmsY",
                          "numberFilmsZ", "numberChannels",
                          "energyChannelWidth_eV",
                          "spectrumInterpolationModel"):
        self.assertEqual(getattr(simulationParametersRef, attributeName),
                         getattr(simulationParameters, attributeName),
                         attributeName)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
def test_read_1_4_4(self):
    """
    Tests for method `read` with a version 1.4.4 parameters file.

    Version 1.4.4 added the elastic cross-section and energy-loss
    scaling factors, which are checked here as well.
    """
    simulationParameters = SimulationParameters.SimulationParameters()
    title = "AlMgBulk5keV_version_1_4_4"
    filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    simulationParameters.read(filepath)

    # The parsed file must identify itself as version 1.4.4.
    # (`assertEquals` is a deprecated alias removed in Python 3.12;
    # `assertEqual` is used throughout.)
    self.assertEqual(Version.VERSION_1_4_4.major, simulationParameters.version.major)
    self.assertEqual(Version.VERSION_1_4_4.minor, simulationParameters.version.minor)
    self.assertEqual(Version.VERSION_1_4_4.revision, simulationParameters.version.revision)
    self.assertEqual(Version.VERSION_1_4_4, simulationParameters.version)

    simulationParametersRef = self.getSimulationParametersReference(title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)
    # Compare every scalar field against the hand-built reference.
    for attributeName in ("baseFilename", "numberElectrons", "numberPhotons",
                          "numberWindows", "numberFilmsX", "numberFilmsY",
                          "numberFilmsZ", "numberChannels",
                          "energyChannelWidth_eV",
                          "spectrumInterpolationModel",
                          "elasticCrossSectionScalingFactor",
                          "energyLossScalingFactor"):
        self.assertEqual(getattr(simulationParametersRef, attributeName),
                         getattr(simulationParameters, attributeName),
                         attributeName)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
def getSimulationParametersReference(self, title):
    """
    Build the expected `SimulationParameters` object for *title*.

    The per-title attribute values are kept in a single lookup table
    instead of the previous long if/elif chain, so each reference data
    set is defined exactly once. An unknown title yields a
    default-constructed object, matching the previous behaviour.
    """
    parameterValuesByTitle = {
        "AuBC cyl": dict(
            baseFilename=r"Results\AuBC cyl Low Count",
            numberElectrons=20,
            numberPhotons=500,
            numberWindows=32,
            numberFilmsX=64,
            numberFilmsY=64,
            numberFilmsZ=64,
            numberChannels=1024,
            spectrumInterpolationModel=2,
            voxelSimplification=None),
        "BioRitchieNew111017": dict(
            baseFilename=r"Results\Ritchie60",
            numberElectrons=50,
            numberPhotons=10000,
            numberWindows=64,
            numberFilmsX=128,
            numberFilmsY=128,
            numberFilmsZ=128,
            numberChannels=1024,
            spectrumInterpolationModel=2,
            voxelSimplification=1),
        "Bug Al Zr Sphere": dict(
            baseFilename=r"Results\McXRay",
            numberElectrons=1000,
            numberPhotons=10000,
            numberWindows=64,
            numberFilmsX=128,
            numberFilmsY=128,
            numberFilmsZ=128,
            numberChannels=1024,
            spectrumInterpolationModel=2,
            voxelSimplification=None),
        "Mg2SiAlCube3kev": dict(
            baseFilename=r"Results\Mg2SiAlCube3kev",
            numberElectrons=30,
            numberPhotons=1000,
            numberWindows=32,
            numberFilmsX=64,
            numberFilmsY=64,
            numberFilmsZ=64,
            numberChannels=1024,
            spectrumInterpolationModel=2,
            voxelSimplification=None),
        "AlMgBulk5keV_version_1_1_1": dict(
            baseFilename=r"Results\AlMgBulk5keV",
            version=Version.Version(1, 1, 1),
            numberElectrons=1000,
            numberPhotons=127678,
            numberWindows=64,
            numberFilmsX=128,
            numberFilmsY=128,
            numberFilmsZ=128,
            numberChannels=1024,
            spectrumInterpolationModel=2,
            voxelSimplification=None),
        "AlMgBulk5keV_version_1_2_0": dict(
            baseFilename=r"Results\AlMgBulk5keV_1_2_0",
            version=Version.Version(1, 2, 0),
            numberElectrons=10000,
            numberPhotons=1000,
            numberWindows=128,
            numberFilmsX=64,
            numberFilmsY=66,
            numberFilmsZ=70,
            energyChannelWidth_eV=5,
            spectrumInterpolationModel=1,
            voxelSimplification=None),
        "AlMgBulk5keV_version_1_2_1": dict(
            baseFilename=r"Results\AlMgBulk5keV_1_2_1",
            version=Version.Version(1, 2, 1),
            numberElectrons=10000,
            numberPhotons=1000,
            numberWindows=128,
            numberFilmsX=64,
            numberFilmsY=66,
            numberFilmsZ=70,
            energyChannelWidth_eV=5,
            spectrumInterpolationModel=1,
            voxelSimplification=None),
        "AlMgBulk5keV_version_1_4_1": dict(
            baseFilename=r"Results\AlMgBulk5keV_1_4_1",
            version=Version.Version(1, 4, 1),
            numberElectrons=10000,
            numberPhotons=1000,
            numberWindows=128,
            numberFilmsX=64,
            numberFilmsY=66,
            numberFilmsZ=70,
            energyChannelWidth_eV=5,
            spectrumInterpolationModel=1,
            voxelSimplification=None),
        "AlMgBulk5keV_version_1_4_4": dict(
            baseFilename=r"Results\AlMgBulk5keV_1_4_4",
            version=Version.Version(1, 4, 4),
            numberElectrons=10000,
            numberPhotons=1000,
            numberWindows=128,
            numberFilmsX=64,
            numberFilmsY=66,
            numberFilmsZ=70,
            energyChannelWidth_eV=5,
            spectrumInterpolationModel=1,
            voxelSimplification=None,
            # Scaling factors introduced with file-format version 1.4.4.
            elasticCrossSectionScalingFactor=1.3,
            energyLossScalingFactor=0.7),
    }

    simulationParameters = SimulationParameters.SimulationParameters()
    for attributeName, value in parameterValuesByTitle.get(title, {}).items():
        setattr(simulationParameters, attributeName, value)
    return simulationParameters
def test__createKeys(self):
    """
    Tests for method `_createKeys`.

    The number of parameter-file keys depends on the file version:
    versions up to 1.4.3 use 10 keys, while 1.4.4 adds two
    scaling-factor keys for a total of 12.
    """
    simulationParameters = SimulationParameters.SimulationParameters()

    # Version 1.4.3: base set of 10 keys.
    simulationParameters.version = copy.deepcopy(Version.VERSION_1_4_3)
    numberKeys = 10
    keys = simulationParameters._createKeys()
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(numberKeys, len(keys))

    # Version 1.4.4: two extra keys (elastic cross section scaling and
    # energy loss scaling factors).
    simulationParameters.version = copy.deepcopy(Version.VERSION_1_4_4)
    numberKeys = 12
    keys = simulationParameters._createKeys()
    self.assertEqual(numberKeys, len(keys))

    #self.fail("Test if the testcase is working.")
def test_write(self):
    """
    Tests for method `write`.

    For each known simulation title, write the reference parameters to
    a temporary .par file and compare it line by line with the stored
    reference file.
    """
    # Test currently disabled with nose's SkipTest.
    raise SkipTest

    self.maxDiff = None

    for title in testUtilities.getSimulationTitles():
        simulationParametersRef = self.getSimulationParametersReference(title)

        filepathReference = os.path.abspath(os.path.join(self.testDataPath, "%s/%s.par" % (title, title)))
        filepath = os.path.join(self.tempDataPath, "%s.par" % (title))

        # The original code created a fresh SimulationParameters object
        # here and immediately overwrote it; the dead assignment is gone.
        simulationParameters = simulationParametersRef
        simulationParameters.write(filepath)

        # NOTE(review): simulationParameters aliases simulationParametersRef,
        # so the attribute assertions below compare an object with itself;
        # presumably the written file was meant to be read back into a
        # fresh instance -- confirm intent.
        self.assertEqual(simulationParametersRef.baseFilename, simulationParameters.baseFilename)
        self.assertEqual(simulationParametersRef.numberElectrons, simulationParameters.numberElectrons)
        self.assertEqual(simulationParametersRef.numberPhotons, simulationParameters.numberPhotons)
        self.assertEqual(simulationParametersRef.numberWindows, simulationParameters.numberWindows)
        self.assertEqual(simulationParametersRef.numberFilmsX, simulationParameters.numberFilmsX)
        self.assertEqual(simulationParametersRef.numberFilmsY, simulationParameters.numberFilmsY)
        self.assertEqual(simulationParametersRef.numberFilmsZ, simulationParameters.numberFilmsZ)
        self.assertEqual(simulationParametersRef.numberChannels, simulationParameters.numberChannels)
        self.assertEqual(simulationParametersRef.energyChannelWidth_eV, simulationParameters.energyChannelWidth_eV)
        self.assertEqual(simulationParametersRef.spectrumInterpolationModel, simulationParameters.spectrumInterpolationModel)
        self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)

        # Context managers close the file handles; the original
        # open(...).readlines() calls leaked them.
        with open(filepathReference, 'r') as refFile:
            linesRef = refFile.readlines()
        with open(filepath, 'r') as generatedFile:
            lines = generatedFile.readlines()
        self.assertListEqual(linesRef, lines)

    #self.fail("Test if the testcase is working.")
def test_write_1_1_1(self):
    """
    Tests for method `write`.

    Write the 1.1.1 reference parameters to a temporary .par file and
    compare it line by line with the stored reference file.
    """
    # Test currently disabled with nose's SkipTest.
    raise SkipTest

    self.maxDiff = None

    title = "AlMgBulk5keV_version_1_1_1"
    simulationParametersRef = self.getSimulationParametersReference(title)

    filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    filepath = os.path.join(self.tempDataPath, "%s.par" % (title))

    simulationParameters = simulationParametersRef
    simulationParameters.write(filepath)

    # NOTE(review): simulationParameters aliases simulationParametersRef,
    # so the attribute assertions below are vacuous self-comparisons;
    # presumably the file was meant to be read back -- confirm intent.
    self.assertEqual(simulationParametersRef.baseFilename, simulationParameters.baseFilename)
    self.assertEqual(simulationParametersRef.numberElectrons, simulationParameters.numberElectrons)
    self.assertEqual(simulationParametersRef.numberPhotons, simulationParameters.numberPhotons)
    self.assertEqual(simulationParametersRef.numberWindows, simulationParameters.numberWindows)
    self.assertEqual(simulationParametersRef.numberFilmsX, simulationParameters.numberFilmsX)
    self.assertEqual(simulationParametersRef.numberFilmsY, simulationParameters.numberFilmsY)
    self.assertEqual(simulationParametersRef.numberFilmsZ, simulationParameters.numberFilmsZ)
    self.assertEqual(simulationParametersRef.numberChannels, simulationParameters.numberChannels)
    self.assertEqual(simulationParametersRef.energyChannelWidth_eV, simulationParameters.energyChannelWidth_eV)
    self.assertEqual(simulationParametersRef.spectrumInterpolationModel, simulationParameters.spectrumInterpolationModel)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)

    # Context managers close the file handles; the original
    # open(...).readlines() calls leaked them.
    with open(filepathReference, 'r') as refFile:
        linesRef = refFile.readlines()
    with open(filepath, 'r') as generatedFile:
        lines = generatedFile.readlines()
    self.assertListEqual(linesRef, lines)

    # Commented out for consistency with the sibling tests; the original
    # left this fail() active (unreachable only because of SkipTest).
    #self.fail("Test if the testcase is working.")
def test_write_1_2_0(self):
    """
    Tests for method `write`.

    Write the 1.2.0 reference parameters to a temporary .par file and
    compare it line by line with the stored reference file.
    """
    self.maxDiff = None

    title = "AlMgBulk5keV_version_1_2_0"
    simulationParametersRef = self.getSimulationParametersReference(title)

    filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    filepath = os.path.join(self.tempDataPath, "%s.par" % (title))

    simulationParameters = simulationParametersRef
    simulationParameters.version = copy.deepcopy(Version.VERSION_1_2_0)
    simulationParameters.write(filepath)

    # NOTE(review): simulationParameters aliases simulationParametersRef,
    # so the attribute assertions below are vacuous self-comparisons;
    # presumably the file was meant to be read back -- confirm intent.
    self.assertEqual(simulationParametersRef.baseFilename, simulationParameters.baseFilename)
    self.assertEqual(simulationParametersRef.numberElectrons, simulationParameters.numberElectrons)
    self.assertEqual(simulationParametersRef.numberPhotons, simulationParameters.numberPhotons)
    self.assertEqual(simulationParametersRef.numberWindows, simulationParameters.numberWindows)
    self.assertEqual(simulationParametersRef.numberFilmsX, simulationParameters.numberFilmsX)
    self.assertEqual(simulationParametersRef.numberFilmsY, simulationParameters.numberFilmsY)
    self.assertEqual(simulationParametersRef.numberFilmsZ, simulationParameters.numberFilmsZ)
    self.assertEqual(simulationParametersRef.numberChannels, simulationParameters.numberChannels)
    self.assertEqual(simulationParametersRef.energyChannelWidth_eV, simulationParameters.energyChannelWidth_eV)
    self.assertEqual(simulationParametersRef.spectrumInterpolationModel, simulationParameters.spectrumInterpolationModel)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)

    # Context managers close the file handles; the original
    # open(...).readlines() calls leaked them.
    with open(filepathReference, 'r') as refFile:
        linesRef = refFile.readlines()
    with open(filepath, 'r') as generatedFile:
        lines = generatedFile.readlines()
    self.assertListEqual(linesRef, lines)

    #self.fail("Test if the testcase is working.")
def test_write_1_2_1(self):
    """
    Tests for method `write`.

    Write the 1.2.1 reference parameters to a temporary .par file and
    compare it line by line with the stored reference file.
    """
    self.maxDiff = None

    title = "AlMgBulk5keV_version_1_2_1"
    simulationParametersRef = self.getSimulationParametersReference(title)

    filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    filepath = os.path.join(self.tempDataPath, "%s.par" % (title))

    simulationParameters = simulationParametersRef
    simulationParameters.version = copy.deepcopy(Version.VERSION_1_2_1)
    simulationParameters.write(filepath)

    # NOTE(review): simulationParameters aliases simulationParametersRef,
    # so the attribute assertions below are vacuous self-comparisons;
    # presumably the file was meant to be read back -- confirm intent.
    self.assertEqual(simulationParametersRef.baseFilename, simulationParameters.baseFilename)
    self.assertEqual(simulationParametersRef.numberElectrons, simulationParameters.numberElectrons)
    self.assertEqual(simulationParametersRef.numberPhotons, simulationParameters.numberPhotons)
    self.assertEqual(simulationParametersRef.numberWindows, simulationParameters.numberWindows)
    self.assertEqual(simulationParametersRef.numberFilmsX, simulationParameters.numberFilmsX)
    self.assertEqual(simulationParametersRef.numberFilmsY, simulationParameters.numberFilmsY)
    self.assertEqual(simulationParametersRef.numberFilmsZ, simulationParameters.numberFilmsZ)
    self.assertEqual(simulationParametersRef.numberChannels, simulationParameters.numberChannels)
    self.assertEqual(simulationParametersRef.energyChannelWidth_eV, simulationParameters.energyChannelWidth_eV)
    self.assertEqual(simulationParametersRef.spectrumInterpolationModel, simulationParameters.spectrumInterpolationModel)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)

    # Context managers close the file handles; the original
    # open(...).readlines() calls leaked them.
    with open(filepathReference, 'r') as refFile:
        linesRef = refFile.readlines()
    with open(filepath, 'r') as generatedFile:
        lines = generatedFile.readlines()
    self.assertListEqual(linesRef, lines)

    #self.fail("Test if the testcase is working.")
def test_write_1_4_1(self):
    """
    Tests for method `write`.

    Write the 1.4.1 reference parameters to a temporary .par file and
    compare it line by line with the stored reference file.
    """
    self.maxDiff = None

    title = "AlMgBulk5keV_version_1_4_1"
    simulationParametersRef = self.getSimulationParametersReference(title)

    filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    filepath = os.path.join(self.tempDataPath, "%s.par" % (title))

    simulationParameters = simulationParametersRef
    simulationParameters.version = copy.deepcopy(Version.VERSION_1_4_1)
    simulationParameters.write(filepath)

    # NOTE(review): simulationParameters aliases simulationParametersRef,
    # so the attribute assertions below are vacuous self-comparisons;
    # presumably the file was meant to be read back -- confirm intent.
    self.assertEqual(simulationParametersRef.baseFilename, simulationParameters.baseFilename)
    self.assertEqual(simulationParametersRef.numberElectrons, simulationParameters.numberElectrons)
    self.assertEqual(simulationParametersRef.numberPhotons, simulationParameters.numberPhotons)
    self.assertEqual(simulationParametersRef.numberWindows, simulationParameters.numberWindows)
    self.assertEqual(simulationParametersRef.numberFilmsX, simulationParameters.numberFilmsX)
    self.assertEqual(simulationParametersRef.numberFilmsY, simulationParameters.numberFilmsY)
    self.assertEqual(simulationParametersRef.numberFilmsZ, simulationParameters.numberFilmsZ)
    self.assertEqual(simulationParametersRef.numberChannels, simulationParameters.numberChannels)
    self.assertEqual(simulationParametersRef.energyChannelWidth_eV, simulationParameters.energyChannelWidth_eV)
    self.assertEqual(simulationParametersRef.spectrumInterpolationModel, simulationParameters.spectrumInterpolationModel)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)

    # Context managers close the file handles; the original
    # open(...).readlines() calls leaked them.
    with open(filepathReference, 'r') as refFile:
        linesRef = refFile.readlines()
    with open(filepath, 'r') as generatedFile:
        lines = generatedFile.readlines()
    self.assertListEqual(linesRef, lines)

    #self.fail("Test if the testcase is working.")
def test_write_1_4_4(self):
    """
    Tests for method `write`.

    Write the 1.4.4 reference parameters to a temporary .par file and
    compare it line by line with the stored reference file.  Version
    1.4.4 additionally carries the elastic cross section and energy
    loss scaling factors.
    """
    self.maxDiff = None

    title = "AlMgBulk5keV_version_1_4_4"
    simulationParametersRef = self.getSimulationParametersReference(title)

    filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.par" % (title)))
    filepath = os.path.join(self.tempDataPath, "%s.par" % (title))

    simulationParameters = simulationParametersRef
    simulationParameters.version = copy.deepcopy(Version.VERSION_1_4_4)
    simulationParameters.write(filepath)

    # NOTE(review): simulationParameters aliases simulationParametersRef,
    # so the attribute assertions below are vacuous self-comparisons;
    # presumably the file was meant to be read back -- confirm intent.
    self.assertEqual(simulationParametersRef.baseFilename, simulationParameters.baseFilename)
    self.assertEqual(simulationParametersRef.numberElectrons, simulationParameters.numberElectrons)
    self.assertEqual(simulationParametersRef.numberPhotons, simulationParameters.numberPhotons)
    self.assertEqual(simulationParametersRef.numberWindows, simulationParameters.numberWindows)
    self.assertEqual(simulationParametersRef.numberFilmsX, simulationParameters.numberFilmsX)
    self.assertEqual(simulationParametersRef.numberFilmsY, simulationParameters.numberFilmsY)
    self.assertEqual(simulationParametersRef.numberFilmsZ, simulationParameters.numberFilmsZ)
    self.assertEqual(simulationParametersRef.numberChannels, simulationParameters.numberChannels)
    self.assertEqual(simulationParametersRef.energyChannelWidth_eV, simulationParameters.energyChannelWidth_eV)
    self.assertEqual(simulationParametersRef.spectrumInterpolationModel, simulationParameters.spectrumInterpolationModel)
    self.assertEqual(simulationParametersRef.voxelSimplification, simulationParameters.voxelSimplification, title)
    # Fields introduced in version 1.4.4.
    self.assertEqual(simulationParametersRef.elasticCrossSectionScalingFactor, simulationParameters.elasticCrossSectionScalingFactor)
    self.assertEqual(simulationParametersRef.energyLossScalingFactor, simulationParameters.energyLossScalingFactor)
    self.assertEqual(simulationParametersRef.version.major, simulationParameters.version.major)
    self.assertEqual(simulationParametersRef.version.minor, simulationParameters.version.minor)
    self.assertEqual(simulationParametersRef.version.revision, simulationParameters.version.revision)
    self.assertEqual(simulationParametersRef.version, simulationParameters.version)

    # Context managers close the file handles; the original
    # open(...).readlines() calls leaked them.
    with open(filepathReference, 'r') as refFile:
        linesRef = refFile.readlines()
    with open(filepath, 'r') as generatedFile:
        lines = generatedFile.readlines()
    self.assertListEqual(linesRef, lines)

    #self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
    # Run this module's tests with nose when executed directly.
    import nose
    nose.runmodule()
|
|
from typing import ClassVar, FrozenSet
from ..config import Config
from .dependency import IngressClassesDependency, SecretDependency, ServiceDependency
from .k8sobject import KubernetesGVK, KubernetesObject
from .k8sprocessor import ManagedKubernetesProcessor
from .resource import NormalizedResource, ResourceManager
class IngressClassProcessor (ManagedKubernetesProcessor):
    """Collects the names of IngressClass resources that are claimed by
    Ambassador's ingress controller, so that IngressProcessor can later
    decide which Ingress resources belong to us.
    """

    # Controller value an IngressClass must declare for us to claim it.
    CONTROLLER: ClassVar[str] = 'getambassador.io/ingress-controller'

    ingress_classes_dep: IngressClassesDependency

    def __init__(self, manager: ResourceManager) -> None:
        super().__init__(manager)
        # Register as the provider of the ingress-class dependency.
        self.ingress_classes_dep = self.deps.provide(IngressClassesDependency)

    def kinds(self) -> FrozenSet[KubernetesGVK]:
        """Return the Kubernetes group/version/kinds handled here."""
        handled = (
            KubernetesGVK('networking.k8s.io/v1beta1', 'IngressClass'),
            KubernetesGVK('networking.k8s.io/v1', 'IngressClass'),
        )
        return frozenset(handled)

    def _process(self, obj: KubernetesObject) -> None:
        """Record *obj*'s name if it is an IngressClass we control."""
        # We only want to deal with IngressClasses that belong to
        # "spec.controller: getambassador.io/ingress-controller".
        controller = obj.spec.get('controller', '').lower()
        if controller != self.CONTROLLER:
            self.logger.debug(f'ignoring IngressClass {obj.name} without controller - getambassador.io/ingress-controller')
            return

        # Skip resources scoped to a different Ambassador installation.
        if obj.ambassador_id != Config.ambassador_id:
            self.logger.debug(f'IngressClass {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...')
            return

        # TODO: Do we intend to use this parameter in any way?
        # `parameters` is of type TypedLocalObjectReference,
        # meaning it links to another k8s resource in the same namespace.
        # https://godoc.org/k8s.io/api/core/v1#TypedLocalObjectReference
        #
        # In this case, the resource referenced by TypedLocalObjectReference
        # should not be namespaced, as IngressClass is a non-namespaced resource.
        #
        # It was designed to reference a CRD for this specific ingress-controller
        # implementation... although usage is optional and not prescribed.
        parameters = obj.spec.get('parameters', {})
        self.logger.debug(f'Handling IngressClass {obj.name} with parameters {parameters}...')
        self.aconf.incr_count('k8s_ingress_class')

        # Don't emit this directly. We use it when we handle ingresses below. If
        # we want to use the parameters, we should add them to this dependency
        # type.
        self.ingress_classes_dep.ingress_classes.add(obj.name)
class IngressProcessor (ManagedKubernetesProcessor):
    """Translates Kubernetes Ingress resources claimed by Ambassador into
    Host and Mapping resources, and keeps the Ingress status in sync with
    the Ambassador service's load-balancer status.
    """

    service_dep: ServiceDependency
    ingress_classes_dep: IngressClassesDependency

    def __init__(self, manager: ResourceManager) -> None:
        super().__init__(manager)
        # Secrets must be processed before we emit Hosts referencing them.
        self.deps.want(SecretDependency)
        self.service_dep = self.deps.want(ServiceDependency)
        self.ingress_classes_dep = self.deps.want(IngressClassesDependency)

    def kinds(self) -> FrozenSet[KubernetesGVK]:
        """Return the Kubernetes group/version/kinds handled here."""
        return frozenset([
            KubernetesGVK('extensions/v1beta1', 'Ingress'),
            KubernetesGVK('networking.k8s.io/v1beta1', 'Ingress'),
            KubernetesGVK('networking.k8s.io/v1', 'Ingress'),
        ])

    def _update_status(self, obj: KubernetesObject) -> None:
        """Queue a status update for *obj* so its load-balancer status
        mirrors the Ambassador service's status.  No-op (with a debug log)
        when the statuses already agree.
        """
        service_status = None

        if not self.service_dep.ambassador_service or not self.service_dep.ambassador_service.name:
            self.logger.error(f"Unable to set Ingress {obj.name}'s load balancer, could not find Ambassador service")
        else:
            service_status = self.service_dep.ambassador_service.status

        if obj.status != service_status:
            if service_status:
                status_update = (obj.gvk.kind, obj.namespace, service_status)
                self.logger.debug(f"Updating Ingress {obj.name} status to {status_update}")
                # Actual write happens later via the aconf status-update queue.
                self.aconf.k8s_status_updates[f'{obj.name}.{obj.namespace}'] = status_update
        else:
            self.logger.debug(f"Not reconciling Ingress {obj.name}: observed and current statuses are in sync")

    def _process(self, obj: KubernetesObject) -> None:
        """Process one Ingress: emit Hosts for its TLS entries, Mappings for
        its default backend and rules, then arrange a status update.
        """
        ingress_class_name = obj.spec.get('ingressClassName', '')

        has_ingress_class = ingress_class_name in self.ingress_classes_dep.ingress_classes
        has_ambassador_ingress_class_annotation = obj.annotations.get('kubernetes.io/ingress.class', '').lower() == 'ambassador'

        # check the Ingress resource has either:
        #  - a `kubernetes.io/ingress.class: "ambassador"` annotation
        #  - a `spec.ingressClassName` that references an IngressClass with
        #    `spec.controller: getambassador.io/ingress-controller`
        #
        # also worth noting, the kube-apiserver might assign the `spec.ingressClassName` if unspecified
        # and only 1 IngressClass has the following annotation:
        #   annotations:
        #     ingressclass.kubernetes.io/is-default-class: "true"
        if not (has_ingress_class or has_ambassador_ingress_class_annotation):
            self.logger.debug(f'ignoring Ingress {obj.name} without annotation (kubernetes.io/ingress.class: "ambassador") or IngressClass controller (getambassador.io/ingress-controller)')
            return

        # We don't want to deal with non-matching Ambassador IDs
        if obj.ambassador_id != Config.ambassador_id:
            self.logger.debug(f"Ingress {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...")
            return

        self.logger.debug(f"Handling Ingress {obj.name}...")
        self.aconf.incr_count('k8s_ingress')

        # Emit one Host per (tls entry, host) pair that names a secret.
        ingress_tls = obj.spec.get('tls', [])
        for tls_count, tls in enumerate(ingress_tls):
            tls_secret = tls.get('secretName', None)

            if tls_secret is not None:
                # Missing `hosts` defaults to the wildcard host.
                for host_count, host in enumerate(tls.get('hosts', ['*'])):
                    tls_unique_identifier = f"{obj.name}-{tls_count}-{host_count}"

                    spec = {
                        'ambassador_id': [obj.ambassador_id],
                        'hostname': host,
                        'acmeProvider': {
                            'authority': 'none'
                        },
                        'tlsSecret': {
                            'name': tls_secret
                        },
                        'requestPolicy': {
                            'insecure': {
                                'action': 'Route'
                            }
                        }
                    }

                    ingress_host = NormalizedResource.from_data(
                        'Host',
                        tls_unique_identifier,
                        namespace=obj.namespace,
                        labels=obj.labels,
                        spec=spec,
                    )

                    self.logger.debug(f"Generated Host from ingress {obj.name}: {ingress_host}")
                    self.manager.emit(ingress_host)

        # parse ingress.spec.defaultBackend
        # using ingress.spec.backend as a fallback, for older versions of the Ingress resource.
        default_backend = obj.spec.get('defaultBackend', obj.spec.get('backend', {}))
        db_service_name = default_backend.get('serviceName', None)
        db_service_port = default_backend.get('servicePort', None)
        if db_service_name is not None and db_service_port is not None:
            # Catch-all Mapping at '/' for the default backend.
            db_mapping_identifier = f"{obj.name}-default-backend"

            default_backend_mapping = NormalizedResource.from_data(
                'Mapping',
                db_mapping_identifier,
                namespace=obj.namespace,
                labels=obj.labels,
                spec={
                    'ambassador_id': obj.ambassador_id,
                    'prefix': '/',
                    'service': f'{db_service_name}.{obj.namespace}:{db_service_port}'
                },
            )

            self.logger.debug(f"Generated mapping from Ingress {obj.name}: {default_backend_mapping}")
            self.manager.emit(default_backend_mapping)

        # parse ingress.spec.rules
        ingress_rules = obj.spec.get('rules', [])
        for rule_count, rule in enumerate(ingress_rules):
            rule_http = rule.get('http', {})
            rule_host = rule.get('host', None)

            http_paths = rule_http.get('paths', [])
            for path_count, path in enumerate(http_paths):
                path_backend = path.get('backend', {})
                path_type = path.get('pathType', 'ImplementationSpecific')

                service_name = path_backend.get('serviceName', None)
                service_port = path_backend.get('servicePort', None)
                path_location = path.get('path', '/')

                # Skip rules without a complete backend definition.
                if not service_name or not service_port or not path_location:
                    continue

                unique_suffix = f"{rule_count}-{path_count}"
                mapping_identifier = f"{obj.name}-{unique_suffix}"

                # For cases where `pathType: Exact`,
                # otherwise `Prefix` and `ImplementationSpecific` are handled as regular Mapping prefixes
                is_exact_prefix = True if path_type == 'Exact' else False

                spec = {
                    'ambassador_id': obj.ambassador_id,
                    'prefix': path_location,
                    'prefix_exact': is_exact_prefix,
                    'precedence': 1 if is_exact_prefix else 0,  # Make sure exact paths are evaluated before prefix
                    'service': f'{service_name}.{obj.namespace}:{service_port}'
                }

                if rule_host is not None:
                    if rule_host.startswith('*.'):
                        # Ingress allow specifying hosts with a single wildcard as the first label in the hostname.
                        # Transform the rule_host into a host_regex:
                        # *.star.com becomes ^[a-z0-9]([-a-z0-9]*[a-z0-9])?\.star\.com$
                        spec['host'] = rule_host\
                            .replace('.', '\\.')\
                            .replace('*', '^[a-z0-9]([-a-z0-9]*[a-z0-9])?', 1) + '$'
                        spec['host_regex'] = True
                    else:
                        spec['host'] = rule_host

                path_mapping = NormalizedResource.from_data(
                    'Mapping',
                    mapping_identifier,
                    namespace=obj.namespace,
                    labels=obj.labels,
                    spec=spec,
                )

                self.logger.debug(f"Generated mapping from Ingress {obj.name}: {path_mapping}")
                self.manager.emit(path_mapping)

        # let's make arrangements to update Ingress' status now
        self._update_status(obj)

        # Let's see if our Ingress resource has Ambassador annotations on it
        self.manager.emit_annotated(NormalizedResource.from_kubernetes_object_annotation(obj))
|
|
""" Module for visualizer controller.
"""
import os
import imp
import gtk
import cairo
import json
import hashlib
import traceback
import settings
from gi.repository import Gtk
from views.visualizer import MainFrame
from views.network_view import NetworkView
from plots.base_plot import REGISTERED_PLOTS
class VisualizerController(object):
""" Controller class for visualizer.
Handles adding, removing plots, loading, saving of models and layouts.
"""
def __init__(self, sim_manager, model_file_name=None):
""" Initializes simulator manager, and lists for plots.
Instantiates main frame as well.
Then if a model file name was provided,
load that model file name. """
self.sim_manager = sim_manager
self.network_view = NetworkView(self)
self.dt = settings.SIMULATOR_DEFAULT_DELTA_TIME
self.registered = []
self.plots = []
self.load_plots()
self._has_network = False
self._loaded_model_file = None
# instantiates main visualizer frame
self.main_frame = MainFrame(self.sim_manager, self)
if (model_file_name):
self.load_model_from_filename(model_file_name, load_layout=True)
def plots_for_object(self, obj):
""" Gets list of capabilities of nodes in plot object.
For each capability and for each registered plot,
if the plot supports that type of capability,
append to list of available plots.
Returns the list of plots available for this object.
"""
supported_plots = []
node_caps = self.sim_manager.get_caps_for_obj(obj)
for cap in node_caps:
for vz in self.registered:
if vz.supports_cap(cap):
supported_plots.append((vz, obj, cap))
return supported_plots
def add_plot_for_obj(self, plt, obj, cap, config=None, position=None,
size=None):
""" Instantiates plot object, set its config values if
config is provided, then appends to list of plots.
Calls the connect_to_obj method to set up simulator manager
and plot object. Then finally calls show_plot to set up
plot in visualizer frame.
"""
plot = plt(self, obj, cap)
plot.set_config_values(config)
self.plots.append(plot)
self.sim_manager.connect_to_obj(obj, cap, plot.update)
self.main_frame.show_plot(plot, False, position, size)
def remove_plot_for_obj(self, plot, obj, cap):
""" Disconnects object from simulator manager,
calls remove method on plot to remove from visualizer
and list of plots.
"""
self.sim_manager.disconnect_from_obj(obj, cap, plot.update)
self.plots.remove(plot)
self.main_frame.remove_plot(plot)
def on_quit(self):
""" Event handler for when program quits
saves a layout file in json format of data from
get_layout_dict(), which contains layout data.
"""
if self._loaded_model_file:
filename = os.path.splitext(self._loaded_model_file)[0] + \
settings.LAYOUT_FILE_EXTENSION
with open(filename, 'wb') as f:
json.dump(self.get_layout_dict(), f)
def on_open_model(self, widget):
""" Event handler for when open model menu item is clicked.
"""
filename = self.file_open(ext=settings.PYTHON_FILE_EXTENSION,
ext_name=settings.PYTHON_FILE_EXTENSION_NAME)
if not filename:
return
self.load_model_from_filename(filename, load_layout=True)
def restore_layout_dict(self, dct, load_model=False):
""" Given data from layout dictionary, load model file.
Loop through all plot objects in layout dictionary,
add the plot objects, then calls the restore method
on network view.
Throws:
ValueError - dct could not be loaded.
"""
layout_dict = dct['layout']
# Restore model file
if load_model:
self.load_model_from_filename(layout_dict['model'],
load_layout=False)
# Restore plots
for plot_dict in layout_dict['plots']:
target_obj = self.get_nengo_for_uid(plot_dict['target_obj'])
target_cap_name = plot_dict['target_cap']
target_cap = None
for cap in self.sim_manager.get_caps_for_obj(target_obj):
if cap.name == target_cap_name:
target_cap = cap
if not target_cap:
raise ValueError("No capability for nengo object: " +
target_obj + " with name: " + target_cap_name)
plot_type = plot_dict['plot_type']
for plot_cls in self.registered:
if plot_type == plot_cls.__name__:
self.add_plot_for_obj(plot_cls, target_obj,
target_cap, plot_dict['config'],
plot_dict['position'],
plot_dict['size'])
break
else:
# loop exited without break
raise ValueError("No plot:" + plot_type + "for nengo object: "
+ target_obj + " with name: " +
target_cap_name)
# Restore network
net_dict = layout_dict['network']
self.main_frame.set_item_position(self.network_view,
net_dict['position'])
self.main_frame.set_item_size(self.network_view, net_dict['size'])
self.network_view.restore_layout(net_dict['network_layout'])
def get_layout_dict(self):
""" Saves plot data, layout data and network layout data into a json
data.
Returns json data object.
"""
layout_dict = {}
# Save model file
layout_dict['model'] = self._loaded_model_file
# Save plots
layout_dict['plots'] = []
for plot in self.plots:
plot_dict = {}
plot_dict['plot_type'] = plot.__class__.__name__
plot_dict['target_obj'] = self.get_uid_for_nengo(plot.nengo_obj)
plot_dict['target_cap'] = plot.capability.name
plot_dict['position'] = self.main_frame.get_item_position(plot)
plot_dict['size'] = self.main_frame.get_item_size(plot)
plot_dict['config'] = plot.get_config_values()
layout_dict['plots'].append(plot_dict)
# Save network
net_dict = {}
net_dict['position'] = \
self.main_frame.get_item_position(self.network_view)
net_dict['size'] = self.main_frame.get_item_size(self.network_view)
net_dict['network_layout'] = self.network_view.store_layout()
layout_dict['network'] = net_dict
return {'layout': layout_dict}
def get_uid_for_nengo(self, nengo_obj):
""" Returns a consistent unique id for the given nengo object.
"""
# Let's just use the network view's method right now,
# since that seems to be working great
return self.network_view.get_name_from_obj(nengo_obj)
def get_nengo_for_uid(self, uid):
""" Returns a nengo object for a given uid.
"""
return self.network_view.get_obj_from_name(uid)
def load_model_from_filename(self, filename, load_layout=False):
""" Tries to load model from a filename.
If successful, load a layout file with the associated model
file.
"""
mod_name, file_ext = os.path.splitext(os.path.basename(filename))
try:
module = imp.load_source(mod_name, filename)
self.load_model(module.model)
self._loaded_model_file = filename
if (not self._has_network):
self.main_frame.show_plot(self.network_view, True)
self._has_network = True
self.main_frame.controller_panel.enable_controls()
except (AttributeError, ImportError, IOError, SyntaxError) as e:
print e
self.show_err_dialog("Error loading model",
"Could not load model from " + str(filename))
if load_layout:
try:
layout_file = \
os.path.splitext(self._loaded_model_file)[0] + '.bpwn'
if not layout_file:
return
with open(layout_file, 'rb') as f:
self.restore_layout_dict(json.load(f))
except Exception as e:
# If the layout file isn't there, don't bug user...
if type(e) is not IOError:
traceback.print_exc()
self.show_err_dialog("Error loading layout for model",
"Could not load layout from " +
str(layout_file))
self.load_model_from_filename(filename, load_layout=False)
self.network_view.init_default_config()
def show_err_dialog(self, message, secondary):
""" Show an error message in a dialog.
"""
dialog = Gtk.MessageDialog(self.main_frame.window, 0,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK, message)
dialog.format_secondary_text(secondary)
dialog.run()
dialog.destroy()
def set_nengo_obj_id(self, nengo_obj):
""" Creates an id for a given nengo obj.
We did notice that NengoObject has a key property,
however we required an id would give us reasonable behavior
given a model with minor changes (ie somewhat resistant to
reordering, addition, removal of nodes). Also we use md5
because of its collision resistance.
NOTE: We identify a nengo object given its label, class,
and output dimensions. These are not necessarily unique,
and in the case where they are not (ie objects are poorly
labeled), we have undefined behavior.
"""
obj_id = hashlib.md5()
obj_id.update(nengo_obj.label)
obj_id.update(nengo_obj.__class__.__name__)
obj_caps = self.sim_manager.get_caps_for_obj(nengo_obj)
for cap in obj_caps:
obj_id.update(cap.get_out_dimensions(nengo_obj))
nengo_obj.id = obj_id.hexdigest()
def load_model(self, model):
""" Removes current models, and resets current visualizer state.
Loads new model in network view and simulator manager.
"""
copy_plots = self.plots[:]
for plt in copy_plots:
plt.remove_plot(None, None)
self.plots = []
if self.sim_manager.current_step > 0:
self.main_frame.reset_button(None) # a little hacky, but hey
self.model = model
for nengo_obj in model.objs:
self.set_nengo_obj_id(nengo_obj)
self.main_frame.window.set_title("Nengo Visualizer - " + model.label)
self.network_view.load_model(model)
self.sim_manager.load_new_model(model, self.dt)
def load_plots(self):
""" Loads all plot files by looking for python file extension
in plot directory.
"""
plots_dir = os.path.join(os.path.dirname(__file__), "../plots/")
for name in os.listdir(plots_dir):
if name.endswith(".py") and not name.startswith("_"):
self.load_module(os.path.join(plots_dir, name))
self.registered = REGISTERED_PLOTS.values()
def load_module(self, filename):
""" Loads a module by file name and returns module object.
"""
mod_name, ext = os.path.splitext(os.path.basename(filename))
return imp.load_source(mod_name, filename)
def on_layout_button_release(self, widget, event):
""" Event handler for when export to pdf menu item is
activated in right click context menu for plots.
Returns true if event is handled here, false otherwise.
"""
if event.button == settings.EVENT_BUTTON_RIGHT_CLICK:
export_pdf_item = gtk.MenuItem("Export to PDF...")
export_pdf_item.connect("activate", self.on_export_pdf, widget)
export_pdf_item.show()
self.layout_context_menu = gtk.Menu()
self.layout_context_menu.append(export_pdf_item)
self.layout_context_menu.popup(None, None, None, None,
event.button, event.time)
return True
return False
def on_export_pdf(self, event_widget, widget, name=None):
""" Opens file to write pdf.
Creates a pdf surface with file handler, then draws to pdf surface and
close file.
"""
if not name:
name = self.main_frame.window.get_title()
filename = self.file_save(name + ".pdf")
if not filename:
return
with open(filename, "wb") as f:
allocation = widget.get_allocation()
cr = cairo.Context(cairo.PDFSurface(f, allocation.width,
allocation.height))
try:
widget.on_draw_event(None, cr)
except AttributeError:
widget.draw(cr)
cr.show_page()
cr.get_target().finish()
def file_open(self, ext="", ext_name=""):
""" Calls _file_browse which opens a file browser with open button.
Returns result.
"""
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK)
return self._file_browse(gtk.FILE_CHOOSER_ACTION_OPEN,
buttons, "", ext, ext_name)
def file_save(self, name="", ext="", ext_name=""):
""" Calls _file_browse which opens a file browser with save button.
Returns result.
"""
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK)
return self._file_browse(gtk.FILE_CHOOSER_ACTION_SAVE,
buttons, name, ext, ext_name)
    def _file_browse(self, action, buttons, name="", ext="", ext_name=""):
        """Run a modal gtk file-chooser dialog and return the chosen path.

        :param action: gtk chooser action (open or save).
        :param buttons: button-label/response-id pairs for the dialog.
        :param name: default file name (used in save mode only).
        :param ext: extension (without the dot) used to build a file filter;
            use a * for a wildcard filter.
        :param ext_name: human-readable name shown for the extension filter.
        :returns: the selected filename, or '' if the dialog was cancelled.
        """
        dialog = gtk.FileChooserDialog(title="Select File",
                                       action=action, buttons=buttons)
        dialog.set_current_folder(os.getcwd())
        dialog.set_do_overwrite_confirmation(True)
        if action == gtk.FILE_CHOOSER_ACTION_SAVE:
            dialog.set_current_name(name)
        if ext:
            # Filter for the requested extension, e.g. "*.pdf".
            filt = gtk.FileFilter()
            filt.set_name(ext_name if ext_name else ext)
            filt.add_pattern("*." + ext)
            dialog.add_filter(filt)
            # Also offer an "All files" fallback filter.
            filt = gtk.FileFilter()
            filt.set_name("All files")
            filt.add_pattern("*")
            dialog.add_filter(filt)
        result = ""
        if dialog.run() == gtk.RESPONSE_OK:
            result = dialog.get_filename()
        dialog.destroy()
        return result
|
|
"""Stupid tests that ensure logging works as expected"""
import sys
import threading
import logging as log
from io import StringIO
import unittest
import beets.logging as blog
from beets import plugins, ui
import beetsplug
from test import _common
from test._common import TestCase
from test import helper
class LoggingTest(TestCase):
    """Sanity checks for beets' logger factory vs. stdlib logging."""

    def test_logging_management(self):
        # A logger created through the stdlib first stays a plain stdlib Logger.
        std_first = log.getLogger("foo123")
        beets_view = blog.getLogger("foo123")
        self.assertEqual(std_first, beets_view)
        self.assertEqual(std_first.__class__, log.Logger)

        # A logger created through beets first becomes a BeetsLogger.
        beets_first = blog.getLogger("bar123")
        std_view = log.getLogger("bar123")
        self.assertEqual(beets_first, std_view)
        self.assertEqual(beets_first.__class__, blog.BeetsLogger)
        self.assertIsInstance(beets_first, (blog.StrFormatLogger,
                                            blog.ThreadLocalLevelLogger))

        # Children inherit the beets logger class.
        child = beets_first.getChild("shalala")
        self.assertEqual(child.__class__, blog.BeetsLogger)

        # The root beets logger is distinct from any named logger.
        root_logger = blog.getLogger()
        self.assertNotEqual(std_first, root_logger)

    def test_str_format_logging(self):
        logger = blog.getLogger("baz123")
        stream = StringIO()
        handler = log.StreamHandler(stream)
        logger.addHandler(handler)
        logger.propagate = False
        # BeetsLogger supports str.format-style args and kwargs.
        logger.warning("foo {0} {bar}", "oof", bar="baz")
        handler.flush()
        self.assertTrue(stream.getvalue(), "foo oof baz")
class LoggingLevelTest(unittest.TestCase, helper.TestHelper):
    """Verify that the plugin log level follows the `verbose` config setting
    for commands, event listeners and import stages."""

    class DummyModule:
        class DummyPlugin(plugins.BeetsPlugin):
            def __init__(self):
                plugins.BeetsPlugin.__init__(self, 'dummy')
                self.import_stages = [self.import_stage]
                self.register_listener('dummy_event', self.listener)

            def log_all(self, name):
                # Emit one record at each level so the tests can probe
                # which levels survive filtering.
                self._log.debug('debug ' + name)
                self._log.info('info ' + name)
                self._log.warning('warning ' + name)

            def commands(self):
                cmd = ui.Subcommand('dummy')
                cmd.func = lambda _, __, ___: self.log_all('cmd')
                return (cmd,)

            def import_stage(self, session, task):
                self.log_all('import_stage')

            def listener(self):
                self.log_all('listener')

    def setUp(self):
        # Install DummyModule as a fake `beetsplug.dummy` plugin module.
        sys.modules['beetsplug.dummy'] = self.DummyModule
        beetsplug.dummy = self.DummyModule
        self.setup_beets()
        self.load_plugins('dummy')

    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
        del beetsplug.dummy
        sys.modules.pop('beetsplug.dummy')
        # Reset class-level listener registries so later tests start clean.
        self.DummyModule.DummyPlugin.listeners = None
        self.DummyModule.DummyPlugin._raw_listeners = None

    # Commands log at INFO by default; DEBUG only appears with verbose >= 1.
    def test_command_level0(self):
        self.config['verbose'] = 0
        with helper.capture_log() as logs:
            self.run_command('dummy')
        self.assertIn('dummy: warning cmd', logs)
        self.assertIn('dummy: info cmd', logs)
        self.assertNotIn('dummy: debug cmd', logs)

    def test_command_level1(self):
        self.config['verbose'] = 1
        with helper.capture_log() as logs:
            self.run_command('dummy')
        self.assertIn('dummy: warning cmd', logs)
        self.assertIn('dummy: info cmd', logs)
        self.assertIn('dummy: debug cmd', logs)

    def test_command_level2(self):
        self.config['verbose'] = 2
        with helper.capture_log() as logs:
            self.run_command('dummy')
        self.assertIn('dummy: warning cmd', logs)
        self.assertIn('dummy: info cmd', logs)
        self.assertIn('dummy: debug cmd', logs)

    # Listeners log at WARNING by default; INFO from verbose >= 1,
    # DEBUG from verbose >= 2.
    def test_listener_level0(self):
        self.config['verbose'] = 0
        with helper.capture_log() as logs:
            plugins.send('dummy_event')
        self.assertIn('dummy: warning listener', logs)
        self.assertNotIn('dummy: info listener', logs)
        self.assertNotIn('dummy: debug listener', logs)

    def test_listener_level1(self):
        self.config['verbose'] = 1
        with helper.capture_log() as logs:
            plugins.send('dummy_event')
        self.assertIn('dummy: warning listener', logs)
        self.assertIn('dummy: info listener', logs)
        self.assertNotIn('dummy: debug listener', logs)

    def test_listener_level2(self):
        self.config['verbose'] = 2
        with helper.capture_log() as logs:
            plugins.send('dummy_event')
        self.assertIn('dummy: warning listener', logs)
        self.assertIn('dummy: info listener', logs)
        self.assertIn('dummy: debug listener', logs)

    # Import stages behave like listeners: WARNING by default, INFO from
    # verbose >= 1, DEBUG from verbose >= 2.
    def test_import_stage_level0(self):
        self.config['verbose'] = 0
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn('dummy: warning import_stage', logs)
        self.assertNotIn('dummy: info import_stage', logs)
        self.assertNotIn('dummy: debug import_stage', logs)

    def test_import_stage_level1(self):
        self.config['verbose'] = 1
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn('dummy: warning import_stage', logs)
        self.assertIn('dummy: info import_stage', logs)
        self.assertNotIn('dummy: debug import_stage', logs)

    def test_import_stage_level2(self):
        self.config['verbose'] = 2
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn('dummy: warning import_stage', logs)
        self.assertIn('dummy: info import_stage', logs)
        self.assertIn('dummy: debug import_stage', logs)
@_common.slow_test()
class ConcurrentEventsTest(TestCase, helper.TestHelper):
    """Similar to LoggingLevelTest but lower-level and focused on multiple
    events interaction. Since this is a bit heavy we don't do it in
    LoggingLevelTest.
    """

    class DummyPlugin(plugins.BeetsPlugin):
        def __init__(self, test_case):
            plugins.BeetsPlugin.__init__(self, 'dummy')
            self.register_listener('dummy_event1', self.listener1)
            self.register_listener('dummy_event2', self.listener2)
            # Each listener blocks on its own lock so the test can hold
            # both "events" in flight at once.
            self.lock1 = threading.Lock()
            self.lock2 = threading.Lock()
            self.test_case = test_case
            # Assertion failures inside worker threads are stashed here and
            # re-raised by the main thread (see check_dp_exc below).
            self.exc = None
            # Progress markers: 0 = not started, 1 = before lock, 2 = done.
            self.t1_step = self.t2_step = 0

        def log_all(self, name):
            self._log.debug('debug ' + name)
            self._log.info('info ' + name)
            self._log.warning('warning ' + name)

        def listener1(self):
            try:
                # The level seen before and after blocking must be the one
                # in effect when this listener was dispatched (INFO).
                self.test_case.assertEqual(self._log.level, log.INFO)
                self.t1_step = 1
                self.lock1.acquire()
                self.test_case.assertEqual(self._log.level, log.INFO)
                self.t1_step = 2
            except Exception as e:
                self.exc = e

        def listener2(self):
            try:
                # Dispatched at verbose=2, so this thread must see DEBUG
                # even while listener1 concurrently sees INFO.
                self.test_case.assertEqual(self._log.level, log.DEBUG)
                self.t2_step = 1
                self.lock2.acquire()
                self.test_case.assertEqual(self._log.level, log.DEBUG)
                self.t2_step = 2
            except Exception as e:
                self.exc = e

    def setUp(self):
        self.setup_beets(disk=True)

    def tearDown(self):
        self.teardown_beets()

    def test_concurrent_events(self):
        dp = self.DummyPlugin(self)

        def check_dp_exc():
            # Re-raise any assertion failure captured in a worker thread.
            if dp.exc:
                raise dp.exc

        try:
            dp.lock1.acquire()
            dp.lock2.acquire()
            self.assertEqual(dp._log.level, log.NOTSET)

            self.config['verbose'] = 1
            t1 = threading.Thread(target=dp.listeners['dummy_event1'][0])
            t1.start()  # blocked. t1 tested its log level
            while dp.t1_step != 1:
                check_dp_exc()
            self.assertTrue(t1.is_alive())
            # The main thread's view of the level is unchanged.
            self.assertEqual(dp._log.level, log.NOTSET)

            self.config['verbose'] = 2
            t2 = threading.Thread(target=dp.listeners['dummy_event2'][0])
            t2.start()  # blocked. t2 tested its log level
            while dp.t2_step != 1:
                check_dp_exc()
            self.assertTrue(t2.is_alive())
            self.assertEqual(dp._log.level, log.NOTSET)

            dp.lock1.release()  # dummy_event1 tests its log level + finishes
            while dp.t1_step != 2:
                check_dp_exc()
            t1.join(.1)
            self.assertFalse(t1.is_alive())
            self.assertTrue(t2.is_alive())
            self.assertEqual(dp._log.level, log.NOTSET)

            dp.lock2.release()  # dummy_event2 tests its log level + finishes
            while dp.t2_step != 2:
                check_dp_exc()
            t2.join(.1)
            self.assertFalse(t2.is_alive())
        except Exception:
            # Release the locks so the blocked threads can exit and the
            # test process doesn't hang after a failure.
            print("Alive threads:", threading.enumerate())
            if dp.lock1.locked():
                print("Releasing lock1 after exception in test")
                dp.lock1.release()
            if dp.lock2.locked():
                print("Releasing lock2 after exception in test")
                dp.lock2.release()
            print("Alive threads:", threading.enumerate())
            raise

    def test_root_logger_levels(self):
        """Root logger level should be shared between threads.
        """
        self.config['threaded'] = True

        blog.getLogger('beets').set_global_level(blog.WARNING)
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertEqual(logs, [])

        blog.getLogger('beets').set_global_level(blog.INFO)
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        for l in logs:
            self.assertIn("import", l)
            self.assertIn("album", l)

        blog.getLogger('beets').set_global_level(blog.DEBUG)
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn("Sending event: database_change", logs)
def suite():
    """Build the test suite for this module (used by the CLI entry point)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly (e.g. `python test_logging.py`).
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
|
# Generated by Django 3.1.3 on 2020-12-08 17:56
import django.db.models.deletion
import netfields.fields
from django.db import migrations, models
import peering.fields
import peeringdb.models
import utils.validators
class Migration(migrations.Migration):
    """Rebuild the local PeeringDB cache schema.

    Drops the legacy Contact/PeerRecord/Prefix models and creates the full
    set of PeeringDB object models (Facility, InternetExchange, IXLan,
    IXLanPrefix, NetworkContact, NetworkFacility, Organization, ...),
    then extends/alters the existing Network and NetworkIXLan models to
    mirror the upstream PeeringDB field set.

    Auto-generated by Django 3.1.3 (makemigrations); do not hand-edit the
    operation order -- later AddField/AlterField operations depend on the
    models created above them.
    """

    dependencies = [
        ("peering", "0065_auto_20201025_2137"),
        ("peeringdb", "0013_auto_20201207_2233"),
    ]

    operations = [
        # Remove the legacy local-cache models.
        migrations.DeleteModel(
            name="Contact",
        ),
        migrations.DeleteModel(
            name="PeerRecord",
        ),
        migrations.DeleteModel(
            name="Prefix",
        ),
        # New PeeringDB object models.
        migrations.CreateModel(
            name="Facility",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("address1", models.CharField(blank=True, max_length=255)),
                ("address2", models.CharField(blank=True, max_length=255)),
                ("city", models.CharField(blank=True, max_length=255)),
                ("state", models.CharField(blank=True, max_length=255)),
                ("zipcode", models.CharField(blank=True, max_length=48)),
                ("country", models.CharField(blank=True, max_length=7)),
                (
                    "latitude",
                    models.DecimalField(
                        blank=True, decimal_places=6, max_digits=9, null=True
                    ),
                ),
                (
                    "longitude",
                    models.DecimalField(
                        blank=True, decimal_places=6, max_digits=9, null=True
                    ),
                ),
                ("name", models.CharField(max_length=255, unique=True)),
                ("website", peeringdb.models.URLField(blank=True, max_length=255)),
                ("clli", models.CharField(blank=True, max_length=18)),
                ("rencode", models.CharField(blank=True, max_length=18)),
                ("npanxx", models.CharField(blank=True, max_length=21)),
                ("tech_email", models.EmailField(blank=True, max_length=254)),
                ("tech_phone", models.CharField(blank=True, max_length=192)),
                ("sales_email", models.EmailField(blank=True, max_length=254)),
                ("sales_phone", models.CharField(blank=True, max_length=192)),
                ("notes", models.TextField(blank=True)),
            ],
            options={
                "verbose_name_plural": "facilities",
            },
        ),
        migrations.CreateModel(
            name="InternetExchange",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=64, unique=True)),
                ("name_long", models.CharField(blank=True, max_length=254)),
                ("city", models.CharField(max_length=192)),
                ("country", models.CharField(blank=True, max_length=7)),
                ("notes", models.TextField(blank=True)),
                (
                    "region_continent",
                    models.CharField(
                        choices=[
                            ("North America", "North America"),
                            ("Asia Pacific", "Asia Pacific"),
                            ("Europe", "Europe"),
                            ("South America", "South America"),
                            ("Africa", "Africa"),
                            ("Australia", "Australia"),
                            ("Middle East", "Middle East"),
                        ],
                        max_length=255,
                    ),
                ),
                (
                    "media",
                    models.CharField(
                        choices=[
                            ("Ethernet", "Ethernet"),
                            ("ATM", "Atm"),
                            ("Multiple", "Multiple"),
                        ],
                        max_length=128,
                    ),
                ),
                ("proto_unicast", models.BooleanField(default=False)),
                ("proto_multicast", models.BooleanField(default=False)),
                ("proto_ipv6", models.BooleanField(default=False)),
                ("website", peeringdb.models.URLField(blank=True, max_length=255)),
                ("url_stats", peeringdb.models.URLField(blank=True, max_length=255)),
                ("tech_email", models.EmailField(blank=True, max_length=254)),
                ("tech_phone", models.CharField(blank=True, max_length=192)),
                ("policy_email", models.EmailField(blank=True, max_length=254)),
                ("policy_phone", models.CharField(blank=True, max_length=192)),
                ("ixf_net_count", models.IntegerField(default=0)),
                ("ixf_last_import", models.DateTimeField(blank=True, null=True)),
            ],
            options={
                "verbose_name": "Internet Exchange",
                "verbose_name_plural": "Internet Exchanges",
            },
        ),
        migrations.CreateModel(
            name="InternetExchangeFacility",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "fac",
                    models.ForeignKey(
                        default=0,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="ixfac_set",
                        to="peeringdb.facility",
                        verbose_name="Facility",
                    ),
                ),
                (
                    "ix",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="ixfac_set",
                        to="peeringdb.internetexchange",
                        verbose_name="Internet Exchange",
                    ),
                ),
            ],
            options={
                "verbose_name": "Internet Exchange facility",
                "verbose_name_plural": "Internet Exchange facilities",
                "unique_together": {("ix", "fac")},
            },
        ),
        migrations.CreateModel(
            name="IXLan",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(blank=True, max_length=255)),
                ("descr", models.TextField(blank=True)),
                ("mtu", models.PositiveIntegerField(blank=True, null=True)),
                ("vlan", models.PositiveIntegerField(blank=True, null=True)),
                ("dot1q_support", models.BooleanField(default=False)),
                (
                    "rs_asn",
                    peering.fields.ASNField(
                        blank=True,
                        default=0,
                        null=True,
                        verbose_name="Route Server ASN",
                    ),
                ),
                (
                    "arp_sponge",
                    netfields.fields.MACAddressField(
                        blank=True,
                        null=True,
                        unique=True,
                        verbose_name="ARP sponging MAC",
                    ),
                ),
                (
                    "ixf_ixp_member_list_url",
                    models.URLField(
                        blank=True, null=True, verbose_name="IX-F Member Export URL"
                    ),
                ),
                (
                    "ixf_ixp_member_list_url_visible",
                    models.CharField(
                        choices=[
                            ("Private", "Private"),
                            ("Users", "Users"),
                            ("Public", "Public"),
                        ],
                        default="Private",
                        max_length=64,
                        verbose_name="IX-F Member Export URL Visibility",
                    ),
                ),
                (
                    "ix",
                    models.ForeignKey(
                        default=0,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="ixlan_set",
                        to="peeringdb.internetexchange",
                        verbose_name="Internet Exchange",
                    ),
                ),
            ],
            options={
                "verbose_name": "Internet Exchange LAN",
                "verbose_name_plural": "Internet Exchange LANs",
            },
        ),
        migrations.CreateModel(
            name="IXLanPrefix",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("notes", models.CharField(blank=True, max_length=255)),
                (
                    "protocol",
                    models.CharField(
                        choices=[("IPv4", "Ipv4"), ("IPv6", "Ipv6")], max_length=64
                    ),
                ),
                (
                    "prefix",
                    netfields.fields.CidrAddressField(max_length=43, unique=True),
                ),
                ("in_dfz", models.BooleanField(default=False)),
                (
                    "ixlan",
                    models.ForeignKey(
                        default=0,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="ixpfx_set",
                        to="peeringdb.ixlan",
                        verbose_name="Internet Exchange LAN",
                    ),
                ),
            ],
            options={
                "verbose_name": "Internet Exchange LAN prefix",
                "verbose_name_plural": "Internet Exchange LAN prefixes",
            },
        ),
        migrations.CreateModel(
            name="NetworkContact",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "role",
                    models.CharField(
                        choices=[
                            ("Abuse", "Abuse"),
                            ("Maintenance", "Maintenance"),
                            ("Policy", "Policy"),
                            ("Technical", "Technical"),
                            ("NOC", "NOC"),
                            ("Public Relations", "Public Relations"),
                            ("Sales", "Sales"),
                        ],
                        max_length=27,
                    ),
                ),
                (
                    "visible",
                    models.CharField(
                        choices=[
                            ("Private", "Private"),
                            ("Users", "Users"),
                            ("Public", "Public"),
                        ],
                        default="Public",
                        max_length=64,
                    ),
                ),
                ("name", models.CharField(blank=True, max_length=254)),
                ("phone", models.CharField(blank=True, max_length=100)),
                ("email", models.EmailField(blank=True, max_length=254)),
                ("url", peeringdb.models.URLField(blank=True, max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name="NetworkFacility",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "local_asn",
                    peering.fields.ASNField(
                        blank=True, null=True, verbose_name="Local ASN"
                    ),
                ),
                ("avail_sonet", models.BooleanField(default=False)),
                ("avail_ethernet", models.BooleanField(default=False)),
                ("avail_atm", models.BooleanField(default=False)),
                (
                    "fac",
                    models.ForeignKey(
                        default=0,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="netfac_set",
                        to="peeringdb.facility",
                        verbose_name="Facility",
                    ),
                ),
            ],
            options={
                "verbose_name": "Network Facility",
                "verbose_name_plural": "Network Facilities",
            },
        ),
        migrations.CreateModel(
            name="Organization",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("address1", models.CharField(blank=True, max_length=255)),
                ("address2", models.CharField(blank=True, max_length=255)),
                ("city", models.CharField(blank=True, max_length=255)),
                ("state", models.CharField(blank=True, max_length=255)),
                ("zipcode", models.CharField(blank=True, max_length=48)),
                ("country", models.CharField(blank=True, max_length=7)),
                (
                    "latitude",
                    models.DecimalField(
                        blank=True, decimal_places=6, max_digits=9, null=True
                    ),
                ),
                (
                    "longitude",
                    models.DecimalField(
                        blank=True, decimal_places=6, max_digits=9, null=True
                    ),
                ),
                ("name", models.CharField(max_length=255, unique=True)),
                ("website", peeringdb.models.URLField(blank=True, max_length=255)),
                ("notes", models.TextField(blank=True)),
            ],
            options={
                "abstract": False,
            },
        ),
        # Adjust existing Network / NetworkIXLan / Synchronization models.
        migrations.AlterModelOptions(
            name="network",
            options={},
        ),
        migrations.AlterModelOptions(
            name="networkixlan",
            options={
                "verbose_name": "Public Peering Exchange Point",
                "verbose_name_plural": "Public Peering Exchange Points",
            },
        ),
        migrations.RenameField(
            model_name="synchronization",
            old_name="added",
            new_name="created",
        ),
        migrations.RemoveField(
            model_name="networkixlan",
            name="ix_id",
        ),
        migrations.RemoveField(
            model_name="networkixlan",
            name="ixlan_id",
        ),
        migrations.RemoveField(
            model_name="networkixlan",
            name="name",
        ),
        migrations.AddField(
            model_name="network",
            name="aka",
            field=models.CharField(
                blank=True, max_length=255, verbose_name="Also Known As"
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="info_ipv6",
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="network",
            name="info_multicast",
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="network",
            name="info_never_via_route_servers",
            field=models.BooleanField(
                default=False,
                help_text="Indicates if this network will announce its routes via route servers or not",
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="info_ratio",
            field=models.CharField(
                blank=True,
                choices=[
                    ("", "Not Disclosed"),
                    ("Not Disclosed", "Not Disclosed Bis"),
                    ("Heavy Outbound", "Heavy Outbound"),
                    ("Mostly Outbound", "Mostly Outbound"),
                    ("Balanced", "Balanced"),
                    ("Mostly Inbound", "Mostly Inbound"),
                    ("Heavy Inbound", "Heavy Inbound"),
                ],
                default="",
                max_length=45,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="info_scope",
            field=models.CharField(
                blank=True,
                choices=[
                    ("", "Not Disclosed"),
                    ("Not Disclosed", "Not Disclosed Bis"),
                    ("Regional", "Regional"),
                    ("North America", "North America"),
                    ("Asia Pacific", "Asia Pacific"),
                    ("Europe", "Europe"),
                    ("South America", "South America"),
                    ("Africa", "Africa"),
                    ("Australia", "Australia"),
                    ("Middle East", "Middle East"),
                    ("Global", "Global"),
                ],
                default="",
                max_length=39,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="info_traffic",
            field=models.CharField(
                blank=True,
                choices=[
                    ("", "Not Disclosed"),
                    ("0-20Mbps", "Mbps 20"),
                    ("20-100Mbps", "Mbps 100"),
                    ("100-1000Mbps", "Gbps 1"),
                    ("1-5Gbps", "Gbps 5"),
                    ("5-10Gbps", "Gbps 10"),
                    ("10-20Gbps", "Gbps 20"),
                    ("20-50Gbps", "Gbps 50"),
                    ("50-100Gbps", "Gbps 100"),
                    ("100-200Gbps", "Gbps 200"),
                    ("200-300Gbps", "Gbps 300"),
                    ("300-500Gbps", "Gbps 500"),
                    ("500-1000Gbps", "Tbps 1"),
                    ("1-5Tbps", "Tbps 5"),
                    ("5-10Tbps", "Tbps 10"),
                    ("10-20Tbps", "Tbps 20"),
                    ("20-50Tbps", "Tbps 50"),
                    ("50-100Tbps", "Tbps 100"),
                    ("100+Tbps", "Tbps 100 Plus"),
                ],
                max_length=39,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="info_type",
            field=models.CharField(
                blank=True,
                choices=[
                    ("", "Not Disclosed"),
                    ("Not Disclosed", "Not Disclosed Bis"),
                    ("NSP", "Nsp"),
                    ("Content", "Content"),
                    ("Cable/DSL/ISP", "Cable Dsl Isp"),
                    ("Enterprise", "Entreprise"),
                    ("Educational/Research", "Educational Research"),
                    ("Non-Profit", "Non Profit"),
                    ("Route Server", "Route Server"),
                    ("Network Services", "Network Services"),
                    ("Route Collector", "Route Collector"),
                    ("Government", "Government"),
                ],
                default="",
                max_length=60,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="info_unicast",
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="network",
            name="looking_glass",
            field=peeringdb.models.URLField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name="network",
            name="notes",
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name="network",
            name="notes_private",
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name="network",
            name="policy_contracts",
            field=models.CharField(
                blank=True,
                choices=[
                    ("Not Required", "Not Required"),
                    ("Private Only", "Private Only"),
                    ("Required", "Required"),
                ],
                max_length=36,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="policy_general",
            field=models.CharField(
                blank=True,
                choices=[
                    ("Open", "Open"),
                    ("Selective", "Selective"),
                    ("Restrictive", "Restrictive"),
                    ("No", "No"),
                ],
                max_length=72,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="policy_locations",
            field=models.CharField(
                blank=True,
                choices=[
                    ("Not Required", "Not Required"),
                    ("Preferred", "Preferred"),
                    ("Required - US", "Required Us"),
                    ("Required - EU", "Required Eu"),
                    ("Required - International", "Required Int"),
                ],
                max_length=72,
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="policy_ratio",
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name="network",
            name="policy_url",
            field=peeringdb.models.URLField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name="network",
            name="route_server",
            field=peeringdb.models.URLField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name="network",
            name="website",
            field=peeringdb.models.URLField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name="networkixlan",
            name="net",
            field=models.ForeignKey(
                default=0,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="netixlan_set",
                to="peeringdb.network",
                verbose_name="Network",
            ),
        ),
        migrations.AddField(
            model_name="networkixlan",
            name="notes",
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name="networkixlan",
            name="operational",
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name="networkixlan",
            name="speed",
            field=models.PositiveIntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="network",
            name="asn",
            field=peering.fields.ASNField(unique=True, verbose_name="ASN"),
        ),
        migrations.AlterField(
            model_name="network",
            name="info_prefixes4",
            field=models.PositiveIntegerField(
                blank=True,
                help_text="Recommended maximum number of IPv4 routes/prefixes to be configured on peering sessions for this ASN",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="network",
            name="info_prefixes6",
            field=models.PositiveIntegerField(
                blank=True,
                help_text="Recommended maximum number of IPv6 routes/prefixes to be configured on peering sessions for this ASN",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="network",
            name="irr_as_set",
            field=models.CharField(
                blank=True,
                default="",
                help_text="Reference to an AS-SET or ROUTE-SET in Internet Routing Registry (IRR)",
                max_length=255,
                verbose_name="IRR AS-SET/ROUTE-SET",
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="network",
            name="name",
            field=models.CharField(max_length=255, unique=True),
        ),
        migrations.AlterField(
            model_name="networkixlan",
            name="asn",
            field=peering.fields.ASNField(verbose_name="ASN"),
        ),
        migrations.AlterField(
            model_name="networkixlan",
            name="ipaddr4",
            field=netfields.fields.InetAddressField(
                blank=True,
                max_length=39,
                null=True,
                validators=[utils.validators.AddressFamilyValidator(4)],
                verbose_name="IPv4",
            ),
        ),
        migrations.AlterField(
            model_name="networkixlan",
            name="ipaddr6",
            field=netfields.fields.InetAddressField(
                blank=True,
                max_length=39,
                null=True,
                validators=[utils.validators.AddressFamilyValidator(6)],
                verbose_name="IPv6",
            ),
        ),
        # Foreign keys wired up after both endpoints exist.
        migrations.AddField(
            model_name="networkfacility",
            name="net",
            field=models.ForeignKey(
                default=0,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="netfac_set",
                to="peeringdb.network",
                verbose_name="Network",
            ),
        ),
        migrations.AddField(
            model_name="networkcontact",
            name="net",
            field=models.ForeignKey(
                default=0,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="poc_set",
                to="peeringdb.network",
                verbose_name="Network",
            ),
        ),
        migrations.AddField(
            model_name="internetexchange",
            name="org",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="ix_set",
                to="peeringdb.organization",
                verbose_name="Organization",
            ),
        ),
        migrations.AddField(
            model_name="facility",
            name="org",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="fac_set",
                to="peeringdb.organization",
                verbose_name="Organization",
            ),
        ),
        migrations.AddField(
            model_name="network",
            name="org",
            field=models.ForeignKey(
                default=0,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="net_set",
                to="peeringdb.organization",
                verbose_name="Organization",
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="networkixlan",
            name="ixlan",
            field=models.ForeignKey(
                default=0,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="netixlan_set",
                to="peeringdb.ixlan",
                verbose_name="Internet Exchange LAN",
            ),
        ),
        migrations.AlterUniqueTogether(
            name="networkfacility",
            unique_together={("net", "fac", "local_asn")},
        ),
    ]
|
|
from __future__ import unicode_literals, division, absolute_import
import logging
import os
import re
import sys
from copy import copy
from datetime import datetime, date, time
import locale
from email.utils import parsedate
from time import mktime
from jinja2 import (Environment, StrictUndefined, ChoiceLoader,
FileSystemLoader, PackageLoader, TemplateNotFound,
TemplateSyntaxError, Undefined)
from flexget.event import event
from flexget.utils.pathscrub import pathscrub
# Module-level logger for the template utilities.
log = logging.getLogger('utils.template')
# The global jinja2 Environment; created by make_environment() once the
# manager has started (see the 'manager.startup' event handler below).
environment = None
class RenderError(Exception):
    """Error raised when there is a problem with jinja rendering.

    Wraps the underlying jinja2 / string-replacement exception with a
    human-readable message.
    """
    pass
def filter_pathbase(val):
    """Return the final component of a path ('' for falsy input)."""
    if not val:
        val = ''
    return os.path.basename(val)
def filter_pathname(val):
    """Return the file name of a path, without directory or extension."""
    base = os.path.basename(val or '')
    name, _ext = os.path.splitext(base)
    return name
def filter_pathext(val):
    """Return the extension of a path, including the leading '.' ('' if none)."""
    _root, ext = os.path.splitext(val or '')
    return ext
def filter_pathdir(val):
    """Return the directory portion of a path ('' for falsy input)."""
    return os.path.dirname(val) if val else ''
def filter_pathscrub(val, os_mode=None):
    """Replace problematic characters in a path.

    Delegates to flexget's ``pathscrub``; *os_mode* optionally selects
    which OS's filename rules to apply (see flexget.utils.pathscrub).
    """
    return pathscrub(val, os_mode)
def filter_re_replace(val, pattern, repl):
    """Perform a regexp replacement on the given string.

    NOTE(review): relies on the Python 2 ``unicode`` builtin, like the
    rest of this module.
    """
    return re.sub(pattern, repl, unicode(val))
def filter_re_search(val, pattern):
    """Return the portion of *val* matching *pattern*, or '' if no match.

    Non-string values are passed through unchanged so the filter is safe
    to apply to arbitrary entry fields.
    """
    if not isinstance(val, basestring):
        return val
    result = re.search(pattern, val)
    if result:
        return result.group(0)
    return ''
def filter_formatdate(val, format):
    """Format a datetime/date/time with ``strftime``; non-dates pass through.

    The format string is encoded to the locale's preferred encoding before
    being handed to strftime (Python 2 byte-string strftime) and the result
    is decoded back to unicode.
    """
    encoding = locale.getpreferredencoding()
    if not isinstance(val, (datetime, date, time)):
        return val
    return val.strftime(format.encode(encoding)).decode(encoding)
def filter_parsedate(val):
    """Parse an RFC 2822 date string into a naive local datetime."""
    parsed = parsedate(val)
    return datetime.fromtimestamp(mktime(parsed))
def filter_date_suffix(date):
    """Append the English ordinal suffix ('st'/'nd'/'rd'/'th') to a date
    string whose last two characters are the day number."""
    day = int(date[-2:])
    # Teens and most other days take 'th'; only days ending in 1/2/3
    # (outside 11-13) take the special suffixes.
    if 4 <= day <= 20 or 24 <= day <= 30:
        return date + "th"
    return date + {1: "st", 2: "nd", 3: "rd"}[day % 10]
def filter_format_number(val, places=None, grouping=True):
    """Format a number according to the user's locale.

    :param places: fixed number of decimal places; when None, integers use
        '%d' and floats default to two decimal places.
    :param grouping: insert locale-specific thousands separators.

    Non-numeric values are returned unchanged.  NOTE(review): uses the
    Python 2 ``long`` builtin, like the rest of this module.
    """
    if not isinstance(val, (int, float, long)):
        return val
    if places is not None:
        format = '%.' + str(places) + 'f'
    elif isinstance(val, (int, long)):
        format = '%d'
    else:
        format = '%.02f'
    # Switch to the user's default locale so separators/decimal point match.
    locale.setlocale(locale.LC_ALL, '')
    return locale.format(format, val, grouping)
def filter_pad(val, width, fillchar='0'):
    """Right-justify *val* (stringified) to *width* chars, padding with *fillchar*."""
    return unicode(val).rjust(width, fillchar)
def filter_to_date(date_time_val):
    """Reduce a datetime to its date component.

    - ``datetime`` -> its ``.date()``
    - ``date`` -> returned unchanged (it already is a date)
    - anything else (including ``time``, which has no date component) is
      returned unchanged

    Bug fix: the original called ``.date()`` on plain ``date`` and ``time``
    values too, which raised AttributeError since only ``datetime`` has
    that method.
    """
    # datetime must be tested first: datetime is a subclass of date.
    if isinstance(date_time_val, datetime):
        return date_time_val.date()
    return date_time_val
def now():
    """Jinja global: the current local time as a naive datetime."""
    return datetime.now()
# Override the built-in Jinja default filter due to Jinja bug
# https://github.com/mitsuhiko/jinja2/pull/138
def filter_default(value, default_value=u'', boolean=False):
    """Return *default_value* when *value* is undefined (or falsy, if *boolean*)."""
    if isinstance(value, Undefined):
        return default_value
    if boolean and not value:
        return default_value
    return value


# Short alias, mirroring Jinja's built-in `d` alias for `default`.
filter_d = filter_default
@event('manager.startup')
def make_environment(manager):
    """Create the global Jinja environment and register custom filters.

    Runs on the 'manager.startup' event.  Templates are looked up first in
    the flexget package, then in the user's config ``templates`` directory.
    """
    global environment
    environment = Environment(undefined=StrictUndefined,
        loader=ChoiceLoader([PackageLoader('flexget'),
                             FileSystemLoader(os.path.join(manager.config_base, 'templates'))]),
        extensions=['jinja2.ext.loopcontrols'])
    # Register every module-level `filter_*` callable as a Jinja filter
    # under its name minus the 'filter_' prefix, plus the `now` global.
    for name, filt in globals().items():
        if name.startswith('filter_'):
            environment.filters[name.split('_', 1)[1]] = filt
        elif name == 'now':
            environment.globals['now'] = now
# TODO: list_templates function
def get_template(templatename, pluginname=None):
    """Load a template from disk.

    Looks in both included plugin templates and the user's custom template
    directory; a '.template' suffix is appended automatically.

    :raises ValueError: when the template is not found at any location.
    """
    if not templatename.endswith('.template'):
        templatename += '.template'
    candidates = []
    if pluginname:
        candidates.append(pluginname + '/' + templatename)
    candidates.append(templatename)
    for candidate in candidates:
        try:
            return environment.get_template(candidate)
        except TemplateNotFound:
            continue
    # TODO: Plugins need to catch and reraise this as PluginError, or perhaps we should have
    # a validator for template files
    raise ValueError('Template not found: %s (%s)' % (templatename, pluginname))
def render(template, context):
    """
    Renders a Template with `context` as its context.

    :param template: Template or template string to render.
    :param context: Context to render the template from.
    :return: The rendered template text.
    :raises RenderError: wrapping any exception raised during rendering.
    """
    # Plain strings are compiled through the global environment first.
    if isinstance(template, basestring):
        template = environment.from_string(template)
    try:
        result = template.render(context)
    except Exception as e:
        raise RenderError('(%s) %s' % (type(e).__name__, e))
    return result
def render_from_entry(template_string, entry):
    """Renders a Template or template string with an Entry as its context.

    Falls back to python %-style string replacement when jinja produced no
    changes (legacy template support).

    :raises RenderError: on template syntax errors, rendering errors or
        failed string replacement.
    """
    # If a plain string was passed, turn it into a Template
    if isinstance(template_string, basestring):
        try:
            template = environment.from_string(template_string)
        except TemplateSyntaxError as e:
            raise RenderError('Error in template syntax: ' + e.message)
    else:
        # We can also support an actual Template being passed in
        template = template_string
    # Make a copy of the Entry so we can add some more fields
    variables = copy(entry)
    variables['now'] = datetime.now()
    # Add task name to variables, usually it's there because metainfo_task plugin, but not always
    if 'task' not in variables and hasattr(entry, 'task'):
        variables['task'] = entry.task.name
    # We use the lower level render function, so that our Entry is not cast into a dict (and lazy loading lost)
    try:
        result = u''.join(template.root_render_func(template.new_context(variables, shared=True)))
    except:
        # NOTE(review): the bare except is deliberate -- whatever was raised
        # is forwarded to jinja's handle_exception for uniform reporting.
        exc_info = sys.exc_info()
        try:
            return environment.handle_exception(exc_info, True)
        except Exception as e:
            error = RenderError('(%s) %s' % (type(e).__name__, e))
            log.debug('Error during rendering: %s' % error)
            raise error
    # Only try string replacement if jinja didn't do anything
    if result == template_string:
        try:
            result = template_string % entry
        except KeyError as e:
            raise RenderError('Does not contain the field `%s` for string replacement.' % e)
        except ValueError as e:
            raise RenderError('Invalid string replacement template: %s (%s)' % (template_string, e))
        except TypeError as e:
            raise RenderError('Error during string replacement: %s' % e.message)
    return result
def render_from_task(template, task):
    """Render a Template with a task as its context.

    :param template: Template or template string to render.
    :param task: Task to render the template from.
    :return: The rendered template text.
    :raises RenderError: If rendering fails for any reason.
    """
    # Accept a plain template string as well as a compiled Template.
    if isinstance(template, basestring):
        template = environment.from_string(template)
    try:
        return template.render({'task': task})
    except Exception as e:
        raise RenderError('(%s) %s' % (type(e).__name__, e))
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the initial tables of the 'dm' app.

    Auto-generated by South's ``schemamigration``; the table/column
    definitions and the frozen ``models`` dict below must not be edited
    by hand, or forward/backward migration state will diverge.
    """

    def forwards(self, orm):
        """Apply the migration: create every 'dm' model table and its M2M join tables."""
        # Adding model 'NPCType'
        db.create_table('dm_npctype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('race', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Race'])),
            ('level', self.gf('django.db.models.fields.IntegerField')()),
            ('vision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Vision'])),
            ('xp_reward', self.gf('django.db.models.fields.IntegerField')()),
            ('max_hit_points', self.gf('django.db.models.fields.IntegerField')()),
            ('alignment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Alignment'], blank=True)),
        ))
        db.send_create_signal('dm', ['NPCType'])
        # Adding M2M table for field roles on 'NPCType'
        db.create_table('dm_npctype_roles', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('npctype', models.ForeignKey(orm['dm.npctype'], null=False)),
            ('role', models.ForeignKey(orm['character_builder.role'], null=False))
        ))
        db.create_unique('dm_npctype_roles', ['npctype_id', 'role_id'])
        # Adding model 'NPCTypeAbility'
        db.create_table('dm_npctypeability', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('npc_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='abilities', to=orm['dm.NPCType'])),
            ('ability', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Ability'])),
            ('value', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('dm', ['NPCTypeAbility'])
        # Adding model 'NPCTypeDefense'
        db.create_table('dm_npctypedefense', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('npc_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='defenses', to=orm['dm.NPCType'])),
            ('defense', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Defense'])),
            ('value', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('dm', ['NPCTypeDefense'])
        # Adding model 'NPC'
        db.create_table('dm_npc', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('is_alive', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal('dm', ['NPC'])
        # Adding model 'MonsterNPC' (multi-table child of NPC via npc_ptr)
        db.create_table('dm_monsternpc', (
            ('npc_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['dm.NPC'], unique=True, primary_key=True)),
            ('npc_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.NPCType'])),
            ('hit_points', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('dm', ['MonsterNPC'])
        # Adding model 'BasicStoryNPC'
        db.create_table('dm_basicstorynpc', (
            ('npc_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['dm.NPC'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('dm', ['BasicStoryNPC'])
        # Adding model 'StoryNPC'
        db.create_table('dm_storynpc', (
            ('basicstorynpc_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['dm.BasicStoryNPC'], unique=True, primary_key=True)),
            ('npc_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.NPCType'])),
            ('hit_points', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('dm', ['StoryNPC'])
        # Adding model 'Party'
        db.create_table('dm_party', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('background', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('formed_on', self.gf('django.db.models.fields.DateField')()),
        ))
        db.send_create_signal('dm', ['Party'])
        # Adding M2M table for field characters on 'Party'
        db.create_table('dm_party_characters', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('party', models.ForeignKey(orm['dm.party'], null=False)),
            ('character', models.ForeignKey(orm['character_builder.character'], null=False))
        ))
        db.create_unique('dm_party_characters', ['party_id', 'character_id'])
        # Adding model 'Campaign'
        db.create_table('dm_campaign', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('dm', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('party', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.Party'])),
        ))
        db.send_create_signal('dm', ['Campaign'])
        # Adding model 'Session'
        db.create_table('dm_session', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')(blank=True)),
            ('end_time', self.gf('django.db.models.fields.DateTimeField')(blank=True)),
            ('campaign', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.Campaign'])),
        ))
        db.send_create_signal('dm', ['Session'])
        # Adding model 'HistoryLine'
        db.create_table('dm_historyline', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('session', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.Session'])),
            ('text', self.gf('django.db.models.fields.TextField')()),
            ('logged_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('dm', ['HistoryLine'])
        # Adding model 'EncounterTemplate'
        db.create_table('dm_encountertemplate', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal('dm', ['EncounterTemplate'])
        # Adding M2M table for field npcs on 'EncounterTemplate'
        db.create_table('dm_encountertemplate_npcs', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('encountertemplate', models.ForeignKey(orm['dm.encountertemplate'], null=False)),
            ('npc', models.ForeignKey(orm['dm.npc'], null=False))
        ))
        db.create_unique('dm_encountertemplate_npcs', ['encountertemplate_id', 'npc_id'])
        # Adding model 'Encounter'
        db.create_table('dm_encounter', (
            ('encountertemplate_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['dm.EncounterTemplate'], unique=True, primary_key=True)),
            ('party', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.Party'])),
        ))
        db.send_create_signal('dm', ['Encounter'])
        # Adding model 'EncounterParticipant'
        db.create_table('dm_encounterparticipant', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('dm', ['EncounterParticipant'])
        # Adding model 'PCEncounterParticipant'
        db.create_table('dm_pcencounterparticipant', (
            ('encounterparticipant_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['dm.EncounterParticipant'], unique=True, primary_key=True)),
            ('character', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Character'])),
        ))
        db.send_create_signal('dm', ['PCEncounterParticipant'])
        # Adding model 'NPCEncounterParticipant'
        db.create_table('dm_npcencounterparticipant', (
            ('encounterparticipant_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['dm.EncounterParticipant'], unique=True, primary_key=True)),
            ('npc', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.NPC'])),
        ))
        db.send_create_signal('dm', ['NPCEncounterParticipant'])
        # Adding model 'EncounterInitiative'
        db.create_table('dm_encounterinitiative', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('encounter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.Encounter'])),
            ('participant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.EncounterParticipant'])),
            ('initiative', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('dm', ['EncounterInitiative'])

    def backwards(self, orm):
        """Roll back the migration: drop every table created in forwards()."""
        # Deleting model 'NPCType'
        db.delete_table('dm_npctype')
        # Removing M2M table for field roles on 'NPCType'
        db.delete_table('dm_npctype_roles')
        # Deleting model 'NPCTypeAbility'
        db.delete_table('dm_npctypeability')
        # Deleting model 'NPCTypeDefense'
        db.delete_table('dm_npctypedefense')
        # Deleting model 'NPC'
        db.delete_table('dm_npc')
        # Deleting model 'MonsterNPC'
        db.delete_table('dm_monsternpc')
        # Deleting model 'BasicStoryNPC'
        db.delete_table('dm_basicstorynpc')
        # Deleting model 'StoryNPC'
        db.delete_table('dm_storynpc')
        # Deleting model 'Party'
        db.delete_table('dm_party')
        # Removing M2M table for field characters on 'Party'
        db.delete_table('dm_party_characters')
        # Deleting model 'Campaign'
        db.delete_table('dm_campaign')
        # Deleting model 'Session'
        db.delete_table('dm_session')
        # Deleting model 'HistoryLine'
        db.delete_table('dm_historyline')
        # Deleting model 'EncounterTemplate'
        db.delete_table('dm_encountertemplate')
        # Removing M2M table for field npcs on 'EncounterTemplate'
        db.delete_table('dm_encountertemplate_npcs')
        # Deleting model 'Encounter'
        db.delete_table('dm_encounter')
        # Deleting model 'EncounterParticipant'
        db.delete_table('dm_encounterparticipant')
        # Deleting model 'PCEncounterParticipant'
        db.delete_table('dm_pcencounterparticipant')
        # Deleting model 'NPCEncounterParticipant'
        db.delete_table('dm_npcencounterparticipant')
        # Deleting model 'EncounterInitiative'
        db.delete_table('dm_encounterinitiative')

    # Frozen ORM state captured at generation time; South uses this snapshot
    # (not the live models.py) to build the `orm` object passed to
    # forwards()/backwards(). Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'character_builder.ability': {
            'Meta': {'object_name': 'Ability'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'help_text': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.alignment': {
            'Meta': {'object_name': 'Alignment'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'character_builder.armorclass': {
            'Meta': {'object_name': 'ArmorClass'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.armortype': {
            'Meta': {'object_name': 'ArmorType'},
            'armor_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ArmorClass']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.character': {
            'Meta': {'object_name': 'Character'},
            'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']"}),
            'class_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ClassType']"}),
            'deity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Deity']"}),
            'gender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Gender']"}),
            'height': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Race']"}),
            'slug_name': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'weight': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'xp': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
        },
        'character_builder.classtype': {
            'Meta': {'ordering': "['name']", 'object_name': 'ClassType'},
            'armor_proficiencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.ArmorType']", 'symmetrical': 'False'}),
            'base_hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'favored_abilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Ability']", 'symmetrical': 'False'}),
            'hit_points_per_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Modifier']", 'symmetrical': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Role']"}),
            'role_flavor': ('django.db.models.fields.TextField', [], {}),
            'skill_choices': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Source']"}),
            'trained_skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['character_builder.Skill']", 'null': 'True', 'blank': 'True'}),
            'weapon_proficiencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.WeaponProficiencyGroup']", 'symmetrical': 'False'})
        },
        'character_builder.defense': {
            'Meta': {'object_name': 'Defense'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'abilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Ability']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'character_builder.deity': {
            'Meta': {'object_name': 'Deity'},
            'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'character_builder.gender': {
            'Meta': {'object_name': 'Gender'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        'character_builder.language': {
            'Meta': {'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'script': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.modifier': {
            'Meta': {'object_name': 'Modifier'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'character_builder.race': {
            'Meta': {'ordering': "['name']", 'object_name': 'Race'},
            'average_height_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'average_weight_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Language']", 'symmetrical': 'False'}),
            'modifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Modifier']", 'symmetrical': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'playable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Size']"}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Source']"}),
            'speed': ('django.db.models.fields.IntegerField', [], {}),
            'vision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Vision']"})
        },
        'character_builder.role': {
            'Meta': {'object_name': 'Role'},
            'flavor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.size': {
            'Meta': {'object_name': 'Size'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'reach': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'space': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.skill': {
            'Meta': {'object_name': 'Skill'},
            'ability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Ability']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.source': {
            'Meta': {'ordering': "['name']", 'object_name': 'Source'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.vision': {
            'Meta': {'object_name': 'Vision'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.weaponcategory': {
            'Meta': {'object_name': 'WeaponCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.weaponproficiencygroup': {
            'Meta': {'object_name': 'WeaponProficiencyGroup'},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.WeaponCategory']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dm.basicstorynpc': {
            'Meta': {'object_name': 'BasicStoryNPC', '_ormbases': ['dm.NPC']},
            'description': ('django.db.models.fields.TextField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'npc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.NPC']", 'unique': 'True', 'primary_key': 'True'})
        },
        'dm.campaign': {
            'Meta': {'object_name': 'Campaign'},
            'dm': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Party']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dm.encounter': {
            'Meta': {'object_name': 'Encounter', '_ormbases': ['dm.EncounterTemplate']},
            'encountertemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterTemplate']", 'unique': 'True', 'primary_key': 'True'}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Party']"})
        },
        'dm.encounterinitiative': {
            'Meta': {'object_name': 'EncounterInitiative'},
            'encounter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Encounter']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initiative': ('django.db.models.fields.IntegerField', [], {}),
            'participant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.EncounterParticipant']"})
        },
        'dm.encounterparticipant': {
            'Meta': {'object_name': 'EncounterParticipant'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'dm.encountertemplate': {
            'Meta': {'object_name': 'EncounterTemplate'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'npcs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dm.NPC']", 'symmetrical': 'False'})
        },
        'dm.historyline': {
            'Meta': {'object_name': 'HistoryLine'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logged_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Session']"}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'dm.monsternpc': {
            'Meta': {'object_name': 'MonsterNPC', '_ormbases': ['dm.NPC']},
            'hit_points': ('django.db.models.fields.IntegerField', [], {}),
            'npc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.NPC']", 'unique': 'True', 'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPCType']"})
        },
        'dm.npc': {
            'Meta': {'object_name': 'NPC'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_alive': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'dm.npcencounterparticipant': {
            'Meta': {'object_name': 'NPCEncounterParticipant', '_ormbases': ['dm.EncounterParticipant']},
            'encounterparticipant_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterParticipant']", 'unique': 'True', 'primary_key': 'True'}),
            'npc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPC']"})
        },
        'dm.npctype': {
            'Meta': {'object_name': 'NPCType'},
            'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.IntegerField', [], {}),
            'max_hit_points': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Race']"}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Role']", 'symmetrical': 'False'}),
            'vision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Vision']"}),
            'xp_reward': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.npctypeability': {
            'Meta': {'object_name': 'NPCTypeAbility'},
            'ability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Ability']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'abilities'", 'to': "orm['dm.NPCType']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.npctypedefense': {
            'Meta': {'object_name': 'NPCTypeDefense'},
            'defense': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Defense']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defenses'", 'to': "orm['dm.NPCType']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.party': {
            'Meta': {'object_name': 'Party'},
            'background': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'characters': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Character']", 'symmetrical': 'False'}),
            'formed_on': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dm.pcencounterparticipant': {
            'Meta': {'object_name': 'PCEncounterParticipant', '_ormbases': ['dm.EncounterParticipant']},
            'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Character']"}),
            'encounterparticipant_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterParticipant']", 'unique': 'True', 'primary_key': 'True'})
        },
        'dm.session': {
            'Meta': {'object_name': 'Session'},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Campaign']"}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
        },
        'dm.storynpc': {
            'Meta': {'object_name': 'StoryNPC', '_ormbases': ['dm.BasicStoryNPC']},
            'basicstorynpc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.BasicStoryNPC']", 'unique': 'True', 'primary_key': 'True'}),
            'hit_points': ('django.db.models.fields.IntegerField', [], {}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPCType']"})
        }
    }
    # Restrict South's model freezing to the 'dm' app only.
    complete_apps = ['dm']
|
|
"""Quizzes API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class QuizzesAPI(BaseCanvasAPI):
"""Quizzes API Version 1.0."""
    def __init__(self, *args, **kwargs):
        """Init method for QuizzesAPI.

        Forwards all arguments to :class:`BaseCanvasAPI` and attaches a
        dedicated logger so quiz API traffic can be filtered separately.
        """
        super(QuizzesAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.QuizzesAPI")
def list_quizzes_in_course(self, course_id, search_term=None):
"""
List quizzes in a course.
Returns the paginated list of Quizzes in this course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - search_term
"""
The partial title of the quizzes to match and return.
"""
if search_term is not None:
params["search_term"] = search_term
self.logger.debug(
"GET /api/v1/courses/{course_id}/quizzes with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/quizzes".format(**path),
data=data,
params=params,
all_pages=True,
)
def get_single_quiz(self, course_id, id):
"""
Get a single quiz.
Returns the quiz with the given id.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"GET /api/v1/courses/{course_id}/quizzes/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/quizzes/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def create_quiz(
self,
course_id,
quiz_title,
quiz_access_code=None,
quiz_allowed_attempts=None,
quiz_assignment_group_id=None,
quiz_cant_go_back=None,
quiz_description=None,
quiz_due_at=None,
quiz_hide_correct_answers_at=None,
quiz_hide_results=None,
quiz_ip_filter=None,
quiz_lock_at=None,
quiz_one_question_at_a_time=None,
quiz_one_time_results=None,
quiz_only_visible_to_overrides=None,
quiz_published=None,
quiz_quiz_type=None,
quiz_scoring_policy=None,
quiz_show_correct_answers=None,
quiz_show_correct_answers_at=None,
quiz_show_correct_answers_last_attempt=None,
quiz_shuffle_answers=None,
quiz_time_limit=None,
quiz_unlock_at=None,
):
"""
Create a quiz.
Create a new quiz for this course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - quiz[title]
"""
The quiz title.
"""
data["quiz[title]"] = quiz_title
# OPTIONAL - quiz[description]
"""
A description of the quiz.
"""
if quiz_description is not None:
data["quiz[description]"] = quiz_description
# OPTIONAL - quiz[quiz_type]
"""
The type of quiz.
"""
if quiz_quiz_type is not None:
self._validate_enum(
quiz_quiz_type,
["practice_quiz", "assignment", "graded_survey", "survey"],
)
data["quiz[quiz_type]"] = quiz_quiz_type
# OPTIONAL - quiz[assignment_group_id]
"""
The assignment group id to put the assignment in. Defaults to the top
assignment group in the course. Only valid if the quiz is graded, i.e. if
quiz_type is "assignment" or "graded_survey".
"""
if quiz_assignment_group_id is not None:
data["quiz[assignment_group_id]"] = quiz_assignment_group_id
# OPTIONAL - quiz[time_limit]
"""
Time limit to take this quiz, in minutes. Set to null for no time limit.
Defaults to null.
"""
if quiz_time_limit is not None:
data["quiz[time_limit]"] = quiz_time_limit
# OPTIONAL - quiz[shuffle_answers]
"""
If true, quiz answers for multiple choice questions will be randomized for
each student. Defaults to false.
"""
if quiz_shuffle_answers is not None:
data["quiz[shuffle_answers]"] = quiz_shuffle_answers
# OPTIONAL - quiz[hide_results]
"""
Dictates whether or not quiz results are hidden from students.
If null, students can see their results after any attempt.
If "always", students can never see their results.
If "until_after_last_attempt", students can only see results after their
last attempt. (Only valid if allowed_attempts > 1). Defaults to null.
"""
if quiz_hide_results is not None:
self._validate_enum(
quiz_hide_results, ["always", "until_after_last_attempt"]
)
data["quiz[hide_results]"] = quiz_hide_results
# OPTIONAL - quiz[show_correct_answers]
"""
Only valid if hide_results=null
If false, hides correct answers from students when quiz results are viewed.
Defaults to true.
"""
if quiz_show_correct_answers is not None:
data["quiz[show_correct_answers]"] = quiz_show_correct_answers
# OPTIONAL - quiz[show_correct_answers_last_attempt]
"""
Only valid if show_correct_answers=true and allowed_attempts > 1
If true, hides correct answers from students when quiz results are viewed
until they submit the last attempt for the quiz.
Defaults to false.
"""
if quiz_show_correct_answers_last_attempt is not None:
data[
"quiz[show_correct_answers_last_attempt]"
] = quiz_show_correct_answers_last_attempt
# OPTIONAL - quiz[show_correct_answers_at]
"""
Only valid if show_correct_answers=true
If set, the correct answers will be visible by students only after this
date, otherwise the correct answers are visible once the student hands in
their quiz submission.
"""
if quiz_show_correct_answers_at is not None:
if issubclass(quiz_show_correct_answers_at.__class__, str):
quiz_show_correct_answers_at = self._validate_iso8601_string(
quiz_show_correct_answers_at
)
elif issubclass(quiz_show_correct_answers_at.__class__, date) or issubclass(
quiz_show_correct_answers_at.__class__, datetime
):
quiz_show_correct_answers_at = quiz_show_correct_answers_at.strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
data["quiz[show_correct_answers_at]"] = quiz_show_correct_answers_at
# OPTIONAL - quiz[hide_correct_answers_at]
"""
Only valid if show_correct_answers=true
If set, the correct answers will stop being visible once this date has
passed. Otherwise, the correct answers will be visible indefinitely.
"""
if quiz_hide_correct_answers_at is not None:
if issubclass(quiz_hide_correct_answers_at.__class__, str):
quiz_hide_correct_answers_at = self._validate_iso8601_string(
quiz_hide_correct_answers_at
)
elif issubclass(quiz_hide_correct_answers_at.__class__, date) or issubclass(
quiz_hide_correct_answers_at.__class__, datetime
):
quiz_hide_correct_answers_at = quiz_hide_correct_answers_at.strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
data["quiz[hide_correct_answers_at]"] = quiz_hide_correct_answers_at
# OPTIONAL - quiz[allowed_attempts]
"""
Number of times a student is allowed to take a quiz.
Set to -1 for unlimited attempts.
Defaults to 1.
"""
if quiz_allowed_attempts is not None:
data["quiz[allowed_attempts]"] = quiz_allowed_attempts
# OPTIONAL - quiz[scoring_policy]
"""
Required and only valid if allowed_attempts > 1.
Scoring policy for a quiz that students can take multiple times.
Defaults to "keep_highest".
"""
if quiz_scoring_policy is not None:
self._validate_enum(quiz_scoring_policy, ["keep_highest", "keep_latest"])
data["quiz[scoring_policy]"] = quiz_scoring_policy
# OPTIONAL - quiz[one_question_at_a_time]
"""
If true, shows quiz to student one question at a time.
Defaults to false.
"""
if quiz_one_question_at_a_time is not None:
data["quiz[one_question_at_a_time]"] = quiz_one_question_at_a_time
# OPTIONAL - quiz[cant_go_back]
"""
Only valid if one_question_at_a_time=true
If true, questions are locked after answering.
Defaults to false.
"""
if quiz_cant_go_back is not None:
data["quiz[cant_go_back]"] = quiz_cant_go_back
# OPTIONAL - quiz[access_code]
"""
Restricts access to the quiz with a password.
For no access code restriction, set to null.
Defaults to null.
"""
if quiz_access_code is not None:
data["quiz[access_code]"] = quiz_access_code
# OPTIONAL - quiz[ip_filter]
"""
Restricts access to the quiz to computers in a specified IP range.
Filters can be a comma-separated list of addresses, or an address followed by a mask
Examples:
"192.168.217.1"
"192.168.217.1/24"
"192.168.217.1/255.255.255.0"
For no IP filter restriction, set to null.
Defaults to null.
"""
if quiz_ip_filter is not None:
data["quiz[ip_filter]"] = quiz_ip_filter
# OPTIONAL - quiz[due_at]
"""
The day/time the quiz is due.
Accepts times in ISO 8601 format, e.g. 2011-10-21T18:48Z.
"""
if quiz_due_at is not None:
if issubclass(quiz_due_at.__class__, str):
quiz_due_at = self._validate_iso8601_string(quiz_due_at)
elif issubclass(quiz_due_at.__class__, date) or issubclass(
quiz_due_at.__class__, datetime
):
quiz_due_at = quiz_due_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["quiz[due_at]"] = quiz_due_at
# OPTIONAL - quiz[lock_at]
"""
The day/time the quiz is locked for students.
Accepts times in ISO 8601 format, e.g. 2011-10-21T18:48Z.
"""
if quiz_lock_at is not None:
if issubclass(quiz_lock_at.__class__, str):
quiz_lock_at = self._validate_iso8601_string(quiz_lock_at)
elif issubclass(quiz_lock_at.__class__, date) or issubclass(
quiz_lock_at.__class__, datetime
):
quiz_lock_at = quiz_lock_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["quiz[lock_at]"] = quiz_lock_at
# OPTIONAL - quiz[unlock_at]
"""
The day/time the quiz is unlocked for students.
Accepts times in ISO 8601 format, e.g. 2011-10-21T18:48Z.
"""
if quiz_unlock_at is not None:
if issubclass(quiz_unlock_at.__class__, str):
quiz_unlock_at = self._validate_iso8601_string(quiz_unlock_at)
elif issubclass(quiz_unlock_at.__class__, date) or issubclass(
quiz_unlock_at.__class__, datetime
):
quiz_unlock_at = quiz_unlock_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["quiz[unlock_at]"] = quiz_unlock_at
# OPTIONAL - quiz[published]
"""
Whether the quiz should have a draft state of published or unpublished.
NOTE: If students have started taking the quiz, or there are any
submissions for the quiz, you may not unpublish a quiz and will recieve
an error.
"""
if quiz_published is not None:
data["quiz[published]"] = quiz_published
# OPTIONAL - quiz[one_time_results]
"""
Whether students should be prevented from viewing their quiz results past
the first time (right after they turn the quiz in.)
Only valid if "hide_results" is not set to "always".
Defaults to false.
"""
if quiz_one_time_results is not None:
data["quiz[one_time_results]"] = quiz_one_time_results
# OPTIONAL - quiz[only_visible_to_overrides]
"""
Whether this quiz is only visible to overrides (Only useful if
'differentiated assignments' account setting is on)
Defaults to false.
"""
if quiz_only_visible_to_overrides is not None:
data["quiz[only_visible_to_overrides]"] = quiz_only_visible_to_overrides
self.logger.debug(
"POST /api/v1/courses/{course_id}/quizzes with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/quizzes".format(**path),
data=data,
params=params,
single_item=True,
)
def edit_quiz(self, course_id, id, quiz_notify_of_update=None):
"""
Edit a quiz.
Modify an existing quiz. See the documentation for quiz creation.
Additional arguments:
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - quiz[notify_of_update]
"""
If true, notifies users that the quiz has changed.
Defaults to true
"""
if quiz_notify_of_update is not None:
data["quiz[notify_of_update]"] = quiz_notify_of_update
self.logger.debug(
"PUT /api/v1/courses/{course_id}/quizzes/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/courses/{course_id}/quizzes/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def delete_quiz(self, course_id, id):
"""
Delete a quiz.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"DELETE /api/v1/courses/{course_id}/quizzes/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"DELETE",
"/api/v1/courses/{course_id}/quizzes/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def reorder_quiz_items(self, course_id, id, order_id, order_type=None):
"""
Reorder quiz items.
Change order of the quiz questions or groups within the quiz
<b>204 No Content</b> response code is returned if the reorder was successful.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# REQUIRED - order[id]
"""
The associated item's unique identifier
"""
data["order[id]"] = order_id
# OPTIONAL - order[type]
"""
The type of item is either 'question' or 'group'
"""
if order_type is not None:
self._validate_enum(order_type, ["question", "group"])
data["order[type]"] = order_type
self.logger.debug(
"POST /api/v1/courses/{course_id}/quizzes/{id}/reorder with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/quizzes/{id}/reorder".format(**path),
data=data,
params=params,
no_data=True,
)
def validate_quiz_access_code(self, access_code, course_id, id):
"""
Validate quiz access code.
Accepts an access code and returns a boolean indicating whether that access code is correct
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# REQUIRED - access_code
"""
The access code being validated
"""
data["access_code"] = access_code
self.logger.debug(
"POST /api/v1/courses/{course_id}/quizzes/{id}/validate_access_code with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/quizzes/{id}/validate_access_code".format(
**path
),
data=data,
params=params,
)
# Field name -> docstring for every Quiz model attribute. Each entry becomes
# a read/write property backed by the ``_<name>`` instance attribute.
_QUIZ_FIELD_DOCS = (
    ("id", "the ID of the quiz."),
    ("title", "the title of the quiz."),
    ("html_url", "the HTTP/HTTPS URL to the quiz."),
    ("mobile_url", "a url suitable for loading the quiz in a mobile webview. it will persist the headless session and, for quizzes in public courses, will force the user to login."),
    ("preview_url", "A url that can be visited in the browser with a POST request to preview a quiz as the teacher. Only present when the user may grade."),
    ("description", "the description of the quiz."),
    ("quiz_type", "type of quiz possible values: 'practice_quiz', 'assignment', 'graded_survey', 'survey'."),
    ("assignment_group_id", "the ID of the quiz's assignment group:."),
    ("time_limit", "quiz time limit in minutes."),
    ("shuffle_answers", "shuffle answers for students?."),
    ("hide_results", "let students see their quiz responses? possible values: null, 'always', 'until_after_last_attempt'."),
    ("show_correct_answers", "show which answers were correct when results are shown? only valid if hide_results=null."),
    ("show_correct_answers_last_attempt", "restrict the show_correct_answers option above to apply only to the last submitted attempt of a quiz that allows multiple attempts. only valid if show_correct_answers=true and allowed_attempts > 1."),
    ("show_correct_answers_at", "when should the correct answers be visible by students? only valid if show_correct_answers=true."),
    ("hide_correct_answers_at", "prevent the students from seeing correct answers after the specified date has passed. only valid if show_correct_answers=true."),
    ("one_time_results", "prevent the students from seeing their results more than once (right after they submit the quiz)."),
    ("scoring_policy", "which quiz score to keep (only if allowed_attempts != 1) possible values: 'keep_highest', 'keep_latest'."),
    ("allowed_attempts", "how many times a student can take the quiz -1 = unlimited attempts."),
    ("one_question_at_a_time", "show one question at a time?."),
    ("question_count", "the number of questions in the quiz."),
    ("points_possible", "The total point value given to the quiz."),
    ("cant_go_back", "lock questions after answering? only valid if one_question_at_a_time=true."),
    ("access_code", "access code to restrict quiz access."),
    ("ip_filter", "IP address or range that quiz access is limited to."),
    ("due_at", "when the quiz is due."),
    ("lock_at", "when to lock the quiz."),
    ("unlock_at", "when to unlock the quiz."),
    ("published", "whether the quiz has a published or unpublished draft state."),
    ("unpublishable", "Whether the assignment's 'published' state can be changed to false. Will be false if there are student submissions for the quiz."),
    ("locked_for_user", "Whether or not this is locked for the user."),
    ("lock_info", "(Optional) Information for the user about the lock. Present when locked_for_user is true."),
    ("lock_explanation", "(Optional) An explanation of why this is locked for the user. Present when locked_for_user is true."),
    ("speedgrader_url", "Link to Speed Grader for this quiz. Will not be present if quiz is unpublished."),
    ("quiz_extensions_url", "Link to endpoint to send extensions for this quiz."),
    ("permissions", "Permissions the user has for the quiz."),
    ("all_dates", "list of due dates for the quiz."),
    ("version_number", "Current version number of the quiz."),
    ("question_types", "List of question types in the quiz."),
    ("anonymous_submissions", "Whether survey submissions will be kept anonymous (only applicable to 'graded_survey', 'survey' quiz types)."),
)


def _quiz_field_property(name, doc):
    """Build a read/write property for model field *name*.

    The getter returns the backing ``_<name>`` attribute. The setter only
    mutates the local object and logs a warning, because py3canvas models
    never push changes back to the remote Canvas instance.
    """
    attr = "_" + name

    def _getter(self):
        return getattr(self, attr)

    def _setter(self, value):
        # Logger.warning replaces the deprecated Logger.warn alias; the
        # message text is unchanged from the generated original.
        self.logger.warning(
            "Setting values on %s will NOT update the remote Canvas instance.",
            name,
        )
        setattr(self, attr, value)

    return property(_getter, _setter, doc=doc)


class Quiz(BaseModel):
    """Quiz Model."""

    def __init__(
        self,
        id=None,
        title=None,
        html_url=None,
        mobile_url=None,
        preview_url=None,
        description=None,
        quiz_type=None,
        assignment_group_id=None,
        time_limit=None,
        shuffle_answers=None,
        hide_results=None,
        show_correct_answers=None,
        show_correct_answers_last_attempt=None,
        show_correct_answers_at=None,
        hide_correct_answers_at=None,
        one_time_results=None,
        scoring_policy=None,
        allowed_attempts=None,
        one_question_at_a_time=None,
        question_count=None,
        points_possible=None,
        cant_go_back=None,
        access_code=None,
        ip_filter=None,
        due_at=None,
        lock_at=None,
        unlock_at=None,
        published=None,
        unpublishable=None,
        locked_for_user=None,
        lock_info=None,
        lock_explanation=None,
        speedgrader_url=None,
        quiz_extensions_url=None,
        permissions=None,
        all_dates=None,
        version_number=None,
        question_types=None,
        anonymous_submissions=None,
    ):
        """Init method for Quiz class.

        Every keyword argument is stored on the matching ``_<name>``
        attribute and exposed through the properties attached below.
        """
        self._id = id
        self._title = title
        self._html_url = html_url
        self._mobile_url = mobile_url
        self._preview_url = preview_url
        self._description = description
        self._quiz_type = quiz_type
        self._assignment_group_id = assignment_group_id
        self._time_limit = time_limit
        self._shuffle_answers = shuffle_answers
        self._hide_results = hide_results
        self._show_correct_answers = show_correct_answers
        self._show_correct_answers_last_attempt = show_correct_answers_last_attempt
        self._show_correct_answers_at = show_correct_answers_at
        self._hide_correct_answers_at = hide_correct_answers_at
        self._one_time_results = one_time_results
        self._scoring_policy = scoring_policy
        self._allowed_attempts = allowed_attempts
        self._one_question_at_a_time = one_question_at_a_time
        self._question_count = question_count
        self._points_possible = points_possible
        self._cant_go_back = cant_go_back
        self._access_code = access_code
        self._ip_filter = ip_filter
        self._due_at = due_at
        self._lock_at = lock_at
        self._unlock_at = unlock_at
        self._published = published
        self._unpublishable = unpublishable
        self._locked_for_user = locked_for_user
        self._lock_info = lock_info
        self._lock_explanation = lock_explanation
        self._speedgrader_url = speedgrader_url
        self._quiz_extensions_url = quiz_extensions_url
        self._permissions = permissions
        self._all_dates = all_dates
        self._version_number = version_number
        self._question_types = question_types
        self._anonymous_submissions = anonymous_submissions
        self.logger = logging.getLogger("py3canvas.Quiz")


# Attach one property per model field; this replaces ~430 lines of
# duplicated hand-written getter/setter pairs with identical behavior.
for _field, _doc in _QUIZ_FIELD_DOCS:
    setattr(Quiz, _field, _quiz_field_property(_field, _doc))
del _field, _doc
# Field name -> docstring for every Quizpermissions attribute. Each entry
# becomes a read/write property backed by the ``_<name>`` instance attribute.
_QUIZ_PERMISSIONS_FIELD_DOCS = (
    ("read", "whether the user can view the quiz."),
    ("submit", "whether the user may submit a submission for the quiz."),
    ("create", "whether the user may create a new quiz."),
    ("manage", "whether the user may edit, update, or delete the quiz."),
    ("read_statistics", "whether the user may view quiz statistics for this quiz."),
    ("review_grades", "whether the user may review grades for all quiz submissions for this quiz."),
    ("update", "whether the user may update the quiz."),
)


def _quiz_permissions_property(name, doc):
    """Build a read/write property for permissions field *name*.

    The setter only mutates the local object and logs a warning, because
    py3canvas models never push changes back to the remote Canvas
    instance.
    """
    attr = "_" + name

    def _getter(self):
        return getattr(self, attr)

    def _setter(self, value):
        # Logger.warning replaces the deprecated Logger.warn alias; the
        # message text is unchanged from the generated original.
        self.logger.warning(
            "Setting values on %s will NOT update the remote Canvas instance.",
            name,
        )
        setattr(self, attr, value)

    return property(_getter, _setter, doc=doc)


class Quizpermissions(BaseModel):
    """Quizpermissions Model.

    Permissions the user has for the quiz."""

    def __init__(
        self,
        read=None,
        submit=None,
        create=None,
        manage=None,
        read_statistics=None,
        review_grades=None,
        update=None,
    ):
        """Init method for Quizpermissions class.

        Every keyword argument is stored on the matching ``_<name>``
        attribute and exposed through the properties attached below.
        """
        self._read = read
        self._submit = submit
        self._create = create
        self._manage = manage
        self._read_statistics = read_statistics
        self._review_grades = review_grades
        self._update = update
        self.logger = logging.getLogger("py3canvas.Quizpermissions")


# Attach one property per permissions field; replaces the duplicated
# hand-written getter/setter pairs with identical behavior.
for _field, _doc in _QUIZ_PERMISSIONS_FIELD_DOCS:
    setattr(Quizpermissions, _field, _quiz_permissions_property(_field, _doc))
del _field, _doc
|
|
#!/usr/bin/python
# Copyright 2016 Mender Software AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import shutil
import time
from fabric.api import *
import pytest
from helpers import Helpers
from MenderAPI import adm, deploy
from mendertesting import MenderTesting
from common_docker import *
from common_setup import *
from common_update import *
from common import *
logger = logging.getLogger("root")
# Parametrization data for TestStateScripts.test_state_scripts.
# Each entry is (description, test_set) where test_set keys are:
#   FailureScript:  scripts forced to exit non-zero during the update
#   ExpectedStatus: final deployment status ("success"/"failure"), or None
#                   when the deployment is never even attempted
#   ScriptOrder:    exact order the state scripts are expected to run in
# plus optional flags: BrokenArtifactId, SimulateBootFailureIn,
# CorruptDataScriptVersionIn, CorruptEtcScriptVersionInUpdate.
TEST_SETS = [
    (
        "Normal_success",
        {
            "FailureScript": [],
            "ExpectedStatus": "success",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Leave_01_extra_string",
                "ArtifactCommit_Leave_91",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_Idle_Enter_script",
        {
            "FailureScript": ["Idle_Enter_09"],
            "ExpectedStatus": "success",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",  # Error in this script should not have any effect.
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Leave_01_extra_string",
                "ArtifactCommit_Leave_91",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_Idle_Leave_script",
        {
            "FailureScript": ["Idle_Leave_09"],
            "ExpectedStatus": "success",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",  # Error in this script should not have any effect.
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Leave_01_extra_string",
                "ArtifactCommit_Leave_91",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_Sync_Enter_script",
        {
            "FailureScript": ["Sync_Enter_02"],
            "ExpectedStatus": None,
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Error_15",
                "Sync_Error_16",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_Sync_Leave_script",
        {
            "FailureScript": ["Sync_Leave_15"],
            "ExpectedStatus": None,
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Sync_Error_15",
                "Sync_Error_16",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_Download_Enter_script",
        {
            "FailureScript": ["Download_Enter_12"],
            "ExpectedStatus": None,
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Error_25",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_Download_Leave_script",
        {
            "FailureScript": ["Download_Leave_14"],
            "ExpectedStatus": None,
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Error_25",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_ArtifactInstall_Enter_script",
        {
            "FailureScript": ["ArtifactInstall_Enter_01"],
            "ExpectedStatus": "failure",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Error_01",
                "ArtifactInstall_Error_02",
                "ArtifactInstall_Error_99",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_ArtifactInstall_Leave_script",
        {
            "FailureScript": ["ArtifactInstall_Leave_01"],
            "ExpectedStatus": "failure",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Error_01",
                "ArtifactInstall_Error_02",
                "ArtifactInstall_Error_99",
                "ArtifactRollback_Enter_00",
                "ArtifactRollback_Enter_01",
                "ArtifactRollback_Leave_00",
                "ArtifactRollback_Leave_01",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_ArtifactReboot_Enter_script",
        {
            "FailureScript": ["ArtifactReboot_Enter_11"],
            "ExpectedStatus": "failure",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Error_97",
                "ArtifactReboot_Error_98",
                "ArtifactRollback_Enter_00",
                "ArtifactRollback_Enter_01",
                "ArtifactRollback_Leave_00",
                "ArtifactRollback_Leave_01",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_ArtifactReboot_Leave_script",
        {
            "FailureScript": ["ArtifactReboot_Leave_89"],
            "ExpectedStatus": "failure",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Error_97",
                "ArtifactReboot_Error_98",
                "ArtifactRollback_Enter_00",
                "ArtifactRollback_Enter_01",
                "ArtifactRollback_Leave_00",
                "ArtifactRollback_Leave_01",
                "ArtifactRollbackReboot_Enter_00",
                "ArtifactRollbackReboot_Enter_99",
                "ArtifactRollbackReboot_Leave_01",
                "ArtifactRollbackReboot_Leave_99",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_ArtifactCommit_Enter_script",
        {
            "FailureScript": ["ArtifactCommit_Enter_05"],
            "ExpectedStatus": "failure",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Error_91",
                "ArtifactRollback_Enter_00",
                "ArtifactRollback_Enter_01",
                "ArtifactRollback_Leave_00",
                "ArtifactRollback_Leave_01",
                "ArtifactRollbackReboot_Enter_00",
                "ArtifactRollbackReboot_Enter_99",
                "ArtifactRollbackReboot_Leave_01",
                "ArtifactRollbackReboot_Leave_99",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Failure_in_ArtifactCommit_Leave_script",
        {
            "FailureScript": ["ArtifactCommit_Leave_01_extra_string"],
            "ExpectedStatus": "success",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Leave_01_extra_string",  # Error in this script should not have any effect.
                "ArtifactCommit_Leave_91",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Wrong_artifact_ID_on_filesystem",
        {
            "FailureScript": [],
            "ExpectedStatus": "failure",
            "BrokenArtifactId": True,
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Error_91",
                "ArtifactRollback_Enter_00",
                "ArtifactRollback_Enter_01",
                "ArtifactRollback_Leave_00",
                "ArtifactRollback_Leave_01",
                "ArtifactRollbackReboot_Enter_00",
                "ArtifactRollbackReboot_Enter_99",
                "ArtifactRollbackReboot_Leave_01",
                "ArtifactRollbackReboot_Leave_99",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Simulated_boot_failure_in_ArtifactReboot_Enter",
        {
            "FailureScript": [],
            "ExpectedStatus": "failure",
            "SimulateBootFailureIn": "ArtifactReboot_Enter_11",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Corrupted_script_version_in_data",
        {
            "FailureScript": [],
            "ExpectedStatus": "failure",
            "CorruptDataScriptVersionIn": "ArtifactReboot_Enter_11",
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                # since version is corrupted from now on, no more scripts
                # will be executed, but rollback will be performed
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
    (
        "Corrupted_script_version_in_etc",
        {
            "FailureScript": [],
            "ExpectedStatus": "failure",
            "CorruptEtcScriptVersionInUpdate": True,
            "ScriptOrder": [
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
                "Idle_Leave_09",
                "Idle_Leave_10",
                "Sync_Enter_02",
                "Sync_Enter_03",
                "Sync_Leave_04",
                "Sync_Leave_15",
                "Download_Enter_12",
                "Download_Enter_13",
                "Download_Leave_14",
                "Download_Leave_25",
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Enter_02",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Leave_03",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Enter_11",
                "ArtifactReboot_Leave_01",
                "ArtifactReboot_Leave_89",
                "ArtifactReboot_Leave_99",
                "ArtifactCommit_Enter_01",
                "ArtifactCommit_Enter_05",
                "ArtifactCommit_Error_91",
                "ArtifactRollback_Enter_00",
                "ArtifactRollback_Enter_01",
                "ArtifactRollback_Leave_00",
                "ArtifactRollback_Leave_01",
                "ArtifactRollbackReboot_Enter_00",
                "ArtifactRollbackReboot_Enter_99",
                "ArtifactRollbackReboot_Leave_01",
                "ArtifactRollbackReboot_Leave_99",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Enter_33",
                "ArtifactFailure_Leave_44",
                "ArtifactFailure_Leave_55",
                "Idle_Enter_08_testing",
                "Idle_Enter_09",
            ],
        },
    ),
]
# Parametrization data for TestStateScripts.test_reboot_recovery.
# Each entry is (description, test_set) where test_set keys are:
#   RebootScripts / RebootOnceScripts: scripts that power-cycle the device
#   ErrorScripts:           scripts forced to exit non-zero
#   ExpectedFinalPartition: partition the device must end up on
#   DoubleReboot:           expect two reboots (new image already installed)
#   ScriptOrder:            scripts installed in the artifact
#   ExpectedScriptFlow:     exact log sequence expected after recovery
REBOOT_TEST_SET = [
    # test-set0
    (
        "simulate_powerloss_artifact_install_enter",
        {
            "RebootScripts": [
                "ArtifactInstall_Enter_01",
            ],
            "ExpectedFinalPartition": ["OriginalPartition"],
            "ScriptOrder": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",
                "ArtifactFailure_Enter_01",
                "ArtifactFailure_Leave_89",
            ],
            "ExpectedScriptFlow": [
                "ArtifactInstall_Enter_01",  # kill!
                "ArtifactFailure_Enter_01",  # run failure scripts
                "ArtifactFailure_Leave_89"
            ],
        },
    ),
    # test-set1
    (
        "simulate_powerloss_in_artifact_install_leave",
        {
            "RebootScripts": ["ArtifactInstall_Leave_02"],
            "ExpectedFinalPartition": ["OriginalPartition"],
            "DoubleReboot": [True],
            "ScriptOrder": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_02",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",
                "ArtifactFailure_Enter_01",
                "ArtifactFailure_Leave_89"
            ],
            "ExpectedScriptFlow": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_02",  # reboot_detector
                "ArtifactFailure_Enter_01",  # rerun failure scripts
                "ArtifactFailure_Leave_89"
            ],
        },
    ),
    # test-set2
    (
        "simulate_powerloss_in_artifact_install_error_original_partition",
        {
            "ErrorScripts": ["ArtifactInstall_Enter_01"],
            "RebootScripts": ["ArtifactInstall_Error_01"],
            "ExpectedFinalPartition": ["OriginalPartition"],
            "ScriptOrder": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Error_01",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Leave_44",
            ],
            "ExpectedScriptFlow": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Error_01",  # kill!
                "ArtifactFailure_Enter_22",  # run failure scripts on the committed (old) partition
                "ArtifactFailure_Leave_44",
            ],
        },
    ),
    # test-set3
    (
        "simulate_powerloss_in_artifact_install_error_after_install",
        {
            "ErrorScripts": ["ArtifactInstall_Leave_01"],
            "DoubleReboot": [True],  # As the new image has already been installed, expect a double reboot
            "RebootScripts": ["ArtifactInstall_Error_01"],
            "ExpectedFinalPartition": ["OriginalPartition"],
            "ScriptOrder": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Error_01",
                "ArtifactFailure_Enter_22",
                "ArtifactFailure_Leave_44",
            ],
            "ExpectedScriptFlow": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactInstall_Error_01",  # kill!
                "ArtifactFailure_Enter_22",  # run failure scripts on the committed (old) partition
                "ArtifactFailure_Leave_44",
            ],
        },
    ),
    # test-set4
    (
        "simulate_powerloss_in_reboot_enter",
        {
            "RebootScripts": ["ArtifactReboot_Enter_01"],
            "DoubleReboot": [True],
            "ExpectedFinalPartition": ["OriginalPartition"],
            "ScriptOrder": [
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",
                "ArtifactFailure_Enter_02",
                "ArtifactFailure_Leave_09",
            ],
            "ExpectedScriptFlow": [
                "ArtifactReboot_Enter_01",  # kill!
                "ArtifactFailure_Enter_02",  # run failure scripts on the committed (old) partition
                "ArtifactFailure_Leave_09",
            ],
        },
    ),
    # test-set5
    (
        "simulate_powerloss_in_commit_enter",
        {
            "RebootScripts": ["ArtifactCommit_Enter_89"],
            "DoubleReboot": [True],
            "ExpectedFinalPartition": ["OriginalPartition"],
            "ScriptOrder": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",
                "ArtifactCommit_Enter_89",
                "ArtifactRollbackReboot_Enter_89",  # Should never be run
                "ArtifactFailure_Enter_89",
                "ArtifactFailure_Leave_09",
            ],
            "ExpectedScriptFlow": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",  # on second partition, stop mender client
                "ArtifactCommit_Enter_89",  # sync and kill!
                "ArtifactFailure_Enter_89",  # run failure scripts on the committed (old) partition
                "ArtifactFailure_Leave_09",
            ],
        },
    ),
    # test-set6
    (
        "simulate_powerloss_in_artifact_commit_leave",
        {
            "RebootOnceScripts": ["ArtifactCommit_Leave_01"],
            "DoubleReboot": [True],
            "ExpectedFinalPartition": ["OtherPartition"],
            "ScriptOrder": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",
                "ArtifactCommit_Enter_89",
                "ArtifactCommit_Leave_01",
                "ArtifactCommit_Leave_02",
            ],
            "ExpectedScriptFlow": [
                "ArtifactInstall_Enter_01",
                "ArtifactInstall_Leave_01",
                "ArtifactReboot_Enter_01",
                "ArtifactReboot_Leave_01",
                "ArtifactCommit_Enter_89",
                "ArtifactCommit_Leave_01",  # kill!
                "ArtifactCommit_Leave_01",  # rerun
                "ArtifactCommit_Leave_02",
            ],
        },
    ),
]
class TestStateScripts(MenderTesting):
    """Integration tests verifying that Mender state scripts run in the
    documented order, and that script failures and simulated power losses are
    handled correctly (rollback, failure scripts, final partition)."""

    # Every state script installed by test_state_scripts. "Artifact*" scripts
    # are packed into the artifact; all others go into the rootfs image.
    scripts = [
        "Idle_Enter_08_testing",
        "Idle_Enter_09",
        "Idle_Enter_100",  # Invalid script, should never be run.
        "Idle_Leave_09",
        "Idle_Leave_10",
        "Idle_Error_00",
        "Sync_Enter_02",
        "Sync_Enter_03",
        "Sync_Leave_04",
        "Sync_Leave_15",
        "Sync_Error_15",
        "Sync_Error_16",
        "Download_Enter_12",
        "Download_Enter_13",
        "Download_Leave_14",
        "Download_Leave_25",
        "Download_Error_25",
        "ArtifactInstall_Enter_01",
        "ArtifactInstall_Enter_02",
        "ArtifactInstall_Leave_01",
        "ArtifactInstall_Leave_03",
        "ArtifactInstall_Error_01",
        "ArtifactInstall_Error_02",
        "ArtifactInstall_Error_99",
        "ArtifactReboot_Enter_01",
        "ArtifactReboot_Enter_11",
        "ArtifactReboot_Leave_01",
        "ArtifactReboot_Leave_89",
        "ArtifactReboot_Leave_99",
        "ArtifactReboot_Error_97",
        "ArtifactReboot_Error_98",
        "ArtifactCommit_Enter_01",
        "ArtifactCommit_Enter_05",
        "ArtifactCommit_Leave_01_extra_string",
        "ArtifactCommit_Leave_91",
        "ArtifactCommit_Error_91",
        "ArtifactRollback_Enter_00",
        "ArtifactRollback_Enter_01",
        "ArtifactRollback_Leave_00",
        "ArtifactRollback_Leave_01",
        "ArtifactRollback_Error_15",  # Error for this state doesn't exist, should never run.
        "ArtifactRollbackReboot_Enter_00",
        "ArtifactRollbackReboot_Enter_99",
        "ArtifactRollbackReboot_Leave_01",
        "ArtifactRollbackReboot_Leave_99",
        "ArtifactRollbackReboot_Error_88",  # Error for this state doesn't exist, should never run.
        "ArtifactRollbackReboot_Error_99",  # Error for this state doesn't exist, should never run.
        "ArtifactFailure_Enter_22",
        "ArtifactFailure_Enter_33",
        "ArtifactFailure_Leave_44",
        "ArtifactFailure_Leave_55",
        "ArtifactFailure_Error_55",  # Error for this state doesn't exist, should never run.
    ]

    @pytest.mark.usefixtures("standard_setup_one_client_bootstrapped")
    @pytest.mark.parametrize("description,test_set", REBOOT_TEST_SET)
    def test_reboot_recovery(self, description, test_set):
        """Test that the client recovers correctly when power is lost (or a
        script fails) in the middle of running state scripts, and that it ends
        up on the expected partition with the expected script log."""
        # Re-dispatch onto the actual client host when invoked on the test host.
        if not env.host_string:
            execute(
                self.test_reboot_recovery,
                description,
                test_set,
                hosts=get_mender_clients())
            return
        client = env.host_string
        work_dir = "test_state_scripts.%s" % client
        # Every script appends its own name to the log; "failure" scripts
        # additionally crash the device via sysrq after syncing the log.
        script_content = '#!/bin/sh\n\necho "$(basename $0)" >> /data/test_state_scripts.log\n'
        script_failure_content = script_content + 'sync\necho b > /proc/sysrq-trigger\n'  # flush to disk before killing
        # This is only needed in the case: die commit-leave,
        # otherwise the device will get stuck in a boot-reboot loop
        script_reboot_once = (
            '''#!/bin/sh
            if [ $(grep -c $(basename $0) /data/test_state_scripts.log) -eq 0 ]; then
                echo "$(basename $0)" >> /data/test_state_scripts.log && sync && echo b > /proc/sysrq-trigger
            fi
            echo "$(basename $0)" >> /data/test_state_scripts.log
            exit 0''')
        script_error_content = script_content + "exit 1"
        broken_image = test_set.get("Rollback", False)
        # Put artifact-scripts in the artifact.
        artifact_script_dir = os.path.join(work_dir, "artifact-scripts")
        if os.path.exists(work_dir):
            shutil.rmtree(work_dir, ignore_errors=True)
        os.mkdir(work_dir)
        os.mkdir(artifact_script_dir)
        new_rootfs = os.path.join(work_dir, "rootfs.ext4")
        shutil.copy(conftest.get_valid_image(), new_rootfs)
        ps = subprocess.Popen(
            ["debugfs", "-w", new_rootfs], stdin=subprocess.PIPE)
        ps.stdin.write("cd /etc/mender\n" "mkdir scripts\n" "cd scripts\n")
        ps.stdin.close()
        ps.wait()
        for script in test_set.get("ScriptOrder"):
            if not script.startswith("Artifact"):
                # Not an artifact script, skip this one.
                continue
            with open(os.path.join(artifact_script_dir, script), "w") as fd:
                # NOTE(fix): these cases must be mutually exclusive. This used
                # to be two independent "if" statements, so RebootScripts
                # entries also fell through to the final "else" and had the
                # plain script content appended after the kill content.
                if script in test_set.get("RebootScripts", []):
                    fd.write(script_failure_content)
                elif script in test_set.get("RebootOnceScripts", []):
                    fd.write(script_reboot_once)
                elif script in test_set.get("ErrorScripts", []):
                    fd.write(script_error_content)
                else:
                    fd.write(script_content)
        # Now create the artifact, and make the deployment.
        device_id = Helpers.ip_to_device_id_map([client])[client]
        with Helpers.RebootDetector() as reboot_detector:
            deployment_id = common_update_procedure(
                install_image=new_rootfs,
                broken_image=broken_image,
                verify_status=True,
                devices=[device_id],
                scripts=[artifact_script_dir])[0]
            try:
                orig_part = Helpers.get_active_partition()
                # handle case where the client has not finished the update
                # path on the committed partition, but new partition is installed,
                # thus we will not get a valid entrypoint into the uncommitted partition (reboot_leave)
                # and the client will thus reboot straight after starting, and u-boot will
                # fall back to the committed partition
                if test_set.get("DoubleReboot", False):
                    reboot_detector.verify_reboot_performed(number_of_reboots=2)
                else:
                    reboot_detector.verify_reboot_performed()
                # wait until the last script has been run
                script_logs = ""
                timeout = time.time() + 60*60
                while timeout >= time.time():
                    time.sleep(3)
                    script_logs = run("cat /data/test_state_scripts.log")
                    if test_set.get("ExpectedScriptFlow")[-1] in script_logs:
                        break
                # make sure the client ended up on the right partition
                if "OtherPartition" in test_set.get("ExpectedFinalPartition", []):
                    assert orig_part != Helpers.get_active_partition()
                else:
                    assert orig_part == Helpers.get_active_partition()
                assert script_logs.split() == test_set.get("ExpectedScriptFlow")
            finally:
                # Reset client state so the next parametrized run starts clean.
                run("systemctl stop mender && "
                    + "rm -f /data/test_state_scripts.log && "
                    + "rm -rf /etc/mender/scripts && "
                    + "rm -rf /data/mender/scripts && "
                    + "systemctl start mender")

    @MenderTesting.slow
    @pytest.mark.usefixtures("standard_setup_one_client_bootstrapped")
    @pytest.mark.parametrize("description,test_set", TEST_SETS)
    def test_state_scripts(self, description, test_set):
        """Test that state scripts are executed in right order, and that errors
        are treated like they should."""
        # Re-dispatch onto the actual client host when invoked on the test host.
        if not env.host_string:
            execute(self.test_state_scripts, description, test_set,
                    hosts=get_mender_clients())
            return
        client = env.host_string
        work_dir = "test_state_scripts.%s" % client
        deployment_id = None
        try:
            script_content = '#!/bin/sh\n\necho "$(basename $0)" >> /data/test_state_scripts.log\n'
            script_failure_content = script_content + "exit 1\n"
            old_active = Helpers.get_active_partition()
            # Make rootfs-scripts and put them in rootfs image.
            rootfs_script_dir = os.path.join(work_dir, "rootfs-scripts")
            shutil.rmtree(work_dir, ignore_errors=True)
            os.mkdir(work_dir)
            os.mkdir(rootfs_script_dir)
            new_rootfs = os.path.join(work_dir, "rootfs.ext4")
            shutil.copy(conftest.get_valid_image(), new_rootfs)
            ps = subprocess.Popen(["debugfs", "-w", new_rootfs], stdin=subprocess.PIPE)
            ps.stdin.write("cd /etc/mender\n"
                           "mkdir scripts\n"
                           "cd scripts\n")
            with open(os.path.join(rootfs_script_dir, "version"), "w") as fd:
                if test_set.get('CorruptEtcScriptVersionInUpdate'):
                    fd.write("1000")
                else:
                    fd.write("2")
            ps.stdin.write("rm version\n")
            ps.stdin.write("write %s version\n" % os.path.join(rootfs_script_dir, "version"))
            for script in self.scripts:
                if script.startswith("Artifact"):
                    # This is a script for the artifact, skip this one.
                    continue
                with open(os.path.join(rootfs_script_dir, script), "w") as fd:
                    if script in test_set['FailureScript']:
                        fd.write(script_failure_content)
                    else:
                        fd.write(script_content)
                    # Scripts must be executable. (0o755 instead of the old
                    # Python-2-only "0755" octal literal.)
                    os.fchmod(fd.fileno(), 0o755)
                ps.stdin.write("write %s %s\n" % (os.path.join(rootfs_script_dir, script), script))
            ps.stdin.close()
            ps.wait()
            # Write this again in case it was corrupted above.
            with open(os.path.join(rootfs_script_dir, "version"), "w") as fd:
                fd.write("2")
            # Then copy them to QEMU host.
            # Zip them all up to avoid having to copy each and every file, which is
            # quite slow.
            subprocess.check_call(["tar", "czf", "../rootfs-scripts.tar.gz", "."], cwd=rootfs_script_dir)
            # Stop client first to avoid race conditions.
            run("systemctl stop mender")
            try:
                put(os.path.join(work_dir, "rootfs-scripts.tar.gz"),
                    remote_path="/")
                # NOTE(fix): command previously read "mkdir -p cd /etc/...",
                # which also created a spurious "cd" directory.
                run("mkdir -p /etc/mender/scripts && "
                    + "cd /etc/mender/scripts && "
                    + "tar xzf /rootfs-scripts.tar.gz && "
                    + "rm -f /rootfs-scripts.tar.gz")
            finally:
                run("systemctl start mender")
            # Put artifact-scripts in the artifact.
            artifact_script_dir = os.path.join(work_dir, "artifact-scripts")
            os.mkdir(artifact_script_dir)
            for script in self.scripts:
                if not script.startswith("Artifact"):
                    # Not an artifact script, skip this one.
                    continue
                with open(os.path.join(artifact_script_dir, script), "w") as fd:
                    if script in test_set['FailureScript']:
                        fd.write(script_failure_content)
                    else:
                        fd.write(script_content)
                    if test_set.get("SimulateBootFailureIn") == script:
                        # Simulate that boot failed by immediately forcing a
                        # rollback with U-Boot.
                        fd.write("fw_setenv bootcount 1\n")
                    if test_set.get("CorruptDataScriptVersionIn") == script:
                        fd.write("printf '1000' > /data/mender/scripts/version\n")
            # Now create the artifact, and make the deployment.
            device_id = Helpers.ip_to_device_id_map([client])[client]
            broken_artifact_id = test_set.get('BrokenArtifactId')
            if broken_artifact_id is None:
                broken_artifact_id = False
            deployment_id = common_update_procedure(install_image=new_rootfs,
                                                    broken_image=broken_artifact_id,
                                                    verify_status=False,
                                                    devices=[device_id],
                                                    scripts=[artifact_script_dir])[0]
            if test_set['ExpectedStatus'] is None:
                # In this case we don't expect the deployment to even be
                # attempted, presumably due to failing Idle/Sync/Download
                # scripts on the client. So no deployment checking. Just wait
                # until there is at least one Error script in the log, which
                # will always be the case if ExpectedStatus is none (since one
                # of them is preventing the update from being attempted).
                def fetch_info(cmd_list):
                    # Run each diagnostic command, log it and collect its output.
                    all_output = ""
                    for cmd in cmd_list:
                        with settings(warn_only=True):
                            output = run(cmd)
                        logger.error("%s:\n%s" % (cmd, output))
                        all_output += "%s\n" % output
                    return all_output
                info_query = [
                    "cat /data/test_state_scripts.log 1>&2",
                    "journalctl -u mender",
                    "top -n5 -b",
                    "ls -l /proc/`pgrep mender`/fd",
                    "for fd in /proc/`pgrep mender`/fdinfo/*; do echo $fd:; cat $fd; done",
                ]
                starttime = time.time()
                while starttime + 60*60 >= time.time():
                    with settings(warn_only=True):
                        result = run("grep Error /data/test_state_scripts.log")
                    if result.succeeded:
                        # If it succeeds, stop.
                        break
                    else:
                        fetch_info(info_query)
                        time.sleep(10)
                        continue
                else:
                    info = fetch_info(info_query)
                    pytest.fail('Waited too long for "Error" to appear in log:\n%s' % info)
            else:
                deploy.check_expected_statistics(deployment_id, test_set['ExpectedStatus'], 1)
            # Always give the client a little bit of time to settle in the base
            # state after an update.
            time.sleep(10)
            output = run("cat /data/test_state_scripts.log")
            self.verify_script_log_correct(test_set, output.split('\n'))
            new_active = Helpers.get_active_partition()
            should_switch_partition = (test_set['ExpectedStatus'] == "success")
            # TODO
            if test_set.get('SwapPartitionExpectation') is not None:
                should_switch_partition = not should_switch_partition
            if should_switch_partition:
                assert old_active != new_active, "Device did not switch partition as expected!"
            else:
                assert old_active == new_active, "Device switched partition which was not expected!"
        finally:
            shutil.rmtree(work_dir, ignore_errors=True)
            if deployment_id:
                # Best-effort abort; the deployment may already be finished.
                try:
                    deploy.abort(deployment_id)
                except Exception:
                    pass
            run("systemctl stop mender && "
                + "rm -f /data/test_state_scripts.log && "
                + "rm -rf /etc/mender/scripts && "
                + "rm -rf /data/mender/scripts && "
                + "systemctl start mender")

    def verify_script_log_correct(self, test_set, log):
        """Assert that the sequence of script names in `log` matches
        test_set['ScriptOrder'], allowing the Idle/Sync prefix to repeat
        (the client may poll several times before the deployment is ready)."""
        expected_order = test_set['ScriptOrder']
        # Iterate down the list of expected scripts, and make sure that the log
        # follows the same list.
        # Position in log list.
        log_pos = 0
        # Position in script list from test_set.
        expected_pos = 0
        # Position of the most recent first Idle script.
        idle_pos = 0
        try:
            while expected_pos < len(expected_order):
                if len(log[log_pos]) > 0:
                    # Make sure we are at right script.
                    assert expected_order[expected_pos] == log[log_pos]
                    log_pos = log_pos + 1
                    expected_pos = expected_pos + 1
                if (log_pos < len(log)
                    and log[log_pos - 1].startswith("Sync_")
                    and log[log_pos].startswith("Idle_")
                    and not expected_order[expected_pos].startswith("Idle_")):
                    # The Idle/Sync sequence is allowed to "wrap around" and start
                    # over, because it may take a few rounds of checking before the
                    # deployment is ready for the client.
                    expected_pos = idle_pos
                if (expected_pos < len(expected_order)
                    and not expected_order[expected_pos - 1].startswith("Idle_")
                    and expected_order[expected_pos].startswith("Idle_")):
                    # New Idle sequence entered.
                    idle_pos = expected_pos
        except Exception:
            print("Exception in verify_script_log_correct: log of scripts = '%s'"
                  % "\n".join(log))
            print("scripts we expected = '%s'"
                  % "\n".join(expected_order))
            raise
|
|
# coding=utf-8
"""
Behave BDD runner.
See _bdd_utils#get_path_by_env for information how to pass list of features here.
Each feature could be file, folder with feature files or folder with "features" subfolder
Other args are tag expressions in format (--tags=.. --tags=..).
See https://pythonhosted.org/behave/behave.html#tag-expression
"""
import functools
import glob
import sys
import os
import traceback
from behave.formatter.base import Formatter
from behave.model import Step, ScenarioOutline, Feature, Scenario
from behave.tag_expression import TagExpression
import re
import _bdd_utils
from distutils import version
from behave import __version__ as behave_version
from _jb_utils import VersionAgnosticUtils
_MAX_STEPS_SEARCH_FEATURES = 5000 # Do not look for features in folder that has more that this number of children
_FEATURES_FOLDER = 'features' # "features" folder name.
__author__ = 'Ilya.Kazakevich'
from behave import configuration, runner
def _get_dirs_to_run(base_dir_to_search):
    """
    Searches for "features" dirs in some base_dir
    :return: list of feature dirs to run
    :rtype: list
    :param base_dir_to_search root directory to search (should not have too many children!)
    :type base_dir_to_search str
    """
    result = set()
    for (step, (folder, sub_folders, files)) in enumerate(os.walk(base_dir_to_search)):
        if os.path.basename(folder) == _FEATURES_FOLDER and os.path.isdir(folder):
            result.add(os.path.abspath(folder))
        if step >= _MAX_STEPS_SEARCH_FEATURES:  # Guard against runaway walks (>= is safer than ==)
            # Fixed wording: "provider concrete" -> "provide a concrete".
            err = "Folder {0} is too deep to find any features folder. Please provide a concrete folder".format(
                base_dir_to_search)
            raise Exception(err)
    return list(result)
def _merge_hooks_wrapper(*hooks):
"""
Creates wrapper that runs provided behave hooks sequentally
:param hooks: hooks to run
:return: wrapper
"""
# TODO: Wheel reinvented!!!!
def wrapper(*args, **kwargs):
for hook in hooks:
hook(*args, **kwargs)
return wrapper
class _RunnerWrapper(runner.Runner):
    """
    Thin wrapper around behave's native runner. Has nothing to do with BddRunner!
    Exists to support dry runs (collecting data from scenarios without
    executing them) and to inject extra before/after hooks.
    """

    def __init__(self, config, hooks):
        """
        :type config configuration.Configuration
        :param config behave configuration
        :type hooks dict
        :param hooks mapping like "before_scenario" -> f(context, scenario); merged with user hooks on load
        """
        super(_RunnerWrapper, self).__init__(config)
        # While True, run_model() does nothing; only "self.features" is filled.
        self.dry_run = False
        self.__hooks = hooks

    def load_hooks(self, filename='environment.py'):
        """
        Extends the parent implementation so our injected hooks run alongside
        any user-defined hooks from the environment file.
        :param filename: env. file name
        """
        super(_RunnerWrapper, self).load_hooks(filename)
        for name, injected in self.__hooks.items():
            if name not in self.hooks:
                self.hooks[name] = injected
                continue
            user_hook = self.hooks[name]
            if name.startswith("before"):
                # User hook runs first for "before_*" events...
                self.hooks[name] = _merge_hooks_wrapper(user_hook, injected)
            else:
                # ...and last for "after_*" events.
                self.hooks[name] = _merge_hooks_wrapper(injected, user_hook)

    def run_model(self, features=None):
        """
        Skips execution entirely during a dry run, otherwise defers to behave.
        :param features: features to run
        :return:
        """
        if self.dry_run:  # To stop further execution
            return None
        return super(_RunnerWrapper, self).run_model(features)

    def clean(self):
        """
        Cleans runner after dry run (clears hooks, features etc). To be called before real run!
        """
        self.dry_run = False
        self.hooks.clear()
        self.features = []
class _BehaveRunner(_bdd_utils.BddRunner):
    """
    BddRunner for behave
    """
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
            duration_ms = element.duration * 1000
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, duration_ms)
            elif element.status == 'failed':
                # Correct way is to use element.errormessage
                # but assertions do not have trace there (due to Behave internals)
                # do, we collect it manually
                error_message = element.error_message
                fetch_log = not error_message  # If no error_message provided, need to fetch log manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = self._collect_trace(element, utils)
                # May be empty https://github.com/behave/behave/issues/468 for some exceptions
                if not trace and not error_message:
                    try:
                        error_message = traceback.format_exc()
                    except AttributeError:
                        # Exception may have empty stracktrace, and traceback.format_exc() throws
                        # AttributeError in this case
                        trace = self._collect_trace(element, utils)
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception
                message_as_string = utils.to_unicode(error_message)
                if fetch_log and self.__real_runner.config.log_capture:
                    message_as_string += u"\n" + utils.to_unicode(self.__real_runner.log_capture.getvalue())
                self._test_failed(step_name, message_as_string, trace, duration=duration_ms)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name, element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples), element.location)
        else:
            self._feature_or_scenario(is_started, element.name, element.location)
    def _collect_trace(self, element, utils):
        # Joins the formatted traceback of the step's exception into one unicode string.
        return u"".join([utils.to_unicode(l) for l in traceback.format_tb(element.exc_traceback)])
    def __init__(self, config, base_dir):
        """
        :type config configuration.Configuration
        """
        super(_BehaveRunner, self).__init__(base_dir)
        self.__config = config
        # Install hooks
        self.__real_runner = _RunnerWrapper(config, {
            "before_feature": functools.partial(self.__process_hook, True),
            "after_feature": functools.partial(self.__process_hook, False),
            "before_scenario": functools.partial(self.__process_hook, True),
            "after_scenario": functools.partial(self.__process_hook, False),
            "before_step": functools.partial(self.__process_hook, True),
            "after_step": functools.partial(self.__process_hook, False)
        })
    def _run_tests(self):
        self.__real_runner.run()
    def __filter_scenarios_by_args(self, scenario):
        """
        Filters out scenarios that should be skipped by tags or scenario names
        :param scenario scenario to check
        :return true if should pass
        """
        assert isinstance(scenario, Scenario), scenario
        # TODO: share with lettuce_runner.py#_get_features_to_run
        expected_tags = self.__config.tags
        scenario_name_re = self.__config.name_re
        if scenario_name_re and not scenario_name_re.match(scenario.name):
            return False
        if not expected_tags:
            return True  # No tags nor names are required
        return isinstance(expected_tags, TagExpression) and expected_tags.check(scenario.tags)
    def _get_features_to_run(self):
        # Dry run: let behave parse everything so self.features gets populated,
        # without actually executing any test.
        self.__real_runner.dry_run = True
        self.__real_runner.run()
        features_to_run = self.__real_runner.features
        self.__real_runner.clean()  # To make sure nothing left after dry run
        # Change outline scenario skeletons with real scenarios
        for feature in features_to_run:
            assert isinstance(feature, Feature), feature
            scenarios = []
            for scenario in feature.scenarios:
                if isinstance(scenario, ScenarioOutline):
                    scenarios.extend(scenario.scenarios)
                else:
                    scenarios.append(scenario)
            # NOTE(review): on Python 3 filter() returns a lazy iterator, not a list —
            # presumably consumers iterate it once; confirm if list semantics are needed.
            feature.scenarios = filter(self.__filter_scenarios_by_args, scenarios)
        return features_to_run
if __name__ == "__main__":
    # TODO: support all other params instead
    class _Null(Formatter):
        """
        Null formater to prevent stdout output
        """
        pass
    # Command-line args passed through to behave (empty strings dropped).
    command_args = list(filter(None, sys.argv[1:]))
    if command_args:
        if "--junit" in command_args:
            raise Exception("--junit report type for Behave is unsupported in PyCharm. \n "
                            "See: https://youtrack.jetbrains.com/issue/PY-14219")
        _bdd_utils.fix_win_drive(command_args[0])
    (base_dir, scenario_names, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)
    # Each requested scenario name becomes a behave "-n <regex>" filter.
    for scenario_name in scenario_names:
        command_args += ["-n", re.escape(scenario_name)]  # TODO : rewite pythonic
    my_config = configuration.Configuration(command_args=command_args)
    # Temporary workaround to support API changes in 1.2.5
    if version.LooseVersion(behave_version) >= version.LooseVersion("1.2.5"):
        from behave.formatter import _registry
        _registry.register_as("com.intellij.python.null",_Null)
    else:
        from behave.formatter import formatters
        formatters.register_as(_Null, "com.intellij.python.null")
    my_config.format = ["com.intellij.python.null"]  # To prevent output to stdout
    my_config.reporters = []  # To prevent summary to stdout
    my_config.stdout_capture = False  # For test output
    my_config.stderr_capture = False  # For test output
    features = set()
    for feature in what_to_run:
        if os.path.isfile(feature) or glob.glob(
                os.path.join(feature, "*.feature")):  # File of folder with "features" provided, load it
            features.add(feature)
        elif os.path.isdir(feature):
            features |= set(_get_dirs_to_run(feature))  # Find "features" subfolder
    my_config.paths = list(features)
    if what_to_run and not my_config.paths:
        raise Exception("Nothing to run in {0}".format(what_to_run))
    _BehaveRunner(my_config, base_dir).run()
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 EC2 Credentials action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class CreateEC2Creds(command.ShowOne):
    _description = _("Create EC2 credentials")
    def get_parser(self, prog_name):
        """Add --project/--user options to the base parser."""
        parser = super(CreateEC2Creds, self).get_parser(prog_name)
        parser.add_argument('--project',
                            metavar='<project>',
                            help=_('Create credentials in project '
                                   '(name or ID; default: current authenticated project)'))
        parser.add_argument('--user',
                            metavar='<user>',
                            help=_('Create credentials for user '
                                   '(name or ID; default: current authenticated user)'))
        return parser
    def take_action(self, parsed_args):
        """Create the EC2 credential pair and return it as (columns, values)."""
        identity_client = self.app.client_manager.identity
        auth_ref = self.app.client_manager.auth_ref
        if parsed_args.project:
            project = utils.find_resource(identity_client.tenants,
                                          parsed_args.project).id
        else:
            # Default to the project of the current authentication.
            project = auth_ref.project_id
        if parsed_args.user:
            user = utils.find_resource(identity_client.users,
                                       parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = auth_ref.user_id
        creds = identity_client.ec2.create(user, project)
        info = {}
        info.update(creds._info)
        # Identity v2 reports the project as 'tenant_id'; expose it as 'project_id'.
        if 'tenant_id' in info:
            info['project_id'] = info.pop('tenant_id')
        return zip(*sorted(info.items()))
class DeleteEC2Creds(command.Command):
    _description = _("Delete EC2 credentials")
    def get_parser(self, prog_name):
        """Add positional access keys and --user option to the base parser."""
        parser = super(DeleteEC2Creds, self).get_parser(prog_name)
        parser.add_argument('access_keys',
                            metavar='<access-key>',
                            nargs='+',
                            help=_('Credentials access key(s)'))
        parser.add_argument('--user',
                            metavar='<user>',
                            help=_('Delete credentials for user (name or ID)'))
        return parser
    def take_action(self, parsed_args):
        """Delete each requested key; report how many (if any) failed."""
        identity_client = self.app.client_manager.identity
        if parsed_args.user:
            user = utils.find_resource(identity_client.users,
                                       parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = self.app.client_manager.auth_ref.user_id
        failures = 0
        for access_key in parsed_args.access_keys:
            try:
                identity_client.ec2.delete(user, access_key)
            except Exception as e:
                # Keep going; a single bad key should not abort the batch.
                failures += 1
                LOG.error(_("Failed to delete EC2 credentials with "
                            "access key '%(access_key)s': %(e)s"),
                          {'access_key': access_key, 'e': e})
        if failures > 0:
            total = len(parsed_args.access_keys)
            msg = (_("%(result)s of %(total)s EC2 keys failed "
                     "to delete.") % {'result': failures, 'total': total})
            raise exceptions.CommandError(msg)
class ListEC2Creds(command.Lister):
    _description = _("List EC2 credentials")
    def get_parser(self, prog_name):
        """Add the --user filter option to the base parser."""
        parser = super(ListEC2Creds, self).get_parser(prog_name)
        parser.add_argument('--user',
                            metavar='<user>',
                            help=_('Filter list by user (name or ID)'))
        return parser
    def take_action(self, parsed_args):
        """List credentials for a user as (column_headers, row_iterator)."""
        identity_client = self.app.client_manager.identity
        if parsed_args.user:
            user = utils.find_resource(identity_client.users,
                                       parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = self.app.client_manager.auth_ref.user_id
        columns = ('access', 'secret', 'tenant_id', 'user_id')
        column_headers = ('Access', 'Secret', 'Project ID', 'User ID')
        rows = (utils.get_item_properties(cred, columns, formatters={})
                for cred in identity_client.ec2.list(user))
        return (column_headers, rows)
class ShowEC2Creds(command.ShowOne):
    _description = _("Display EC2 credentials details")
    def get_parser(self, prog_name):
        """Add the positional access key and --user option to the base parser."""
        parser = super(ShowEC2Creds, self).get_parser(prog_name)
        parser.add_argument('access_key',
                            metavar='<access-key>',
                            help=_('Credentials access key'))
        parser.add_argument('--user',
                            metavar='<user>',
                            help=_('Show credentials for user (name or ID)'))
        return parser
    def take_action(self, parsed_args):
        """Fetch one credential and return it as (columns, values)."""
        identity_client = self.app.client_manager.identity
        if parsed_args.user:
            user = utils.find_resource(identity_client.users,
                                       parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = self.app.client_manager.auth_ref.user_id
        creds = identity_client.ec2.get(user, parsed_args.access_key)
        info = {}
        info.update(creds._info)
        # Identity v2 reports the project as 'tenant_id'; expose it as 'project_id'.
        if 'tenant_id' in info:
            info['project_id'] = info.pop('tenant_id')
        return zip(*sorted(info.items()))
|
|
#!/usr/bin/python
#------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2014 Jordi Arnavat
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#------------------------------------------------------------------------------
import boto
import boto.ec2
from boto.exception import EC2ResponseError
import time
import subprocess
import urllib2
import re
import argparse
import os.path
import sys
import shutil
import logging
SCRIPT_NAME = 'AWSiRecovery'
# Script-wide logger: DEBUG and above goes to both the console and
# logs/awsirecovery.log, with timestamped records.
logger = logging.getLogger(SCRIPT_NAME)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
# Create the log directory on first run (relative to the current working dir).
if not os.path.exists('logs'):
    os.makedirs('logs')
fh = logging.FileHandler('logs/awsirecovery.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
def getMyIP():
    """Return this machine's public IPv4 address.

    Scrapes http://whatsmyip.net/ and returns the first dotted-quad
    string found in the response body.
    """
    page_request = urllib2.Request('http://whatsmyip.net/')
    #ie7 user-agent just in case...
    page_request.add_header('User-Agent','Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')
    page_body = urllib2.build_opener().open(page_request).read()
    return re.findall(r'[0-9]+(?:\.[0-9]+){3}', page_body)[0]
class AwsWrapper(object):
    """Thin base holding a boto EC2 connection for one region."""
    def __init__(self, region):
        # Credentials come from the ambient boto configuration/environment.
        self.conn = boto.ec2.connect_to_region(region)
class AwsWrapperExtended(AwsWrapper):
    """AwsWrapper plus state polling.

    Subclasses set ``parameter`` to the attribute name that carries the boto
    object's state ('state' for instances, 'status' for volumes).
    """
    parameter = 'state'
    def waitUntil(self, obj, state):
        """Poll *obj* (must expose update() and the state attribute) until it
        reaches *state*, sleeping 8s between polls.

        Renamed the parameter from 'object' to avoid shadowing the builtin;
        all callers in this file pass it positionally.
        """
        while getattr(obj, self.parameter) != state:
            time.sleep(8)
            obj.update()
            logger.debug('%s: %s' % (obj, getattr(obj, self.parameter)))
class Instance(AwsWrapperExtended):
    """Wrapper around a single EC2 instance."""
    def __init__(self, id=None, name=None, region='eu-west-1'):
        super(Instance, self).__init__(region)
        if id:
            self.get(id)
    def get(self, id):
        """Fetch the boto instance object for *id* into self.instance."""
        self.instance = self.conn.get_all_instances(instance_ids=id)[0].instances[0]
    def waitUntil(self, state):
        super(Instance, self).waitUntil(self.instance, state)
    def create(self, ami_id, tags=None, **kwargs):
        """Launch an instance from *ami_id*, wait until it is running, then tag it.

        :param tags: optional mapping of tag name -> tag value
        (mutable-default fixed: was tags={})
        """
        try:
            reservation = self.conn.run_instances(ami_id, **kwargs)
        except EC2ResponseError as err:  # 'as' form works on Python 2.6+ and 3.x
            logger.error('Error creating instance: %s' % str(err))
            return
        self.instance = reservation.instances[0]
        self.waitUntil('running')
        # BUG FIX: the old loop unpacked dict *keys* ('for name, value in tags')
        # and then referenced an undefined name 'tag'; iterate items() instead.
        for tag_name, tag_value in (tags or {}).items():
            self.instance.add_tag(tag_name, tag_value)
    def stop(self):
        self.instance.stop()
        self.waitUntil('stopped')
    def terminate(self):
        self.conn.terminate_instances(instance_ids=[self.instance.id])
        self.waitUntil('terminated')
        del(self.instance)
    def getMappedDevices(self):
        """Return the block-device names (e.g. '/dev/sda1') mapped on the instance."""
        return self.instance.block_device_mapping.keys()
    def getVolume(self, mountpoint):
        """Return a Volume wrapper for the EBS volume attached at *mountpoint*."""
        return Volume(self.instance.block_device_mapping.get(mountpoint).volume_id)
class Volume(AwsWrapperExtended):
    """Wrapper around a single EBS volume; its state attribute is 'status'."""
    parameter = 'status'
    def __init__(self,id,region='eu-west-1'):
        super(Volume, self).__init__(region)
        self.get(id)
    def waitUntil(self,state):
        super(Volume, self).waitUntil(self.volume,state)
    def get(self,id):
        # Fetch the boto volume object for *id* into self.volume.
        self.volume = self.conn.get_all_volumes(volume_ids=id)[0]
    def attach(self,instance_id, mountpoint):
        """Attach this volume to *instance_id* at *mountpoint* and wait until in-use."""
        logger.info("Attaching volume %s from instance %s"%(self.volume.id,instance_id))
        self.conn.attach_volume(self.volume.id, instance_id = instance_id, device=mountpoint)
        self.waitUntil("in-use")
    def detach(self,instance_id, mountpoint):
        """Detach this volume from *instance_id* and wait until available."""
        logger.info("Detaching volume %s from instance %s"%(self.volume.id,instance_id))
        self.conn.detach_volume(self.volume.id, instance_id = instance_id, device=mountpoint)
        self.waitUntil("available")
class RecoverySecurityGroup(AwsWrapper):
    """Temporary security group allowing SSH (tcp/22) only from the current public IP."""
    def __init__(self, name=None, region='eu-west-1'):
        super(RecoverySecurityGroup, self).__init__(region)
        self.name = '%s:SecurityGroup' % SCRIPT_NAME
        if name:
            self.name = name
    def create(self):
        """Reuse the group if it already exists; otherwise create it and open SSH from our IP."""
        try:
            sg = self.conn.get_all_security_groups(groupnames=[self.name])
            self.securityGroup = sg[0]
        except EC2ResponseError:
            # Narrowed from a bare 'except:' that silently swallowed every error
            # (including typos/NameErrors). EC2ResponseError is what boto raises
            # when the named group does not exist.
            logger.info('Creating temporary security group for rescuing')
            self.securityGroup = self.conn.create_security_group(self.name, self.name)
            self.securityGroup.authorize('tcp', 22, 22, getMyIP() + '/32')
    def delete(self):
        """Best-effort deletion of the group; failures are only logged."""
        try:
            logger.info('Deleting %s' % self.securityGroup)
            self.conn.delete_security_group(name=self.securityGroup.name, group_id=self.securityGroup.id)
        except EC2ResponseError as err:  # 'as' form works on Python 2.6+ and 3.x
            logger.warning('Error deleting %s: %s' % (self.securityGroup, str(err)))
def recoverInstance(instance_id, keyname, keypair):
    """Regain access to *instance_id* by moving its root volume to a temporary
    helper instance and running the recovery playbook there.

    Sequence: create rescue SG + helper instance, stop the target, detach its
    root volume, attach it to the helper as /dev/sdh, run Ansible, then move
    the volume back and tear everything down. The order of these steps matters.
    NOTE(review): AMI 'ami-892fe1fe' is hard-coded — presumably an eu-west-1
    image; confirm before using another region.
    """
    sg = RecoverySecurityGroup()
    sg.create()
    logger.info('Getting instance:%s to recover'%instance_id)
    instance_to_recover = Instance(id=instance_id)
    # NOTE: local variable shadows this function's own name from here on.
    recoverInstance = Instance()
    logger.info('Creating temporary instance')
    recoverInstance.create('ami-892fe1fe',
                           key_name = keyname,
                           instance_type = 't2.micro',
                           security_group_ids =[sg.securityGroup.id],
                           subnet_id = instance_to_recover.instance.subnet_id )
    logger.info('Stopping instance:%s'%instance_id)
    instance_to_recover.stop()
    # First mapped device is assumed to be the root volume.
    mountpoint = instance_to_recover.getMappedDevices()[0]
    volume = instance_to_recover.getVolume(mountpoint)
    volume.detach(instance_to_recover.instance.id, mountpoint)
    volume.attach(recoverInstance.instance.id,'/dev/sdh')
    executePlaybook(os.path.join('playbooks','recover_instance.yml'),recoverInstance.instance, keypair,'ec2-user')
    recoverInstance.stop()
    volume.detach(recoverInstance.instance.id,'/dev/sdh')
    volume.attach(instance_to_recover.instance.id, mountpoint)
    recoverInstance.terminate()
    sg.delete()
def executePlaybook(playbook, instance, private_key, remote_user):
    """Run an Ansible playbook against a single EC2 instance over SSH.

    Pre-registers the host key (ssh-keyscan) so Ansible does not prompt, and
    removes it afterwards (by name and IP) so a later instance reusing the
    address does not trip host-key verification.
    NOTE(review): commands are interpolated into shell strings and run with
    shell=True; values come from AWS metadata so risk is low, but a list-based
    subprocess call without shell=True would be safer.
    """
    ansible_cmd = 'ansible-playbook -i %s, %s --private-key=%s -u %s'%(instance.public_dns_name,playbook,private_key,remote_user)
    subprocess.call('ssh-keyscan -t rsa %s >> ~/.ssh/known_hosts'%instance.public_dns_name, shell=True)
    logger.info("Executing Ansible: %s"%ansible_cmd)
    subprocess.call(ansible_cmd, shell=True)
    subprocess.call('ssh-keygen -R %s'%instance.public_dns_name, shell=True)
    subprocess.call('ssh-keygen -R %s'%instance.ip_address, shell=True)
def test(keyname, keypair):
    """End-to-end self-test: launch a throwaway instance, run the full
    recovery flow against it, then terminate it and delete its security group."""
    sg = RecoverySecurityGroup(name="testInstanceSecurityGroup")
    sg.create()
    testInstance = Instance()
    testInstance.create('ami-892fe1fe',
                        key_name = keyname,
                        instance_type = 't2.micro',
                        security_group_ids=[sg.securityGroup.id])
    recoverInstance(testInstance.instance.id, keyname, keypair)
    testInstance.terminate()
    sg.delete()
def checkIfIsFile(args, key):
    """Exit the program (status 1) unless ``args.<key>`` names an existing file."""
    path = getattr(args, key)
    if os.path.isfile(path):
        logger.info('File %s found' % path)
    else:
        logger.error('File %s not found' % path)
        sys.exit(1)
def main():
    """Parse command-line arguments and dispatch to 'recover' or 'test'."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers( dest='action',
                                       title='Options',
                                       description='',
                                       help='additional help')
    regions = ['us-east-1','us-east-2','us-west-1','eu-west-1','ap-southeast-1','ap-southeast-2','ap-northeast-1','sa-east-1']
    recoverParser = subparsers.add_parser('recover', help='recover instance in aws')
    recoverParser.add_argument('-i', '--instance',
                               help='You need to specify the id of the instance that you want to recover access',
                               required=True)
    recoverParser.add_argument('-n', '--keyname',
                               help='Amazon Keypair name that will use to create a temporary instance in order to recover the instance which the one you have lost access',
                               required=True)
    recoverParser.add_argument('-k', '--keypair',
                               help='Amazon Keypair that will use to create a temporary instance in order to recover the instance which the one you have lost access',
                               required=True)
    recoverParser.add_argument('-p', '--public_key',
                               help='SSH public key for the new user that will be created in the instance to recover',
                               required=True)
    recoverParser.add_argument('-r', '--region',
                               choices= regions,
                               default='eu-west-1')
    testParser = subparsers.add_parser('test', help='Test unit for the library. It will create a new temporary instance to recover in your aws account and it will grant access to it')
    testParser.add_argument('-n', '--keyname',
                            help='Amazon Keypair name that will use to create a temporary instance in order to recover the instance which the one you have lost access',
                            required=True)
    testParser.add_argument('-k', '--keypair',
                            help='Amazon Keypair that will use to create a temporary instance in order to recover the instance which the one you have lost access',
                            required=True)
    testParser.add_argument('-p', '--public_key',
                            help='SSH public key for the new user that will be created in the instance to recover',
                            required=True)
    testParser.add_argument('-r', '--region',
                            choices=regions,
                            default='eu-west-1')
    args = parser.parse_args()
    # Both key files must exist locally before touching AWS.
    checkIfIsFile(args,'keypair')
    checkIfIsFile(args,'public_key')
    if not os.path.exists('tmp'):
        os.makedirs('tmp')
    # The playbook expects the public key at tmp/public_key; remove it when done.
    logger.info('Copying public_key [%s] into tmp/ folder'%args.public_key)
    shutil.copyfile(args.public_key,os.path.join('tmp','public_key'))
    logger.info('Executing %s'%(args.action))
    if args.action == "recover":
        logger.info('Executing %s'%args)
        recoverInstance(args.instance, args.keyname, args.keypair)
    elif args.action == "test":
        logger.info('Executing %s'%args)
        test(args.keyname, args.keypair)
    os.remove(os.path.join('tmp','public_key'))
if __name__ == "__main__":
    main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
# Outcome of a whole test run; numeric values are stable and meaningful.
TestStatus = Enum('TestStatus', [('PASSED', 1), ('FAILED', 2), ('SKIPPED', 3)])
# Process exit codes reported to the test-runner harness.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77  # 77 is the conventional "skipped" exit code
class BitcoinTestMetaClass(type):
    """Metaclass for BitcoinTestFramework.
    Ensures that any attempt to register a subclass of `BitcoinTestFramework`
    adheres to a standard whereby the subclass overrides `set_test_params` and
    `run_test` but DOES NOT override either `__init__` or `main`. If any of
    those standards are violated, a ``TypeError`` is raised."""
    def __new__(cls, clsname, bases, dct):
        # The base framework class itself is exempt from the override rules.
        if clsname != 'BitcoinTestFramework':
            has_required = 'run_test' in dct and 'set_test_params' in dct
            if not has_required:
                raise TypeError("BitcoinTestFramework subclasses must override "
                                "'run_test' and 'set_test_params'")
            has_forbidden = '__init__' in dct or 'main' in dct
            if has_forbidden:
                raise TypeError("BitcoinTestFramework subclasses may not override "
                                "'__init__' or 'main'")
        return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.setup_clean_chain = False  # True => start from empty datadirs instead of the cached chain
        self.nodes = []  # TestNode objects, populated by add_nodes()
        self.mocktime = 0  # non-zero => fake wall-clock time passed to the nodes
        self.supports_cli = False  # whether the test may run with --usecli
        self.bind_to_localhost_only = True
        self.set_test_params()
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinds after the test execution")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs (default: %default)")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                          help="Location of the test framework config file (default: %default)")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
                          help="use bitcoin-cli instead of RPC for all commands")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        PortSeed.n = self.options.port_seed
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # Locate the bitcoind/bitcoin-cli binaries from config.ini (env vars win).
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bitcoind' + config["environment"]["EXEEXT"])
        self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bitcoin-cli' + config["environment"]["EXEEXT"])
        os.environ['PATH'] = os.pathsep.join([
            os.path.join(config['environment']['BUILDDIR'], 'src'),
            os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
            os.environ['PATH']
        ])
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()
        success = TestStatus.FAILED
        try:
            if self.options.usecli and not self.supports_cli:
                # NOTE(review): SkipTest is defined elsewhere in this module.
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")
        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            # Nodes are deliberately left running; disable their atexit cleanup.
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitcoinds were not stopped and may still be running")
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            cleanup_tree_on_exit = False
        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)
        sys.exit(exit_code)
# Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        # Intentionally a no-op in the base class.
        pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, get_datadir_path(self.options.tmpdir, i), rpchost=rpchost, timewait=timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli))
    def start_node(self, i, *args, **kwargs):
        """Start a bitcoind"""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        # Block until the node answers RPC before handing it to the test.
        node.wait_for_rpc_connection()
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr=''):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr)
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node.

        ``extra_args`` is forwarded to the restarted node's command line.
        """
        self.stop_node(i)
        self.start_node(i, extra_args)
    def wait_for_node_exit(self, i, timeout):
        """Block until node ``i``'s process exits (up to ``timeout`` seconds)."""
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        # Disconnect in both directions, then sync each half separately.
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
    def enable_mocktime(self):
        """Enable mocktime for the script.
        mocktime may be needed for scripts that use the cached version of the
        blockchain. If the cached version of the blockchain is used without
        mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
        versions of the cache, this helper function sets mocktime to Jan 1,
        2014 + (201 * 10 * 60)"""
        # 1388534400 is the Unix timestamp of 2014-01-01T00:00:00Z; the
        # offset is one 10-minute slot per cached block (201 blocks).
        self.mocktime = 1388534400 + (201 * 10 * 60)
    def disable_mocktime(self):
        """Disable mocktime (0 means "use real time")."""
        self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        """Configure the 'TestFramework' logger with a DEBUG file handler
        and a console handler whose level follows --loglevel."""
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
        fh.setLevel(logging.DEBUG)
        # Create console handler. By default this logs only error messages, but can be configured with --loglevel.
        # NOTE(review): the original comment said "stderr" but the stream is sys.stdout.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        # Optionally mirror all raw RPC traffic when --tracerpc is given.
        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.
        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""
        assert self.num_nodes <= MAX_NODES
        # The cache is considered valid only if every node's datadir exists.
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break
        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))
            # Create cache directories, run bitcoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [self.options.bitcoind, "-datadir=" + datadir]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[], rpchost=None, timewait=None, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)
            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()
            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 10 minutes apart
            # starting from 2010 minutes in the past
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 10 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 10 * 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)
            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            def cache_path(n, *paths):
                # Helper: path rooted at node n's cached regtest directory.
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
            for i in range(MAX_NODES):
                # Keep only wallets/chainstate/blocks in the cache.
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['wallets', 'chainstate', 'blocks']:
                        os.remove(cache_path(i, entry))
        # Copy the cached datadirs into the per-test tmpdir.
        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class SkipTest(Exception):
    """Raised by a test to indicate that it should be skipped."""

    def __init__(self, message):
        # Keep the reason around so the runner can report why the test
        # was skipped.
        self.message = message
|
|
from datetime import date
import flask
import pymysql.cursors
from donut.auth_utils import check_permission, get_permissions, get_user_id
from donut.constants import Gender
from donut.default_permissions import Permissions
from .permissions import DirectoryPermissions, ManageMembersPermissions
# Profile fields that are only visible to the owner and to viewers with
# the HIDDEN_SEARCH_FIELDS permission.
HIDDEN_FIELDS = {
    'username', 'uid', 'birthday', 'phone_string', 'hometown_string'
}
def get_hidden_fields(viewer_name, viewee_id):
    """Return the set of view_user page field names that ``viewer_name``
    may NOT see on ``viewee_id``'s profile.

    Everything is visible when the viewer is the viewee or holds the
    HIDDEN_SEARCH_FIELDS permission; anonymous viewers see nothing
    restricted.
    """
    if viewer_name is None:
        # Anonymous viewers never see restricted fields.
        return HIDDEN_FIELDS
    viewing_self = get_user_id(viewer_name) == viewee_id
    if viewing_self or check_permission(
            viewer_name, DirectoryPermissions.HIDDEN_SEARCH_FIELDS):
        return set()
    return HIDDEN_FIELDS
def get_user(user_id):
    """Fetch one member's profile as a dict, or None if not found.

    Joins in building name, profile-image presence and username, then
    attaches derived display fields (gender_string, phone_string,
    hometown_string) plus the member's options, non-house positions,
    and house memberships.
    """
    # TODO: remove timezone after COVID
    query = """
        SELECT
            uid, first_name, middle_name, last_name, preferred_name,
            gender, gender_custom, birthday, entry_year, graduation_year,
            msc, building_name, room, address, city, state, zip, country,
            email, phone, username, timezone, extension IS NOT NULL as image
        FROM members
        NATURAL LEFT JOIN buildings
        NATURAL LEFT JOIN images
        NATURAL LEFT JOIN users
        WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        user = cursor.fetchone()
        if user is not None:
            # Free-form custom gender text wins over the enum values.
            if user['gender_custom']:
                user['gender_string'] = user['gender_custom']
            elif user['gender'] == Gender.MALE.value:
                user['gender_string'] = 'Male'
            elif user['gender'] == Gender.FEMALE.value:
                user['gender_string'] = 'Female'
            phone = user['phone']
            if phone:
                # Format 10-digit numbers as (xxx) xxx-xxxx; anything else
                # is shown verbatim.
                if len(phone) == 10 and all(['0' <= d <= '9' for d in phone]):
                    user['phone_string'] = '(' + phone[:3] + ') '
                    user['phone_string'] += phone[3:6] + '-' + phone[6:]
                else: #what sort of phone number is that
                    user['phone_string'] = phone
            place_names = [(user[field] or '').strip()
                           for field in ['city', 'state', 'country']]
            user['hometown_string'] = ', '.join(filter(None, place_names))
            option_query = """
                SELECT option_name, option_type
                FROM member_options NATURAL JOIN options
                WHERE user_id = %s ORDER BY option_type, option_name
            """
            with flask.g.pymysql_db.cursor() as cursor:
                cursor.execute(option_query, [user_id])
                user['options'] = cursor.fetchall()
            # Positions outside houses; house admin positions in ug- groups
            # are filtered out.
            groups_query = """
                SELECT group_name, pos_name
                FROM current_position_holders NATURAL JOIN positions NATURAL JOIN groups
                WHERE pos_id NOT IN (SELECT pos_id FROM house_positions)
                AND NOT (group_name LIKE 'ug-%%' AND pos_name = 'Admin')
                AND user_id = %s
            """
            with flask.g.pymysql_db.cursor() as cursor:
                cursor.execute(groups_query, [user_id])
                user['positions'] = cursor.fetchall()
            houses_query = """
                SELECT group_name, pos_name
                FROM house_positions NATURAL JOIN current_position_holders
                WHERE user_id = %s
            """
            with flask.g.pymysql_db.cursor() as cursor:
                cursor.execute(houses_query, [user_id])
                user['houses'] = cursor.fetchall()
    return user
# SQL expression producing a member's searchable display name
# (preferred + first + last).
SEARCH_NAME = "CONCAT(IFNULL(preferred_name, ''), ' ', first_name, ' ', last_name)"
def make_name_query(search):
    """Build a WHERE clause matching every space-separated term of
    ``search`` (case-insensitively) somewhere in the member's name.

    Returns (terms, clause): the substitution arguments and the SQL.
    """
    terms = search.split(' ')
    # INSTR is case-insensitive; one condition per search term.
    clause = ' AND '.join('INSTR(' + SEARCH_NAME + ', %s) > 0'
                          for _ in terms)
    return terms, clause
def get_users_by_name_query(search):
    """Find users whose names match the given query.

    At most 10 users are returned, in alphabetical order.
    """
    substitutions, name_clause = make_name_query(search)
    query = (
        'SELECT user_id, full_name, graduation_year '
        'FROM members NATURAL JOIN members_full_name WHERE '
        + name_clause
        + ' ORDER BY LOWER(full_name) LIMIT 10')
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, substitutions)
        return cursor.fetchall()
def get_image(user_id):
    """Return (extension, blob) for the user's profile image.

    Raises Exception when the user has no stored image.
    """
    query = 'SELECT extension, image FROM images WHERE user_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        row = cursor.fetchone()
    if row is None:
        raise Exception('No image found for user')
    return row['extension'], row['image']
def execute_search(**kwargs):
    """Search members by any combination of filters.

    Expected keys: email, username, name, house_id, option_id,
    building_id, grad_year, timezone_from, timezone_to, and optionally
    offset/per_page. When ``offset`` is present, returns one page of
    matching rows ordered by name; otherwise returns the total count of
    matches.
    """
    query = """
        SELECT DISTINCT
            user_id, full_name, graduation_year,
            extension IS NOT NULL as image
        FROM members
        NATURAL JOIN members_full_name
        NATURAL LEFT JOIN (house_positions NATURAL JOIN current_position_holders)
        NATURAL LEFT JOIN member_options
        NATURAL LEFT JOIN buildings
        NATURAL LEFT JOIN users
        NATURAL LEFT JOIN images
    """
    # The email filter is always applied; an empty string matches all rows.
    query += ' WHERE INSTR(email, %s) > 0'
    substitution_arguments = [kwargs['email'].lower()]
    if kwargs['username']:
        query += ' AND INSTR(username, %s) > 0'
        substitution_arguments.append(kwargs['username'])
    if kwargs['name']:
        name_search, name_query = make_name_query(kwargs['name'])
        query += ' AND ' + name_query
        substitution_arguments += name_search
    if kwargs['house_id']:
        query += ' AND group_id = %s'
        substitution_arguments.append(kwargs['house_id'])
    if kwargs['option_id']:
        query += ' AND option_id = %s'
        substitution_arguments.append(kwargs['option_id'])
    if kwargs['building_id']:
        query += ' AND building_id = %s'
        substitution_arguments.append(kwargs['building_id'])
    if kwargs['grad_year']:
        query += ' AND graduation_year = %s'
        substitution_arguments.append(kwargs['grad_year'])
    # TODO: remove timezone after COVID
    if kwargs['timezone_from']:
        query += ' AND timezone >= %s'
        substitution_arguments.append(kwargs['timezone_from'])
    if kwargs['timezone_to']:
        query += ' AND timezone <= %s'
        substitution_arguments.append(kwargs['timezone_to'])
    if 'offset' in kwargs:
        # Paged-results variant.
        query += ' ORDER BY LOWER(last_name), LOWER(full_name) LIMIT %s, %s'
        substitution_arguments.append(kwargs['offset'])
        substitution_arguments.append(kwargs['per_page'])
    else:
        # Count-only variant: wrap the filtered query in a COUNT(*).
        query = 'SELECT COUNT(*) AS cnt FROM (' + query + ') sub'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, substitution_arguments)
        if 'offset' in kwargs:
            return cursor.fetchall()
        else:
            return cursor.fetchone()['cnt']
def members_unique_values(field, string):
    """Return the distinct non-NULL values of a members column, sorted.

    ``field`` must be a trusted column name: it is interpolated directly
    into the SQL. When ``string`` is truthy, blank/whitespace-only
    values are excluded as well.
    """
    pieces = ['SELECT DISTINCT ' + field + ' FROM members WHERE ' + field + ' IS NOT NULL']
    if string:
        pieces.append(' AND LENGTH(TRIM(' + field + '))')
    pieces.append(' ORDER BY ' + field)
    query = ''.join(pieces)
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query)
        return [row[field] for row in cursor.fetchall()]
def get_houses():
    """Return every house group, alphabetized by group name."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('SELECT * FROM house_groups ORDER BY group_name')
        return cursor.fetchall()
def get_options():
    """Return every academic option, alphabetized by option name."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('SELECT * FROM options ORDER BY option_name')
        return cursor.fetchall()
def get_residences():
    """Return buildings that at least one member lives in, by name."""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute('SELECT DISTINCT building_id, building_name FROM members NATURAL JOIN buildings ORDER BY building_name')
        return cursor.fetchall()
def get_grad_years():
    """Return the sorted distinct graduation years of all members."""
    return members_unique_values('graduation_year', False)
def get_manage_members_houses():
    """Return the title-cased names of the houses whose membership the
    logged-in user may manage; empty tuple for anonymous users."""
    username = flask.session.get('username')
    if not username: return ()
    # NOTE(review): check_permission is called with a *set* here, while
    # other callers in this module pass a single permission; the result
    # is then used with `in`, so it is presumably the subset of granted
    # permissions (or get_permissions was intended) -- verify against
    # donut.auth_utils.
    permissions = check_permission(username, set(ManageMembersPermissions))
    return tuple(permission.name.title()
                 for permission in ManageMembersPermissions
                 if permission in permissions)
def get_house_member_positions(house):
    """Return (pos_id, pos_name) rows for the given house, sorted by
    position name."""
    query = """
        SELECT pos_id, pos_name FROM house_positions
        WHERE group_name = %s
        ORDER BY pos_name
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, house)
        return cursor.fetchall()
def get_members(pos_id):
    """Return the current holders of ``pos_id`` who have not yet
    graduated, ordered by name."""
    query = """
        SELECT user_id, full_name, hold_id
        FROM members NATURAL JOIN members_full_name
        NATURAL JOIN current_position_holders
        WHERE pos_id = %s AND graduation_year > %s
        ORDER BY last_name, full_name
    """
    today = date.today()
    # The academic year rolls over in July: before July, last calendar
    # year's class is still considered current.
    if today.month < 7:
        year_start = today.year - 1
    else:
        year_start = today.year
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (pos_id, year_start))
        return cursor.fetchall()
def get_position_house(pos_id):
    """Return the name of the house owning ``pos_id``, or a falsy value
    when it is not a house position."""
    query = 'SELECT group_name FROM house_positions WHERE pos_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, pos_id)
        row = cursor.fetchone()
    return row and row['group_name']
def get_held_position_house(hold_id):
    """Return the house name for a held position record, or a falsy
    value when the hold does not exist / is not a house position."""
    query = 'SELECT pos_id FROM position_holders WHERE hold_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, hold_id)
        row = cursor.fetchone()
    return row and get_position_house(row['pos_id'])
|
|
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import miscIO
import sys
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lookAtHeaderTokens(aTokens):
    """Summarize TCGA barcodes found in a header row (Python 2 script).

    Counts unique patient IDs (barcode characters 8..12) and tallies
    sample-type codes (characters 13..15) across the header tokens.
    """
    patientList = []
    typeDict = {}
    for a in aTokens:
        if (a.upper().startswith("TCGA-")):
            # Characters 8..12 of a TCGA barcode identify the participant.
            patientID = a[8:12].upper()
            if (patientID not in patientList):
                patientList += [patientID]
            if (len(a) >= 15):
                typeID = a[13:15]
                if (typeID not in typeDict.keys()):
                    typeDict[typeID] = 0
                typeDict[typeID] += 1
            else:
                print " WARNING : no typeID ??? <%s> " % a
    if (len(patientList) > 0):
        print " "
        print " # of unique patients : ", len(patientList)
        print " sample type counts : ", typeDict
        print " "
        print " "
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lookAtLine(aLine):
    """Debug helper (Python 2): dump character-class statistics for one
    raw line -- tabs, LFs, CRs, spaces, digits, letters, and any other
    non-printable bytes -- then exit the process.
    """
    if (1):
        print len(aLine)
        numTab = 0
        numLF = 0
        numCR = 0
        numSpace = 0
        numDigit = 0
        numLetter = 0
        numLinesOut = 0
        i1 = 0
        for ii in range(len(aLine)):
            ordVal = ord(aLine[ii])
            if (1):
                if (ordVal == 9):
                    # this is a tab ...
                    numTab += 1
                elif (ordVal == 10):
                    numLF += 1
                elif (ordVal == 13):
                    numCR += 1
                elif (ordVal == 32):
                    numSpace += 1
                elif ((ordVal >= 48 and ordVal <= 57) or (ordVal == 46)):
                    # digits and '.' count as numeric characters
                    numDigit += 1
                elif ((ordVal >= 65 and ordVal <= 90) or (ordVal >= 97 and ordVal <= 122)):
                    numLetter += 1
                elif (ordVal < 32 or ordVal > 126):
                    # report position/value of non-printable bytes
                    print " %6d %3d " % (ii, ordVal)
                else:
                    # print " %6d <%s> %3d " % ( ii, aLine[ii], ord ( aLine[ii]
                    # ) )
                    doNothing = 1
            if (ordVal == 13):
                # a CR ends an embedded "line"
                i2 = ii
                # print " --> writing out from %d to %d " % ( i1, i2 )
                # print " <%s> " % aLine[i1:i2]
                numLinesOut += 1
                ## if ( numLinesOut == 5 ): sys.exit(-1)
                ## fhOut.write ( "%s\n" % aLine[i1:i2] )
                i1 = i2 + 1
        print numTab, numLF, numCR, numSpace, numDigit, numLetter
        print numLinesOut
        sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
    # Command line (Python 2 script): <filename> [hist-file]
    if (len(sys.argv) != 2 and len(sys.argv) != 3):
        print ' Usage : %s <filename> [hist-file] ' % sys.argv[0]
        print " ERROR -- bad command line arguments "
        sys.exit(-1)
    inFilename = sys.argv[1]
    if (len(sys.argv) == 3):
        histFilename = sys.argv[2]
        noHist = 0
    else:
        noHist = 1
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Pass 1: peek at the first 10 lines and report their tab-token
    # counts, then count the total number of lines in the file.
    fh = file(inFilename)
    numLines = 10
    for iLine in range(numLines):
        aLine = fh.readline()
        # look for carriage return / line-feed ?
        ## lookAtLine ( aLine )
        ## aLine = aLine.strip()
        aTokens = aLine.split('\t')
        if (len(aTokens) > 15):
            print len(aTokens), aTokens[:5], aTokens[-5:]
        else:
            print len(aTokens), aTokens
    numLines = miscIO.num_lines(fh)
    print "\n\n total # of lines in file : %d " % numLines
    fh.close()
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Pass 2: verify every data row has the same number of tab-separated
    # tokens as the header row; warn about blank tokens.
    fh = file(inFilename)
    aLine = fh.readline()
    aTokens = aLine.split('\t')
    numA = len(aTokens)
    print " number of header tokens : ", numA
    lookAtHeaderTokens(aTokens)
    done = 0
    iLine = 0
    while not done:
        bLine = fh.readline()
        iLine += 1
        # print bLine
        bTokens = bLine.split('\t')
        # print len(bTokens), bTokens
        numB = len(bTokens)
        if (numB < 2):
            # fewer than 2 tokens means end of data
            done = 1
            continue
        if (numA != numB):
            print " wrong number of tokens ??? ", numB, numA, iLine
            print bTokens
            print bLine
            sys.exit(-1)
        for ii in range(numA):
            if (bTokens[ii] == ''):
                print " WARNING ... blank token ", ii
                print bTokens
                print bLine
                ## sys.exit(-1)
    fh.close()
    # sys.exit(-1)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Pass 3: compare the first two lines token-by-token (printed side
    # by side when there are fewer than 50 columns).
    fh = file(inFilename)
    aLine = fh.readline()
    bLine = fh.readline()
    fh.close()
    try:
        if (aLine[-1] == '\n'):
            aLine = aLine[:-1]
        if (bLine[-1] == '\n'):
            bLine = bLine[:-1]
    except:
        # indexing an empty line raises -> file has fewer than 2 lines
        print " ERROR ??? bad data file ??? ", inFilename
        sys.exit(-1)
    aTokens = aLine.split('\t')
    bTokens = bLine.split('\t')
    numA = len(aTokens)
    numB = len(bTokens)
    print numA, numB
    if (numA != numB):
        print " ERROR ??? first two lines do not have the same numbers of tokens ??? "
        sys.exit(-1)
    if (numA < 50):
        for ii in range(numA):
            print ii, "\t", aTokens[ii], "\t:\t", bTokens[ii]
    print " "
    print " "
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Load the whole file into dataMatrix, indexed [column][row]; also
    # detect whether the matrix is (almost) purely binary 0/1 data.
    print " opening file <%s> " % inFilename
    fh = file(inFilename)
    aLine = fh.readline()
    # hdrTokens has the list of column ids (patients presumably)
    hdrTokens = aLine.split('\t')
    numCol = len(hdrTokens)
    if (numCol > 15):
        print numCol, hdrTokens[:5], hdrTokens[-5:]
    else:
        print numCol, hdrTokens
    # now we make a data matrix, the first dimension will be the column #
    print " --> first dimension of dataMatrix is %d " % numCol
    dataMatrix = [0] * numCol
    for iCol in range(numCol):
        dataMatrix[iCol] = []
    done = 0
    isBinary = 0
    numBinary = 0
    numNotB = 0
    while not done:
        bLine = fh.readline()
        try:
            if ( bLine[-1] == '\n' ): bLine = bLine[:-1]
        except:
            doNothing = 1
        ## bLine = bLine.strip()
        # each bTokens will have a feature name, followed by a list of feature
        # values
        bTokens = bLine.split('\t')
        if (len(bTokens) != numCol):
            done = 1
            print " DONE ", numCol, len(bTokens)
            print bTokens
            print " "
        else:
            # dataMatrix[0]
            for iCol in range(numCol):
                if ( bTokens[iCol] == "" ):
                    # blank cells are normalized to "NA" on load
                    dataMatrix[iCol] += ["NA"]
                else:
                    dataMatrix[iCol] += [bTokens[iCol]]
                if (iCol > 0):
                    if (bTokens[iCol]!="NA" and bTokens[iCol]!=""):
                        if (bTokens[iCol] == "0" or bTokens[iCol] == "1"):
                            numBinary += 1
                        else:
                            numNotB += 1
            ## print " dataMatrix[%d] has %d values " % ( iCol, len(dataMatrix[iCol]) )
    # print numBinary, numNotB
    if (numBinary > numNotB * 1000):
        isBinary = 1
    fh.close()
    print " "
    print len(dataMatrix), len(dataMatrix[0])
    # print dataMatrix[:5]
    print dataMatrix[0][:5] # this is all of the feature IDs
    print dataMatrix[1][:5] # this is data for the first patient
    print dataMatrix[-1][:5] # this is data for the last patient
    print " "
    numRow = len(dataMatrix[0])
    numNA = 0
    notNA = 0
    numNAbyRow = [0] * numRow
    maxNA = 0
    # if this looks like a purely binary feature matrix, then
    # count up the number of ones and 0s ...
    if (isBinary):
        bitD = {}
        bitD["0"] = 0
        # NOTE(review): the "1" bucket starts at 1 rather than 0 -- its
        # count is off by one (possibly to avoid a divide-by-zero below);
        # confirm intent.
        bitD["1"] = 1
        for iCol in range(1, numCol):
            for iRow in range(numRow):
                curVal = dataMatrix[iCol][iRow]
                if (curVal in bitD.keys()):
                    bitD[curVal] += 1
        print " "
        print " binary counts : ", bitD, 10000. * (float(bitD["1"]) / float(bitD["0"] + bitD["1"])), (numRow - 1), (numCol - 1)
        maxOn = 0
        maxCol = -1
        for iCol in range(1, numCol):
            numOn = 0
            featName = hdrTokens[iCol]
            if (featName.lower().find("unknown") >= 0):
                continue
            for iRow in range(numRow):
                if (dataMatrix[iCol][iRow] == "1"):
                    numOn += 1
            if (numOn > maxOn):
                maxCol = iCol
                maxOn = numOn
        print " most mutated patient : ", maxCol, hdrTokens[maxCol], maxOn
        print " "
    # if this file looks like a feature matrix with "data types",
    # then lets count up NAs by data type ...
    haveDataTypes = 0
    if (dataMatrix[0][0][1] == ':'):
        if (dataMatrix[0][0][6] == ':'):
            # feature names look like "X:TYPE:...": track per-type counts
            haveDataTypes = 1
            NAbyDataType = {}
            AVbyDataType = {}
            for iRow in range(numRow):
                dataType = dataMatrix[0][iRow][:6]
                if (dataType not in NAbyDataType.keys()):
                    NAbyDataType[dataType] = 0
                    AVbyDataType[dataType] = 0
    # Count NA vs available values overall, per row, and per data type.
    for iCol in range(1, numCol):
        for iRow in range(numRow):
            if (dataMatrix[iCol][iRow] == ""):
                print " ERROR ??? blank entry ??? ", iCol, iRow
                print dataMatrix[iCol - 5:iCol + 5][iRow]
                print dataMatrix[iCol][iRow - 5:iRow + 5]
                sys.exit(-1)
            if (haveDataTypes):
                dataType = dataMatrix[0][iRow][:6]
            if ((dataMatrix[iCol][iRow] == "NA") or (dataMatrix[iCol][iRow] == "na") or (dataMatrix[iCol][iRow] == "null")):
                numNA += 1
                numNAbyRow[iRow] += 1
                if (maxNA < numNAbyRow[iRow]):
                    maxNA = numNAbyRow[iRow]
                if (haveDataTypes):
                    NAbyDataType[dataType] += 1
            else:
                notNA += 1
                if (haveDataTypes):
                    AVbyDataType[dataType] += 1
    print " %d x %d " % ((numCol - 1), (numRow))
    print " total number of NA values : ", numNA, notNA
    fracNA = float(numNA) / float(numNA + notNA)
    print " fraction of NA values : %f " % fracNA
    print " "
    print " Summary : %4d x %6d %5.3f %s " % ((numCol - 1), numRow, fracNA, inFilename)
    print " "
    try:
        # Per-data-type NA breakdown; NameError here (no data types)
        # falls through to the except.
        keyList = NAbyDataType.keys()
        keyList.sort()
        for aType in keyList:
            numNA = NAbyDataType[aType]
            numAV = AVbyDataType[aType]
            numTot = numNA + numAV
            fracNA = float(numNA) / float(numTot)
            appCol = int((1. - fracNA) * float(numCol - 1))
            print " <%s> %6.3f : approx %4d out of %4d %12d %12d %12d " % \
                (aType, fracNA, appCol, (numCol - 1), numNA, numAV, numTot)
        print " "
        print " "
        print " "
    except:
        keyList = []
        doNothing = 1
    # check some basic mutation counts ...
    print " "
    print " checking mutation counts for TP53 and TTN ... "
    aType = "B:GNAB"
    if (aType in keyList):
        for iRow in range(numRow):
            featName = dataMatrix[0][iRow]
            if (featName.startswith("B:GNAB:TP53:") or featName.startswith("B:GNAB:TTN:")):
                if (featName.find("code_potential") > 0):
                    bitD = {}
                    print iRow, featName
                    print featName
                    for iCol in range(1, numCol):
                        curVal = dataMatrix[iCol][iRow]
                        if (curVal not in bitD):
                            bitD[curVal] = 1
                        else:
                            bitD[curVal] += 1
                    print bitD
    print " "
    # check cardinality of categorical features ...
    print " "
    print " checking for high cardinality categorical features ... "
    highCard = 0
    maxCard = 0
    maxName = "NA"
    for iRow in range(numRow):
        featName = dataMatrix[0][iRow]
        if (featName.startswith("C:")):
            uVec = []
            for iCol in range(1, numCol):
                curVal = dataMatrix[iCol][iRow]
                # NOTE(review): curVal is a string, so the comparison
                # with the integer NA_VALUE can never match -- confirm
                # whether str(NA_VALUE) was intended.
                if ((curVal == "NA") or (curVal == "na") or
                    (curVal == "null") or (curVal == NA_VALUE)):
                    doNothing = 1
                else:
                    if (curVal not in uVec):
                        uVec += [curVal]
            if ( maxCard < len(uVec) ):
                maxCard = len(uVec)
                maxName = featName
            if (len(uVec) > 25):
                print " WARNING ... VERY high cardinality feature !!! ", featName, len(uVec), uVec
                print " "
                highCard = 1
    print " --> highest cardinality feature found: ", maxCard, maxName
    if ( 0 ):
        if (highCard):
            print " "
            print " "
            sys.exit(-1)
    # sys.exit(-1)
    # get information about data values for each data type ...
    print " "
    print " now trying to get some information about the actual data values ... "
    # nz_byDataType = {} # number of zeros by data type
    # nv_byDataType = {} # number of values by data type
    # min_byDataType = {} # smallest non-zero (abs) value by data type
    # max_byDataType = {} # largest non-zero (abs) value by data type
    nz_byDataType = {}
    nv_byDataType = {}
    min_byDataType = {}
    max_byDataType = {}
    for iRow in range(numRow):
        for iCol in range(1, numCol):
            dataType = dataMatrix[0][iRow][:6]
            if ((dataMatrix[iCol][iRow] == "NA") or (dataMatrix[iCol][iRow] == "na") or
                (dataMatrix[iCol][iRow] == "null") or (dataMatrix[iCol][iRow] == NA_VALUE)):
                doNothing = 1
            else:
                if (dataType not in nz_byDataType.keys()):
                    # first value of this type initializes the counters
                    nz_byDataType[dataType] = 0
                    nv_byDataType[dataType] = 0
                    min_byDataType[dataType] = 999990
                    max_byDataType[dataType] = -999990
                else:
                    nv_byDataType[dataType] += 1
                try:
                    fVal = float(dataMatrix[iCol][iRow])
                    if (abs(fVal) < 0.0000001):
                        nz_byDataType[dataType] += 1
                    else:
                        if (fVal < min_byDataType[dataType]):
                            min_byDataType[dataType] = fVal
                        if (fVal > max_byDataType[dataType]):
                            max_byDataType[dataType] = fVal
                except:
                    # non-numeric value: skip
                    doNothing = 1
    for aType in nz_byDataType.keys():
        if (min_byDataType[aType] == 999990):
            # no numeric values seen for this type
            print aType, nz_byDataType[aType], nv_byDataType[aType]
        else:
            print aType, nz_byDataType[aType], nv_byDataType[aType], \
                min_byDataType[aType], max_byDataType[aType]
    # Histogram of per-row NA counts; optionally written to hist-file.
    numNAhist = [0] * (maxNA + 1)
    for iRow in range(numRow):
        numNAhist[numNAbyRow[iRow]] += 1
    if (not noHist):
        fh = file(histFilename, 'w')
        if ( 0 ):
            fh.write("\n\n")
            fh.write("Histogram of NA counts by feature: \n\n")
            for ii in range(maxNA + 1):
                if (numNAhist[ii] > 0): fh.write(" %4d : %8d \n" % (ii, numNAhist[ii]))
            fh.write("\n\n")
        for iRow in range(numRow):
            fh.write("%4d %s \n" % (numNAbyRow[iRow], dataMatrix[0][iRow]))
        ## fh.write("\n\n")
        fh.close()
        sys.exit(-1)
    # Per-column non-null counts and unique-value previews.
    for iCol in range(numCol):
        numNotNull = 0
        numS = len(dataMatrix[iCol])
        uniqueList = []
        for iS in range(numS):
            if (dataMatrix[iCol][iS] != "null"):
                if ((dataMatrix[iCol][iS] != "NA") and (dataMatrix[iCol][iS] != "na") and (dataMatrix[iCol][iS] != "null")):
                    numNotNull += 1
                    if (dataMatrix[iCol][iS] not in uniqueList):
                        uniqueList += [dataMatrix[iCol][iS]]
        print " %3d %32s %3d / %3d " % (iCol, hdrTokens[iCol], numNotNull, numS)
        uniqueList.sort()
        if (len(uniqueList) < 10):
            print len(uniqueList), uniqueList
        else:
            print len(uniqueList), uniqueList[:10]
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
|
import sys
sys.path.append('..')
import time
import pygame
from pygame import Rect, Color
from pathfinder import PathFinder
from gridmap import GridMap
class Visualizer(object):
    """Interactive grid-map path-finding demo.

    Python 2 only: relies on integer division of ints for grid math,
    tuple parameter unpacking in nested functions, and time.clock()
    (removed in Python 3.8).
    """
    def __init__(self, screen, field, message_func):
        # screen: pygame display surface; field: Rect of the grid area;
        # message_func: callback(msg1, msg2) for status text.
        self.screen = screen
        self.field = field
        self.message_func = message_func
        # Pixel size of one grid square.
        self.grid_size = 15
        self.field_color = Color('black')
        self.grid_color = Color('gray')
        self.start_pos_color = Color('red')
        self.goal_pos_color = Color('green')
        self.path_color = Color('violet')
        self.blocked_color = Color('gray')
        self._init_map()
    def draw(self):
        # Redraw grid, map contents and status messages.
        self._draw_grid(self.field)
        self._draw_map(self.field,
            self.blocked_list, self.start_pos,
            self.goal_pos, self.path)
        self.message_func(self.msg1, self.msg2)
    def user_event(self, event):
        # F5 recomputes; any mouse click edits the map and invalidates
        # the currently displayed path.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_F5:
                self._recompute_path()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.path_valid = False
            self.msg1 = 'Please recompute path (F5)'
            self.msg2 = ''
            self._handle_mouse_click(event)
    ########################## PRIVATE ##########################
    def _init_map(self):
        # Default start/goal and a fixed set of initial walls.
        self.start_pos = 0, 0
        self.goal_pos = 3, 8
        nrows = self.field.height / self.grid_size
        ncols = self.field.width / self.grid_size
        self.map = GridMap(nrows, ncols)
        for b in [ (1, 1), (1, 2), (0, 3), (1, 3), (2, 3),
                    (2, 4), (2, 5), (2, 6)]:
            self.map.set_blocked(b)
        self._recompute_path()
    def _handle_mouse_click(self, event):
        if not self.field.collidepoint(event.pos):
            return
        # Translate pixel position into a (row, col) grid coordinate.
        ncol = (event.pos[0] - self.field.left) / self.grid_size
        nrow = (event.pos[1] - self.field.top) / self.grid_size
        coord = (nrow, ncol)
        if event.button == 1:
            # left: toggle wall
            self.map.set_blocked(coord, not self.map.blocked[coord])
        elif event.button == 2:
            # middle: move start
            self.start_pos = coord
        elif event.button == 3:
            # right: move goal
            self.goal_pos = coord
    def _recompute_path(self):
        self.blocked_list = self.map.blocked
        pf = PathFinder(self.map.successors, self.map.move_cost,
                self.map.move_cost)
        t = time.clock()
        self.path = list(pf.compute_path(self.start_pos, self.goal_pos))
        dt = time.clock() - t
        if self.path == []:
            self.msg1 = "No path found"
        else:
            self.msg1 = "Found path (length %d)" % len(self.path)
        self.msg2 = "Elapsed: %s seconds" % dt
        self.path_valid = True
    def _draw_grid(self, field):
        """ Draw a grid on the given surface.
        """
        self.screen.fill(self.field_color, field)
        nrows = field.height / self.grid_size
        ncols = field.width / self.grid_size
        # Horizontal then vertical grid lines.
        for y in range(nrows + 1):
            pygame.draw.line(
                self.screen,
                self.grid_color,
                (field.left, field.top + y * self.grid_size - 1),
                (field.right - 1, field.top + y * self.grid_size - 1))
        for x in range(ncols + 1):
            pygame.draw.line(
                self.screen,
                self.grid_color,
                (field.left + x * self.grid_size - 1, field.top),
                (field.left + x * self.grid_size - 1, field.bottom - 1))
    def _draw_map(self, field, blocked, start, goal, path):
        # Python 2 tuple parameter unpacking below.
        def _fill_square((nrow, ncol), color):
            # Fill a whole grid cell (used for walls).
            left = field.left + ncol * self.grid_size
            top = field.top + nrow * self.grid_size
            width = self.grid_size - 1
            self.screen.fill(color, Rect(left, top, width, width))
        def _fill_spot((nrow, ncol), color):
            # Draw a centered dot in a cell (used for path/start/goal).
            pos_x = field.left + ncol * self.grid_size + self.grid_size / 2
            pos_y = field.top + nrow * self.grid_size + self.grid_size / 2
            radius = self.grid_size / 4
            pygame.draw.circle(self.screen,
                color, (pos_x, pos_y), radius)
        for bl in blocked:
            _fill_square(bl, self.blocked_color)
        if self.path_valid:
            for path_square in path:
                _fill_spot(path_square, self.path_color)
        _fill_spot(start, self.start_pos_color)
        _fill_spot(goal, self.goal_pos_color)
def draw_messages(screen, rect, message1, message2):
    """Render the two status messages inside a rimmed box."""
    draw_rimmed_box(screen, rect, (50, 20, 0), 4, Color('white'))
    font = pygame.font.SysFont('arial', 18)
    first_sf = font.render(message1, True, Color('white'))
    second_sf = font.render(message2, True, Color('white'))
    # Second line sits directly below the first.
    screen.blit(first_sf, rect.move(10, 0))
    screen.blit(second_sf, rect.move(10, first_sf.get_height()))
def draw_rimmed_box(screen, box_rect, box_color,
                    rim_width=0,
                    rim_color=Color('black')):
    """ Draw a rimmed box on the given surface. The rim is drawn
        outside the box rect.
    """
    if rim_width:
        # The rim is a larger rectangle painted beneath the box.
        inflated = Rect(box_rect.left - rim_width,
                        box_rect.top - rim_width,
                        box_rect.width + 2 * rim_width,
                        box_rect.height + 2 * rim_width)
        pygame.draw.rect(screen, rim_color, inflated)
    pygame.draw.rect(screen, box_color, box_rect)
def draw_title(screen, rect):
    """Draw the instruction box at the top of the window."""
    draw_rimmed_box(screen, rect, (40, 10, 60), 4, Color('gray'))
    font = pygame.font.SysFont('arial', 16)
    instructions = (
        'Left click to toggle wall',
        'Middle click to set start (red)',
        'Right click to set goal (green)',
        'F5 to recompute the path',
    )
    for line_num, text in enumerate(instructions):
        rendered = font.render(text, True, Color('white'))
        screen.blit(rendered, rect.move(10, line_num * rendered.get_height()))
def run_game():
    """Create the pygame window and run the main event/draw loop."""
    SCREEN_WIDTH, SCREEN_HEIGHT = 350, 550
    FIELD_RECT = Rect(25, 130, 300, 300)
    MESSAGES_RECT = Rect(25, 450, 300, 50)
    TITLE_RECT = Rect(25, 10, 300, 90)
    pygame.init()
    screen = pygame.display.set_mode(
        (SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
    clock = pygame.time.Clock()
    def message_func(msg1, msg2):
        # Adapter handed to Visualizer for rendering status text.
        draw_messages(screen, MESSAGES_RECT, msg1, msg2)
    visualizer = Visualizer(screen, FIELD_RECT, message_func)
    while True:
        # Cap the frame rate at 30 FPS.
        time_passed = clock.tick(30)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit_game()
            else:
                visualizer.user_event(event)
        draw_title(screen, TITLE_RECT)
        visualizer.draw()
        pygame.display.flip()
def exit_game():
    """Terminate the process; pygame cleans up at interpreter exit."""
    sys.exit()

# NOTE(review): the game starts at import time — consider wrapping this call
# in `if __name__ == '__main__':` (left unchanged to preserve behavior).
run_game()
|
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backend import CpuDevice
from chainer import links
from chainer import testing
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    't': [[0, 2], [-1, 1, 2]],
    'reduce': ['sum', 'no'],
}))
@testing.inject_backend_tests(
    None,
    [
        # NumPy
        {},
        # CuPy
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestNegativeSampling(unittest.TestCase):
    """Tests for links.NegativeSampling across dtypes, targets and backends.

    Target entries equal to -1 act as ignore labels; ``reduce`` selects
    between a per-sample loss ('no') and a scalar summed loss ('sum').
    """

    # Dimensionality of each input vector and negative samples per target.
    in_size = 3
    sample_size = 2

    def setUp(self):
        # Pin the global dtype config for the duration of this test case.
        self._config_user = chainer.using_config('dtype', self.dtype)
        self._config_user.__enter__()

        batch = len(self.t)
        x_shape = (batch, self.in_size)
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
        self.t = numpy.array(self.t).astype(numpy.int32)

        if self.reduce == 'no':
            g_shape = self.t.shape
        elif self.reduce == 'sum':
            # Summed loss is a scalar.
            g_shape = ()
        self.gy = numpy.random.uniform(-1, 1, g_shape).astype(self.dtype)

        # float16 needs looser tolerances than the defaults.
        if self.dtype == numpy.float16:
            self.test_forward_options = {'atol': 1e-2}
            self.test_backward_options = {'atol': 5e-3}
        else:
            self.test_forward_options = {}
            self.test_backward_options = {'atol': 1e-4}

    def tearDown(self):
        # Undo the dtype config pushed in setUp.
        self._config_user.__exit__(None, None, None)

    def create_link(self, rng=None):
        """Build a NegativeSampling link over a 5-word vocabulary and
        randomize its weight matrix with ``rng``."""
        if rng is None:
            rng = numpy.random.RandomState()
        link = links.NegativeSampling(
            self.in_size, [10, 5, 2, 5, 2], self.sample_size)
        link.cleargrads()
        # W is initialized with zero. Inject random values for meaningful test.
        link.W.array[:] = rng.uniform(-1, 1, link.W.shape)
        return link

    def call_link_with_samples(self, samples, func):
        # Call the link with given `samples` array.
        # `func` is a function in which the link is called.

        # mock sampler that returns the saved samples
        def mock_sample(shape):
            assert samples.shape == shape
            return samples.copy()

        # Wrap F.negative_sampling to replace sampler with the mock
        orig_negative_sampling = chainer.functions.negative_sampling

        def wrap_negative_sampling(*args, **kwargs):
            # Positional args are (x, t, W, sampler, ...): swap in the mock.
            args = args[:3] + (mock_sample,) + args[4:]
            return orig_negative_sampling(*args, **kwargs)

        with testing.patch(
                'chainer.functions.loss.negative_sampling.negative_sampling',
                wraps=wrap_negative_sampling) as m:
            ret = func()
            assert m.call_count == 1
        return ret

    def test_forward(self, backend_config):
        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)
        link = self.create_link()
        link.to_device(backend_config.device)

        y, samples = link(x, t, reduce=self.reduce, return_samples=True)
        self.assertEqual(y.shape, self.gy.shape)

        # Recompute the expected loss on CPU from W and the drawn samples.
        cpu_device = CpuDevice()
        W = cpu_device.send(link.W.data)
        samples = cpu_device.send(samples)

        loss = numpy.empty((len(self.x),), self.dtype)
        for i in range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                # Ignore label contributes zero loss.
                loss[i] = 0
            else:
                w = W[samples[i]]
                f = w.dot(ix)
                # first one is positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()
        if self.reduce == 'sum':
            loss = loss.sum()
        testing.assert_allclose(y.data, loss, **self.test_forward_options)

    def test_to_cpu(self, backend_config):
        link = self.create_link()
        link.to_device(backend_config.device)
        self.assertEqual(link.sampler.device, backend_config.device)
        # Moving the link back to CPU must also move its internal sampler.
        link.to_cpu()
        self.assertEqual(link.sampler.device, backend.CpuDevice())

    def test_return_samples(self, backend_config):
        batch_size = self.t.shape[0]
        link = self.create_link()
        link.to_device(backend_config.device)

        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)

        # return_samples=True
        y, samples = link(x, t, reduce=self.reduce, return_samples=True)

        # One positive plus sample_size negatives per batch row.
        assert isinstance(samples, backend_config.xp.ndarray)
        assert samples.shape == (batch_size, self.sample_size + 1)
        assert samples.dtype == numpy.int32

        # return_samples=False, with saved samples
        y_ = self.call_link_with_samples(
            samples,
            lambda: link(x, t, reduce=self.reduce))

        # y and y_ should equal
        cpu_device = CpuDevice()
        numpy.testing.assert_array_equal(
            cpu_device.send(y.array), cpu_device.send(y_.array))

    def test_backward_compare_with_numpy(self, backend_config):
        # This test compares gradients with that of NumPy mode.
        rng = numpy.random.RandomState()
        rng_state = rng.get_state()

        # Call NumPy mode link and save samples
        x = chainer.Variable(self.x)
        t = chainer.Variable(self.t, requires_grad=False)
        link = self.create_link(rng)
        y, samples = link(x, t, return_samples=True)
        y.backward()
        assert t.grad is None
        gw_cpu = link.W.grad
        gx_cpu = x.grad

        # Call GPU mode link
        # Restoring the RNG state makes the second link's W identical.
        rng.set_state(rng_state)
        link = self.create_link(rng)
        link.to_device(backend_config.device)
        x = chainer.Variable(backend_config.get_array(self.x))
        t = chainer.Variable(
            backend_config.get_array(self.t), requires_grad=False)
        samples = backend_config.get_array(samples)
        y = self.call_link_with_samples(samples, lambda: link(x, t))
        y.backward()
        assert t.grad is None
        gw_gpu = link.W.grad
        gx_gpu = x.grad

        # Compare gradients from CPU and GPU modes
        testing.assert_allclose(gx_cpu, gx_gpu, **self.test_backward_options)
        testing.assert_allclose(gw_cpu, gw_gpu, **self.test_backward_options)
testing.run_module(__name__, __file__)
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._io}.
"""
from __future__ import print_function
import sys
from twisted.trial import unittest
from .._levels import LogLevel
from .._logger import Logger
from .._observer import LogPublisher
from .._io import LoggingFile
class LoggingFileTests(unittest.TestCase):
    """
    Tests for L{LoggingFile}.
    """

    def setUp(self):
        """
        Create a logger for test L{LoggingFile} instances to use.
        """
        self.publisher = LogPublisher()
        self.logger = Logger(observer=self.publisher)

    def test_softspace(self):
        """
        L{LoggingFile.softspace} is 0.
        """
        self.assertEqual(LoggingFile.softspace, 0)

    def test_readOnlyAttributes(self):
        """
        Some L{LoggingFile} attributes are read-only.
        """
        f = LoggingFile(self.logger)

        self.assertRaises(AttributeError, setattr, f, "closed", True)
        self.assertRaises(AttributeError, setattr, f, "encoding", "utf-8")
        self.assertRaises(AttributeError, setattr, f, "mode", "r")
        self.assertRaises(AttributeError, setattr, f, "newlines", ["\n"])
        self.assertRaises(AttributeError, setattr, f, "name", "foo")

    def test_unsupportedMethods(self):
        """
        Some L{LoggingFile} methods are unsupported.
        """
        f = LoggingFile(self.logger)

        # NOTE: next/xreadlines are the Python 2 iteration protocol.
        self.assertRaises(IOError, f.read)
        self.assertRaises(IOError, f.next)
        self.assertRaises(IOError, f.readline)
        self.assertRaises(IOError, f.readlines)
        self.assertRaises(IOError, f.xreadlines)
        self.assertRaises(IOError, f.seek)
        self.assertRaises(IOError, f.tell)
        self.assertRaises(IOError, f.truncate)

    def test_level(self):
        """
        Default level is L{LogLevel.info} if not set.
        """
        f = LoggingFile(self.logger)
        self.assertEqual(f.level, LogLevel.info)

        f = LoggingFile(self.logger, level=LogLevel.error)
        self.assertEqual(f.level, LogLevel.error)

    def test_encoding(self):
        """
        Default encoding is C{sys.getdefaultencoding()} if not set.
        """
        f = LoggingFile(self.logger)
        self.assertEqual(f.encoding, sys.getdefaultencoding())

        f = LoggingFile(self.logger, encoding="utf-8")
        self.assertEqual(f.encoding, "utf-8")

    def test_mode(self):
        """
        Reported mode is C{"w"}.
        """
        f = LoggingFile(self.logger)
        self.assertEqual(f.mode, "w")

    def test_newlines(self):
        """
        The C{newlines} attribute is L{None}.
        """
        f = LoggingFile(self.logger)
        self.assertIsNone(f.newlines)

    def test_name(self):
        """
        The C{name} attribute is fixed.
        """
        f = LoggingFile(self.logger)
        self.assertEqual(
            f.name,
            "<LoggingFile twisted.logger.test.test_io#info>"
        )

    def test_close(self):
        """
        L{LoggingFile.close} closes the file.
        """
        f = LoggingFile(self.logger)
        f.close()

        # Writes after close must fail.
        self.assertTrue(f.closed)
        self.assertRaises(ValueError, f.write, "Hello")

    def test_flush(self):
        """
        L{LoggingFile.flush} does nothing.
        """
        f = LoggingFile(self.logger)
        f.flush()

    def test_fileno(self):
        """
        L{LoggingFile.fileno} returns C{-1}.
        """
        f = LoggingFile(self.logger)
        self.assertEqual(f.fileno(), -1)

    def test_isatty(self):
        """
        L{LoggingFile.isatty} returns C{False}.
        """
        f = LoggingFile(self.logger)
        self.assertFalse(f.isatty())

    def test_writeBuffering(self):
        """
        Writing buffers correctly.
        """
        # Only complete (newline-terminated) lines are emitted as messages.
        f = self.observedFile()
        f.write("Hello")
        self.assertEqual(f.messages, [])
        f.write(", world!\n")
        self.assertEqual(f.messages, [u"Hello, world!"])
        f.write("It's nice to meet you.\n\nIndeed.")
        self.assertEqual(
            f.messages,
            [
                u"Hello, world!",
                u"It's nice to meet you.",
                u"",
            ]
        )

    def test_writeBytesDecoded(self):
        """
        Bytes are decoded to unicode.
        """
        f = self.observedFile(encoding="utf-8")
        f.write(b"Hello, Mr. S\xc3\xa1nchez\n")
        self.assertEqual(f.messages, [u"Hello, Mr. S\xe1nchez"])

    def test_writeUnicode(self):
        """
        Unicode is unmodified.
        """
        f = self.observedFile(encoding="utf-8")
        f.write(u"Hello, Mr. S\xe1nchez\n")
        self.assertEqual(f.messages, [u"Hello, Mr. S\xe1nchez"])

    def test_writeLevel(self):
        """
        Log level is emitted properly.
        """
        f = self.observedFile()
        f.write("Hello\n")
        self.assertEqual(len(f.events), 1)
        self.assertEqual(f.events[0]["log_level"], LogLevel.info)

        f = self.observedFile(level=LogLevel.error)
        f.write("Hello\n")
        self.assertEqual(len(f.events), 1)
        self.assertEqual(f.events[0]["log_level"], LogLevel.error)

    def test_writeFormat(self):
        """
        Log format is C{u"{message}"}.
        """
        f = self.observedFile()
        f.write("Hello\n")
        self.assertEqual(len(f.events), 1)
        self.assertEqual(f.events[0]["log_format"], u"{log_io}")

    def test_writelinesBuffering(self):
        """
        C{writelines} does not add newlines.
        """
        # Note this is different behavior than t.p.log.StdioOnnaStick.
        f = self.observedFile()
        f.writelines(("Hello", ", ", ""))
        self.assertEqual(f.messages, [])
        f.writelines(("world!\n",))
        self.assertEqual(f.messages, [u"Hello, world!"])
        f.writelines(("It's nice to meet you.\n\n", "Indeed."))
        self.assertEqual(
            f.messages,
            [
                u"Hello, world!",
                u"It's nice to meet you.",
                u"",
            ]
        )

    def test_print(self):
        """
        L{LoggingFile} can replace L{sys.stdout}.
        """
        f = self.observedFile()
        self.patch(sys, "stdout", f)

        print("Hello,", end=" ")
        print("world.")

        self.assertEqual(f.messages, [u"Hello, world."])

    def observedFile(self, **kwargs):
        """
        Construct a L{LoggingFile} with a built-in observer.

        @param kwargs: keyword arguments for the L{LoggingFile}.
        @type kwargs: L{dict}

        @return: a L{LoggingFile} with an observer that appends received
            events into the file's C{events} attribute (a L{list}) and
            event messages into the file's C{messages} attribute (a L{list}).
        @rtype: L{LoggingFile}
        """
        def observer(event):
            f.events.append(event)
            if "log_io" in event:
                f.messages.append(event["log_io"])

        log = Logger(observer=observer)

        f = LoggingFile(logger=log, **kwargs)
        f.events = []
        f.messages = []

        return f
|
|
import mxnet as mx
import numpy as np
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group: Conv -> [BatchNorm] -> Activation

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.symbol
        the activation output of the group (only the final symbol is
        returned, unlike legacy_conv_act_layer which returns (conv, relu))
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name))
    if use_batchnorm:
        # BatchNorm is inserted between convolution and activation.
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group (legacy VGG-style naming)

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        must be False; batch normalization is not supported here

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    # BUGFIX: the former `if use_batchnorm:` branch after this assert was
    # unreachable (and under `python -O`, where asserts vanish, it would have
    # applied BatchNorm *after* the activation, inconsistent with
    # conv_act_layer). The dead branch is removed.
    # Bias gets a doubled learning rate, following the original Caffe SSD setup.
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    return conv, relu
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
    """Collect SSD feature maps from a base network, creating extra layers.

    Parameters
    ----------
    body : mx.symbol
        base network whose internal layers may be reused
    from_layers : list of str
        names of base-network layers to extract; '' means "append a new
        1x1 -> 3x3 convolution block on top of the previous feature map"
    num_filters : list of int
        filter counts for the appended 3x3 convolutions (-1 for extracted
        layers, unless normalization/scaling needs the count)
    strides : list of int
        strides for the appended 3x3 convolutions (-1 for extracted layers)
    pads : list of int
        paddings for the appended 3x3 convolutions (-1 for extracted layers)
    min_filter : int
        lower bound on the filter count of the intermediate 1x1 convolution

    Returns
    -------
    list of mx.Symbols, one feature map per entry in from_layers
    """
    # Validate the layer specification before touching the graph.
    assert len(from_layers) > 0
    assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
    assert len(from_layers) == len(num_filters) == len(strides) == len(pads)

    internals = body.get_internals()
    layers = []
    for k, (from_layer, num_filter, s, p) in enumerate(
            zip(from_layers, num_filters, strides, pads)):
        layer_name = from_layer.strip()
        if layer_name:
            # Reuse an existing layer of the base network.
            layers.append(internals[layer_name + '_output'])
        else:
            # Grow a new 1x1 -> 3x3 block on top of the previous feature map.
            assert len(layers) > 0
            assert num_filter > 0
            num_1x1 = max(min_filter, num_filter // 2)
            conv_1x1 = conv_act_layer(layers[-1], 'multi_feat_%d_conv_1x1' % (k),
                num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
            conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
                num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
            layers.append(conv_3x3)
    return layers
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
                   ratios=[1], normalization=-1, num_channels=[],
                   clip=False, interm_layer=0, steps=[]):
    """
    the basic aggregation module for SSD detection. Takes in multiple layers,
    generate multiple object detection targets by customized layers

    Parameters:
    ----------
    from_layers : list of mx.symbol
        generate multibox detection from layers
    num_classes : int
        number of classes excluding background, will automatically handle
        background in this function
    sizes : list or list of list
        [min_size, max_size] for all layers or [[], [], []...] for specific layers
    ratios : list or list of list
        [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
    normalization : int or list of int
        use normalizations value for all layers or [...] for specific layers,
        -1 indicate no normalizations and scales
    num_channels : list of int
        number of input layer channels, used when normalization is enabled, the
        length of list should equals to number of normalization layers
    clip : bool
        whether to clip out-of-image boxes
    interm_layer : int
        if > 0, will add a intermediate Convolution layer
    steps : list
        specify steps for each MultiBoxPrior layer, leave empty, it will calculate
        according to layer dimensions

    Returns:
    ----------
    list of outputs, as [loc_preds, cls_preds, anchor_boxes]
    loc_preds : localization regression prediction
    cls_preds : classification prediction
    anchor_boxes : generated anchor boxes
    """
    assert len(from_layers) > 0, "from_layers must not be empty list"
    assert num_classes > 0, \
        "num_classes {} must be larger than 0".format(num_classes)

    assert len(ratios) > 0, "aspect ratios must not be empty list"
    if not isinstance(ratios[0], list):
        # provided only one ratio list, broadcast to all from_layers
        ratios = [ratios] * len(from_layers)
    assert len(ratios) == len(from_layers), \
        "ratios and from_layers must have same length"

    assert len(sizes) > 0, "sizes must not be empty list"
    if len(sizes) == 2 and not isinstance(sizes[0], list):
        # provided size range, we need to compute the sizes for each layer
        assert sizes[0] > 0 and sizes[0] < 1
        assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
        tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
        # BUGFIX: `start_offset` was referenced but never defined, raising
        # NameError whenever a size range (including the default) was used.
        # Per the SSD paper, the first (highest-resolution) layer uses a
        # fixed small scale of 0.1.
        start_offset = .1
        min_sizes = [start_offset] + tmp.tolist()
        max_sizes = tmp.tolist() + [tmp[-1] + start_offset]
        # BUGFIX: materialize zip() so len() and indexing below also work on
        # Python 3, where zip returns a lazy iterator.
        sizes = list(zip(min_sizes, max_sizes))
    assert len(sizes) == len(from_layers), \
        "sizes and from_layers must have same length"

    if not isinstance(normalization, list):
        normalization = [normalization] * len(from_layers)
    assert len(normalization) == len(from_layers)

    assert sum(x > 0 for x in normalization) <= len(num_channels), \
        "must provide number of channels for each normalized layer"

    if steps:
        assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"

    loc_pred_layers = []
    cls_pred_layers = []
    anchor_layers = []
    num_classes += 1  # always use background as label 0

    for k, from_layer in enumerate(from_layers):
        from_name = from_layer.name
        # normalize: L2-normalize the feature map per channel and apply a
        # learnable per-channel scale (initialized to normalization[k]).
        if normalization[k] > 0:
            from_layer = mx.symbol.L2Normalization(data=from_layer, \
                mode="channel", name="{}_norm".format(from_name))
            scale = mx.symbol.Variable(name="{}_scale".format(from_name),
                shape=(1, num_channels.pop(0), 1, 1),
                init=mx.init.Constant(normalization[k]),
                attr={'__wd_mult__': '0.1'})
            from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
        if interm_layer > 0:
            from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
                stride=(1,1), pad=(1,1), num_filter=interm_layer, \
                name="{}_inter_conv".format(from_name))
            from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
                name="{}_inter_relu".format(from_name))

        # estimate number of anchors per location
        # here I follow the original version in caffe
        # TODO: better way to shape the anchors??
        size = sizes[k]
        assert len(size) > 0, "must provide at least one size"
        size_str = "(" + ",".join([str(x) for x in size]) + ")"
        ratio = ratios[k]
        assert len(ratio) > 0, "must provide at least one ratio"
        ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
        num_anchors = len(size) - 1 + len(ratio)

        # create location prediction layer (4 offsets per anchor)
        num_loc_pred = num_anchors * 4
        bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
            init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
        loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
            stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
            name="{}_loc_pred_conv".format(from_name))
        loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
        loc_pred = mx.symbol.Flatten(data=loc_pred)
        loc_pred_layers.append(loc_pred)

        # create class prediction layer (num_classes scores per anchor)
        num_cls_pred = num_anchors * num_classes
        bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
            init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
        cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
            stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
            name="{}_cls_pred_conv".format(from_name))
        cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
        cls_pred = mx.symbol.Flatten(data=cls_pred)
        cls_pred_layers.append(cls_pred)

        # create anchor generation layer; '(-1.0, -1.0)' tells MultiBoxPrior
        # to infer the step from the layer's spatial dimensions
        if steps:
            step = (steps[k], steps[k])
        else:
            step = '(-1.0, -1.0)'
        anchors = mx.contrib.symbol.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str, \
            clip=clip, name="{}_anchors".format(from_name), steps=step)
        anchors = mx.symbol.Flatten(data=anchors)
        anchor_layers.append(anchors)

    loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
        dim=1, name="multibox_loc_pred")
    cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
        dim=1)
    cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
    cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
    anchor_boxes = mx.symbol.Concat(*anchor_layers, \
        num_args=len(anchor_layers), dim=1)
    anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
    return [loc_preds, cls_preds, anchor_boxes]
|
|
# -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re, os, urllib, urlparse, json, binascii
from resources.lib.modules import client
def google(url):
    """Resolve a Google-hosted video page (Drive/Docs, Photos, Picasaweb,
    Google+) into a list of stream dicts, one per quality tier, best first.

    Each returned dict is {'quality': ..., 'url': ...} with the request
    headers appended to the url after a '|'. Returns None on any failure.
    NOTE(review): Python 2 only (urlparse / urllib.unquote).
    """
    try:
        # youtube./docid= links are first rewritten to the Drive file viewer.
        if any(x in url for x in ['youtube.', 'docid=']): url = 'https://drive.google.com/file/d/%s/view' % re.compile('docid=([\w-]+)').findall(url)[0]

        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            # Normalize docs/drive URLs to the docs.google.com player page.
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        result = client.request(url, output='extended', headers=headers)

        try:
            headers['Cookie'] = result[2]['Set-Cookie']
        except:
            pass

        result = result[0]

        if netloc == 'docs' or netloc == 'drive':
            # fmt_stream_map holds "itag|url" pairs for each transcode.
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i, append_height=True) for i in result], [])

        elif netloc == 'photos':
            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]
            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]
            result = sum([googletag(i, append_height=True) for i in result], [])

        elif netloc == 'picasaweb':
            # The URL fragment id selects one entry of the preloaded feed.
            id = re.compile('#(\d*)').findall(url)[0]
            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']
            if len(result) > 1:
                result = [i for i in result if str(id) in i['link'][0]['href']][0]
            elif len(result) == 1:
                result = result[0]
            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i, append_height=True) for i in result], [])

        elif netloc == 'plus':
            id = (urlparse.urlparse(url).path).split('/')[-1]
            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = result.split('"%s"' % id)[-1].split(']]')[0]
            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]
            result = sum([googletag(i, append_height=True) for i in result], [])

        # Keep the single best stream per quality tier, highest first.
        result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)

        url = []
        for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
            try:
                url += [[i for i in result if i.get('quality') == q][0]]
            except:
                pass

        for i in url:
            i.pop('height', None)
            i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})

        if not url: return
        return url
    except:
        return
def googletag(url, append_height=False):
    """Map a Google video URL to its quality tier via the itag parameter.

    The itag may appear as an `itag=<N>` query parameter or as an `=m<N>`
    suffix. Returns a one-element list of stream dicts ({'quality', 'url'}
    plus 'height' when append_height is True), or [] when the itag is
    missing or unknown.
    """
    candidates = re.compile('itag=(\d*)').findall(url)
    candidates += re.compile('=m(\d*)$').findall(url)
    if not candidates:
        return []
    itag = candidates[0]

    # itag -> quality tier and pixel height, following YouTube's format ids.
    itag_map = {'151': {'quality': 'SD', 'height': 72}, '212': {'quality': 'SD', 'height': 480}, '313': {'quality': '4K', 'height': 2160},
        '242': {'quality': 'SD', 'height': 240}, '315': {'quality': '4K', 'height': 2160}, '219': {'quality': 'SD', 'height': 480},
        '133': {'quality': 'SD', 'height': 240}, '271': {'quality': '1440p', 'height': 1440}, '272': {'quality': '4K', 'height': 2160},
        '137': {'quality': '1080p', 'height': 1080}, '136': {'quality': 'HD', 'height': 720}, '135': {'quality': 'SD', 'height': 480},
        '134': {'quality': 'SD', 'height': 360}, '82': {'quality': 'SD', 'height': 360}, '83': {'quality': 'SD', 'height': 480},
        '218': {'quality': 'SD', 'height': 480}, '93': {'quality': 'SD', 'height': 360}, '84': {'quality': 'HD', 'height': 720},
        '170': {'quality': '1080p', 'height': 1080}, '167': {'quality': 'SD', 'height': 360}, '22': {'quality': 'HD', 'height': 720},
        '46': {'quality': '1080p', 'height': 1080}, '160': {'quality': 'SD', 'height': 144}, '44': {'quality': 'SD', 'height': 480},
        '45': {'quality': 'HD', 'height': 720}, '43': {'quality': 'SD', 'height': 360}, '94': {'quality': 'SD', 'height': 480},
        '5': {'quality': 'SD', 'height': 240}, '6': {'quality': 'SD', 'height': 270}, '92': {'quality': 'SD', 'height': 240},
        '85': {'quality': '1080p', 'height': 1080}, '308': {'quality': '1440p', 'height': 1440}, '278': {'quality': 'SD', 'height': 144},
        '78': {'quality': 'SD', 'height': 480}, '302': {'quality': 'HD', 'height': 720}, '303': {'quality': '1080p', 'height': 1080},
        '245': {'quality': 'SD', 'height': 480}, '244': {'quality': 'SD', 'height': 480}, '247': {'quality': 'HD', 'height': 720},
        '246': {'quality': 'SD', 'height': 480}, '168': {'quality': 'SD', 'height': 480}, '266': {'quality': '4K', 'height': 2160},
        '243': {'quality': 'SD', 'height': 360}, '264': {'quality': '1440p', 'height': 1440}, '102': {'quality': 'HD', 'height': 720},
        '100': {'quality': 'SD', 'height': 360}, '101': {'quality': 'SD', 'height': 480}, '95': {'quality': 'HD', 'height': 720},
        '248': {'quality': '1080p', 'height': 1080}, '96': {'quality': '1080p', 'height': 1080}, '91': {'quality': 'SD', 'height': 144},
        '38': {'quality': '4K', 'height': 3072}, '59': {'quality': 'SD', 'height': 480}, '17': {'quality': 'SD', 'height': 144},
        '132': {'quality': 'SD', 'height': 240}, '18': {'quality': 'SD', 'height': 360}, '37': {'quality': '1080p', 'height': 1080},
        '35': {'quality': 'SD', 'height': 480}, '34': {'quality': 'SD', 'height': 360}, '298': {'quality': 'HD', 'height': 720},
        '299': {'quality': '1080p', 'height': 1080}, '169': {'quality': 'HD', 'height': 720}}

    info = itag_map.get(itag)
    if info is None:
        return []
    stream = {'quality': info['quality'], 'url': url}
    if append_height:
        stream['height'] = info['height']
    return [stream]
def googlepass(url):
    """Follow a googlevideo redirect URL (optionally 'url|headers' form) and
    return the final URL with its headers re-appended, or None on failure."""
    try:
        try:
            # Request headers may be piggybacked after the last '|'.
            headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except:
            headers = None
        url = url.split('|')[0].replace('\\', '')
        url = client.request(url, headers=headers, output='geturl')
        # The resolved URL signals the required scheme via its query string.
        if 'requiressl=yes' in url:
            url = url.replace('http://', 'https://')
        else:
            url = url.replace('https://', 'http://')
        if headers: url += '|%s' % urllib.urlencode(headers)
        return url
    except:
        return
def vk(url):
    """Extract stream URLs from a VK video page.

    Returns a list of {'quality', 'url'} dicts (highest quality first),
    or None. NOTE(review): Python 2 only (urlparse).
    """
    try:
        query = urlparse.parse_qs(urlparse.urlparse(url).query)

        try:
            # Preferred form: ?oid=<owner>&id=<video>.
            oid, video_id = query['oid'][0], query['id'][0]
        except:
            # Fallback for /video<oid>_<id> style path URLs.
            oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]

        sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
        html = client.request(sources_url)
        # Strip non-ASCII bytes before regex parsing.
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)

        sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)
        if not sources:
            sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)

        sources = [(i[0], i[1].replace('\\', '')) for i in sources]
        sources = dict(sources)

        # Probe known heights best-first; return as soon as anything matched.
        url = []
        try:
            url += [{'quality': 'HD', 'url': sources['720']}]
        except:
            pass
        try:
            url += [{'quality': 'SD', 'url': sources['540']}]
        except:
            pass
        try:
            url += [{'quality': 'SD', 'url': sources['480']}]
        except:
            pass
        if not url == []: return url
        try:
            url += [{'quality': 'SD', 'url': sources['360']}]
        except:
            pass
        if not url == []: return url
        try:
            url += [{'quality': 'SD', 'url': sources['240']}]
        except:
            pass
        if not url == []: return url
    except:
        return
def odnoklassniki(url):
    """Resolve an ok.ru video page to a quality-sorted list of streams.

    Returns every HD-tier stream plus at most one SD fallback, or None.
    """
    try:
        media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]

        result = client.request('http://ok.ru/dk', post={'cmd': 'videoPlayerMetadata', 'mid': media_id})
        # Strip non-ASCII bytes before JSON parsing.
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        result = json.loads(result).get('videos', [])

        # Map ok.ru's internal stream names to quality labels.
        hd = []
        for name, quali in {'ultra': '4K', 'quad': '1440p', 'full': '1080p', 'hd': 'HD'}.items():
            hd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]

        sd = []
        for name, quali in {'sd': 'SD', 'low': 'SD', 'lowest': 'SD', 'mobile': 'SD'}.items():
            sd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]

        # Keep all HD variants but only the single best SD variant.
        url = hd + sd[:1]
        if not url == []: return url
    except:
        return
def cldmailru(url):
    """Resolve a cloud.mail.ru public share link to a direct download URL,
    or None on any failure."""
    try:
        public_part = url.split('public')[-1]

        page = client.request(url)
        # Strip non-ASCII bytes before regex parsing.
        page = re.sub(r'[^\x00-\x7F]+', ' ', page)

        token = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', page)[0]
        base = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', page)[0]

        return '%s%s?key=%s' % (base, public_part, token)
    except:
        return
def yandex(url):
    """Resolve a yadi.sk share link to a direct file URL, or None."""
    try:
        cookie = client.request(url, output='cookie')

        r = client.request(url, cookie=cookie)
        # Strip non-ASCII bytes before regex parsing.
        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

        sk = re.findall('"sk"\s*:\s*"([^"]+)', r)[0]
        idstring = re.findall('"id"\s*:\s*"([^"]+)', r)[0]

        # Random hex client id. NOTE(review): b2a_hex returns bytes on
        # Python 3 — this module appears to target Python 2 (urllib.urlencode).
        idclient = binascii.b2a_hex(os.urandom(16))

        post = {'idClient': idclient, 'version': '3.9.2', 'sk': sk, '_model.0': 'do-get-resource-url', 'id.0': idstring}
        post = urllib.urlencode(post)

        r = client.request('https://yadi.sk/models/?_m=do-get-resource-url', post=post, cookie=cookie)
        r = json.loads(r)

        url = r['models'][0]['data']['file']
        return url
    except:
        return
|
|
import errno
import fcntl
import json
import os
import threading
import time
from subprocess import PIPE
from subprocess import Popen
import mock
import service_configuration_lib
from behave import given
from behave import then
from behave import when
from itest_utils import get_service_connection_string
from kazoo.exceptions import NodeExistsError
from steps.setup_steps import modify_configs
from paasta_tools.deployd.master import DEAD_DEPLOYD_WORKER_MESSAGE
from paasta_tools.marathon_tools import list_all_marathon_app_ids
from paasta_tools.marathon_tools import load_marathon_service_config_no_cache
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import ZookeeperPool
@given("paasta-deployd is running")
def start_deployd(context):
    """Start a paasta-deployd daemon and wait until it reports startup done.

    Side effects on ``context``: sets ``zk_hosts``, ``soa_dir``, ``daemon``
    (the Popen handle) and ``num_workers_crashed``; also spawns a background
    thread that keeps draining the daemon's stderr so the pipe never fills.
    """
    try:
        os.makedirs("/nail/etc/services")
    except OSError as e:
        # BUGFIX: only "already exists" is benign; previously every OSError
        # (EACCES, ENOSPC, ...) was silently swallowed.
        if e.errno != errno.EEXIST:
            raise
    with ZookeeperPool() as zk:
        try:
            zk.create("/autoscaling")
        except NodeExistsError:
            pass
    context.zk_hosts = "%s/mesos-testcluster" % get_service_connection_string(
        "zookeeper"
    )
    context.soa_dir = "/nail/etc/services"
    if not hasattr(context, "daemon"):
        context.daemon = Popen("paasta-deployd", stderr=PIPE)
    output = context.daemon.stderr.readline().decode("utf-8")
    start = time.time()
    timeout = start + 60
    while "Startup finished!" not in output:
        output = context.daemon.stderr.readline().decode("utf-8")
        if not output:
            raise Exception("deployd exited prematurely")
        print(output.rstrip("\n"))
        if time.time() > timeout:
            raise Exception("deployd never ran")
    context.num_workers_crashed = 0

    def dont_let_stderr_buffer():
        # Drain stderr forever; count lines that announce a dead worker.
        while True:
            line = context.daemon.stderr.readline()
            if not line:
                return
            if DEAD_DEPLOYD_WORKER_MESSAGE.encode("utf-8") in line:
                context.num_workers_crashed += 1
            print(f"deployd stderr: {line}")

    threading.Thread(target=dont_let_stderr_buffer).start()
    time.sleep(5)
@then("no workers should have crashed")
def no_workers_should_crash(context):
    """Fail if the stderr-draining thread counted any dead-worker messages."""
    crashed = context.num_workers_crashed
    if crashed > 0:
        raise Exception(
            f"Expected no workers to crash, found {crashed} stderr lines matching {DEAD_DEPLOYD_WORKER_MESSAGE!r}"
        )
@then("paasta-deployd can be stopped")
def stop_deployd(context):
    """Terminate the daemon started by start_deployd and reap it."""
    daemon = context.daemon
    daemon.terminate()
    daemon.wait()
@then("a second deployd does not become leader")
def start_second_deployd(context):
    """Launch a rival deployd and check it never claims leadership.

    The rival's stderr is switched to non-blocking so the poll loop below
    can keep reading without hanging when no output is available.
    """
    context.daemon1 = Popen("paasta-deployd", stderr=PIPE)
    output = context.daemon1.stderr.readline().decode("utf-8")
    stderr_fd = context.daemon1.stderr
    flags = fcntl.fcntl(stderr_fd, fcntl.F_GETFL)
    fcntl.fcntl(stderr_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    for _ in range(5):
        try:
            output = context.daemon1.stderr.readline().decode("utf-8")
            print(output.rstrip("\n"))
            assert "This node is elected as leader" not in output
        except IOError:
            # Non-blocking read with nothing available; retry next second.
            pass
        time.sleep(1)
@then("a second deployd becomes leader")
def second_deployd_is_leader(context):
    """Wait (up to 60s) for the rival deployd to claim leadership, then stop it."""
    def read_line():
        try:
            return context.daemon1.stderr.readline().decode("utf-8")
        except IOError:
            # stderr is non-blocking; no data available right now.
            return ""

    output = read_line()
    deadline = time.time() + 60
    while "This node is elected as leader" not in output:
        output = read_line()
        if output:
            print(output.rstrip("\n"))
        if time.time() > deadline:
            raise Exception("Timed out waiting for second deployd leader")
        time.sleep(1)
    context.daemon1.terminate()
    context.daemon1.wait()
@then('we should see "{service_instance}" listed in marathon after {seconds:d} seconds')
def check_app_running(context, service_instance, seconds):
    """Poll marathon until the app for service_instance appears (or time out)."""
    service, instance, _, _ = decompose_job_id(service_instance)
    # Bust the yaml cache so freshly-modified soa configs are re-read.
    service_configuration_lib._yaml_cache = {}
    context.marathon_config = load_marathon_service_config_no_cache(
        service, instance, context.cluster
    )
    context.app_id = context.marathon_config.format_marathon_app_dict()["id"]
    context.current_client = context.marathon_clients.get_current_client_for_service(
        context.marathon_config
    )
    poll_interval = 5
    elapsed = 0
    while elapsed < seconds:
        if context.app_id in list_all_marathon_app_ids(context.current_client):
            break
        time.sleep(poll_interval)
        elapsed += poll_interval
    assert context.app_id in list_all_marathon_app_ids(context.current_client)
    # Remember this id so later steps can assert it went away / changed.
    context.old_app_id = context.app_id
@then("we should not see the old version listed in marathon after {seconds:d} seconds")
def check_app_not_running(context, seconds):
    """Poll marathon until the previous app id disappears (or time out)."""
    poll_interval = 5
    elapsed = 0
    while elapsed < seconds:
        if context.old_app_id not in list_all_marathon_app_ids(context.current_client):
            return
        time.sleep(poll_interval)
        elapsed += poll_interval
    assert context.old_app_id not in list_all_marathon_app_ids(context.current_client)
@then("we set a new command for our service instance to {cmd}")
def set_cmd(context, cmd):
    """Remember the desired command on the behave context for later steps."""
    context.cmd = cmd
@then('the appid for "{service_instance}" should have changed')
def check_sha_changed(context, service_instance):
    """Assert the marathon app id differs from the one recorded earlier."""
    service, instance, _, _ = decompose_job_id(service_instance)
    # Drop the yaml cache so config changes made by earlier steps are seen.
    service_configuration_lib._yaml_cache = {}
    context.marathon_config = load_marathon_service_config_no_cache(
        service, instance, context.cluster
    )
    new_app_id = context.marathon_config.format_marathon_app_dict()["id"]
    assert context.app_id != new_app_id
@given(
    'we have a secret called "{secret_name}" for the service "{service}" with signature "{signature}"'
)
def create_secret_json_file(context, secret_name, service, signature):
    """Write a dummy secret json file into the service's soa-configs dir.

    Creates <soa_dir>/<service>/secrets/<secret_name>.json containing a
    fixed ciphertext and the given signature for the "devc" environment.
    """
    secret = {
        "environments": {
            "devc": {"ciphertext": "ScrambledNonsense", "signature": signature}
        }
    }
    secrets_dir = os.path.join(context.soa_dir, service, "secrets")
    # exist_ok avoids the check-then-create race of the old os.path.exists guard.
    os.makedirs(secrets_dir, exist_ok=True)
    with open(os.path.join(secrets_dir, f"{secret_name}.json"), "w") as secret_file:
        json.dump(secret, secret_file)
@given(
    'we set the an environment variable called "{var}" to "{val}" for '
    'service "{service}" and instance "{instance}" for framework "{framework}"'
)
def add_env_var(context, var, val, service, instance, framework):
    """Inject a single environment variable into the instance's soa config."""
    modify_configs(context, "env", framework, service, instance, {var: val})
@when('we set some arbitrary data at "{zookeeper_path}" in ZK')
def zookeeper_write_bogus_key(context, zookeeper_path):
    """Create zookeeper_path (if needed) and store junk bytes there."""
    patched_hosts = mock.patch.object(
        SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
    )
    with patched_hosts, ZookeeperPool() as zookeeper_client:
        zookeeper_client.ensure_path(zookeeper_path)
        zookeeper_client.set(zookeeper_path, b"WHATEVER")
@given("we remove autoscaling ZK keys for test-service")
def zookeeper_rmr_keys(context):
    """Recursively delete test-service's autoscaling state from zookeeper."""
    context.zk_hosts = "%s/mesos-testcluster" % get_service_connection_string(
        "zookeeper"
    )
    patched_hosts = mock.patch.object(
        SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
    )
    with patched_hosts, ZookeeperPool() as zookeeper_client:
        zookeeper_client.delete("/autoscaling/test-service", recursive=True)
|
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python
"""manages services and nodepool"""
# -*- python -*-
import os, sys, random, time, sets, shutil, threading
import urllib, urlparse, re, getpass, pprint, signal, shutil
from pprint import pformat
from HTMLParser import HTMLParser
binfile = sys.path[0]
libdir = os.path.dirname(binfile)
sys.path.append(libdir)
import hodlib.Common.logger
from hodlib.RingMaster.idleJobTracker import JobTrackerMonitor, HadoopJobStatus
from hodlib.Common.threads import func
from hodlib.Hod.nodePool import *
from hodlib.Common.util import *
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Common.socketServers import hodXMLRPCServer
from hodlib.Common.socketServers import threadedHTTPServer
from hodlib.NodePools import *
from hodlib.NodePools.torque import *
from hodlib.GridServices import *
from hodlib.Common.descGenerator import *
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.miniHTMLParser import miniHTMLParser
from hodlib.Common.threads import simpleCommand
class ringMasterServer:
    """The RPC server that exposes all the master config
    changes. Also, one of these RPC servers runs as a proxy
    and all the hodring instances register with this proxy"""
    # Process-wide singletons: the server object and its XML-RPC transport
    # are shared via these class attributes.
    instance = None
    xmlrpc = None
    def __init__(self, cfg, log, logMasterSources, retry=5):
        # NOTE(review): 'retry' is accepted but never used here — confirm callers.
        # Prefer the Twisted-based XML-RPC server when importable; otherwise
        # fall back to the threaded stdlib-based hodXMLRPCServer.
        try:
            from hodlib.Common.socketServers import twistedXMLRPCServer
            ringMasterServer.xmlrpc = twistedXMLRPCServer("",
                cfg['ringmaster']['xrs-port-range'])
        except ImportError:
            log.info("Twisted interface not found. Using hodXMLRPCServer.")
            ringMasterServer.xmlrpc = hodXMLRPCServer("",
                cfg['ringmaster']['xrs-port-range'])
        # Every RPC method exposed to hodrings lives on logMasterSources.
        ringMasterServer.xmlrpc.register_instance(logMasterSources)
        self.logMasterSources = logMasterSources
        # serve_forever returns here (serving happens on another thread);
        # poll until the serving thread reports alive.
        ringMasterServer.xmlrpc.serve_forever()
        while not ringMasterServer.xmlrpc.is_alive():
            time.sleep(.5)
        log.debug('Ringmaster RPC Server at %d' %
            ringMasterServer.xmlrpc.server_address[1])
    def startService(ss, cfg, np, log, rm):
        # Build the RPC facade and start the singleton server.
        logMasterSources = _LogMasterSources(ss, cfg, np, log, rm)
        ringMasterServer.instance = ringMasterServer(cfg, log, logMasterSources)
    def stopService():
        ringMasterServer.xmlrpc.stop()
    def getPort():
        # NOTE(review): instances define no 'port' attribute in this file —
        # presumably dead or set elsewhere; verify before relying on it.
        return ringMasterServer.instance.port
    def getAddress():
        return 'http://%s:%d/' % (socket.gethostname(),
            ringMasterServer.xmlrpc.server_address[1])
    # Old-style staticmethod registration (pre-decorator idiom).
    startService = staticmethod(startService)
    stopService = staticmethod(stopService)
    getPort = staticmethod(getPort)
    getAddress = staticmethod(getAddress)
class _LogMasterSources:
    """All the methods that are run by the RPC server are
    added into this class """
    def __init__(self, serviceDict, cfg, np, log, rm):
        self.serviceDict = serviceDict
        # Tarball distribution bookkeeping: url registry ('dict') and per-source
        # connection counts ('count'), guarded by tarSourceLock.
        self.tarSource = []
        self.tarSourceLock = threading.Lock()
        self.dict = {}
        self.count = {}
        self.logsourceList = []
        self.logsourceListLock = threading.Lock()
        self.masterParam = []
        self.masterParamLock = threading.Lock()
        self.verify = 'none'
        self.cmdLock = threading.Lock()
        self.cfg = cfg
        self.log = log
        self.np = np
        self.rm = rm
        self.hdfsHost = None
        self.mapredHost = None
        # Cap on simultaneous downloads allowed per tarball source.
        self.maxconnect = self.cfg['ringmaster']['max-connect']
        self.log.debug("Using max-connect value %s" % self.maxconnect)
    def registerTarSource(self, hostname, url, addr=None):
        """Register 'url' as a hadoop-tarball download source.

        When a hodring finishes its own download it registers itself as a
        new source and passes its previous source in 'addr' so that source's
        connection count can be decremented."""
        self.log.debug("registering: " + url)
        lock = self.tarSourceLock
        lock.acquire()
        self.dict[url] = url
        self.count[url] = 0
        # addr is None when ringMaster himself invokes this method
        if addr:
            c = self.count[addr]
            self.count[addr] = c - 1
        lock.release()
        if addr:
            # Renamed from 'str', which shadowed the builtin.
            msg = "%s is done" % (addr)
            self.log.debug(msg)
        return url
    def getTarList(self, hodring):
        """Return the least-loaded tarball url for 'hodring' to download
        from, or 'none' if every source is at max-connect."""
        lock = self.tarSourceLock
        lock.acquire()
        leastkey = None
        leastval = -1
        for k, v in self.count.iteritems():
            if (leastval == -1):
                # First entry seeds the minimum.
                leastval = v
            if (v <= leastval and v < self.maxconnect):
                leastkey = k
                leastval = v
        if (leastkey is None):
            url = 'none'
        else:
            url = self.dict[leastkey]
            self.count[leastkey] = leastval + 1
            self.log.debug("%s %d" % (leastkey, self.count[leastkey]))
        lock.release()
        self.log.debug('sending url ' + url+" to "+hodring)
        return url
    def tarDone(self, uri):
        """Mark one download from 'uri' as finished (decrement its count)."""
        msg = "%s is done" % (uri)
        self.log.debug(msg)
        lock = self.tarSourceLock
        lock.acquire()
        c = self.count[uri]
        self.count[uri] = c - 1
        lock.release()
        return uri
    def status(self):
        """Liveness probe used by clients."""
        return True
    # FIXME: a broken clusterStart/clusterStop implementation (it relied on a
    # central service registry) used to live here as ~190 lines of
    # commented-out code; it has been removed. Recover it from version
    # control if it is ever needed again.
    def getCommand(self, addr):
        """This method is called by the
        hodrings to get commands from
        the ringmaster"""
        lock = self.cmdLock
        cmdList = []
        lock.acquire()
        try:
            try:
                # First, hand out master commands for any internal service
                # that is launchable and within its failure-retry budget.
                for v in self.serviceDict.itervalues():
                    if (not v.isExternal()):
                        if v.isLaunchable(self.serviceDict):
                            # If a master is still not launched, or the number of
                            # retries for launching master is not reached,
                            # launch master
                            if not v.isMasterLaunched() and \
                                    (v.getMasterFailureCount() <= \
                                     self.cfg['ringmaster']['max-master-failures']):
                                cmdList = v.getMasterCommands(self.serviceDict)
                                v.setlaunchedMaster()
                                v.setMasterAddress(addr)
                                break
                # No master to launch: hand out worker commands, but only
                # once every internal master is initialized.
                if cmdList == []:
                    for s in self.serviceDict.itervalues():
                        # BUGFIX: this loop used to test 'v.isExternal()' —
                        # the leftover variable from the loop above — instead
                        # of 's', so the wrong services were skipped/included.
                        if (not s.isExternal()):
                            if s.isMasterInitialized():
                                cl = s.getWorkerCommands(self.serviceDict)
                                cmdList.extend(cl)
                            else:
                                cmdList = []
                                break
            except:
                self.log.debug(get_exception_string())
        finally:
            lock.release()
        cmd = addr + pformat(cmdList)
        self.log.debug("getCommand returning " + cmd)
        return cmdList
    def getAdminCommand(self, addr):
        """This method is called by the
        hodrings to get admin commands from
        the ringmaster"""
        lock = self.cmdLock
        cmdList = []
        lock.acquire()
        try:
            try:
                for v in self.serviceDict.itervalues():
                    cmdList = v.getAdminCommands(self.serviceDict)
                    if cmdList != []:
                        break
            except Exception:
                self.log.debug(get_exception_string())
        finally:
            lock.release()
        cmd = addr + pformat(cmdList)
        self.log.debug("getAdminCommand returning " + cmd)
        return cmdList
    def addMasterParams(self, addr, vals):
        """This method is called by
        hodring to update any parameters
        its changed for the commands it was
        running"""
        self.log.debug('Comment: adding master params from %s' % addr)
        self.log.debug(pformat(vals))
        lock = self.masterParamLock
        lock.acquire()
        try:
            # Attribute the params to whichever service launched its master
            # at this address, and flag that master as initialized.
            for v in self.serviceDict.itervalues():
                if v.isMasterLaunched():
                    if (v.getMasterAddress() == addr):
                        v.setMasterParams(vals)
                        v.setMasterInitialized()
        except:
            self.log.debug(get_exception_string())
        lock.release()
        return addr
    def setHodRingErrors(self, addr, errors):
        """This method is called by the hodrings to update errors
        it encountered while starting up"""
        self.log.critical("Hodring at %s failed with following errors:\n%s" \
            % (addr, errors))
        lock = self.masterParamLock
        lock.acquire()
        try:
            for v in self.serviceDict.itervalues():
                if v.isMasterLaunched():
                    if (v.getMasterAddress() == addr):
                        # strip the PID part.
                        idx = addr.rfind('_')
                        # BUGFIX: was 'idx is not -1' — identity comparison on
                        # an int only worked by accident (small-int caching).
                        if idx != -1:
                            addr = addr[:idx]
                        v.setMasterFailed("Hodring at %s failed with following" \
                            " errors:\n%s" % (addr, errors))
        except:
            self.log.debug(get_exception_string())
        lock.release()
        return True
    def getKeys(self):
        """Return the registered service names (snapshot taken under lock)."""
        lock = self.masterParamLock
        lock.acquire()
        keys = self.serviceDict.keys()
        lock.release()
        return keys
    def getServiceAddr(self, name):
        """Return the master address of service 'name'.

        Returns 'not found' while the master is not yet initialized (or the
        name is unknown), and an 'Error: ...' string once the master has
        failed more than max-master-failures times."""
        addr = 'not found'
        self.log.debug("getServiceAddr name: %s" % name)
        lock = self.masterParamLock
        lock.acquire()
        try:
            service = self.serviceDict[name]
        except KeyError:
            pass
        else:
            self.log.debug("getServiceAddr service: %s" % service)
            # Check if we should give up ! If the limit on max failures is hit,
            # give up.
            err = service.getMasterFailed()
            if (err is not None) and \
                    (service.getMasterFailureCount() > \
                     self.cfg['ringmaster']['max-master-failures']):
                self.log.critical("Detected errors (%s) beyond allowed number"\
                    " of failures (%s). Flagging error to client" \
                    % (service.getMasterFailureCount(), \
                       self.cfg['ringmaster']['max-master-failures']))
                addr = "Error: " + err
            elif (service.isMasterInitialized()):
                addr = service.getMasterAddrs()[0]
            else:
                addr = 'not found'
        lock.release()
        self.log.debug("getServiceAddr addr %s: %s" % (name, addr))
        return addr
    def getURLs(self, name):
        """Return the info (web UI) address for service 'name', or 'none'."""
        addr = 'none'
        lock = self.masterParamLock
        lock.acquire()
        try:
            service = self.serviceDict[name]
        except KeyError:
            pass
        else:
            if (service.isMasterInitialized()):
                addr = service.getInfoAddrs()[0]
        lock.release()
        return addr
    def stopRM(self):
        """An XMLRPC call which will spawn a thread to stop the Ringmaster program."""
        # We spawn a thread here because we want the XMLRPC call to return. Calling
        # stop directly from here will also stop the XMLRPC server.
        try:
            self.log.debug("inside xml-rpc call to stop ringmaster")
            rmStopperThread = func('RMStopper', self.rm.stop)
            rmStopperThread.start()
            self.log.debug("returning from xml-rpc call to stop ringmaster")
            return True
        except:
            self.log.debug("Exception in stop: %s" % get_exception_string())
            return False
class RingMaster:
    def __init__(self, cfg, log, **kwds):
        """starts nodepool and services"""
        self.download = False
        self.httpServer = None
        self.cfg = cfg
        self.log = log
        self.__hostname = local_fqdn()
        self.workDirs = None
        # ref to the idle job tracker object.
        self.__jtMonitor = None
        self.__idlenessDetected = False
        # Shutdown bookkeeping: guards re-entrant stop() and lets main exit.
        self.__stopInProgress = False
        self.__isStopped = False # to let main exit
        self.__exitCode = 0 # exit code with which the ringmaster main method should return
        self.workers_per_ring = self.cfg['ringmaster']['workers_per_ring']
        self.__initialize_signal_handlers()
        sdd = self.cfg['servicedesc']
        gsvc = None
        # NOTE(review): grabs an arbitrary service descriptor; 'gsvc' is not
        # used later in this constructor — looks like dead code, verify.
        for key in sdd:
            gsvc = sdd[key]
            break
        npd = self.cfg['nodepooldesc']
        self.np = NodePoolUtil.getNodePool(npd, cfg, log)
        self.log.debug("Getting service ID.")
        self.serviceId = self.np.getServiceId()
        self.log.debug("Got service ID: %s" % self.serviceId)
        # If a hadoop tarball is configured, this ringmaster also serves it
        # over HTTP to the hodrings (self.download flags that mode).
        self.tarSrcLoc = None
        if self.cfg['ringmaster'].has_key('hadoop-tar-ball'):
            self.download = True
            self.tarSrcLoc = self.cfg['ringmaster']['hadoop-tar-ball']
        self.cd_to_tempdir()
        if (self.download):
            self.__copy_tarball(os.getcwd())
            self.basename = self.__find_tarball_in_dir(os.getcwd())
            if self.basename is None:
                raise Exception('Did not find tarball copied from %s in %s.'
                    % (self.tarSrcLoc, os.getcwd()))
        self.serviceAddr = to_http_url(self.cfg['ringmaster']['svcrgy-addr'])
        self.log.debug("Service registry @ %s" % self.serviceAddr)
        self.serviceClient = hodXRClient(self.serviceAddr)
        self.serviceDict = {}
        # Build the HDFS and Map/Reduce service objects (external or managed)
        # keyed by service name in self.serviceDict.
        try:
            sdl = self.cfg['servicedesc']
            workDirs = self.getWorkDirs(cfg)
            hdfsDesc = sdl['hdfs']
            hdfs = None
            # Determine hadoop Version
            hadoopVers = hadoopVersion(self.__getHadoopDir(), \
                self.cfg['hodring']['java-home'], self.log)
            if (hadoopVers['major']==None) or (hadoopVers['minor']==None):
                raise Exception('Could not retrive the version of Hadoop.'
                    + ' Check the Hadoop installation or the value of the hodring.java-home variable.')
            if hdfsDesc.isExternal():
                hdfs = HdfsExternal(hdfsDesc, workDirs, version=int(hadoopVers['minor']))
                hdfs.setMasterParams( self.cfg['gridservice-hdfs'] )
            else:
                hdfs = Hdfs(hdfsDesc, workDirs, 0, version=int(hadoopVers['minor']),
                    workers_per_ring = self.workers_per_ring)
            self.serviceDict[hdfs.getName()] = hdfs
            mrDesc = sdl['mapred']
            mr = None
            if mrDesc.isExternal():
                mr = MapReduceExternal(mrDesc, workDirs, version=int(hadoopVers['minor']))
                mr.setMasterParams( self.cfg['gridservice-mapred'] )
            else:
                mr = MapReduce(mrDesc, workDirs,1, version=int(hadoopVers['minor']),
                    workers_per_ring = self.workers_per_ring)
            self.serviceDict[mr.getName()] = mr
        except:
            self.log.critical("Exception in creating Hdfs and Map/Reduce descriptor objects: \
                    %s." % get_exception_error_string())
            self.log.debug(get_exception_string())
            raise
        # should not be starting these in a constructor
        ringMasterServer.startService(self.serviceDict, cfg, self.np, log, self)
        self.rpcserver = ringMasterServer.getAddress()
        self.httpAddress = None
        self.tarAddress = None
        hostname = socket.gethostname()
        if (self.download):
            # Serve the tarball over HTTP and register ourselves as the
            # first download source.
            self.httpServer = threadedHTTPServer(hostname,
                self.cfg['ringmaster']['http-port-range'])
            self.httpServer.serve_forever()
            self.httpAddress = "http://%s:%d/" % (self.httpServer.server_address[0],
                self.httpServer.server_address[1])
            self.tarAddress = "%s%s" % (self.httpAddress, self.basename)
            ringMasterServer.instance.logMasterSources.registerTarSource(hostname,
                self.tarAddress)
        else:
            self.log.debug("Download not set.")
        self.log.debug("%s %s %s %s %s" % (self.cfg['ringmaster']['userid'],
            self.serviceId, self.__hostname, 'ringmaster', 'hod'))
        # Advertise this ringmaster in the service registry (with the http
        # endpoint too when serving a tarball).
        if self.cfg['ringmaster']['register']:
            if self.httpAddress:
                self.serviceClient.registerService(self.cfg['ringmaster']['userid'],
                    self.serviceId, self.__hostname, 'ringmaster', 'hod', {
                    'xrs' : self.rpcserver, 'http' : self.httpAddress })
            else:
                self.serviceClient.registerService(self.cfg['ringmaster']['userid'],
                    self.serviceId, self.__hostname, 'ringmaster', 'hod', {
                    'xrs' : self.rpcserver, })
        self.log.debug("Registered with serivce registry: %s." % self.serviceAddr)
        hodRingPath = os.path.join(cfg['ringmaster']['base-dir'], 'bin', 'hodring')
        hodRingWorkDir = os.path.join(cfg['hodring']['temp-dir'], 'hodring' + '_'
            + getpass.getuser())
        # Propagate ringmaster-derived settings into the hodring section so
        # spawned hodrings can find this ringmaster and the tarball.
        self.cfg['hodring']['hodring'] = [hodRingWorkDir,]
        self.cfg['hodring']['svcrgy-addr'] = self.cfg['ringmaster']['svcrgy-addr']
        self.cfg['hodring']['service-id'] = self.np.getServiceId()
        self.cfg['hodring']['ringmaster-xrs-addr'] = self.__url_to_addr(self.rpcserver)
        if (self.tarSrcLoc != None):
            cfg['hodring']['download-addr'] = self.tarAddress
        self.__init_job_tracker_monitor(ringMasterServer.instance.logMasterSources)
    def __init_job_tracker_monitor(self, logMasterSources):
        """Start the JobTrackerMonitor thread that watches for an idle job
        tracker so idle clusters can be auto-deallocated."""
        hadoopDir = self.__getHadoopDir()
        self.log.debug('hadoopdir=%s, java-home=%s' % \
            (hadoopDir, self.cfg['hodring']['java-home']))
        try:
            self.__jtMonitor = JobTrackerMonitor(self.log, self,
                self.cfg['ringmaster']['jt-poll-interval'],
                self.cfg['ringmaster']['idleness-limit'],
                hadoopDir, self.cfg['hodring']['java-home'],
                logMasterSources)
            self.log.debug('starting jt monitor')
            self.__jtMonitor.start()
        except:
            # Monitoring is best-effort: without it the cluster just won't be
            # deallocated automatically when idle.
            self.log.critical('Exception in running idle job tracker. This cluster cannot be deallocated if idle.\
                    Exception message: %s' % get_exception_error_string())
            self.log.debug('Exception details: %s' % get_exception_string())
    def __getHadoopDir(self):
        """Return the hadoop installation directory: the root of the untarred
        configured tarball, or the gridservice-mapred 'pkgs' directory."""
        hadoopDir = None
        if self.cfg['ringmaster'].has_key('hadoop-tar-ball'):
            # Untar the previously-copied tarball into the temp cwd and use
            # its top-level directory.
            tarFile = os.path.join(os.getcwd(), self.basename)
            ret = untar(tarFile, os.getcwd())
            if not ret:
                raise Exception('Untarring tarfile %s to directory %s failed. Cannot find hadoop directory.' \
                    % (tarFile, os.getcwd()))
            hadoopDir = os.path.join(os.getcwd(), self.__get_dir(tarFile))
        else:
            hadoopDir = self.cfg['gridservice-mapred']['pkgs']
        self.log.debug('Returning Hadoop directory as: %s' % hadoopDir)
        return hadoopDir
def __get_dir(self, name):
"""Return the root directory inside the tarball
specified by name. Assumes that the tarball begins
with a root directory."""
import tarfile
myTarFile = tarfile.open(name)
hadoopPackage = myTarFile.getnames()[0]
self.log.debug("tarball name : %s hadoop package name : %s" %(name,hadoopPackage))
return hadoopPackage
def __find_tarball_in_dir(self, dir):
"""Find the tarball among files specified in the given
directory. We need this method because how the tarball
source URI is given depends on the method of copy and
we can't get the tarball name from that.
This method will fail if there are multiple tarballs
in the directory with the same suffix."""
files = os.listdir(dir)
for file in files:
if self.tarSrcLoc.endswith(file):
return file
return None
    def __copy_tarball(self, destDir):
        """Copy the hadoop tar ball from a remote location to the
        specified destination directory. Based on the URL it executes
        an appropriate copy command. Throws an exception if the command
        returns a non-zero exit code."""
        # for backwards compatibility, treat the default case as file://
        url = ''
        if self.tarSrcLoc.startswith('/'):
            url = 'file:/'
        src = '%s%s' % (url, self.tarSrcLoc)
        if src.startswith('file://'):
            # Drop the 'file:/' scheme, keeping the path's leading slash.
            src = src[len('file://')-1:]
            cpCmd = '/bin/cp'
            cmd = '%s %s %s' % (cpCmd, src, destDir)
            self.log.debug('Command to execute: %s' % cmd)
            copyProc = simpleCommand('remote copy', cmd)
            copyProc.start()
            copyProc.wait()
            copyProc.join()
            ret = copyProc.exit_code()
            self.log.debug('Completed command execution. Exit Code: %s.' % ret)
            if ret != 0:
                output = copyProc.output()
                raise Exception('Could not copy tarball using command %s. Exit code: %s. Output: %s'
                    % (cmd, ret, output))
        else:
            # Only file:// (or a plain absolute path) sources are supported.
            raise Exception('Unsupported URL for file: %s' % src)
# input: http://hostname:port/. output: [hostname,port]
def __url_to_addr(self, url):
addr = url.rstrip('/')
if addr.startswith('http://'):
addr = addr.replace('http://', '', 1)
addr_parts = addr.split(':')
return [addr_parts[0], int(addr_parts[1])]
def __initialize_signal_handlers(self):
def sigStop(sigNum, handler):
sig_wrapper(sigNum, self.stop)
signal.signal(signal.SIGTERM, sigStop)
signal.signal(signal.SIGINT, sigStop)
signal.signal(signal.SIGQUIT, sigStop)
def __clean_up(self):
tempDir = self.__get_tempdir()
os.chdir(os.path.split(tempDir)[0])
if os.path.exists(tempDir):
shutil.rmtree(tempDir, True)
self.log.debug("Cleaned up temporary dir: %s" % tempDir)
def __get_tempdir(self):
dir = os.path.join(self.cfg['ringmaster']['temp-dir'],
"%s.%s.ringmaster" % (self.cfg['ringmaster']['userid'],
self.np.getServiceId()))
return dir
    def getWorkDirs(self, cfg, reUse=False):
        """Return per-allocation work directories (one per configured parent
        dir), generating unique names on first use. With reUse=True a
        previously computed list is returned unchanged."""
        if (not reUse) or (self.workDirs == None):
            import math
            # Scale a random float by 100 until it becomes integral — for a
            # binary float this terminates once its magnitude is large enough
            # that its ulp >= 1 — yielding a random int for the unique name.
            frand = random.random()
            while math.ceil(frand) != math.floor(frand):
                frand = frand * 100
            irand = int(frand)
            uniq = '%s-%d-%s' % (socket.gethostname(), os.getpid(), irand)
            dirs = []
            parentDirs = cfg['ringmaster']['work-dirs']
            for p in parentDirs:
                dir = os.path.join(p, uniq)
                dirs.append(dir)
            self.workDirs = dirs
        return self.workDirs
def _fetchLink(self, link, parentDir):
parser = miniHTMLParser()
self.log.debug("Checking link %s" %link)
while link:
# Get the file from the site and link
input = urllib.urlopen(link)
out = None
contentType = input.info().gettype()
isHtml = contentType == 'text/html'
#print contentType
if isHtml:
parser.setBaseUrl(input.geturl())
else:
parsed = urlparse.urlparse(link)
hp = parsed[1]
h = hp
p = None
if hp.find(':') != -1:
h, p = hp.split(':', 1)
path = parsed[2]
path = path.split('/')
file = os.path.join(parentDir, h, p)
for c in path:
if c == '':
continue
file = os.path.join(file, c)
try:
self.log.debug('Creating %s' % file)
dir, tail = os.path.split(file)
if not os.path.exists(dir):
os.makedirs(dir)
except:
self.log.debug(get_exception_string())
out = open(file, 'w')
bufSz = 8192
buf = input.read(bufSz)
while len(buf) > 0:
if isHtml:
# Feed the file into the HTML parser
parser.feed(buf)
if out:
out.write(buf)
buf = input.read(bufSz)
input.close()
if out:
out.close()
# Search the retfile here
# Get the next link in level traversal order
link = parser.getNextLink()
parser.close()
    def _finalize(self):
        """End-of-run cleanup: finalize the nodepool."""
        try:
            # FIXME: get dir from config
            # NOTE(review): this computed path is never used — dead code kept
            # pending the FIXME above.
            dir = 'HOD-log-P%d' % (os.getpid())
            dir = os.path.join('.', dir)
        except:
            self.log.debug(get_exception_string())
        self.np.finalize()
def handleIdleJobTracker(self):
self.log.critical("Detected idle job tracker for %s seconds. The allocation will be cleaned up." \
% self.cfg['ringmaster']['idleness-limit'])
self.__idlenessDetected = True
def cd_to_tempdir(self):
dir = self.__get_tempdir()
if not os.path.exists(dir):
os.makedirs(dir)
os.chdir(dir)
return dir
def getWorkload(self):
return self.workload
def getHostName(self):
return self.__hostname
def start(self):
"""run the thread main loop"""
self.log.debug("Entered start method.")
hodring = os.path.join(self.cfg['ringmaster']['base-dir'],
'bin', 'hodring')
largs = [hodring]
targs = self.cfg.get_args(section='hodring')
largs.extend(targs)
hodringCmd = ""
for item in largs:
hodringCmd = "%s%s " % (hodringCmd, item)
self.log.debug(hodringCmd)
if self.np.runWorkers(largs) > 0:
self.log.critical("Failed to start worker.")
self.log.debug("Returned from runWorkers.")
self._finalize()
def __findExitCode(self):
    """Determine the exit code based on the status of the cluster or jobs run on them"""
    xmlrpcServer = ringMasterServer.instance.logMasterSources
    # 7/8: the HDFS / MapRed daemon never registered a usable address
    # (either missing, or an "Error: ..." string was recorded instead).
    if xmlrpcServer.getServiceAddr('hdfs') == 'not found' or \
        xmlrpcServer.getServiceAddr('hdfs').startswith("Error: "):
        self.__exitCode = 7
    elif xmlrpcServer.getServiceAddr('mapred') == 'not found' or \
        xmlrpcServer.getServiceAddr('mapred').startswith("Error: "):
        self.__exitCode = 8
    else:
        # Both daemons registered: fall back to overall cluster health,
        # then to the consolidated hadoop-jobs exit code.
        clusterStatus = get_cluster_status(xmlrpcServer.getServiceAddr('hdfs'),
                                           xmlrpcServer.getServiceAddr('mapred'))
        if clusterStatus != 0:
            self.__exitCode = clusterStatus
        else:
            self.__exitCode = self.__findHadoopJobsExitCode()
    self.log.debug('exit code %s' % self.__exitCode)
def __findHadoopJobsExitCode(self):
    """Determine the consolidated exit code of hadoop jobs run on this
    cluster, provided this information is available. Return 0 otherwise.

    Returns 16 when every tracked job failed, 17 when only some did.
    """
    ret = 0
    failureStatus = 3  # job-status value reported for a failed job
    failureCount = 0
    if self.__jtMonitor:
        jobStatusList = self.__jtMonitor.getJobsStatus()
        try:
            if len(jobStatusList) > 0:
                for jobStatus in jobStatusList:
                    self.log.debug('job status for %s: %s' % (jobStatus.getJobId(),
                                                              jobStatus.getStatus()))
                    if jobStatus.getStatus() == failureStatus:
                        failureCount = failureCount + 1
                if failureCount > 0:
                    if failureCount == len(jobStatusList):  # all jobs failed
                        ret = 16
                    else:
                        ret = 17
        except:
            # Bug fix: the original format string had no %s placeholder, so
            # applying '%' raised TypeError inside this handler and hid the
            # real error. Keep the bare except: this path is best-effort.
            self.log.debug('exception in finding hadoop jobs exit code: %s'
                           % get_exception_string())
    return ret
def stop(self):
    """Shut down the ring master exactly once: compute the exit code,
    stop the XML-RPC service, job-tracker monitor and HTTP server,
    then clean up."""
    self.log.debug("RingMaster stop method invoked.")
    # Re-entrancy guard: stop() can be triggered from several paths.
    if self.__stopInProgress or self.__isStopped:
        return
    self.__stopInProgress = True
    if ringMasterServer.instance is not None:
        # Exit code must be derived while the XML-RPC server is still up.
        self.log.debug('finding exit code')
        self.__findExitCode()
        self.log.debug('stopping ringmaster instance')
        ringMasterServer.stopService()
    else:
        # 6: the ring master server never came up.
        self.__exitCode = 6
    if self.__jtMonitor is not None:
        self.__jtMonitor.stop()
    if self.httpServer:
        self.httpServer.stop()
    self.__clean_up()
    self.__isStopped = True
def shouldStop(self):
    """Tell the main loop whether to exit: True once the idleness
    detector has fired or an explicit stop completed."""
    return self.__idlenessDetected or self.__isStopped
def getExitCode(self):
    """Return the exit code computed for the program."""
    return self.__exitCode
def main(cfg,log):
    """Entry point: expand the cluster description from `cfg`, run a
    RingMaster until it stops (or goes idle), and return its exit code.

    NOTE: Python 2 syntax (`except Exception, e`) -- this module targets
    Python 2 only.
    """
    try:
        # rm pre-bound so the except clause could reference it safely;
        # NOTE(review): it is not actually used in the handler today.
        rm = None
        dGen = DescGenerator(cfg)
        cfg = dGen.initializeDesc()
        rm = RingMaster(cfg, log)
        rm.start()
        # Poll until idleness is detected or stop() completes.
        while not rm.shouldStop():
            time.sleep(1)
        rm.stop()
        log.debug('returning from main')
        return rm.getExitCode()
    except Exception, e:
        if log:
            log.critical(get_exception_string())
        # Re-wrap so callers always see a plain Exception.
        raise Exception(e)
|
|
"""
Matrix Market I/O in Python.
See http://math.nist.gov/MatrixMarket/formats.html
for information about the Matrix Market format.
"""
#
# Author: Pearu Peterson <pearu@cens.ioc.ee>
# Created: October, 2004
#
# References:
# http://math.nist.gov/MatrixMarket/
#
import os
import sys
from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
ones, can_cast)
from numpy.compat import asbytes, asstr
from scipy.sparse import coo_matrix, isspmatrix
__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
# -----------------------------------------------------------------------------
def mminfo(source):
    """Read only the header of a Matrix Market file and return its metadata.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.

    Returns
    -------
    rows, cols, entries : int
        Matrix shape, and the number of stored entries (``rows*cols``
        for a dense 'array' file).
    format : str
        Either 'coordinate' or 'array'.
    field : str
        Either 'real', 'complex', 'pattern', or 'integer'.
    symmetry : str
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
    """
    return MMFile.info(source)
# -----------------------------------------------------------------------------
def mmread(source):
    """Load a matrix from Matrix Market file-like `source`.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz) or an open
        file-like object.

    Returns
    -------
    a : ndarray or coo_matrix
        Dense ndarray for 'array' files, sparse coo_matrix for
        'coordinate' files.
    """
    return MMFile().read(source)
# -----------------------------------------------------------------------------
def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
    """Write the sparse or dense 2-D array `a` to Matrix Market `target`.

    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to prepend to the file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian';
        when None it is inferred from the values of `a`.
    """
    MMFile().write(target, a, comment=comment, field=field,
                   precision=precision, symmetry=symmetry)
###############################################################################
class MMFile:
    """Reader/writer for Matrix Market files (dense 'array' and sparse
    'coordinate' formats). Header metadata is stored in slots and
    exposed through the read-only properties below."""

    # One private slot per header attribute.
    __slots__ = ('_rows',
                 '_cols',
                 '_entries',
                 '_format',
                 '_field',
                 '_symmetry')

    @property
    def rows(self):
        return self._rows

    @property
    def cols(self):
        return self._cols

    @property
    def entries(self):
        return self._entries

    @property
    def format(self):
        return self._format

    @property
    def field(self):
        return self._field

    @property
    def symmetry(self):
        return self._symmetry

    @property
    def has_symmetry(self):
        # True for every symmetry mode that stores only one triangle.
        return self._symmetry in (self.SYMMETRY_SYMMETRIC,
                                  self.SYMMETRY_SKEW_SYMMETRIC,
                                  self.SYMMETRY_HERMITIAN)

    # format values
    FORMAT_COORDINATE = 'coordinate'  # sparse triplet listing
    FORMAT_ARRAY = 'array'            # dense, column-major value listing
    FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)

    @classmethod
    def _validate_format(self, format):
        # NOTE(review): first parameter is conventionally `cls` on a
        # classmethod; kept as written.
        if format not in self.FORMAT_VALUES:
            raise ValueError('unknown format type %s, must be one of %s' %
                             (format, self.FORMAT_VALUES))

    # field values
    FIELD_INTEGER = 'integer'
    FIELD_UNSIGNED = 'unsigned-integer'
    FIELD_REAL = 'real'
    FIELD_COMPLEX = 'complex'
    FIELD_PATTERN = 'pattern'
    FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN)

    @classmethod
    def _validate_field(self, field):
        if field not in self.FIELD_VALUES:
            raise ValueError('unknown field type %s, must be one of %s' %
                             (field, self.FIELD_VALUES))

    # symmetry values
    SYMMETRY_GENERAL = 'general'
    SYMMETRY_SYMMETRIC = 'symmetric'
    SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
    SYMMETRY_HERMITIAN = 'hermitian'
    SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
                       SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)

    @classmethod
    def _validate_symmetry(self, symmetry):
        if symmetry not in self.SYMMETRY_VALUES:
            raise ValueError('unknown symmetry type %s, must be one of %s' %
                             (symmetry, self.SYMMETRY_VALUES))

    # numpy dtype character used to store each field kind; pattern files
    # carry no values, entries are materialized as floats.
    DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp',
                       FIELD_UNSIGNED: 'uint64',
                       FIELD_REAL: 'd',
                       FIELD_COMPLEX: 'D',
                       FIELD_PATTERN: 'd'}

    # -------------------------------------------------------------------------
    @staticmethod
    def reader():
        # No-op placeholder. NOTE(review): appears vestigial -- use read().
        pass

    # -------------------------------------------------------------------------
    @staticmethod
    def writer():
        # No-op placeholder. NOTE(review): appears vestigial -- use write().
        pass
# -------------------------------------------------------------------------
@classmethod
def info(self, source):
    """Return size and storage parameters from Matrix Market file-like 'source'.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.

    Returns
    -------
    rows, cols, entries : int
        Matrix shape and number of stored entries (rows*cols for dense).
    format : str
        Either 'coordinate' or 'array'.
    field : str
        Either 'real', 'complex', 'pattern', or 'integer'.
    symmetry : str
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.

    Raises
    ------
    ValueError
        If the banner or size line is malformed.
    """
    stream, close_it = self._open(source)
    try:
        # read and validate header line
        line = stream.readline()
        mmid, matrix, format, field, symmetry = \
            [asstr(part.strip()) for part in line.split()]
        if not mmid.startswith('%%MatrixMarket'):
            raise ValueError('source is not in Matrix Market format')
        if not matrix.lower() == 'matrix':
            # Bug fix: `line` may be bytes (binary stream); "str" + bytes
            # raised TypeError instead of this ValueError.
            raise ValueError("Problem reading file header: " + asstr(line))
        # http://math.nist.gov/MatrixMarket/formats.html
        if format.lower() == 'array':
            format = self.FORMAT_ARRAY
        elif format.lower() == 'coordinate':
            format = self.FORMAT_COORDINATE
        # skip comments; a bytes line indexes to the int 37 == ord('%')
        while line and line[0] in ['%', 37]:
            line = stream.readline()
        # skip empty lines
        while not line.strip():
            line = stream.readline()
        line = line.split()
        if format == self.FORMAT_ARRAY:
            if not len(line) == 2:
                # Bug fix: `line` is a list here; concatenating it to a str
                # raised TypeError, masking the intended ValueError.
                raise ValueError("Header line not of length 2: " + str(line))
            rows, cols = map(int, line)
            entries = rows * cols
        else:
            if not len(line) == 3:
                raise ValueError("Header line not of length 3: " + str(line))
            rows, cols, entries = map(int, line)
        return (rows, cols, entries, format, field.lower(),
                symmetry.lower())
    finally:
        if close_it:
            stream.close()
# -------------------------------------------------------------------------
@staticmethod
def _open(filespec, mode='rb'):
""" Return an open file stream for reading based on source.
If source is a file name, open it (after trying to find it with mtx and
gzipped mtx extensions). Otherwise, just return source.
Parameters
----------
filespec : str or file-like
String giving file name or file-like object
mode : str, optional
Mode with which to open file, if `filespec` is a file name.
Returns
-------
fobj : file-like
Open file-like object.
close_it : bool
True if the calling function should close this file when done,
false otherwise.
"""
# If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class
# implementing a '__fspath__' method), try to convert it to str. If this
# fails by throwing a 'TypeError', assume it's an open file handle and
# return it as-is.
try:
filespec = os.fspath(filespec)
except TypeError:
return filespec, False
# 'filespec' is definitely a str now
# open for reading
if mode[0] == 'r':
# determine filename plus extension
if not os.path.isfile(filespec):
if os.path.isfile(filespec+'.mtx'):
filespec = filespec + '.mtx'
elif os.path.isfile(filespec+'.mtx.gz'):
filespec = filespec + '.mtx.gz'
elif os.path.isfile(filespec+'.mtx.bz2'):
filespec = filespec + '.mtx.bz2'
# open filename
if filespec.endswith('.gz'):
import gzip
stream = gzip.open(filespec, mode)
elif filespec.endswith('.bz2'):
import bz2
stream = bz2.BZ2File(filespec, 'rb')
else:
stream = open(filespec, mode)
# open for writing
else:
if filespec[-4:] != '.mtx':
filespec = filespec + '.mtx'
stream = open(filespec, mode)
return stream, True
# -------------------------------------------------------------------------
@staticmethod
def _get_symmetry(a):
    """Classify `a` as general / symmetric / skew-symmetric / hermitian
    by comparing every strictly-lower entry with its transpose mate."""
    m, n = a.shape
    if m != n:
        # Non-square matrices can only be 'general'.
        return MMFile.SYMMETRY_GENERAL
    issymm = True
    isskew = True
    isherm = a.dtype.char in 'FD'  # hermitian considered only for complex data
    # sparse input
    if isspmatrix(a):
        # check if number of nonzero entries of lower and upper triangle
        # matrix are equal
        a = a.tocoo()
        (row, col) = a.nonzero()
        if (row < col).sum() != (row > col).sum():
            return MMFile.SYMMETRY_GENERAL
        # define iterator over symmetric pair entries
        a = a.todok()

        def symm_iterator():
            for ((i, j), aij) in a.items():
                if i > j:
                    # NOTE(review): dok lookup presumably yields 0 for an
                    # absent (j, i) entry -- confirm against scipy docs.
                    aji = a[j, i]
                    yield (aij, aji)
    # non-sparse input
    else:
        # define iterator over symmetric pair entries
        def symm_iterator():
            for j in range(n):
                for i in range(j+1, n):
                    aij, aji = a[i][j], a[j][i]
                    yield (aij, aji)
    # check for symmetry; break as soon as all three candidates are ruled out
    for (aij, aji) in symm_iterator():
        if issymm and aij != aji:
            issymm = False
        if isskew and aij != -aji:
            isskew = False
        if isherm and aij != conj(aji):
            isherm = False
        if not (issymm or isskew or isherm):
            break
    # return symmetry value
    if issymm:
        return MMFile.SYMMETRY_SYMMETRIC
    if isskew:
        return MMFile.SYMMETRY_SKEW_SYMMETRIC
    if isherm:
        return MMFile.SYMMETRY_HERMITIAN
    return MMFile.SYMMETRY_GENERAL
# -------------------------------------------------------------------------
@staticmethod
def _field_template(field, precision):
return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
MMFile.FIELD_INTEGER: '%i\n',
MMFile.FIELD_UNSIGNED: '%u\n',
MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
(precision, precision)
}.get(field, None)
# -------------------------------------------------------------------------
def __init__(self, **kwargs):
    """Create an MMFile, optionally pre-seeding header attributes
    (rows, cols, entries, format, field, symmetry) via keywords."""
    self._init_attrs(**kwargs)
# -------------------------------------------------------------------------
def read(self, source):
    """Parse Matrix Market file-like `source` into a matrix.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz) or an open
        file object.

    Returns
    -------
    a : ndarray or coo_matrix
        Dense or sparse matrix, matching the file's storage format.
    """
    stream, should_close = self._open(source)
    try:
        self._parse_header(stream)
        return self._parse_body(stream)
    finally:
        if should_close:
            stream.close()
# -------------------------------------------------------------------------
def write(self, target, a, comment='', field=None, precision=None,
          symmetry=None):
    """Serialize sparse or dense array `a` to Matrix Market `target`.

    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to prepend to the file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian';
        inferred from the values of `a` when None.
    """
    stream, should_close = self._open(target, 'wb')
    try:
        self._write(stream, a, comment, field, precision, symmetry)
    finally:
        # Close only streams we opened; flush caller-owned streams.
        if should_close:
            stream.close()
        else:
            stream.flush()
# -------------------------------------------------------------------------
def _init_attrs(self, **kwargs):
"""
Initialize each attributes with the corresponding keyword arg value
or a default of None
"""
attrs = self.__class__.__slots__
public_attrs = [attr[1:] for attr in attrs]
invalid_keys = set(kwargs.keys()) - set(public_attrs)
if invalid_keys:
raise ValueError('''found %s invalid keyword arguments, please only
use %s''' % (tuple(invalid_keys),
public_attrs))
for attr in attrs:
setattr(self, attr, kwargs.get(attr[1:], None))
# -------------------------------------------------------------------------
def _parse_header(self, stream):
    """Read the Matrix Market banner/size lines from `stream` and store
    the resulting metadata on this instance."""
    rows, cols, entries, format, field, symmetry = \
        self.__class__.info(stream)
    self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
                     field=field, symmetry=symmetry)
# -------------------------------------------------------------------------
def _parse_body(self, stream):
rows, cols, entries, format, field, symm = (self.rows, self.cols,
self.entries, self.format,
self.field, self.symmetry)
try:
from scipy.sparse import coo_matrix
except ImportError:
coo_matrix = None
dtype = self.DTYPES_BY_FIELD.get(field, None)
has_symmetry = self.has_symmetry
is_integer = field == self.FIELD_INTEGER
is_unsigned_integer = field == self.FIELD_UNSIGNED
is_complex = field == self.FIELD_COMPLEX
is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
is_herm = symm == self.SYMMETRY_HERMITIAN
is_pattern = field == self.FIELD_PATTERN
if format == self.FORMAT_ARRAY:
a = zeros((rows, cols), dtype=dtype)
line = 1
i, j = 0, 0
if is_skew:
a[i, j] = 0
if i < rows - 1:
i += 1
while line:
line = stream.readline()
# line.startswith('%')
if not line or line[0] in ['%', 37] or not line.strip():
continue
if is_integer:
aij = int(line)
elif is_unsigned_integer:
aij = int(line)
elif is_complex:
aij = complex(*map(float, line.split()))
else:
aij = float(line)
a[i, j] = aij
if has_symmetry and i != j:
if is_skew:
a[j, i] = -aij
elif is_herm:
a[j, i] = conj(aij)
else:
a[j, i] = aij
if i < rows-1:
i = i + 1
else:
j = j + 1
if not has_symmetry:
i = 0
else:
i = j
if is_skew:
a[i, j] = 0
if i < rows-1:
i += 1
if is_skew:
if not (i in [0, j] and j == cols - 1):
raise ValueError("Parse error, did not read all lines.")
else:
if not (i in [0, j] and j == cols):
raise ValueError("Parse error, did not read all lines.")
elif format == self.FORMAT_COORDINATE and coo_matrix is None:
# Read sparse matrix to dense when coo_matrix is not available.
a = zeros((rows, cols), dtype=dtype)
line = 1
k = 0
while line:
line = stream.readline()
# line.startswith('%')
if not line or line[0] in ['%', 37] or not line.strip():
continue
l = line.split()
i, j = map(int, l[:2])
i, j = i-1, j-1
if is_integer:
aij = int(l[2])
elif is_unsigned_integer:
aij = int(l[2])
elif is_complex:
aij = complex(*map(float, l[2:]))
else:
aij = float(l[2])
a[i, j] = aij
if has_symmetry and i != j:
if is_skew:
a[j, i] = -aij
elif is_herm:
a[j, i] = conj(aij)
else:
a[j, i] = aij
k = k + 1
if not k == entries:
ValueError("Did not read all entries")
elif format == self.FORMAT_COORDINATE:
# Read sparse COOrdinate format
if entries == 0:
# empty matrix
return coo_matrix((rows, cols), dtype=dtype)
I = zeros(entries, dtype='intc')
J = zeros(entries, dtype='intc')
if is_pattern:
V = ones(entries, dtype='int8')
elif is_integer:
V = zeros(entries, dtype='intp')
elif is_unsigned_integer:
V = zeros(entries, dtype='uint64')
elif is_complex:
V = zeros(entries, dtype='complex')
else:
V = zeros(entries, dtype='float')
entry_number = 0
for line in stream:
# line.startswith('%')
if not line or line[0] in ['%', 37] or not line.strip():
continue
if entry_number+1 > entries:
raise ValueError("'entries' in header is smaller than "
"number of entries")
l = line.split()
I[entry_number], J[entry_number] = map(int, l[:2])
if not is_pattern:
if is_integer:
V[entry_number] = int(l[2])
elif is_unsigned_integer:
V[entry_number] = int(l[2])
elif is_complex:
V[entry_number] = complex(*map(float, l[2:]))
else:
V[entry_number] = float(l[2])
entry_number += 1
if entry_number < entries:
raise ValueError("'entries' in header is larger than "
"number of entries")
I -= 1 # adjust indices (base 1 -> base 0)
J -= 1
if has_symmetry:
mask = (I != J) # off diagonal mask
od_I = I[mask]
od_J = J[mask]
od_V = V[mask]
I = concatenate((I, od_J))
J = concatenate((J, od_I))
if is_skew:
od_V *= -1
elif is_herm:
od_V = od_V.conjugate()
V = concatenate((V, od_V))
a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
else:
raise NotImplementedError(format)
return a
# ------------------------------------------------------------------------
def _write(self, stream, a, comment='', field=None, precision=None,
symmetry=None):
if isinstance(a, list) or isinstance(a, ndarray) or \
isinstance(a, tuple) or hasattr(a, '__array__'):
rep = self.FORMAT_ARRAY
a = asarray(a)
if len(a.shape) != 2:
raise ValueError('Expected 2 dimensional array')
rows, cols = a.shape
if field is not None:
if field == self.FIELD_INTEGER:
if not can_cast(a.dtype, 'intp'):
raise OverflowError("mmwrite does not support integer "
"dtypes larger than native 'intp'.")
a = a.astype('intp')
elif field == self.FIELD_REAL:
if a.dtype.char not in 'fd':
a = a.astype('d')
elif field == self.FIELD_COMPLEX:
if a.dtype.char not in 'FD':
a = a.astype('D')
else:
if not isspmatrix(a):
raise ValueError('unknown matrix type: %s' % type(a))
rep = 'coordinate'
rows, cols = a.shape
typecode = a.dtype.char
if precision is None:
if typecode in 'fF':
precision = 8
else:
precision = 16
if field is None:
kind = a.dtype.kind
if kind == 'i':
if not can_cast(a.dtype, 'intp'):
raise OverflowError("mmwrite does not support integer "
"dtypes larger than native 'intp'.")
field = 'integer'
elif kind == 'f':
field = 'real'
elif kind == 'c':
field = 'complex'
elif kind == 'u':
field = 'unsigned-integer'
else:
raise TypeError('unexpected dtype kind ' + kind)
if symmetry is None:
symmetry = self._get_symmetry(a)
# validate rep, field, and symmetry
self.__class__._validate_format(rep)
self.__class__._validate_field(field)
self.__class__._validate_symmetry(symmetry)
# write initial header line
stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep,
field, symmetry)))
# write comments
for line in comment.split('\n'):
stream.write(asbytes('%%%s\n' % (line)))
template = self._field_template(field, precision)
# write dense format
if rep == self.FORMAT_ARRAY:
# write shape spec
stream.write(asbytes('%i %i\n' % (rows, cols)))
if field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED):
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
stream.write(asbytes(template % a[i, j]))
elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
for j in range(cols):
for i in range(j + 1, rows):
stream.write(asbytes(template % a[i, j]))
else:
for j in range(cols):
for i in range(j, rows):
stream.write(asbytes(template % a[i, j]))
elif field == self.FIELD_COMPLEX:
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
aij = a[i, j]
stream.write(asbytes(template % (real(aij),
imag(aij))))
else:
for j in range(cols):
for i in range(j, rows):
aij = a[i, j]
stream.write(asbytes(template % (real(aij),
imag(aij))))
elif field == self.FIELD_PATTERN:
raise ValueError('pattern type inconsisted with dense format')
else:
raise TypeError('Unknown field type %s' % field)
# write sparse format
else:
coo = a.tocoo() # convert to COOrdinate format
# if symmetry format used, remove values above main diagonal
if symmetry != self.SYMMETRY_GENERAL:
lower_triangle_mask = coo.row >= coo.col
coo = coo_matrix((coo.data[lower_triangle_mask],
(coo.row[lower_triangle_mask],
coo.col[lower_triangle_mask])),
shape=coo.shape)
# write shape spec
stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))
template = self._field_template(field, precision-1)
if field == self.FIELD_PATTERN:
for r, c in zip(coo.row+1, coo.col+1):
stream.write(asbytes("%i %i\n" % (r, c)))
elif field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED):
for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
stream.write(asbytes(("%i %i " % (r, c)) +
(template % d)))
elif field == self.FIELD_COMPLEX:
for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
stream.write(asbytes(("%i %i " % (r, c)) +
(template % (d.real, d.imag))))
else:
raise TypeError('Unknown field type %s' % field)
def _is_fromfile_compatible(stream):
    """Return True unless `stream` is a gzip/bz2 file object.

    Passing a gzipped file object to ``fromfile/fromstring`` doesn't
    work with Python 3, so such streams must be read another way.
    """
    incompatible = []
    try:
        import gzip
        incompatible.append(gzip.GzipFile)
    except ImportError:
        pass
    try:
        import bz2
        incompatible.append(bz2.BZ2File)
    except ImportError:
        pass
    return not isinstance(stream, tuple(incompatible))
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import time
    # CLI helper: time mmread() on each file named on the command line.
    for filename in sys.argv[1:]:
        print('Reading', filename, '...', end=' ')
        sys.stdout.flush()
        t = time.time()
        mmread(filename)
        print('took %s seconds' % (time.time() - t))
|
|
"""
sentry.tasks.deletion
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from celery.utils.log import get_task_logger
from sentry.exceptions import DeleteAborted
from sentry.signals import pending_delete
from sentry.tasks.base import instrumented_task, retry
from sentry.utils.query import bulk_delete_objects
logger = get_task_logger(__name__)
@instrumented_task(name='sentry.tasks.deletion.delete_organization', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_organization(object_id, continuous=True, **kwargs):
    """Incrementally delete an Organization and its dependent rows.

    Processes one child team per run and re-queues itself (when
    ``continuous``) while work remains, keeping each task short.
    Aborts if deletion was never requested (status still VISIBLE).
    """
    from sentry.models import (
        Organization, OrganizationMember, OrganizationStatus, Team, TeamStatus
    )
    try:
        o = Organization.objects.get(id=object_id)
    except Organization.DoesNotExist:
        return
    if o.status == OrganizationStatus.VISIBLE:
        raise DeleteAborted('Aborting organization deletion as status is invalid')
    if o.status != OrganizationStatus.DELETION_IN_PROGRESS:
        # First pass: mark in-progress and notify listeners once.
        o.update(status=OrganizationStatus.DELETION_IN_PROGRESS)
        pending_delete.send(sender=Organization, instance=o)
    # Delete one team per invocation; team deletion is itself chunked.
    for team in Team.objects.filter(organization=o).order_by('id')[:1]:
        logger.info('Removing Team id=%s where organization=%s', team.id, o.id)
        team.update(status=TeamStatus.DELETION_IN_PROGRESS)
        delete_team(team.id, continuous=False)
        if continuous:
            delete_organization.delay(object_id=object_id, countdown=15)
        return
    model_list = (OrganizationMember,)
    has_more = delete_objects(model_list, relation={'organization': o}, logger=logger)
    if has_more:
        if continuous:
            delete_organization.delay(object_id=object_id, countdown=15)
        return
    o.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_team', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_team(object_id, continuous=True, **kwargs):
    """Incrementally delete a Team: one project per run, re-queueing
    itself (when ``continuous``) until no projects remain."""
    from sentry.models import Team, TeamStatus, Project, ProjectStatus
    try:
        t = Team.objects.get(id=object_id)
    except Team.DoesNotExist:
        return
    if t.status == TeamStatus.VISIBLE:
        # Deletion was never requested (or was cancelled); bail out.
        raise DeleteAborted('Aborting team deletion as status is invalid')
    if t.status != TeamStatus.DELETION_IN_PROGRESS:
        pending_delete.send(sender=Team, instance=t)
        t.update(status=TeamStatus.DELETION_IN_PROGRESS)
    # Delete 1 project at a time since this is expensive by itself
    for project in Project.objects.filter(team=t).order_by('id')[:1]:
        logger.info('Removing Project id=%s where team=%s', project.id, t.id)
        project.update(status=ProjectStatus.DELETION_IN_PROGRESS)
        delete_project(project.id, continuous=False)
        if continuous:
            delete_team.delay(object_id=object_id, countdown=15)
        return
    t.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_project', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_project(object_id, continuous=True, **kwargs):
    """Incrementally delete a Project and all dependent rows in chunks,
    re-queueing itself (when ``continuous``) after each partial pass."""
    from sentry.models import (
        Activity, EventMapping, Group, GroupAssignee, GroupBookmark,
        GroupEmailThread, GroupHash, GroupMeta, GroupResolution,
        GroupRuleStatus, GroupSeen, GroupTagKey, GroupTagValue, Project,
        ProjectKey, ProjectStatus, SavedSearchUserDefault, SavedSearch, TagKey,
        TagValue, UserReport
    )
    try:
        p = Project.objects.get(id=object_id)
    except Project.DoesNotExist:
        return
    if p.status == ProjectStatus.VISIBLE:
        raise DeleteAborted('Aborting project deletion as status is invalid')
    if p.status != ProjectStatus.DELETION_IN_PROGRESS:
        pending_delete.send(sender=Project, instance=p)
        p.update(status=ProjectStatus.DELETION_IN_PROGRESS)
    # XXX: remove keys first to prevent additional data from flowing in
    model_list = (
        Activity, EventMapping, GroupAssignee, GroupBookmark, GroupEmailThread,
        GroupHash, GroupSeen, GroupRuleStatus, GroupTagKey,
        GroupTagValue, ProjectKey, TagKey, TagValue, SavedSearchUserDefault,
        SavedSearch, UserReport
    )
    for model in model_list:
        # Chunked bulk delete; re-queue as soon as one chunk fills up.
        has_more = bulk_delete_objects(model, project_id=p.id, logger=logger)
        if has_more:
            if continuous:
                delete_project.delay(object_id=object_id, countdown=15)
            return
    # TODO(dcramer): no project relation so we cant easily bulk
    # delete today
    has_more = delete_objects([GroupMeta, GroupResolution],
                              relation={'group__project': p},
                              logger=logger)
    if has_more:
        if continuous:
            delete_project.delay(object_id=object_id, countdown=15)
        return
    # Events carry nodestore payloads; deleted via the dedicated helper.
    has_more = delete_events(relation={'project_id': p.id}, logger=logger)
    if has_more:
        if continuous:
            delete_project.delay(object_id=object_id, countdown=15)
        return
    # Groups last: they cascade from nothing above once children are gone.
    model_list = (Group,)
    for model in model_list:
        has_more = bulk_delete_objects(model, project_id=p.id, logger=logger)
        if has_more:
            if continuous:
                delete_project.delay(object_id=object_id, countdown=15)
            return
    p.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_group', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_group(object_id, continuous=True, **kwargs):
    """Incrementally delete a Group: bulk-delete dependent rows in
    chunks, then its events, re-queueing itself while work remains."""
    from sentry.models import (
        EventMapping, Group, GroupAssignee, GroupBookmark, GroupHash, GroupMeta,
        GroupResolution, GroupRuleStatus, GroupStatus, GroupTagKey,
        GroupTagValue, GroupEmailThread, UserReport
    )
    try:
        group = Group.objects.get(id=object_id)
    except Group.DoesNotExist:
        return
    if group.status != GroupStatus.DELETION_IN_PROGRESS:
        group.update(status=GroupStatus.DELETION_IN_PROGRESS)
    bulk_model_list = (
        GroupAssignee, GroupBookmark, GroupHash, GroupMeta, GroupResolution,
        GroupRuleStatus, GroupTagValue, GroupTagKey, EventMapping,
        GroupEmailThread, UserReport
    )
    for model in bulk_model_list:
        # Chunked bulk delete; re-queue as soon as one chunk fills up.
        has_more = bulk_delete_objects(model, group_id=object_id, logger=logger)
        if has_more:
            if continuous:
                delete_group.delay(object_id=object_id, countdown=15)
            return
    has_more = delete_events(relation={'group_id': object_id}, logger=logger)
    if has_more:
        if continuous:
            delete_group.delay(object_id=object_id, countdown=15)
        return
    group.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_tag_key', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_tag_key(object_id, continuous=True, **kwargs):
    """Incrementally delete a TagKey plus all rows sharing its
    (project, key) pair, re-queueing itself while chunks remain."""
    from sentry.models import (
        GroupTagKey, GroupTagValue, TagKey, TagKeyStatus, TagValue
    )
    try:
        tagkey = TagKey.objects.get(id=object_id)
    except TagKey.DoesNotExist:
        return
    if tagkey.status != TagKeyStatus.DELETION_IN_PROGRESS:
        tagkey.update(status=TagKeyStatus.DELETION_IN_PROGRESS)
    bulk_model_list = (
        GroupTagValue, GroupTagKey, TagValue
    )
    for model in bulk_model_list:
        # Rows are matched by project + key rather than a foreign key.
        has_more = bulk_delete_objects(model, project_id=tagkey.project_id,
                                       key=tagkey.key, logger=logger)
        if has_more:
            if continuous:
                delete_tag_key.delay(object_id=object_id, countdown=15)
            return
    tagkey.delete()
def delete_events(relation, limit=100, logger=None):
    """Delete up to `limit` Event rows matching `relation`, including
    their nodestore payloads.

    Returns True when anything was deleted, so callers re-queue until
    this returns False.
    """
    from sentry.app import nodestore
    from sentry.models import Event
    has_more = False
    if logger is not None:
        logger.info('Removing %r objects where %r', Event, relation)
    result_set = list(Event.objects.filter(**relation)[:limit])
    has_more = bool(result_set)
    if has_more:
        # delete objects from nodestore first
        node_ids = set(r.data.id for r in result_set)
        nodestore.delete_multi(node_ids)
        # bulk delete by id
        Event.objects.filter(id__in=[r.id for r in result_set]).delete()
    return has_more
def delete_objects(models, relation, limit=100, logger=None):
    """Delete up to `limit` rows of the first matching model in `models`,
    one object at a time so Django cascades fire properly.

    Returns True when anything was deleted (more work may remain),
    False once every model has no rows matching `relation`.
    """
    for model in models:
        if logger is not None:
            logger.info('Removing %r objects where %r', model, relation)
        deleted_any = False
        for instance in model.objects.filter(**relation)[:limit]:
            instance.delete()
            deleted_any = True
        if deleted_any:
            return True
    return False
|
|
"""
Sensors on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/sensor.zha/
"""
import logging
from homeassistant.components.sensor import DOMAIN
from homeassistant.components.zha import helpers
from homeassistant.components.zha.const import (
DATA_ZHA, DATA_ZHA_DISPATCHERS, REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT, REPORT_CONFIG_RPT_CHANGE, ZHA_DISCOVERY_NEW)
from homeassistant.components.zha.entities import ZhaEntity
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.temperature import convert as convert_temperature
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zha']
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Old way of setting up Zigbee Home Automation sensors."""
    # Intentionally a no-op: setup happens via config entries only
    # (see async_setup_entry below).
    pass
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation sensor from config entry."""
    async def async_discover(discovery_info):
        await _async_setup_entities(hass, config_entry, async_add_entities,
                                    [discovery_info])
    # Subscribe to sensors discovered after this entry is loaded; keep the
    # unsubscribe callback so it can be torn down on unload.
    unsub = async_dispatcher_connect(
        hass, ZHA_DISCOVERY_NEW.format(DOMAIN), async_discover)
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
    # Flush sensors discovered before setup, then drop the staging store.
    sensors = hass.data.get(DATA_ZHA, {}).get(DOMAIN)
    if sensors is not None:
        await _async_setup_entities(hass, config_entry, async_add_entities,
                                    sensors.values())
        del hass.data[DATA_ZHA][DOMAIN]
async def _async_setup_entities(hass, config_entry, async_add_entities,
                                discovery_infos):
    """Create sensor entities for each discovery payload and add them."""
    to_add = []
    for info in discovery_infos:
        to_add.append(await make_sensor(info))
    # update_before_add=True so the first state is read before HA shows it.
    async_add_entities(to_add, update_before_add=True)
async def make_sensor(discovery_info):
    """Create ZHA sensors factory.

    Picks the sensor class from an explicit 'sub_component' override or
    from the first matching input cluster; the order of the checks below
    therefore matters if a device exposes several of these clusters.

    Args:
        discovery_info: dict with at least an 'in_clusters' mapping; may
            contain a 'sub_component' class which is used verbatim.

    Returns:
        The constructed Sensor (sub)class instance.
    """
    # Imported locally because zigpy is only available at runtime in HA.
    from zigpy.zcl.clusters.measurement import (
        RelativeHumidity, TemperatureMeasurement, PressureMeasurement,
        IlluminanceMeasurement
    )
    from zigpy.zcl.clusters.smartenergy import Metering
    from zigpy.zcl.clusters.homeautomation import ElectricalMeasurement
    from zigpy.zcl.clusters.general import PowerConfiguration
    in_clusters = discovery_info['in_clusters']
    if 'sub_component' in discovery_info:
        sensor = discovery_info['sub_component'](**discovery_info)
    elif RelativeHumidity.cluster_id in in_clusters:
        sensor = RelativeHumiditySensor(**discovery_info)
    elif PowerConfiguration.cluster_id in in_clusters:
        sensor = GenericBatterySensor(**discovery_info)
    elif TemperatureMeasurement.cluster_id in in_clusters:
        sensor = TemperatureSensor(**discovery_info)
    elif PressureMeasurement.cluster_id in in_clusters:
        sensor = PressureSensor(**discovery_info)
    elif IlluminanceMeasurement.cluster_id in in_clusters:
        sensor = IlluminanceMeasurementSensor(**discovery_info)
    elif Metering.cluster_id in in_clusters:
        sensor = MeteringSensor(**discovery_info)
    elif ElectricalMeasurement.cluster_id in in_clusters:
        # FIX: this branch previously returned early, creating an
        # inconsistent dual-return structure; behavior is unchanged but
        # the function now has a single exit point.
        sensor = ElectricalMeasurementSensor(**discovery_info)
    else:
        sensor = Sensor(**discovery_info)
    return sensor
class Sensor(ZhaEntity):
    """Base ZHA sensor."""

    _domain = DOMAIN
    value_attribute = 0
    min_report_interval = REPORT_CONFIG_MIN_INT
    max_report_interval = REPORT_CONFIG_MAX_INT
    min_reportable_change = REPORT_CONFIG_RPT_CHANGE
    report_config = (min_report_interval, max_report_interval,
                     min_reportable_change)

    def __init__(self, **kwargs):
        """Init ZHA Sensor instance."""
        super().__init__(**kwargs)
        # The first input cluster is the one this sensor reads from.
        self._cluster = next(iter(kwargs['in_clusters'].values()))

    @property
    def zcl_reporting_config(self) -> dict:
        """Return a dict of attribute reporting configuration."""
        return {self.cluster: {self.value_attribute: self.report_config}}

    @property
    def cluster(self):
        """Return Sensor's cluster."""
        return self._cluster

    @property
    def state(self) -> str:
        """Return the state of the entity."""
        value = self._state
        if isinstance(value, float):
            # Present floats with at most two decimals.
            return str(round(value, 2))
        return value

    def attribute_updated(self, attribute, value):
        """Handle attribute update from device."""
        _LOGGER.debug("Attribute updated: %s %s %s", self, attribute, value)
        if attribute != self.value_attribute:
            return
        self._state = value
        self.async_schedule_update_ha_state()

    async def async_update(self):
        """Retrieve latest state."""
        result = await helpers.safe_read(
            self.cluster, [self.value_attribute],
            allow_cache=False, only_cache=(not self._initialized))
        self._state = result.get(self.value_attribute, self._state)
class GenericBatterySensor(Sensor):
    """ZHA generic battery sensor."""

    report_attribute = 32
    value_attribute = 33
    # Mapping of the ZCL battery_size attribute to a readable label.
    battery_sizes = {
        0: 'No battery',
        1: 'Built in',
        2: 'Other',
        3: 'AA',
        4: 'AAA',
        5: 'C',
        6: 'D',
        7: 'CR2',
        8: 'CR123A',
        9: 'CR2450',
        10: 'CR2032',
        11: 'CR1632',
        255: 'Unknown'
    }

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return '%'

    @property
    def zcl_reporting_config(self) -> dict:
        """Return a dict of attribute reporting configuration."""
        attr_config = {
            self.value_attribute: self.report_config,
            self.report_attribute: self.report_config,
        }
        return {self.cluster: attr_config}

    async def async_update(self):
        """Retrieve latest state."""
        _LOGGER.debug("%s async_update", self.entity_id)
        result = await helpers.safe_read(
            self._endpoint.power,
            ['battery_size', 'battery_quantity',
             'battery_percentage_remaining'],
            allow_cache=False,
            only_cache=(not self._initialized)
        )
        attrs = self._device_state_attributes
        attrs['battery_size'] = self.battery_sizes.get(
            result.get('battery_size', 255), 'Unknown')
        attrs['battery_quantity'] = result.get('battery_quantity', 'Unknown')
        self._state = result.get('battery_percentage_remaining', self._state)

    @property
    def state(self):
        """Return the state of the entity."""
        if self._state is None or self._state == 'unknown':
            return None
        return self._state
class TemperatureSensor(Sensor):
    """ZHA temperature sensor."""

    # Report on changes of at least 0.5 degC (value is in hundredths).
    min_reportable_change = 50  # 0.5'C

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return self.hass.config.units.temperature_unit

    @property
    def state(self):
        """Return the state of the entity."""
        if self._state is None:
            return None
        # The raw value is hundredths of a degree Celsius.
        converted = convert_temperature(
            self._state / 100, TEMP_CELSIUS, self.unit_of_measurement)
        return round(converted, 1)
class RelativeHumiditySensor(Sensor):
    """ZHA relative humidity sensor."""

    # Report on changes of at least 0.5% (value is in hundredths).
    min_reportable_change = 50  # 0.5%

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return '%'

    @property
    def state(self):
        """Return the state of the entity."""
        # Raw value is hundredths of a percent; show one decimal.
        return (None if self._state is None
                else round(float(self._state) / 100, 1))
class PressureSensor(Sensor):
    """ZHA pressure sensor."""

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return 'hPa'

    @property
    def state(self):
        """Return the state of the entity."""
        # Round to the nearest whole hectopascal.
        return None if self._state is None else round(float(self._state))
class IlluminanceMeasurementSensor(Sensor):
    """ZHA lux sensor."""

    @property
    def state(self):
        """Return the state of the entity (raw reported value)."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return 'lx'
class MeteringSensor(Sensor):
    """ZHA Metering sensor."""

    # ZCL attribute id of the instantaneous-demand attribute.
    value_attribute = 1024

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return 'W'

    @property
    def state(self):
        """Return the state of the entity."""
        return None if self._state is None else round(float(self._state))
class ElectricalMeasurementSensor(Sensor):
    """ZHA Electrical Measurement sensor."""

    # ZCL attribute id of the active-power attribute.
    value_attribute = 1291

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return 'W'

    @property
    def force_update(self) -> bool:
        """Force update this entity."""
        return True

    @property
    def state(self):
        """Return the state of the entity."""
        # Raw active power is reported in tenths of a watt.
        return (None if self._state is None
                else round(float(self._state) / 10, 1))

    @property
    def should_poll(self) -> bool:
        """Poll state from device."""
        return True

    async def async_update(self):
        """Retrieve latest state."""
        _LOGGER.debug("%s async_update", self.entity_id)
        result = await helpers.safe_read(
            self.cluster, ['active_power'],
            allow_cache=False, only_cache=(not self._initialized))
        self._state = result.get('active_power', self._state)
|
|
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import CaseES
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from corehq.util.timezones.conversions import PhoneTime
from dimagi.utils.decorators.memoized import memoized
from corehq.elastic import stream_es_query, ES_URLS
from custom.bihar.reports.display import MCHMotherDisplay, MCHChildDisplay
from custom.bihar.utils import get_all_owner_ids_from_group
class MCHBaseReport(CustomProjectReport, CaseListReport):
    """Common plumbing for the Bihar MCH register reports (ES-backed)."""
    ajax_pagination = True
    asynchronous = True
    exportable = True
    exportable_all = True
    emailable = False
    fix_left_col = True
    report_template_path = "bihar/reports/report.html"
    # Subclasses set this to the display wrapper used for each case row.
    model = None
    fields = [
        'corehq.apps.reports.filters.select.GroupFilter',
        'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
    ]

    @property
    def case_filter(self):
        """Build the ES owner filter from the selected group, if any."""
        group_id = self.request_params.get('group', '')
        clauses = []
        if group_id:
            owner_ids = get_all_owner_ids_from_group(Group.get(group_id))
            if owner_ids:
                clauses.append({"or": [{'term': {'owner_id': owner_id}}
                                       for owner_id in owner_ids]})
            else:
                # No members: fall back to the group itself as the owner.
                clauses.append({'term': {'owner_id': group_id}})
        return {'and': clauses} if clauses else {}

    @property
    @memoized
    def case_es(self):
        """Memoized ES case wrapper for this domain."""
        return CaseES(self.domain)

    @property
    @memoized
    def rendered_report_title(self):
        """The rendered title is simply the report name."""
        return self.name

    def date_to_json(self, date):
        """Format a phone datetime as dd/mm/YYYY in the user timezone."""
        if not date:
            return ''
        phone_time = PhoneTime(date, self.timezone)
        return phone_time.user_time(self.timezone).ui_string('%d/%m/%Y')

    @property
    def get_all_rows(self):
        """Stream every matching case (used for full exports)."""
        hits = stream_es_query(q=self.es_query, es_url=ES_URLS["cases"],
                               size=999999, chunksize=100)
        displays = (self.model(self, self.get_case(hit)) for hit in hits)
        return self.get_cases(displays)

    def build_query(self, case_type=None, afilter=None, status=None, owner_ids=None, user_ids=None, search_string=None):
        """Assemble the filtered/sorted/paginated ES query body."""
        def _domain_term():
            return {"term": {"domain.exact": self.domain}}
        subterms = [_domain_term()]
        if afilter:
            subterms.append(afilter)
        if case_type:
            subterms.append({"term": {"type.exact": case_type}})
        if status:
            # The open/closed UI filter maps onto the boolean field.
            subterms.append({"term": {"closed": (status == 'closed')}})
        return {
            'query': {
                'filtered': {
                    'query': {"match_all": {}},
                    'filter': {'and': subterms},
                }
            },
            'sort': self.get_sorting_block(),
            'from': self.pagination.start,
            'size': self.pagination.count,
        }

    @property
    @memoized
    def es_query(self):
        """Memoized ES query for the current filters."""
        return self.build_query(case_type=self.case_type,
                                afilter=self.case_filter,
                                status=self.case_status)

    @property
    def rows(self):
        """Rows for the currently displayed page of ES results."""
        hits = self.es_results['hits'].get('hits', [])
        displays = (self.model(self, self.get_case(hit)) for hit in hits)
        return self.get_cases(displays)

    @property
    def export_table(self):
        """Export table with the redundant first header cell removed."""
        table = super(MCHBaseReport, self).export_table
        # remove first row from table headers
        del table[0][1][0]
        return table
class MotherMCHRegister(MCHBaseReport):
    """Mother MCH register: one row per pregnancy (cc_bihar_pregnancy) case.

    Columns cover beneficiary/provider details, the four ANC visits,
    pregnancy outcome, post-delivery details and up to four children.
    """
    name = "Mother MCH register"
    slug = "mother_mch_register"
    default_case_type = "cc_bihar_pregnancy"
    model = MCHMotherDisplay

    @property
    def headers(self):
        """Column layout; must stay in sync with get_cases() field order."""
        return DataTablesHeader(
            DataTablesColumn(_("CHW Name")),
            DataTablesColumn(_("Mother Name"), sortable=False),
            DataTablesColumnGroup(
                _("Beneficiary Information"),
                DataTablesColumn(_("Husband Name"), sortable=False),
                DataTablesColumn(_("City/ward/village"), sortable=False),
                DataTablesColumn(_("Full address"), sortable=False),
                DataTablesColumn(_("MCTS ID"), sortable=False),
                DataTablesColumn(_("Mobile number"), sortable=False),
                DataTablesColumn(_("Whose Mobile Number"), sortable=False),
                DataTablesColumn(_("Mother DOB / AGE"), sortable=False),
                DataTablesColumn(_("JSY beneficiary"), sortable=False),
                DataTablesColumn(_("Caste"), sortable=False)),
            DataTablesColumnGroup(
                _("Provider Information"),
                DataTablesColumn(_("ASHA Name"), sortable=False),
                DataTablesColumn(_("Asha phone"), sortable=False),
                DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
                DataTablesColumn(_("AWW name"), sortable=False),
                DataTablesColumn(_("AWW phone number"), sortable=False),
                DataTablesColumn(_("LMP"), sortable=False),
                DataTablesColumn(_("EDD"), sortable=False)),
            DataTablesColumnGroup(
                _("First ANC (within 12 weeks)"),
                DataTablesColumn(_("ANC 1 Date"), sortable=False),
                DataTablesColumn(_("ANC 1 Blood Pressure"), sortable=False),
                DataTablesColumn(_("ANC 1 Weight"), sortable=False),
                DataTablesColumn(_("ANC Hb"), sortable=False),
                DataTablesColumn(_("ANC1 completed within 12 weeks? "), sortable=False)),
            DataTablesColumnGroup(
                _("Second ANC (14-26 weeks)"),
                DataTablesColumn(_("ANC 2 Date"), sortable=False),
                DataTablesColumn(_("ANC 2 Blood Pressure"), sortable=False),
                DataTablesColumn(_("ANC 2 Weight"), sortable=False)),
            DataTablesColumnGroup(
                _("Third ANC (28-34 weeks)"),
                DataTablesColumn(_("ANC 3 Date"), sortable=False),
                DataTablesColumn(_("ANC 3 Blood Pressure"), sortable=False),
                DataTablesColumn(_("ANC 3 Weight"), sortable=False)),
            DataTablesColumnGroup(
                _("Fourth ANC (34 weeks to Delivery)"),
                DataTablesColumn(_("ANC 4 Date"), sortable=False),
                DataTablesColumn(_("ANC 4 Blood Pressure"), sortable=False),
                DataTablesColumn(_("ANC 4 Weight"), sortable=False),
                DataTablesColumn(_("TT1 date"), sortable=False),
                DataTablesColumn(_("TT2 date"), sortable=False),
                DataTablesColumn(_("TT Booster"), sortable=False),
                DataTablesColumn(_("Received date of 100 IFA tablets "), sortable=False),
                DataTablesColumn(_("Anemia"), sortable=False),
                DataTablesColumn(_("Any complications"), sortable=False),
                DataTablesColumn(_("RTI /STI <yes/no>"), sortable=False)),
            DataTablesColumnGroup(
                _("Pregnancy Outcome"),
                DataTablesColumn(_("Date of delivery"), sortable=False),
                DataTablesColumn(
                    _("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
                DataTablesColumn(_("Nature of delivery"), sortable=False),
                DataTablesColumn(_("Complications"), sortable=False),
                DataTablesColumn(_("Discharge date"), sortable=False),
                DataTablesColumn(_("Received date of JSY benefits"), sortable=False),
                DataTablesColumn(_("Abortion type"), sortable=False)),
            DataTablesColumnGroup(
                _("Post Delivery Details"),
                DataTablesColumn(
                    _("First PNC visit (within 48 hours / within 7 days/ after 7 days)"), sortable=False),
                DataTablesColumn(_("Complications after delivery"), sortable=False),
                DataTablesColumn(_("Type of family planning adopted after delivery"), sortable=False),
                DataTablesColumn(_("Checked mother and infant immediate after delivery?"), sortable=False),
                DataTablesColumn(_("Infant outcome number code"), sortable=False)),
            DataTablesColumnGroup(
                _("Child 1 Details"),
                DataTablesColumn(_("Name of the child"), sortable=False),
                DataTablesColumn(_("Gender"), sortable=False),
                DataTablesColumn(_("First weight at birth"), sortable=False),
                DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
            DataTablesColumnGroup(
                _("Child 2 Details"),
                DataTablesColumn(_("Name of the child"), sortable=False),
                DataTablesColumn(_("Gender"), sortable=False),
                DataTablesColumn(_("First weight at birth"), sortable=False),
                DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
            DataTablesColumnGroup(
                _("Child 3 Details"),
                DataTablesColumn(_("Name of the child"), sortable=False),
                DataTablesColumn(_("Gender"), sortable=False),
                DataTablesColumn(_("First weight at birth"), sortable=False),
                DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
            DataTablesColumnGroup(
                _("Child 4 Details"),
                DataTablesColumn(_("Name of the child"), sortable=False),
                DataTablesColumn(_("Gender"), sortable=False),
                DataTablesColumn(_("First weight at birth"), sortable=False),
                DataTablesColumn(_("Breastfed within an hour?"), sortable=False),
                DataTablesColumn(_("Migrate status "), sortable=False))
        )

    @classmethod
    def get_cases(cls, case_displays):
        """Yield one row (list of cell values) per case display.

        FIX: the first parameter was previously named ``self`` despite the
        ``@classmethod`` decorator; it actually receives the class, so it
        is now named ``cls`` (callers are unaffected).
        """
        for disp in case_displays:
            yield [
                disp.chw_name,
                disp.mother_name,
                disp.husband_name,
                disp.ward_number,
                disp.village,
                disp.mcts_id,
                disp.mobile_number,
                disp.mobile_number_whose,
                disp.dob_age,
                disp.jsy_beneficiary,
                disp.caste,
                disp.asha_name,
                disp.asha_number,
                disp.awc_code_name,
                disp.aww_name,
                disp.aww_number,
                disp.lmp,
                disp.edd,
                disp.anc_date_1,
                disp.blood_pressure_1,
                disp.weight_1,
                disp.hemoglobin,
                disp.anc_completed,
                disp.anc_date_2,
                disp.blood_pressure_2,
                disp.weight_2,
                disp.anc_date_3,
                disp.blood_pressure_3,
                disp.weight_3,
                disp.anc_date_4,
                disp.blood_pressure_4,
                disp.weight_4,
                disp.tt1_date,
                disp.tt2_date,
                disp.tt_booster,
                disp.ifa_tablets,
                disp.anemia,
                # NOTE(review): `complications` intentionally appears twice —
                # once for the ANC "Any complications" column and once for
                # the "Pregnancy Outcome > Complications" column.
                disp.complications,
                disp.rti_sti,
                disp.add,
                disp.home_sba_assist,
                disp.delivery_nature,
                disp.complications,
                disp.discharge_date,
                disp.jsy_money_date,
                disp.abortion_type,
                disp.first_pnc_time,
                disp.delivery_complications,
                disp.family_planning_type,
                disp.all_pnc_on_time,
                disp.num_children,
                disp.case_name_1,
                disp.gender_1,
                disp.first_weight_1,
                disp.breastfed_hour_1,
                disp.case_name_2,
                disp.gender_2,
                disp.first_weight_2,
                disp.breastfed_hour_2,
                disp.case_name_3,
                disp.gender_3,
                disp.first_weight_3,
                disp.breastfed_hour_3,
                disp.case_name_4,
                disp.gender_4,
                disp.first_weight_4,
                disp.breastfed_hour_4,
                disp.status
            ]

    @property
    def fixed_cols_spec(self):
        """Freeze the two left-most columns (CHW and mother name)."""
        return dict(num=2, width=350)
class ChildMCHRegister(MCHBaseReport):
    """Child MCH register: one row per newborn (cc_bihar_newborn) case.

    Columns cover beneficiary/provider details plus the immunization
    schedule from birth through two years.
    """
    name = "Child MCH register"
    slug = "child_mch_register"
    default_case_type = "cc_bihar_newborn"
    model = MCHChildDisplay

    @property
    def headers(self):
        """Column layout; must stay in sync with get_cases() field order."""
        return DataTablesHeader(
            DataTablesColumn(_("CHW Name")),
            DataTablesColumn(_("Child Name"), sortable=False),
            DataTablesColumn(_("Father and Mother Name"), sortable=False),
            DataTablesColumnGroup(
                _("Beneficiary Information"),
                DataTablesColumn(_("Mother's MCTS ID"), sortable=False),
                DataTablesColumn(_("Gender"), sortable=False),
                DataTablesColumn(_("City/ward/village"), sortable=False),
                DataTablesColumn(_("Address"), sortable=False),
                DataTablesColumn(_("Mobile number"), sortable=False),
                DataTablesColumn(_("Whose Mobile Number"), sortable=False),
                DataTablesColumn(_("DOB / AGE"), sortable=False),
                DataTablesColumn(_("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
                DataTablesColumn(_("Caste"), sortable=False)),
            DataTablesColumnGroup(
                _("Provider Information"),
                DataTablesColumn(_("ASHA Name"), sortable=False),
                DataTablesColumn(_("Asha phone"), sortable=False),
                DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
                DataTablesColumn(_("AWW name"), sortable=False),
                DataTablesColumn(_("AWW phone number"), sortable=False)),
            DataTablesColumnGroup(
                _("At Birth"),
                DataTablesColumn(_("BCG"), sortable=False),
                DataTablesColumn(_("OPV0"), sortable=False),
                DataTablesColumn(_("Hepatitis-Birth dose "), sortable=False)),
            DataTablesColumnGroup(
                _("At 6 Weeks"),
                DataTablesColumn(_("DPT1"), sortable=False),
                DataTablesColumn(_("OPV1"), sortable=False),
                DataTablesColumn(_("Hepatitis-B1"), sortable=False)),
            DataTablesColumnGroup(
                _("At 10 Weeks"),
                DataTablesColumn(_("DPT2"), sortable=False),
                DataTablesColumn(_("OPV2"), sortable=False),
                DataTablesColumn(_("Hepatitis-B2"), sortable=False)),
            DataTablesColumnGroup(
                _("At 14 Weeks"),
                DataTablesColumn(_("DPT3"), sortable=False),
                DataTablesColumn(_("OPV3"), sortable=False),
                DataTablesColumn(_("Hepatitis-B3"), sortable=False)),
            DataTablesColumnGroup(
                _("Between 9-12 Months"),
                DataTablesColumn(_("Measles (1st dose)"), sortable=False)),
            DataTablesColumnGroup(
                _("Between 16-24 Months"),
                DataTablesColumn(
                    _("Vitamin A dose-1 "), sortable=False),
                DataTablesColumn(_("Measles (2nd dose)/ MR Vaccine"))),
            DataTablesColumnGroup(
                _("After 2 Years"),
                DataTablesColumn(_("DPT Booster"), sortable=False),
                DataTablesColumn(_("OPV Booster"), sortable=False),
                DataTablesColumn(_("Vitamin A dose-2"), sortable=False),
                DataTablesColumn(_("Vitamin A dose-3"), sortable=False),
                DataTablesColumn(_("JE Vaccine"), sortable=False))
        )

    @classmethod
    def get_cases(cls, case_displays):
        """Yield one row (list of cell values) per case display.

        FIX: the first parameter was previously named ``self`` despite the
        ``@classmethod`` decorator; it actually receives the class, so it
        is now named ``cls`` (callers are unaffected).
        """
        for disp in case_displays:
            yield [
                disp.chw_name,
                disp.child_name,
                disp.father_mother_name,
                disp.mcts_id,
                disp.gender,
                disp.ward_number,
                disp.village,
                disp.mobile_number,
                disp.mobile_number_whose,
                disp.dob_age,
                disp.home_sba_assist,
                disp.caste,
                disp.asha_name,
                disp.asha_number,
                disp.awc_code_name,
                disp.aww_name,
                disp.aww_number,
                disp.bcg_date,
                disp.opv_0_date,
                disp.hep_b_0_date,
                disp.dpt_1_date,
                disp.opv_1_date,
                disp.hep_b_1_date,
                disp.dpt_2_date,
                disp.opv_2_date,
                disp.hep_b_2_date,
                disp.dpt_3_date,
                disp.opv_3_date,
                disp.hep_b_3_date,
                disp.measles_date,
                disp.vit_a_1_date,
                disp.date_measles_booster,
                disp.dpt_booster_date,
                disp.opv_booster_date,
                disp.vit_a_2_date,
                disp.vit_a_3_date,
                disp.date_je
            ]

    @property
    def fixed_cols_spec(self):
        """Freeze the three left-most columns (CHW, child, parents)."""
        return dict(num=3, width=450)
|
|
"""Provides models for processes and process patches.
"""
# System imports
import warnings
from inspect import getsource
from copy import copy
from os.path import isfile
from os import stat
# Six imports
from six import string_types
# ROOT imports
from ROOT import TChain, TColor, SetOwnership
# owls-hep imports
from owls_hep.expression import multiplied
from owls_hep.output import print_warning
# Set up default exports
__all__ = [
'Patch',
'Process',
]
class Patch(object):
    """A reusable event weight/filter expressed as a selection string.

    Applied to a Process via Process.patched(); the selections of all
    patches on a process are multiplied together.
    """

    def __init__(self, selection):
        """Create a patch.

        Args:
            selection: The selection expression to apply to the process data
        """
        self._selection = selection

    def state(self):
        """Return a tuple capturing the patch's internal state."""
        return (self._selection,)

    def selection(self):
        """Return the selection string for the patch."""
        return self._selection

    def __repr__(self):
        """Return a human-readable representation of the patch."""
        return 'Patch(%s)' % (self._selection,)
class Process(object):
    """Represents a physical process whose events may be encoded in one or more
    data files and which should be rendered according to a certain style.
    """

    def __init__(self,
                 files,
                 tree,
                 label,
                 sample_type = 'undef',
                 friends = (),
                 line_color = 1,
                 fill_color = 0,
                 marker_style = None,
                 metadata = None):
        """Initializes a new instance of the Process class.

        Args:
            files: An iterable of ROOT file paths for files representing the
                process
            tree: The ROOT TTree path within the files to use
            label: The ROOT TLatex label string to use when rendering the
                process
            sample_type: Free-form sample tag; participates in state()
            friends: An iterable of (file, tree, index) tuples describing
                friend trees attached on load()
            line_color: The ROOT TColor number or hex string (#rrggbb) to use
                as the line color when rendering the process
            fill_color: The ROOT TColor number or hex string (#rrggbb) to use
                as the fill color when rendering the process
            marker_style: The ROOT TMarker number to use as the marker style
                when rendering the process
            metadata: A (pickleable) object containing optional metadata;
                defaults to a fresh empty dict per instance
        """
        # Store parameters
        self._files = tuple(files)
        self._files_size_time = None
        self._tree = tree
        self._label = label
        self._sample_type = sample_type
        self._friends = friends
        self._line_color = line_color
        self._fill_color = fill_color
        self._marker_style = marker_style
        # FIX: `metadata` previously defaulted to a shared mutable dict
        # ({}), so every instance constructed without an explicit value
        # aliased the SAME dict and mutations leaked across instances.
        # A None sentinel gives each instance its own dict.
        self._metadata = {} if metadata is None else metadata

        # Translate hex colors if necessary
        if isinstance(self._line_color, string_types):
            self._line_color = TColor.GetColor(self._line_color)
        if isinstance(self._fill_color, string_types):
            self._fill_color = TColor.GetColor(self._fill_color)

        # Create initial patches container
        self._patches = ()

    def __hash__(self):
        """Returns a hash for the process (derived from state())."""
        # Hash the state
        return hash(self.state())

    def __repr__(self):
        return '{} ({})'.format(self._label,
                                ', '.join((repr(p) for p in self._patches)))

    def _get_files_size_time(self):
        """Populate the file sizes and modification times tuple.

        Computed lazily so that construction never touches the filesystem;
        each file is stat()'d only once.
        """
        if self._files_size_time is None:
            self._files_size_time = tuple(
                (s.st_size, s.st_mtime)
                for s in (stat(f) for f in self._files))

    def state(self):
        """Returns the state for the process.

        Only files (plus their on-disk size/mtime), tree, sample type,
        friends, and the combined patch selection are included, since
        those are all that matter for data processing.
        """
        self._get_files_size_time()
        return (self._files,
                self._files_size_time,
                self._tree,
                self._sample_type,
                self._friends,
                self.patches())

    def label(self):
        """Returns the label of the process."""
        return self._label

    def files(self):
        """Returns the files for the process."""
        return self._files

    def sample_type(self):
        """Returns the sample type for the process."""
        return self._sample_type

    def metadata(self):
        """Returns the metadata for the process, if any."""
        return self._metadata

    def patches(self):
        """Returns the product of all patch selections for the process."""
        return multiplied(*[p.selection() for p in self._patches])

    # NOTE: We could instead return a list of TTrees/TFiles, because using
    # individual TFile/TTree objects might be slightly faster than creating
    # one huge TChain.
    def load(self):
        """Loads the process data.

        Returns:
            A TChain for the process (with any friend trees attached).

        Raises:
            RuntimeError: if any process or friend file does not exist.
        """
        chain = TChain(self._tree)
        for f in self._files:
            if not isfile(f):
                raise RuntimeError('file does not exist {0}'.format(f))
            chain.Add(f)
        for friend in self._friends:
            chain.AddFriend(self._load_friend(*friend))
        return chain

    def _load_friend(self, file, tree, index):
        """Load a single friend tree, optionally building its index."""
        if not isfile(file):
            raise RuntimeError('file does not exist {0}'.format(file))
        chain = TChain(tree)
        chain.Add(file)
        if index is not None:
            chain.BuildIndex(index)
        return chain

    def retreed(self, tree):
        """Creates a new copy of the process with a different tree.

        Args:
            tree: The tree to set for the new process

        Returns:
            A copy of the process with the tree modified.
        """
        # Create the copy
        result = copy(self)

        # Retree
        result._tree = tree

        # All done
        return result

    def patched(self,
                patch,
                label = None,
                line_color = None,
                fill_color = None,
                marker_style = None,
                metadata = None):
        """Creates a new copy of the process with a patch applied.

        Args:
            patch: The patch to apply in the new process
            label/line_color/fill_color/marker_style/metadata: optional
                overrides for the copy's rendering/metadata attributes

        Returns:
            A copy of the process with the additional patch applied.
        """
        # Create the copy
        result = copy(self)
        if label is not None: result._label = label
        if line_color is not None: result._line_color = line_color
        if fill_color is not None: result._fill_color = fill_color
        if marker_style is not None: result._marker_style = marker_style
        if metadata is not None: result._metadata = metadata

        # Add the patch
        result._patches += (patch,)

        # All done
        return result

    def style(self, histogram):
        """Applies the process' style to a histogram.

        Args:
            histogram: The histogram to style
        """
        # Set title
        histogram.SetTitle(self._label)

        # Set line color
        histogram.SetLineColor(self._line_color)

        # Set fill style and color
        histogram.SetFillStyle(1001)
        histogram.SetFillColor(self._fill_color)

        # Set marker style
        if self._marker_style is not None:
            histogram.SetMarkerStyle(self._marker_style)
            # TODO: This should be configurable
            histogram.SetMarkerSize(2)
            histogram.SetMarkerColor(histogram.GetLineColor())
        else:
            # HACK: Set marker style to an invalid value if not specified,
            # because we need some way to differentiate rendering in the legend
            histogram.SetMarkerStyle(0)

        # Make lines visible
        histogram.SetLineWidth(2)
class MultiProcess(object):
    """Represents a combined process whose events may be encoded in one or
    more data processes and which should be rendered according to a certain
    style.
    """

    def __init__(self,
                 subprocesses,
                 tree,
                 label,
                 line_color = 1,
                 fill_color = 0,
                 marker_style = None,
                 metadata = None):
        """Initializes a new instance of the MultiProcess class.

        Args:
            subprocesses: An iterable of processes to combine
            tree: The ROOT TTree path (accepted for signature parity with
                Process; not stored by this class)
            label: The ROOT TLatex label string to use when rendering
            line_color: The ROOT TColor number or hex string (#rrggbb) to
                use as the line color when rendering
            fill_color: The ROOT TColor number or hex string (#rrggbb) to
                use as the fill color when rendering
            marker_style: The ROOT TMarker number to use as marker style
            metadata: A (pickleable) object containing optional metadata
        """
        # Store parameters
        self._subprocesses = subprocesses
        self._label = label
        self._marker_style = marker_style
        self._metadata = metadata

        # Hex color strings are translated to ROOT color indices up front.
        def _resolve_color(value):
            if isinstance(value, string_types):
                return TColor.GetColor(value)
            return value
        self._line_color = _resolve_color(line_color)
        self._fill_color = _resolve_color(fill_color)

    def __hash__(self):
        raise NotImplementedError('method not supported')

    def load(self, properties):
        raise NotImplementedError('method not supported')

    def retreed(self, tree):
        raise NotImplementedError('method not supported')

    def patched(self, patch):
        raise NotImplementedError('method not supported')
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Thread-local resource stack.
This module is not part of the public API surface of `gcloud`.
"""
import calendar
import datetime
import json
import os
import re
import socket
import sys
from threading import local as Local
from google.protobuf import timestamp_pb2
try:
from google.appengine.api import app_identity
except ImportError:
app_identity = None
import six
from six.moves.http_client import HTTPConnection
from six.moves import configparser
from gcloud.environment_vars import PROJECT
from gcloud.environment_vars import CREDENTIALS
_NOW = datetime.datetime.utcnow # To be replaced by tests.
_RFC3339_MICROS = '%Y-%m-%dT%H:%M:%S.%fZ'
_RFC3339_NO_FRACTION = '%Y-%m-%dT%H:%M:%S'
# datetime.strptime cannot handle nanosecond precision: parse w/ regex
_RFC3339_NANOS = re.compile(r"""
(?P<no_fraction>
\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2} # YYYY-MM-DDTHH:MM:SS
)
\. # decimal point
(?P<nanos>\d{1,9}) # nanoseconds, maybe truncated
Z # Zulu
""", re.VERBOSE)
DEFAULT_CONFIGURATION_PATH = '~/.config/gcloud/configurations/config_default'
class _LocalStack(Local):
"""Manage a thread-local LIFO stack of resources.
Intended for use in :class:`gcloud.datastore.batch.Batch.__enter__`,
:class:`gcloud.storage.batch.Batch.__enter__`, etc.
"""
def __init__(self):
super(_LocalStack, self).__init__()
self._stack = []
def __iter__(self):
"""Iterate the stack in LIFO order.
"""
return iter(reversed(self._stack))
def push(self, resource):
"""Push a resource onto our stack.
"""
self._stack.append(resource)
def pop(self):
"""Pop a resource from our stack.
:rtype: object
:returns: the top-most resource, after removing it.
:raises IndexError: if the stack is empty.
"""
return self._stack.pop()
@property
def top(self):
"""Get the top-most resource
:rtype: object
:returns: the top-most item, or None if the stack is empty.
"""
if len(self._stack) > 0:
return self._stack[-1]
class _UTC(datetime.tzinfo):
"""Basic UTC implementation.
Implementing a small surface area to avoid depending on ``pytz``.
"""
_dst = datetime.timedelta(0)
_tzname = 'UTC'
_utcoffset = _dst
def dst(self, dt): # pylint: disable=unused-argument
"""Daylight savings time offset."""
return self._dst
def fromutc(self, dt):
"""Convert a timestamp from (naive) UTC to this timezone."""
if dt.tzinfo is None:
return dt.replace(tzinfo=self)
return super(_UTC, self).fromutc(dt)
def tzname(self, dt): # pylint: disable=unused-argument
"""Get the name of this timezone."""
return self._tzname
def utcoffset(self, dt): # pylint: disable=unused-argument
"""UTC offset of this timezone."""
return self._utcoffset
def __repr__(self):
return '<%s>' % (self._tzname,)
def __str__(self):
return self._tzname
def _ensure_tuple_or_list(arg_name, tuple_or_list):
"""Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
"""
if not isinstance(tuple_or_list, (tuple, list)):
raise TypeError('Expected %s to be a tuple or list. '
'Received %r' % (arg_name, tuple_or_list))
return list(tuple_or_list)
def _app_engine_id():
    """Gets the App Engine application ID if it can be inferred.

    :rtype: str or ``NoneType``
    :returns: App Engine application ID if running in App Engine,
              else ``None``.
    """
    # app_identity is None when the GAE SDK failed to import at module load.
    return None if app_identity is None else app_identity.get_application_id()
def _file_project_id():
    """Gets the project id from the credentials file if one is available.

    :rtype: str or ``NoneType``
    :returns: Project-ID from JSON credentials file if value exists,
              else ``None``.
    """
    path = os.getenv(CREDENTIALS)
    if not path:
        return None
    with open(path, 'rb') as credentials_file:
        info = json.loads(credentials_file.read().decode('utf-8'))
    return info.get('project_id')
def _default_service_project_id():
    """Retrieves the project ID from the gcloud command line tool.

    Files that cannot be opened with configparser are silently ignored; this is
    designed so that you can specify a list of potential configuration file
    locations.

    :rtype: str or ``NoneType``
    :returns: Project-ID from default configuration file else ``None``
    """
    search_paths = []
    # Workaround for GAE not supporting pwd which is used by expanduser.
    try:
        search_paths.append(os.path.expanduser(DEFAULT_CONFIGURATION_PATH))
    except ImportError:
        pass

    # Windows keeps the gcloud configuration under %APPDATA%.
    windows_config_path = os.path.join(os.getenv('APPDATA', ''),
                                       'gcloud', 'configurations',
                                       'config_default')
    search_paths.append(windows_config_path)

    config = configparser.RawConfigParser()
    # RawConfigParser.read silently skips missing/unreadable files.
    config.read(search_paths)

    if config.has_section('core'):
        return config.get('core', 'project')
    # Implicitly returns None when no configuration defines core.project.
def _compute_engine_id():
    """Gets the Compute Engine project ID if it can be inferred.

    Uses 169.254.169.254 for the metadata server to avoid request
    latency from DNS lookup.

    See https://cloud.google.com/compute/docs/metadata#metadataserver
    for information about this IP address. (This IP is also used for
    Amazon EC2 instances, so the metadata flavor is crucial.)

    See https://github.com/google/oauth2client/issues/93 for context about
    DNS latency.

    :rtype: str or ``NoneType``
    :returns: Compute Engine project ID if the metadata service is available,
              else ``None``.
    """
    host = '169.254.169.254'
    uri_path = '/computeMetadata/v1/project/project-id'
    # The Metadata-Flavor header guards against a non-GCE server
    # answering on this link-local address.
    headers = {'Metadata-Flavor': 'Google'}
    # Very short timeout: off GCE this address simply never answers.
    connection = HTTPConnection(host, timeout=0.1)
    try:
        connection.request('GET', uri_path, headers=headers)
        response = connection.getresponse()
        if response.status == 200:
            # NOTE(review): HTTPResponse.read() returns bytes on
            # Python 3 while the docstring promises str -- confirm
            # whether callers decode this value.
            return response.read()
    except socket.error:  # socket.timeout or socket.error(64, 'Host is down')
        pass
    finally:
        connection.close()
def _get_production_project():
    """Gets the production project if it can be inferred.

    :rtype: str or ``NoneType``
    :returns: Value of the environment variable named by the
              module-level ``PROJECT`` constant, or ``None`` if unset.
    """
    return os.getenv(PROJECT)
def _determine_default_project(project=None):
    """Determine default project ID explicitly or implicitly as fall-back.

    When ``project`` is not given, the implicit sources are consulted in
    order of precedence:

    * GCLOUD_PROJECT environment variable
    * GOOGLE_APPLICATION_CREDENTIALS JSON file
    * Default service project from
      ``$ gcloud beta auth application-default login``
    * Google App Engine application ID
    * Google Compute Engine project ID (from metadata server)

    :type project: str
    :param project: Optional. The project name to use as default.

    :rtype: str or ``NoneType``
    :returns: Default project if it can be determined.
    """
    # Each inference source is tried in turn until one yields a value.
    fallbacks = (
        _get_production_project,
        _file_project_id,
        _default_service_project_id,
        _app_engine_id,
        _compute_engine_id,
    )
    for infer in fallbacks:
        if project is not None:
            break
        project = infer()
    return project
def _millis(when):
    """Convert a zone-aware datetime to integer milliseconds.

    :type when: :class:`datetime.datetime`
    :param when: the datetime to convert

    :rtype: int
    :returns: milliseconds since epoch for ``when``
    """
    # Floor-divide the microsecond timestamp down to milliseconds.
    return _microseconds_from_datetime(when) // 1000
def _datetime_from_microseconds(value):
    """Convert timestamp to datetime, assuming UTC.

    :type value: float
    :param value: The timestamp to convert

    :rtype: :class:`datetime.datetime`
    :returns: The datetime object created from the value.
    """
    # _EPOCH is the UTC-aware Unix epoch defined at the bottom of this
    # module; the result therefore carries UTC tzinfo.
    return _EPOCH + datetime.timedelta(microseconds=value)
def _microseconds_from_datetime(value):
    """Convert non-none datetime to microseconds.

    :type value: :class:`datetime.datetime`
    :param value: The timestamp to convert.

    :rtype: int
    :returns: The timestamp, in microseconds.
    """
    if not value.tzinfo:
        # Naive datetimes are treated as already being in UTC.
        value = value.replace(tzinfo=UTC)
    # Regardless of what timezone is on the value, convert it to UTC.
    value = value.astimezone(UTC)
    # Convert the datetime to a microsecond timestamp: whole seconds
    # via timegm, then add the sub-second microsecond component.
    return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond
def _millis_from_datetime(value):
    """Convert non-none datetime to timestamp, assuming UTC.

    :type value: :class:`datetime.datetime`, or None
    :param value: the timestamp

    :rtype: int, or ``NoneType``
    :returns: the timestamp, in milliseconds, or None
    """
    # Explicit None guard (the original fell through implicitly).
    if value is None:
        return None
    return _millis(value)
def _total_seconds_backport(offset):
"""Backport of timedelta.total_seconds() from python 2.7+.
:type offset: :class:`datetime.timedelta`
:param offset: A timedelta object.
:rtype: int
:returns: The total seconds (including microseconds) in the
duration.
"""
seconds = offset.days * 24 * 60 * 60 + offset.seconds
return seconds + offset.microseconds * 1e-6
def _total_seconds(offset):
"""Version independent total seconds for a time delta.
:type offset: :class:`datetime.timedelta`
:param offset: A timedelta object.
:rtype: int
:returns: The total seconds (including microseconds) in the
duration.
"""
if sys.version_info[:2] < (2, 7): # pragma: NO COVER Python 2.6
return _total_seconds_backport(offset)
else:
return offset.total_seconds()
def _rfc3339_to_datetime(dt_str):
    """Convert a microsecond-precision timestamp to a native datetime.

    :type dt_str: str
    :param dt_str: The string to convert.

    :rtype: :class:`datetime.datetime`
    :returns: The (UTC-aware) datetime object created from the string.
    """
    # _RFC3339_MICROS is a module-level strptime format string defined
    # elsewhere in this module; parsed values are tagged as UTC.
    return datetime.datetime.strptime(
        dt_str, _RFC3339_MICROS).replace(tzinfo=UTC)
def _rfc3339_nanos_to_datetime(dt_str):
    """Convert a nanosecond-precision timestamp to a native datetime.

    .. note::
       Python datetimes do not support nanosecond precision; this function
       therefore truncates such values to microseconds.

    :type dt_str: str
    :param dt_str: The string to convert.

    :rtype: :class:`datetime.datetime`
    :returns: The datetime object created from the string.

    :raises ValueError: If the timestamp does not match the RFC 3339
                        regular expression.
    """
    # _RFC3339_NANOS is a module-level compiled regex with
    # 'no_fraction' and 'nanos' groups.
    with_nanos = _RFC3339_NANOS.match(dt_str)
    if with_nanos is None:
        raise ValueError(
            'Timestamp: %r, does not match pattern: %r' % (
                dt_str, _RFC3339_NANOS.pattern))
    bare_seconds = datetime.datetime.strptime(
        with_nanos.group('no_fraction'), _RFC3339_NO_FRACTION)
    fraction = with_nanos.group('nanos')
    # Scale the fractional digits up to a full nanosecond count, then
    # truncate to microseconds for datetime.
    scale = 9 - len(fraction)
    nanos = int(fraction) * (10 ** scale)
    micros = nanos // 1000
    return bare_seconds.replace(microsecond=micros, tzinfo=UTC)
def _datetime_to_rfc3339(value):
    """Convert a native timestamp to a string.

    :type value: :class:`datetime.datetime`
    :param value: The datetime object to be converted to a string.

    :rtype: str
    :returns: The string representing the datetime stamp.
    """
    # Formats with the same module-level _RFC3339_MICROS pattern used
    # for parsing, keeping round-trips symmetric.
    return value.strftime(_RFC3339_MICROS)
def _to_bytes(value, encoding='ascii'):
    """Coerce a string value to bytes if necessary.

    ``six.b`` is insufficient for this task since in Python 2 it does
    not modify ``unicode`` objects.

    :type value: str / bytes or unicode
    :param value: The string/bytes value to be converted.

    :type encoding: str
    :param encoding: Encoding used when a text value must be encoded.
                     Defaults to "ascii" (ordinals <= 127); "latin-1"
                     covers single-byte ordinals up to 255, and "utf-8"
                     can encode any unicode value.

    :rtype: str / bytes
    :returns: ``value`` encoded to bytes, or unchanged if it already
              was a bytes object.

    :raises TypeError: if the value could not be converted to bytes.
    """
    if isinstance(value, six.text_type):
        value = value.encode(encoding)
    if not isinstance(value, six.binary_type):
        raise TypeError('%r could not be converted to bytes' % (value,))
    return value
def _bytes_to_unicode(value):
    """Coerce a bytes value to unicode if necessary.

    :type value: bytes
    :param value: bytes value to attempt string conversion on.

    :rtype: str
    :returns: ``value`` decoded as UTF-8, or unchanged if it already
              was a unicode string.

    :raises ValueError: if the value could not be converted to unicode.
    """
    if isinstance(value, six.binary_type):
        value = value.decode('utf-8')
    if not isinstance(value, six.text_type):
        raise ValueError('%r could not be converted to unicode' % (value,))
    return value
def _pb_timestamp_to_datetime(timestamp):
    """Convert a Timestamp protobuf to a datetime object.

    :type timestamp: :class:`google.protobuf.timestamp_pb2.Timestamp`
    :param timestamp: A Google returned timestamp protobuf.

    :rtype: :class:`datetime.datetime`
    :returns: A UTC datetime object converted from a protobuf timestamp.
    """
    # _EPOCH is the UTC-aware Unix epoch defined at module bottom.
    # nanos / 1000.0 is float division: sub-microsecond precision is
    # necessarily lost since datetime only supports microseconds.
    return (
        _EPOCH +
        datetime.timedelta(
            seconds=timestamp.seconds,
            microseconds=(timestamp.nanos / 1000.0),
        )
    )
def _datetime_to_pb_timestamp(when):
    """Convert a datetime object to a Timestamp protobuf.

    :type when: :class:`datetime.datetime`
    :param when: the datetime to convert

    :rtype: :class:`google.protobuf.timestamp_pb2.Timestamp`
    :returns: A timestamp protobuf corresponding to the object.
    """
    # NOTE: despite the name, ms_value holds *microseconds*.
    ms_value = _microseconds_from_datetime(when)
    # Split into whole seconds plus leftover microseconds, then scale
    # the remainder up to nanoseconds for the protobuf field.
    seconds, micros = divmod(ms_value, 10**6)
    nanos = micros * 10**3
    return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)
def _name_from_project_path(path, project, template):
"""Validate a URI path and get the leaf object's name.
:type path: str
:param path: URI path containing the name.
:type project: str or NoneType
:param project: The project associated with the request. It is
included for validation purposes. If passed as None,
disables validation.
:type template: str
:param template: Template regex describing the expected form of the path.
The regex must have two named groups, 'project' and
'name'.
:rtype: str
:returns: Name parsed from ``path``.
:raises ValueError: if the ``path`` is ill-formed or if the project from
the ``path`` does not agree with the ``project``
passed in.
"""
if isinstance(template, str):
template = re.compile(template)
match = template.match(path)
if not match:
raise ValueError('path "%s" did not match expected pattern "%s"' % (
path, template.pattern,))
if project is not None:
found_project = match.group('project')
if found_project != project:
raise ValueError(
'Project from client (%s) should agree with '
'project from resource(%s).' % (project, found_project))
return match.group('name')
# Prefer pytz's canonical UTC object when pytz is installed; otherwise
# fall back to the minimal _UTC shim defined above.
try:
    from pytz import UTC  # pylint: disable=unused-import,wrong-import-order
except ImportError:
    UTC = _UTC()  # Singleton instance to be used throughout.

# Need to define _EPOCH at the end of module since it relies on UTC.
_EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
|
|
from __future__ import print_function, division
from sympy import Basic, Symbol, symbols, lambdify
from util import interpolate, rinterpolate, create_bounds, update_bounds
from sympy.core.compatibility import xrange
class ColorGradient(object):
    """Maps scalar channel values in [0, 1] onto a color ramp.

    A gradient is a parallel pair of ``colors`` (r, g, b triples) and
    ``intervals`` (monotonic positions in [0, 1]).
    """

    # Class-level defaults: a simple two-stop gray ramp.
    colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9]
    intervals = 0.0, 1.0

    def __init__(self, *args):
        """Accept either two endpoint colors, or an even-length flat
        sequence of alternating interval/color pairs
        ``(i0, c0, i1, c1, ...)``.

        :raises ValueError: if an odd number of pair arguments is given.
        """
        if len(args) == 2:
            self.colors = list(args)
            self.intervals = [0.0, 1.0]
        elif len(args) > 0:
            if len(args) % 2 != 0:
                raise ValueError("len(args) should be even")
            # Even positions are intervals, odd positions are colors.
            self.intervals = [args[i] for i in xrange(0, len(args), 2)]
            self.colors = [args[i] for i in xrange(1, len(args), 2)]
        assert len(self.colors) == len(self.intervals)

    def copy(self):
        """Return an independent copy (colors copied one level deep)."""
        duplicate = ColorGradient()
        # Slicing preserves the element type (list stays list, tuple
        # stays tuple) while still producing fresh containers.
        duplicate.colors = [entry[:] for entry in self.colors]
        duplicate.intervals = self.intervals[:]
        return duplicate

    def _find_interval(self, v):
        """Index of the upper interval boundary bracketing ``v``."""
        last = len(self.intervals) - 1
        i = 0
        while i < last and self.intervals[i] <= v:
            i += 1
        return i

    def _interpolate_axis(self, axis, v):
        """Interpolate a single color channel (``axis``) at position ``v``."""
        i = self._find_interval(v)
        # Re-normalize v into the bracketing interval, then blend the
        # two neighboring stop colors on this axis.
        t = rinterpolate(self.intervals[i - 1], self.intervals[i], v)
        return interpolate(self.colors[i - 1][axis], self.colors[i][axis], t)

    def __call__(self, r, g, b):
        """Map raw (r, g, b) channel values through the gradient."""
        return (self._interpolate_axis(0, r),
                self._interpolate_axis(1, g),
                self._interpolate_axis(2, b))
default_color_schemes = {} # defined at the bottom of this file
class ColorScheme(object):
    """Computes per-vertex colors for plots.

    Combines a color function ``f(x, y, z, u, v) -> (r, g, b)`` with a
    :class:`ColorGradient` that rescales the raw channel values.
    """

    def __init__(self, *args, **kwargs):
        """Build a scheme from a callable, a preset name, a string
        expression, or sympy expressions plus optional gradient lists.

        :raises ValueError: if the arguments cannot be interpreted as a
            color function, or the gradient is malformed.
        """
        self.args = args
        self.f, self.gradient = None, ColorGradient()

        if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]):
            # A ready-made color function was passed directly.
            self.f = args[0]
        elif len(args) == 1 and isinstance(args[0], str):
            if args[0] in default_color_schemes:
                # Named preset such as 'rainbow' or 'zfade'.
                cs = default_color_schemes[args[0]]
                self.f, self.gradient = cs.f, cs.gradient.copy()
            else:
                # Interpret the string as an expression in x,y,z,u,v.
                self.f = lambdify('x,y,z,u,v', args[0])
        else:
            self.f, self.gradient = self._interpret_args(args, kwargs)
        self._test_color_function()
        if not isinstance(self.gradient, ColorGradient):
            raise ValueError("Color gradient not properly initialized. "
                             "(Not a ColorGradient instance.)")

    def _interpret_args(self, args, kwargs):
        """Split *args* into color-function atoms and gradient lists.

        :returns: ``(f, gradient)`` where ``f`` is the lambdified color
                  function and ``gradient`` a :class:`ColorGradient`.
        :raises ValueError: if the atoms cannot be lambdified or the
            collection arguments do not describe a gradient.
        """
        f, gradient = None, self.gradient
        atoms, lists = self._sort_args(args)
        s = self._pop_symbol_list(lists)
        s = self._fill_in_vars(s)

        # Prepare the error message for lambdification failure.
        f_str = ', '.join(str(fa) for fa in atoms)
        s_str = (str(sa) for sa in s)
        s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0)
        f_error = ValueError("Could not interpret arguments "
                             "%s as functions of %s." % (f_str, s_str))

        # Try to lambdify args: a single expression feeds all three
        # channels, three expressions map to r,g,b individually.
        if len(atoms) == 1:
            fv = atoms[0]
            try:
                f = lambdify(s, [fv, fv, fv])
            except TypeError:
                raise f_error
        elif len(atoms) == 3:
            fr, fg, fb = atoms
            try:
                f = lambdify(s, [fr, fg, fb])
            except TypeError:
                raise f_error
        else:
            raise ValueError("A ColorScheme must provide 1 or 3 "
                             "functions in x, y, z, u, and/or v.")

        # Try to interpret any given color information.
        if len(lists) == 0:
            gargs = []
        elif len(lists) == 1:
            # A single flat list of interval/color pairs.
            gargs = lists[0]
        elif len(lists) == 2:
            try:
                (r1, g1, b1), (r2, g2, b2) = lists
            except TypeError:
                raise ValueError("If two color arguments are given, "
                                 "they must be given in the format "
                                 "(r1, g1, b1), (r2, g2, b2).")
            gargs = lists
        elif len(lists) == 3:
            try:
                (r1, r2), (g1, g2), (b1, b2) = lists
            except Exception:
                raise ValueError("If three color arguments are given, "
                                 "they must be given in the format "
                                 "(r1, r2), (g1, g2), (b1, b2). To create "
                                 "a multi-step gradient, use the syntax "
                                 "[0, colorStart, step1, color1, ..., 1, "
                                 "colorEnd].")
            # Transpose per-channel (start, end) ranges into two
            # endpoint colors.
            gargs = [[r1, g1, b1], [r2, g2, b2]]
        else:
            raise ValueError("Don't know what to do with collection "
                             "arguments %s." % (', '.join(str(l) for l in lists)))

        if gargs:
            try:
                gradient = ColorGradient(*gargs)
            except Exception as ex:
                raise ValueError(("Could not initialize a gradient "
                                  "with arguments %s. Inner "
                                  "exception: %s") % (gargs, str(ex)))
        return f, gradient

    def _pop_symbol_list(self, lists):
        """Remove and return the single all-Symbol list from *lists*.

        :returns: The symbol list, or ``[]`` if none was given.
        :raises ValueError: if more than one symbol list is present.
        """
        symbol_lists = []
        # Bug fix: iterate over a snapshot. The original removed items
        # from ``lists`` while iterating it, which skips the element
        # following each removal and could hide a second symbol list.
        for l in list(lists):
            mark = True
            for s in l:
                if s is not None and not isinstance(s, Symbol):
                    mark = False
                    break
            if mark:
                lists.remove(l)
                symbol_lists.append(l)
        if len(symbol_lists) == 1:
            return symbol_lists[0]
        elif len(symbol_lists) == 0:
            return []
        else:
            raise ValueError("Only one list of Symbols "
                             "can be given for a color scheme.")

    def _fill_in_vars(self, args):
        """Map a partial variable list onto the canonical (x, y, z, u, v).

        Unspecified slots are bound to placeholder symbols named
        'unbound1'..'unbound5' so they cannot be used accidentally in
        an expression.

        :raises ValueError: if *args* is not a sequence of Symbols/None.
        """
        defaults = symbols('x,y,z,u,v')
        # Bug fix: ``v_error`` was previously referenced without ever
        # being defined, so invalid input raised NameError instead of
        # the intended ValueError.
        v_error = ValueError("Could not find what to plot.")
        if len(args) == 0:
            return defaults
        if not isinstance(args, (tuple, list)):
            raise v_error
        for s in args:
            if s is not None and not isinstance(s, Symbol):
                raise v_error
        # When vars are given explicitly, any vars not given are marked
        # 'unbound' so as to not be accidentally used in an expression.
        vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)]
        # interpret as t
        if len(args) == 1:
            vars[3] = args[0]
        # interpret as u,v
        elif len(args) == 2:
            if args[0] is not None:
                vars[3] = args[0]
            if args[1] is not None:
                vars[4] = args[1]
        # interpret as x,y,z
        elif len(args) >= 3:
            # allow some of x,y,z to be left unbound if not given
            if args[0] is not None:
                vars[0] = args[0]
            if args[1] is not None:
                vars[1] = args[1]
            if args[2] is not None:
                vars[2] = args[2]
            # interpret the rest as t
            if len(args) >= 4:
                vars[3] = args[3]
                # ...or u,v
                if len(args) >= 5:
                    vars[4] = args[4]
        return vars

    def _sort_args(self, args):
        """Partition *args* into scalar atoms and list/tuple collections."""
        atoms, lists = [], []
        for a in args:
            if isinstance(a, (tuple, list)):
                lists.append(a)
            else:
                atoms.append(a)
        return atoms, lists

    def _test_color_function(self):
        """Sanity-check the color function at the origin.

        :raises ValueError: if ``self.f`` is not callable or rejects the
            five-argument signature.
        """
        if not callable(self.f):
            raise ValueError("Color function is not callable.")
        try:
            result = self.f(0, 0, 0, 0, 0)
            if len(result) != 3:
                raise ValueError("length should be equal to 3")
        except TypeError:
            raise ValueError("Color function needs to accept x,y,z,u,v, "
                             "as arguments even if it doesn't use all of them.")
        except AssertionError:
            raise ValueError("Color function needs to return 3-tuple r,g,b.")
        except Exception:
            # NOTE(review): this broad handler also swallows the
            # ValueError raised just above for a wrong-length result,
            # so that length check is advisory only. Kept as-is to
            # preserve behavior for color functions that are simply
            # not valid at (0, 0, 0, 0, 0).
            pass

    def __call__(self, x, y, z, u, v):
        """Evaluate the color function; return None on any failure."""
        try:
            return self.f(x, y, z, u, v)
        except Exception:
            return None

    def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None):
        """
        Apply this color scheme to a
        set of vertices over a single
        independent variable u.
        """
        bounds = create_bounds()
        cverts = list()
        if callable(set_len):
            # Two passes below, so report twice the vertex count.
            set_len(len(u_set)*2)
        # First pass: calculate f() = r,g,b for each vert and find the
        # min and max for each of r,g,b.
        for _u in xrange(len(u_set)):
            if verts[_u] is None:
                cverts.append(None)
            else:
                x, y, z = verts[_u]
                u, v = u_set[_u], None
                c = self(x, y, z, u, v)
                if c is not None:
                    c = list(c)
                    update_bounds(bounds, c)
                cverts.append(c)
            if callable(inc_pos):
                inc_pos()
        # Second pass: scale into [0,1] and apply the gradient.
        for _u in xrange(len(u_set)):
            if cverts[_u] is not None:
                for _c in range(3):
                    # scale from [f_min, f_max] to [0,1]
                    cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1],
                                                  cverts[_u][_c])
                # apply gradient
                cverts[_u] = self.gradient(*cverts[_u])
            if callable(inc_pos):
                inc_pos()
        return cverts

    def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None):
        """
        Apply this color scheme to a
        set of vertices over two
        independent variables u and v.
        """
        bounds = create_bounds()
        cverts = list()
        if callable(set_len):
            # Two passes below, so report twice the vertex count.
            set_len(len(u_set)*len(v_set)*2)
        # First pass: calculate f() = r,g,b for each vert and find the
        # min and max for each of r,g,b.
        for _u in xrange(len(u_set)):
            column = list()
            for _v in xrange(len(v_set)):
                if verts[_u][_v] is None:
                    column.append(None)
                else:
                    x, y, z = verts[_u][_v]
                    u, v = u_set[_u], v_set[_v]
                    c = self(x, y, z, u, v)
                    if c is not None:
                        c = list(c)
                        update_bounds(bounds, c)
                    column.append(c)
                if callable(inc_pos):
                    inc_pos()
            cverts.append(column)
        # Second pass: scale into [0,1] and apply the gradient.
        for _u in xrange(len(u_set)):
            for _v in xrange(len(v_set)):
                if cverts[_u][_v] is not None:
                    # scale from [f_min, f_max] to [0,1]
                    for _c in range(3):
                        cverts[_u][_v][_c] = rinterpolate(
                            bounds[_c][0], bounds[_c][1], cverts[_u][_v][_c])
                    # apply gradient
                    cverts[_u][_v] = self.gradient(*cverts[_u][_v])
                if callable(inc_pos):
                    inc_pos()
        return cverts

    def str_base(self):
        """Comma-joined string form of the constructor arguments."""
        return ", ".join(str(a) for a in self.args)

    def __repr__(self):
        return "%s" % (self.str_base())
# Plot variables available to the preset color expressions below.
x, y, z, t, u, v = symbols('x,y,z,t,u,v')

# Preset schemes, looked up by name in ColorScheme.__init__.
default_color_schemes['rainbow'] = ColorScheme(z, y, x)
default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97),
                                             (0.97, 0.4, 0.4), (None, None, z))
# Multi-step gradients use the flat [interval0, color0, interval1, ...]
# syntax accepted by ColorGradient.
default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z),
                                              [0.00, (0.2, 0.2, 1.0),
                                               0.35, (0.2, 0.8, 0.4),
                                               0.50, (0.3, 0.9, 0.3),
                                               0.65, (0.4, 0.8, 0.2),
                                               1.00, (1.0, 0.2, 0.2)])
default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z),
                                              [0.0, (0.3, 0.3, 1.0),
                                               0.30, (0.3, 1.0, 0.3),
                                               0.55, (0.95, 1.0, 0.2),
                                               0.65, (1.0, 0.95, 0.2),
                                               0.85, (1.0, 0.7, 0.2),
                                               1.0, (1.0, 0.3, 0.2)])
|
|
#!/usr/bin/python
# Standard Ansible module metadata: preview status, community supported.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dellemc_unity
short_description: Configure and manage Dell EMC Unity Storage System
description:
- This module can be used to configure and manage Dell EMC Unity Storage System.
- The module supports check mode.
version_added: "2.2"
author:
- "Jiale Huo (jiale.huo@emc.com)"
- "Craig Smith (craig.j.smith@emc.com)"
options:
unity_hostname:
description:
- Hostname of the Unity system's management interface.
required: true
type: string
unity_username:
description:
- Username of the Unity system's default administrator user.
required: false
default: "admin"
type: string
unity_password:
description:
- Password of the Unity system's default administrator user.
required: false
default: "Password123#"
type: string
unity_license_path:
description:
- Path to the license file of the Unity system.
required: false
type: string
unity_updates:
description:
- Update resources of the Unity system.
- See "Unisphere Management REST API Programmer's Guide" for examples of how to update Unity system resources.
- See "Unisphere Management REST API Reference Guide" for details and arguments of each individual resource's update operations.
required: false
type: list
suboptions:
resource_type:
description:
- Type of the resource to be queried.
required: true
type: string
id:
description:
- ID of an instance of the resource type to be updated.
- If this option is present, then instance update of the resource type will be executed.
- Otherwise, if this option is missing, then the update operation either creates a new instance, or executes a class-level action.
- If the "action" option is present, then a class-level action on the resource type is executed.
- Otherwise, if the "action" option is missing, then a new instance of the resource type is created.
required: false
type: string
action:
description:
- Action of the update operation.
- If the "id" option is present, then the action is executed on the instance.
- Otherwise, if the "id" option is missing, then the action is executed at the class-level on the resource type.
required: false
default: "modify"
type: string
attributes:
description:
- The attributes to compare to decide whether an update should be executed.
- If attributes are missing, then the default, hard-coded attribute will be compared against the existing values.
- If attributes is a list, then queries of attributes of the same names will be compared to the ones in the update.
- Sometimes an attribute in the query field is different from that as an update argument, in this case, a dictionary mapping queried attributes to update arguments can be used.
- If the update is on an instance with ID, then the attributes specifies which one of the current values of the instance should be compared with the values to be updated. If all values are the same, then the update will not be executed, but a warning will be issued.
- If the update is to create a new instance, then the attributes are used to search for instances of the same attribute values. If such duplicates exist, a warning will be issued in check mode.
- Dotted attributes can be used to compare related resources.
required: false
type: list or dictionary
filter:
description:
- A filter for query to find duplicates of an instance creation update.
- See "Unisphere Management REST API Programmer's Guide" for details on how to create a filter for queries.
- It can be a partial filter, complemented by the list of attributes to compare.
- If the filter is missing, then the default, hard-coded filter will be used.
required: false
type: string
language:
description:
- Overrides the value of the Accept-language: header.
- This is useful for testing from a plain browser or from an environment where URL parameters are easier to use than HTTP headers.
- The language parameter specifies the localization language for error messages, events, alerts, and other localizable responses.
required: false
choices:
- de-DE: German
- en-US: English
- es-MX: Latin American Spanish
- fr-FR: French
- ja-JP: Japanese
- ko-KR: Korean
- pt-BR: Brazilian Portuguese
- ru-RU: Russian
- zh-CN: Chinese
default: en-US
type: string
timeout:
description:
- Seconds before timeout.
- Executes the request in the background. Most active management requests (ones that attempt to change the configuration) support this option.
- The documentation for each API method in the Unisphere Management REST API Reference Guide specifies whether the method supports this option.
required: false
type: int
unity_password_updates:
description:
- Update passwords of users of the Unity system.
required: false
type: list
suboptions:
username:
description:
- Name of the user.
required: true
type: string
password:
description:
- Current password of the user.
required: true
type: string
new_password:
description:
- New password of the user.
required: true
type: string
unity_queries:
description:
- Query the Unity system for resource information.
- See "Unisphere Management REST API Programmer's Guide" for detailed description and examples of the query parameters.
- See "Unisphere Management REST API Reference Guide" for details and attributes (field names) of each individual resource's query operations.
required: false
type: list
suboptions:
resource_type:
description:
- Type of the resource to be queried.
required: true
type: string
id:
description:
- ID of an instance of the resource type to be queried.
- If this option is missing, then collection query of the resource type will be executed.
- Otherwise, if this option is present, then instance query of the resource type will be executed.
required: false
type: string
compact:
description:
- Omits metadata from each instance in the query response.
required: false
default: true
type: bool
fields:
description:
- Specifies a comma-separated list of attributes to return in a response.
- If you do not use this parameter, a query will return the id attribute only.
- When using fields, you can:
- Use dot notation syntax to return the values of related attributes.
- Optionally, define a new attribute from field expressions associated with one or more existing attributes.
required: false
type: string
filter:
description:
- Filters the response data against a set of criteria. Only matching resource instances are returned. Filtering is case insensitive.
- When using filter, you can use dot notation syntax to filter by the attributes of related resource types.
- Only applies to collection query requests.
required: false
type: string
groupby:
description:
- Groups the specified values and applies the @sum function to each group.
- For example, you could use groupby with @sum to return a summary of disk sizes for each disk type.
- Only applies to collection query requests.
required: false
type: string
language:
description:
- Overrides the value of the Accept-language: header.
- This is useful for testing from a plain browser or from an environment where URL parameters are easier to use than HTTP headers.
- The language parameter specifies the localization language for error messages, events, alerts, and other localizable responses.
required: false
choices:
- de-DE: German
- en-US: English
- es-MX: Latin American Spanish
- fr-FR: French
- ja-JP: Japanese
- ko-KR: Korean
- pt-BR: Brazilian Portuguese
- ru-RU: Russian
- zh-CN: Chinese
default: en-US
type: string
orderby:
description:
- Specifies how to sort response data. You can sort response data in ascending or descending order by the attributes of the queried resource type. And you can use dot notation syntax to sort response data by the attributes of related resource types.
- Only applies to collection query requests.
required: false
type: string
page:
description:
- Identifies the page to return in a response by specifying the page number. If this parameter is not specified, the server returns all resource instances that meet the request criteria in page 1.
- Only applies to collection query requests.
required: false
type: int
per_page:
description:
- Specifies the number of resource type instances that form a page. If this parameter is not specified, the server returns all resource instances that meet the request criteria in the page specified by page (or in page 1, if page is also not specified).
- The server imposes an upper limit of 2000 on the number of resource instances returned in a page.
- Only applies to collection query requests.
required: false
type: int
with_entrycount:
description:
- Indicates whether to return the entryCount response component in the response data. The entryCount response component indicates the number of resource instances in the complete list. You can use it to get the total number of entries when paging returned a partial response.
- By default, the entryCount response component is not returned. Set with_entrycount=true to return the entryCount response component.
- Only applies to collection query requests.
required: false
default: true
type: bool
notes:
- GitHub project: U(https://github.com/jialehuo/ansible-dellemc-unity)
- This module supports check mode.
requirements:
- Python >= 2.7
- requests >= 1.3
- Unity >= 4.0
'''
EXAMPLES = '''
- name: Initial setup
dellemc_unity:
unity_hostname: "192.168.0.100"
unity_username: admin
unity_password: Password123#
unity_updates:
- {resource_type: system, id: '0', attributes: {'isEULAAccepted':'isEulaAccepted'}, isEulaAccepted: 'true'}
unity_password_updates:
- {username: admin, password: Password123#, new_password: Password123!}
unity_license_path: /home/labadmin/unity.lic
- name: Updates and queries
dellemc_unity:
unity_hostname: "192.168.0.202"
unity_username: admin
unity_password: Password123!
unity_updates:
- {resource_type: user, name: test1, password: Welcome1!, role: administrator, attributes: [name]}
- {resource_type: user, id: 'user_test1', attributes: {'role.id':'role'}, role: 'operator'}
- {resource_type: remoteSyslog, id: '0', enabled: True, address: '192.168.0.11:515', protocol: 1, facility: 0}
- {resource_type: dnsServer, id: '0', addresses: [10.254.66.23, 10.254.66.24]}
- {resource_type: ntpServer, id: '0', attributes: [addresses], addresses: [10.254.140.21, 10.254.140.22], rebootPrivilege: 2}
unity_password_updates:
- {username: test1, password: Welcome1!, new_password: Welcome2!}
unity_queries:
- {resource_type: user, id: 'user_test1', fields: 'role.id'}
- {resource_type: remoteSyslog, id: "0", fields: 'address,protocol,facility,enabled'} # id parameter has to be of the string type
- {resource_type: dnsServer, fields: "domain, addresses, origin", page: 1, per_page: 100}
- {resource_type: ntpServer, id: "0", fields: addresses} # id parameter has to be of the string type
- name: Deletes
dellemc_unity:
unity_hostname: "192.168.0.202"
unity_username: admin
unity_password: Password123!
unity_updates:
- {resource_type: user, id: 'user_test1', action: 'delete'}
'''
RETURN = '''
unity_query_results:
description:
- A list of JSON objects detailing the results of each successful query operation.
returned: always
type: list
sample: >
"unity_query_results": [
{
"entries": [
{
"content": {
"id": "user_test1",
"role": {
"id": "operator"
}
}
}
],
"entryCount": 1,
"query": {
"fields": "role.id",
"id": "user_test1",
"resource_type": "user"
},
"url": "https://192.168.0.202/api/instances/user/user_test1?compact=true&fields=role.id"
},
{
"entries": [
{
"content": {
"address": "192.168.0.11:515",
"enabled": true,
"facility": 0,
"id": "0",
"protocol": 1
}
}
],
"entryCount": 1,
"query": {
"fields": "address,protocol,facility,enabled",
"id": "0",
"resource_type": "remoteSyslog"
},
"url": "https://192.168.0.202/api/instances/remoteSyslog/0?compact=true&fields=address%2Cprotocol%2Cfacility%2Cenabled"
},
{
"entries": [
{
"content": {
"addresses": [
"10.254.66.23",
"10.254.66.24"
],
"id": "0",
"origin": 2
}
}
],
"entryCount": 1,
"query": {
"fields": "domain, addresses, origin",
"page": 1,
"per_page": 100,
"resource_type": "dnsServer"
},
"url": "https://192.168.0.202/api/types/dnsServer/instances?compact=true&fields=domain%2C+addresses%2C+origin&with_entrycount=true&page=1&per_page=100"
},
{
"entries": [
{
"content": {
"addresses": [
"10.254.140.21",
"10.254.140.22"
],
"id": "0"
}
}
],
"entryCount": 1,
"query": {
"fields": "addresses",
"id": "0",
"resource_type": "ntpServer"
},
"url": "https://192.168.0.202/api/instances/ntpServer/0?compact=true&fields=addresses"
}
]
contains:
entries:
description:
- A list of JSON objects for each instance of the resource type returned by the query.
returned: always
type: complex
contains:
content:
description:
- Content of the instance.
- Contains at least the ID of the instance, and possibly other fields specified by the 'fields' parameter in the 'unity_queries' option.
returned: always
type: complex
entryCount:
description:
- Count of entries returned.
type: int
query:
description:
- The original query.
returned: always
type: complex
url:
description:
- URL of the query.
returned: always
type: string
unity_update_results:
description:
- A list of JSON objects detailing the results of each operation.
returned: always
type: list
sample: >
"unity_update_results": [
{
"args": {
"name": "test1",
"password": "Welcome1!",
"role": "administrator"
},
"HTTP_method": "POST",
"response": {
"@base": "https://192.168.0.202/api/instances/user",
"content": {
"id": "user_test1"
},
"links": [
{
"href": "/user_test1",
"rel": "self"
}
],
"updated": "2017-04-04T13:32:05.837Z"
},
"url": "https://192.168.0.202/api/types/user/instances"
},
{
"args": {
"address": "192.168.0.11:515",
"enabled": true,
"facility": 0,
"protocol": 1
},
"HTTP_method": "POST",
"url": "https://192.168.0.202/api/instances/remoteSyslog/0/action/modify"
},
{
"update": {
"addresses": [
"10.254.66.23",
"10.254.66.24"
],
"id": "0",
"resource_type": "dnsServer"
},
"warning": "The existing instances already has the same attributes as the update operation. No update will happen."
},
{
"args": {
"addresses": [
"10.254.140.21",
"10.254.140.22"
],
"rebootPrivilege": 2
},
"HTTP_method": "POST",
"url": "https://192.168.0.202/api/instances/ntpServer/0/action/modify"
},
{
"HTTP_method": "DELETE",
"url": "https://192.168.0.202/api/instances/user/user_test1"
}
]
contains:
HTTP_method:
description:
- HTTP method used to effect the update.
returned: success
type: string
url:
description:
- URL of the operation to change the resource.
returned: success
type: string
args:
description:
- Arguments of the operation to change the resource.
returned: success
type: complex
response:
description:
- Non-empty response of the update operation from the Unity system.
returned: success
type: complex
update:
description:
- The original update request.
- Only returned when the update failed.
returned: failure
type: complex
message:
description:
- Warning or failure message of the failed update operation.
returned: failure
type: string
'''
import requests, json, re
from ansible.module_utils.basic import AnsibleModule
from datetime import datetime
# Maps action -> resource_type -> attributes that Unity.isDuplicate() compares
# against existing instances.  A list means the REST attribute and the Ansible
# task attribute share the same name; a dict maps REST attribute -> task
# attribute (dotted keys walk nested response content, see
# Unity.getDottedValue).
actionAttribs = {
    'create': {
        'alertConfigSNMPTarget': {'address': 'targetAddress'},
        'capabilityProfile': ['name', 'pool'],
        'cifsServer': ['nasServer'],
        'pool': ['name'],
        'user': ['name']
    },
    'modify': {
        'alertConfig': {
            'locale': 'alertLocale',
            'isThresholdAlertsEnabled': 'isThresholdAlertsEnabled',
            'minEmailNotificationSeverity': 'minEmailNotificationSeverity',
            'minSNMPTrapNotificationSeverity': 'minSNMPTrapNotificationSeverity',
            'emailFromAddress': 'emailFromAddress',
            'destinationEmails': 'destinationEmails'
        },
        'alertConfigSNMPTarget': {
            'address': 'targetAddress',
            'username': 'username',
            'authProto': 'authProtocol',
            'privacyProto': 'privProtocol'
        },
        'capabilityProfile': ['name', 'description', 'usageTags'],
        'ntpServer': ['addresses'],
        'pool': {
            'name': 'name',
            'description': 'description',
            'storageResourceType': 'storageResourceType',
            'alertThreshold': 'alertThreshold',
            'poolSpaceHarvestHighThreshold': 'poolSpaceHarvestHighThreshold',
            'poolSpaceHarvestLowThreshold': 'poolSpaceHarvestLowThreshold',
            'snapSpaceHarvestHighThreshold': 'snapSpaceHarvestHighThreshold',
            'snapSpaceHarvestLowThreshold': 'snapSpaceHarvestLowThreshold',
            'isHarvestEnabled': 'isHarvestEnabled',
            'isSnapHarvestEnabled': 'isSnapHarvestEnabled',
            'isFASTCacheEnabled': 'isFASTCacheEnabled',
            'isFASTVpScheduleEnabled': 'isFASTVpScheduleEnabled',
            'poolFastVP.isScheduleEnabled': 'isFASTVpScheduleEnabled'
        },
        'system': {
            'name':'name',
            'isUpgradeComplete':'isUpgradeCompleted',
            'isAutoFailbackEnabled':'isAutoFailbackEnabled',
            'isEULAAccepted':'isEulaAccepted'
        },
        'cifsServer': ['name', 'description', 'netbiosName', 'domain', 'workgroup', 'nasServer'],
        'user': {'role.id': 'role'},
    },
}
# Default duplicate-detection REST query filters per action/resource type.
# Currently empty, so filters are derived from the update attributes instead
# (see Unity.isDuplicate).
actionFilters = {
    'create': {
    }
}
class Unity:
    """Thin client for the Dell EMC Unity REST API, driven by an AnsibleModule.

    Executes updates, password changes, a license upload and queries inside a
    single authenticated REST session.  Results accumulate in
    self.updateResults / self.queryResults and the Ansible run is terminated
    through exitSuccess()/exitFail().
    """

    def __init__(self, module):
        self.hostname = module.params['unity_hostname']
        self.username = module.params['unity_username']
        self.password = module.params['unity_password']
        self.licensePath = module.params['unity_license_path']
        self.updates = module.params['unity_updates']
        self.passwordUpdates = module.params['unity_password_updates']
        self.queries = module.params['unity_queries']
        self.module = module
        self.checkMode = module.check_mode
        self.apibase = 'https://' + self.hostname  # Base URL of the REST API
        # HTTP headers for REST API requests, less the 'EMC-CSRF-TOKEN' header
        self.headers = {'X-EMC-REST-CLIENT': 'true', 'content-type': 'application/json', 'Accept': 'application/json'}
        self.session = requests.Session()
        self.changed = False
        self.updateResults = []
        self.queryResults = []
        self.err = None

    def exitFail(self):
        """Terminate the Ansible run as failed, reporting results gathered so far."""
        self.module.fail_json(changed=self.changed, msg=self.err, unity_update_results=self.updateResults, unity_query_results=self.queryResults)

    def exitSuccess(self):
        """Terminate the Ansible run as successful."""
        self.module.exit_json(changed=self.changed, unity_update_results=self.updateResults, unity_query_results=self.queryResults)

    def _getMsg(self, resp):
        """Parse a response body as JSON; fall back to wrapping the raw text."""
        try:
            msg = json.loads(resp.text)
        except ValueError:
            msg = {'httpStatusCode': resp.status_code, 'messages': [{'en-US': resp.text}]}
        return msg

    def _getResult(self, resp, **kwargs):
        """Return resp on HTTP 2xx; otherwise record the error and exit failed."""
        if resp.status_code // 100 == 2:  # HTTP status code 2xx = success
            return resp
        self.err = self._getMsg(resp)
        self.err.update({'url': resp.url})
        if resp.status_code == 401 and kwargs.get('auth'):  # Unauthorized password
            self.err['messages'][0]['en-US'] = "Authentication error for User '" + kwargs['auth'].username + "'"  # Update error message
        self.exitFail()

    def _doGet(self, url, params=None, **kwargs):
        """GET an API path; exits failed on any non-2xx response."""
        kwargs.update({'headers': self.headers, 'verify': False})
        resp = self.session.get(self.apibase + url, params=params, **kwargs)
        return self._getResult(resp, **kwargs)

    def _changeResult(self, resp, url, args=None, changed=True, msg=None, **kwargs):
        """Record the outcome of a change operation, or exit failed.

        resp is None when no request was made (check mode / skipped updates).
        """
        if resp:
            url = resp.url
        elif 'params' in kwargs:  # Reconstruct URL with parameters
            url += '?'
            for key, value in kwargs['params'].items():
                url += key + '=' + value + '&'
            url = url.strip('?&')
        if (resp is None) or (resp and resp.status_code // 100 == 2):
            if changed:
                self.changed = changed
            if changed or msg:
                changeContent = {'changed': changed}
                if args:
                    changeContent['args'] = args
                if resp and resp.text:  # append response if it exists
                    changeContent['response'] = json.loads(resp.text)
                if msg:  # append messages if they exist
                    changeContent.update(msg)
                self.updateResults.append(changeContent)
        else:
            self.err = self._getMsg(resp)
            self.err['url'] = resp.url
            if args is not None:
                self.err['args'] = args
            self.exitFail()

    def _doPost(self, url, args, changed=True, msg=None, **kwargs):
        """POST a change (skipped in check mode) and record its result."""
        if self.checkMode:
            resp = None
        else:
            kwargs.update({'headers': self.headers, 'verify': False})
            resp = self.session.post(self.apibase + url, json=args, **kwargs)
        self._changeResult(resp, url, args, changed=changed, msg=msg, **kwargs)

    def _doDelete(self, url, msg=None, **kwargs):
        """DELETE an instance (skipped in check mode) and record the result."""
        if self.checkMode:
            resp = None
        else:
            kwargs.update({'headers': self.headers, 'verify': False})
            resp = self.session.delete(self.apibase + url, **kwargs)
        self._changeResult(resp, url, msg=msg, **kwargs)

    def startSession(self):
        """Authenticate and capture the CSRF token needed for change requests."""
        url = '/api/instances/system/0'
        auth = requests.auth.HTTPBasicAuth(self.username, self.password)
        resp = self._doGet(url, auth=auth)
        # Add 'EMC-CSRF-TOKEN' header required by subsequent POST/DELETE calls
        self.headers['EMC-CSRF-TOKEN'] = resp.headers['EMC-CSRF-TOKEN']

    def stopSession(self):
        """Log out of the REST session (local cleanup only)."""
        url = '/api/types/loginSessionInfo/action/logout'
        args = {'localCleanupOnly': 'true'}
        self._doPost(url, args, changed=False)

    def uploadLicense(self):
        """Upload the license file when it is newer than what is installed."""
        url = self.apibase + '/upload/license'
        resp = None
        msg = {'resource_type': 'license', 'action': 'upload'}
        changed = self.isLicenseUpdate()
        if changed:
            if not self.checkMode:
                headers = {'X-EMC-REST-CLIENT': 'true', 'EMC-CSRF-TOKEN': self.headers['EMC-CSRF-TOKEN']}
                # Context manager closes the license file (previously leaked)
                with open(self.licensePath, 'rb') as licenseFile:
                    resp = self.session.post(url, files={'upload': licenseFile}, headers=headers, verify=False)
        else:
            msg.update({'warn': 'All licenses are up-to-date. No upload will happen.'})
        self._changeResult(resp, url, args={'licensePath': self.licensePath}, changed=changed, msg=msg)

    def isLicenseUpdate(self):
        """Return True when the license file has a newer ISSUED date than installed."""
        isUpdate = False
        query = {'resource_type': 'license', 'fields': 'id, name, issued'}
        result = self.runQuery(query)
        oldIssued = {}
        for entry in result['entries']:
            if entry.get('id'):
                oldIssued[entry['id'].upper()] = datetime.strptime(entry.get('issued', '1970-01-01T00:00:00.000Z'), '%Y-%m-%dT%H:%M:%S.%fZ')
        reID = re.compile(r'^INCREMENT (?P<id>\w+)')
        reIssued = re.compile(r'ISSUED=(?P<issued>\d{1,2}-[A-Z][a-z]{2}-\d{4})')
        newIssued = {}
        licId = None  # renamed from 'id' to avoid shadowing the builtin
        with open(self.licensePath, 'r') as f:
            for line in f:
                if licId is None:  # look for the next INCREMENT line
                    m = reID.search(line)
                    if m:
                        licId = m.group('id').upper()
                else:  # inside an INCREMENT block: look for its ISSUED date
                    m = reIssued.search(line)
                    if m:
                        newIssued[licId] = datetime.strptime(m.group('issued'), '%d-%b-%Y')
                        licId = None
        for licId in newIssued.keys():
            if newIssued[licId] > oldIssued.get(licId, datetime(1970, 1, 1)):
                isUpdate = True
        return isUpdate

    def runUpdates(self):
        """Apply each requested update in order."""
        for update in self.updates:
            self.runUpdate(update)

    def runUpdate(self, update):
        """Apply one update: create, modify, delete or a class-level action.

        NOTE(review): the nesting of the duplicate check vs. the delete branch
        is reconstructed to match the module's documented behavior (existing
        instances do get deleted); confirm against upstream if in doubt.
        """
        paramKeys = ['language', 'timeout']
        urlKeys = ['resource_type', 'id', 'action', 'attributes', 'filter'] + paramKeys
        params = {key: update[key] for key in update if key in paramKeys}
        args = {key: update[key] for key in update if key not in urlKeys}
        msg = {}
        if 'resource_type' in update:  # A resource must have the "resource_type" parameter
            msg['resource_type'] = update['resource_type']
        else:
            self.err = {'error': 'Update has no "resource_type" parameter', 'update': update}
            self.exitFail()
        if 'id' in update:  # Update an existing resource instance with ID
            msg['id'] = update['id']
            url = '/api/instances/' + update['resource_type'] + '/' + update['id'] + '/action/' + update.get('action', 'modify')
            if 'action' not in update:
                update['action'] = 'modify'  # default action
                msg['action'] = update['action']
                if self.isDuplicate(update):  # nothing to change; skip the request
                    msg['warn'] = 'The existing instances already has the same attributes as the update operation. No update will happen.'
                    self._changeResult(None, url, args, changed=False, msg=msg, params=params)
                    return
            elif update['action'] == 'delete':
                msg = update
                url = '/api/instances/' + update['resource_type'] + '/' + update['id']
                if not self.isDuplicate(update):  # instance absent: nothing to delete
                    msg['warn'] = 'The instance to be deleted does not exist. No update will happen.'
                    self._changeResult(None, url, args, changed=False, msg=msg, params=params)
                    return
                else:
                    self._doDelete(url, msg)
                    return
        else:
            if 'action' in update:  # Class-level action
                url = '/api/types/' + update['resource_type'] + '/action/' + update['action']
            else:
                update['action'] = 'create'  # Create a new instance
                msg['action'] = update['action']
                url = '/api/types/' + update['resource_type'] + '/instances'
                # Only check duplicate entries during check mode. The users
                # accept the consequences if they still want the new instance.
                if self.checkMode:
                    duplicates = self.isDuplicate(update)
                    if duplicates:
                        msg.update({'warn': 'Instances with the same attributes already exist for the creation operation. Create the new instance at your own risk.', 'duplicates': duplicates})
                        self._changeResult(None, url, args, changed=False, msg=msg, params=params)
                        return
        msg['action'] = update['action']
        self._doPost(url, args, params=params, msg=msg)

    def isDuplicate(self, update):
        """Determine whether an update would be a no-op / duplicate.

        Returns a bool for password/proxy updates; for 'modify', True when
        every compared attribute already has the requested value; for
        class-level lookups the matching entries; for instance lookups the
        instance content; otherwise None.
        """
        # A password update only proceeds when the password actually changes
        if 'password' in update and 'oldPassword' in update:
            return update['password'] == update['oldPassword']
        # The HTTP proxy's password cannot be verified, so never skip it
        if 'proxyPassword' in update:
            return False  # fixed: original used the undefined name 'false'
        query = {key: update[key] for key in update if key in ['resource_type', 'id', 'language']}
        attrs = None
        filter = None
        if update['action'] in ['create', 'modify']:  # Only create or modify actions need to compare attributes with existing resource instances
            # First, use the default, hard-coded attributes
            attrs = actionAttribs[update['action']].get(update['resource_type'])
            # Next, task-supplied attributes override the defaults
            if 'attributes' in update:
                attrs = update['attributes']
            # Last, fall back to every resource-type specific key of the update
            if attrs is None:
                attrs = {attr: attr for attr in update if attr not in ['resource_type', 'id', 'action', 'language', 'timeout', 'password', 'new_password', 'attributes', 'filter']}
        if isinstance(attrs, list):
            attributes = {attr: attr for attr in attrs}
        elif isinstance(attrs, dict):
            attributes = attrs
        if update['action'] == 'create':  # Only create action needs a filter to find duplicates
            # Default filter, overridden by a task-supplied one, else ''
            filter = actionFilters[update['action']].get(update['resource_type'])
            if 'filter' in update:
                filter = update['filter']
            if filter is None:
                filter = ''
        if update['action'] == 'modify':  # Only modify action adds the 'fields' argument to the query
            query['fields'] = ','.join([field for field in attributes.keys() if attributes[field] in update])
        elif update['action'] == 'create':  # Only create action adds the 'filter' argument to the query
            for queryAttr, updateAttr in attributes.items():
                if updateAttr in update:
                    filter = queryAttr + self.processFilterValue(self.getDottedValue(update, updateAttr)) + ' and ' + filter
            filter = re.sub(' and $', '', filter)  # strip the trailing 'and' if the original filter is empty string
            query['filter'] = filter
        result = self.runQuery(query)
        if update['action'] == 'modify':  # For modify action, compare queried attributes and update attributes
            content = result
            for queryAttr, updateAttr in attributes.items():
                if updateAttr in update and self.getDottedValue(content, queryAttr) != self.getDottedValue(update, updateAttr):
                    return False
            else:  # for-else: every compared attribute already matches
                return True
        elif 'entries' in result and len(result['entries']) > 0:  # class-level query: duplicates are the returned entries
            return result['entries']
        elif 'id' in result:  # instance query: duplicate if the instance exists
            return result
        else:
            return None

    def getDottedValue(self, dictionary, dottedKey, separator='.'):
        """Look up a nested value by a separator-joined key path; None-safe."""
        value = dictionary
        for key in dottedKey.split(separator):
            if value:
                value = value.get(key)
            else:
                break
        return value

    def processFilterValue(self, value):
        """Render a value as a REST filter comparison; strings get quoted."""
        if isinstance(value, str):
            value = ' eq "' + value + '"'
        else:
            value = ' eq ' + str(value)
        return value

    def runPasswordUpdates(self):
        """Apply each requested password change."""
        for update in self.passwordUpdates:
            self.runPasswordUpdate(update)

    def runPasswordUpdate(self, update):
        """Verify a user's current password, then set the new one."""
        username = update.get('username')
        password = update.get('password')
        newPassword = update.get('new_password')
        kwargs = {'auth': requests.auth.HTTPBasicAuth(username, password), 'headers': self.headers, 'verify': False}
        resp = requests.get(self.apibase + '/api/instances/system/0', **kwargs)
        self._getResult(resp, **kwargs)  # exits failed when the old password is wrong
        update = {'resource_type': 'user', 'id': 'user_' + username, 'password': newPassword, 'oldPassword': password}
        self.runUpdate(update)

    def runQueries(self):
        """Run each requested query, appending results to queryResults."""
        for query in self.queries:
            result = self.runQuery(query)
            self.queryResults.append(result)

    def runQuery(self, query):
        """Run one instance or collection query; return the unwrapped content."""
        if 'resource_type' not in query:  # A query must have the "resource_type" parameter
            self.err = {'error': 'Query has no "resource_type" parameter', 'query': query}
            self.exitFail()
        instanceKeys = ['compact', 'fields', 'language']  # Instance query keys
        collectionKeys = ['compact', 'fields', 'filter', 'groupby', 'language', 'orderby', 'page', 'per_page', 'with_entrycount']  # Collection query keys
        if 'id' in query:
            url = '/api/instances/' + query['resource_type'] + '/' + query['id']
            paramKeys = instanceKeys
        else:
            url = '/api/types/' + query['resource_type'] + '/instances'
            paramKeys = collectionKeys
        params = {key: query[key] for key in paramKeys if key in query}  # keep only the recognized query parameters
        if 'compact' not in params:
            params['compact'] = 'true'  # By default, omit metadata from each instance in the query response
        if 'id' not in query and 'with_entrycount' not in params:  # Collection query without the 'with_entrycount' parameter
            params['with_entrycount'] = 'true'  # By default, return the entryCount response component in the response data.
        resp = self._doGet(url, params)
        r = json.loads(resp.text)
        result = {'resource_type': query['resource_type']}
        if 'id' in query:
            result['id'] = query['id']
            result.update(r['content'])
        else:
            result['entries'] = [entry['content'] for entry in r['entries']]
        return result

    def run(self):
        """Execute all requested operations inside one authenticated session."""
        self.startSession()
        if self.updates:
            self.runUpdates()
        if self.passwordUpdates:
            self.runPasswordUpdates()
        if self.licensePath:
            self.uploadLicense()
        if self.queries:
            self.runQueries()
        self.stopSession()
def main():
    """Entry point: build the Ansible module, run all Unity operations, exit."""
    argument_spec = dict(
        unity_hostname=dict(default=None, required=True, type='str'),
        unity_username=dict(default='admin', type='str'),
        unity_password=dict(default='Password123#', type='str'),  # , no_log=True),
        unity_license_path=dict(default=None, type='path'),
        unity_updates=dict(default=None, type='list'),
        unity_password_updates=dict(default=None, type='list'),  # , no_log=True),
        unity_queries=dict(default=None, type='list'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    unity = Unity(module)
    unity.run()
    if unity.err:
        unity.exitFail()
    else:
        unity.exitSuccess()


if __name__ == '__main__':
    main()
|
|
from app.lookups.sqlite.protpeptable import ProtPepTable
from app.dataformats import prottable as ph
class ProtGeneTableBase(ProtPepTable):
    """Shared column definitions for protein- and gene-level table lookups."""
    # Count columns present in every output table.
    stdheaderfields = [ph.HEADER_NO_PSM, ph.HEADER_NO_PEPTIDE,
                       ph.HEADER_NO_UNIPEP]
    # Per-set columns: the standard counts plus q-value and precursor area.
    singlefields = stdheaderfields + [ph.HEADER_QVAL, ph.HEADER_AREA]
class ProtTableDB(ProtGeneTableBase):
    """Lookup for protein-group-centric result tables."""
    datatype = 'protein'
    # table name -> column list used by the shared insert/select machinery
    colmap = {'protein_group_master': ['master_id', 'protein_acc'],
              'proteins': ['pacc_id', 'protein_acc'],
              'protein_precur_quanted': ['pacc_id', 'prottable_id', 'quant'],
              'protein_fdr': ['pacc_id', 'prottable_id', 'fdr'],
              'protquant_channels': ['channel_id', 'prottable_id',
                                     'channel_name', 'amount_psms_name'],
              'protein_iso_quanted': ['proteinquant_id', 'pacc_id',
                                      'channel_id', 'quantvalue',
                                      'amount_psms'],
              }

    def add_tables(self, tabletypes=[]):
        """Create the protein-level result tables (tabletypes is unused)."""
        self.create_tables(['protein_tables', 'protein_iso_quanted',
                            'protquant_channels', 'protein_precur_quanted',
                            'protein_fdr'])

    def create_pdata_map(self):
        """This runs only once, returns the data which is not dependent on sets,
        in a dict with accessions as keys"""
        sql = """
        SELECT pgm.master_id, p.protein_acc, IFNULL(g.gene_acc, 'NA'),
        IFNULL(aid.assoc_id, 'NA'), cov.coverage, sub.pgc,
        sub.pgcnr, IFNULL(pd.description, 'NA') FROM protein_group_master AS pgm
        LEFT OUTER JOIN (
            SELECT master_id, GROUP_CONCAT(protein_acc) AS pgc,
            COUNT(protein_acc) AS pgcnr FROM protein_group_content
            GROUP BY master_id
            ) AS sub ON sub.master_id=pgm.master_id
        INNER JOIN proteins AS p ON pgm.pacc_id=p.pacc_id
        LEFT OUTER JOIN protein_coverage AS cov ON
        p.protein_acc=cov.protein_acc
        LEFT OUTER JOIN ensg_proteins AS egp ON pgm.pacc_id=egp.pacc_id
        LEFT OUTER JOIN genes AS g ON egp.gene_id=g.gene_id
        LEFT OUTER JOIN genename_proteins AS gnp ON pgm.pacc_id=gnp.pacc_id
        LEFT OUTER JOIN associated_ids AS aid ON aid.gn_id=gnp.gn_id
        LEFT OUTER JOIN prot_desc AS pd ON pd.pacc_id=p.pacc_id
        """
        cursor = self.get_cursor()
        pgdata = {}
        for mid, macc, gacc, aid, cov, pgc, pgnr, desc in cursor.execute(sql):
            pgdata[mid] = {
                ph.HEADER_PROTEIN: macc,
                ph.HEADER_GENEID: gacc,
                ph.HEADER_GENENAME: aid,
                ph.HEADER_COVERAGE: cov,
                ph.HEADER_CONTENTPROT: pgc,
                ph.HEADER_NO_PROTEIN: pgnr,
                ph.HEADER_DESCRIPTION: desc,
            }
        return pgdata

    def merge_features(self):
        """Run the per-set protein-group aggregation query; returns the cursor."""
        sql = """
        SELECT bs.set_name, pgm.master_id, COUNT(DISTINCT ppg.psm_id),
        COUNT (DISTINCT ps.pep_id), COUNT(DISTINCT uni.pep_id),
        IFNULL(pf.fdr, 'NA'), ppq.quant, GROUP_CONCAT(pqc.channel_name), GROUP_CONCAT(piq.quantvalue),
        GROUP_CONCAT(piq.amount_psms)
        FROM psm_protein_groups AS ppg
        JOIN biosets AS bs
        INNER JOIN psms ON ppg.psm_id=psms.psm_id
        INNER JOIN peptide_sequences AS ps ON psms.pep_id=ps.pep_id
        INNER JOIN protein_tables AS pt ON pt.set_id=bs.set_id
        INNER JOIN protein_group_master AS pgm ON pgm.master_id=ppg.master_id
        INNER JOIN proteins AS prots ON prots.pacc_id=pgm.pacc_id
        LEFT OUTER JOIN protein_fdr AS pf ON pf.prottable_id=pt.prottable_id AND
        pf.pacc_id=prots.pacc_id
        LEFT OUTER JOIN protein_precur_quanted AS ppq ON ppq.prottable_id=pt.prottable_id AND
        ppq.pacc_id=prots.pacc_id
        LEFT OUTER JOIN protquant_channels AS pqc ON pqc.prottable_id=pt.prottable_id
        LEFT OUTER JOIN protein_iso_quanted AS piq ON piq.channel_id=pqc.channel_id AND
        piq.pacc_id=prots.pacc_id
        LEFT OUTER JOIN (
            SELECT ppg.pep_id AS pep_id FROM (
                SELECT psms.pep_id AS pep_id, COUNT (DISTINCT ppg.master_id) AS nrpep
                FROM psm_protein_groups AS ppg INNER JOIN psms USING(psm_id)
                GROUP BY psms.pep_id
                ) AS ppg WHERE ppg.nrpep==1
            ) AS uni ON uni.pep_id=ps.pep_id
        GROUP BY pgm.master_id, bs.set_id
        """
        cursor = self.get_cursor()
        # removed leftover debug print(sql)
        cursor.execute(sql)
        return cursor
class GeneTableDB(ProtGeneTableBase):
    """Lookup for gene-centric (ENSG) result tables."""
    datatype = 'gene'
    colmap = {'genes': ['gene_id', 'gene_acc'],
              'gene_precur_quanted': ['gene_id', 'genetable_id', 'quant'],
              'gene_fdr': ['gene_id', 'genetable_id', 'fdr'],
              'genequant_channels': ['channel_id', 'genetable_id',
                                     'channel_name', 'amount_psms_name'],
              'gene_iso_quanted': ['genequant_id', 'gene_id',
                                   'channel_id', 'quantvalue', 'amount_psms'],
              }

    def add_tables(self, tabletypes=[]):
        """Create the gene-level result tables (tabletypes is unused)."""
        self.create_tables(['gene_tables', 'gene_iso_quanted',
                            'genequant_channels', 'gene_precur_quanted',
                            'gene_fdr'])

    def create_pdata_map(self):
        """Fetch the set-independent gene data once, keyed on gene accession."""
        sql = """
        SELECT g.gene_acc, GROUP_CONCAT(p.protein_acc, ';'), IFNULL(aid.assoc_id, 'NA'),
        IFNULL(pd.description, 'NA')
        FROM genes AS g
        LEFT OUTER JOIN ensg_proteins AS egp ON egp.gene_id=g.gene_id
        LEFT OUTER JOIN proteins AS p ON p.pacc_id=egp.pacc_id
        LEFT OUTER JOIN genename_proteins AS gnp ON gnp.pacc_id=egp.pacc_id
        LEFT OUTER JOIN associated_ids AS aid ON aid.gn_id=gnp.gn_id
        LEFT OUTER JOIN prot_desc AS pd ON pd.pacc_id=p.pacc_id
        GROUP BY g.gene_acc
        """
        cur = self.get_cursor()
        return {gacc: {ph.HEADER_PROTEINS: pacc,
                       ph.HEADER_GENEID: gacc,
                       ph.HEADER_GENENAME: aid,
                       ph.HEADER_DESCRIPTION: desc}
                for gacc, pacc, aid, desc in cur.execute(sql)}

    def merge_features(self):
        """Run the per-set gene aggregation query; returns the live cursor."""
        ### protein_acc on gene table is indexed??
        sql = """
        SELECT bs.set_name, g.gene_acc, COUNT(DISTINCT ppsm.psm_id),
        COUNT (DISTINCT ps.pep_id), COUNT(DISTINCT uniq.pep_id),
        IFNULL(gf.fdr, 'NA'), gpq.quant, GROUP_CONCAT(gqc.channel_name), GROUP_CONCAT(giq.quantvalue),
        GROUP_CONCAT(giq.amount_psms)
        FROM protein_psm AS ppsm
        JOIN biosets AS bs
        INNER JOIN psms ON ppsm.psm_id=psms.psm_id
        INNER JOIN peptide_sequences AS ps ON psms.pep_id=ps.pep_id
        INNER JOIN gene_tables AS gt ON gt.set_id=bs.set_id
        INNER JOIN proteins AS p ON p.protein_acc=ppsm.protein_acc
        INNER JOIN ensg_proteins AS egp ON egp.pacc_id=p.pacc_id
        INNER JOIN genes AS g ON g.gene_id=egp.gene_id
        LEFT OUTER JOIN gene_fdr AS gf ON gf.genetable_id=gt.genetable_id AND
        gf.gene_id=g.gene_id
        LEFT OUTER JOIN gene_precur_quanted AS gpq ON gpq.genetable_id=gt.genetable_id AND
        gpq.gene_id=g.gene_id
        LEFT OUTER JOIN genequant_channels AS gqc ON gqc.genetable_id=gt.genetable_id
        LEFT OUTER JOIN gene_iso_quanted AS giq ON giq.channel_id=gqc.channel_id AND
        giq.gene_id=g.gene_id
        LEFT OUTER JOIN (
            SELECT psmg.pep_id AS pep_id FROM (
                SELECT psms.pep_id AS pep_id, COUNT (DISTINCT g.gene_acc) AS nrpep
                FROM protein_psm AS ppsm INNER JOIN psms USING(psm_id)
                INNER JOIN proteins AS p ON p.protein_acc=ppsm.protein_acc
                INNER JOIN ensg_proteins AS egp ON egp.pacc_id=p.pacc_id
                INNER JOIN genes AS g ON g.gene_id=egp.gene_id
                GROUP BY psms.pep_id
                ) AS psmg WHERE psmg.nrpep==1
            ) AS uniq ON uniq.pep_id=ps.pep_id
        GROUP BY g.gene_id, bs.set_id
        """
        cur = self.get_cursor()
        cur.execute(sql)
        return cur
class GeneTableAssocIDsDB(GeneTableDB):
    """Like GeneTableDB, but keyed on associated (gene-name) IDs."""
    datatype = 'assoc'
    colmap = {'associated_ids': ['gn_id', 'assoc_id'],
              'assoc_precur_quanted': ['gn_id', 'genetable_id', 'quant'],
              'assoc_fdr': ['gn_id', 'genetable_id', 'fdr'],
              'genequant_channels': ['channel_id', 'genetable_id',
                                     'channel_name', 'amount_psms_name'],
              'assoc_iso_quanted': ['genequant_id', 'gn_id',
                                    'channel_id', 'quantvalue', 'amount_psms'],
              }

    def add_tables(self, tabletypes=[]):
        """Create the associated-ID level result tables (tabletypes is unused)."""
        self.create_tables(['gene_tables', 'assoc_iso_quanted',
                            'genequant_channels', 'assoc_precur_quanted',
                            'assoc_fdr'])

    def create_pdata_map(self):
        """Fetch the set-independent data once, keyed on associated ID."""
        sql = """
        SELECT gn.assoc_id, GROUP_CONCAT(p.protein_acc, ';'), IFNULL(g.gene_acc, 'NA'),
        IFNULL(pd.description, 'NA')
        FROM associated_ids AS gn
        LEFT OUTER JOIN genename_proteins AS gnp ON gnp.gn_id=gn.gn_id
        LEFT OUTER JOIN proteins AS p ON p.pacc_id=gnp.pacc_id
        LEFT OUTER JOIN ensg_proteins AS egp ON egp.pacc_id=gnp.pacc_id
        LEFT OUTER JOIN genes AS g ON gn.gn_id=egp.gene_id
        LEFT OUTER JOIN prot_desc AS pd ON pd.pacc_id=p.pacc_id
        GROUP BY gn.gn_id
        """
        cur = self.get_cursor()
        return {assoc: {ph.HEADER_PROTEINS: prot,
                        ph.HEADER_GENEID: gene,
                        ph.HEADER_GENENAME: assoc,
                        ph.HEADER_DESCRIPTION: desc}
                for assoc, prot, gene, desc in cur.execute(sql)}

    def merge_features(self):
        """Run the per-set associated-ID aggregation query; returns the cursor."""
        ### protein_acc on gene table is indexed??
        ### check distinct stuff, pgroups etc need it, matched with content numbers (which cnanot habe it)
        sql = """
        SELECT bs.set_name, gn.assoc_id, COUNT(DISTINCT ppsm.psm_id),
        COUNT (DISTINCT ps.pep_id), COUNT(DISTINCT uniq.pep_id),
        IFNULL(gf.fdr, 'NA'), gpq.quant, GROUP_CONCAT(gqc.channel_name), GROUP_CONCAT(giq.quantvalue),
        GROUP_CONCAT(giq.amount_psms)
        FROM protein_psm AS ppsm
        JOIN biosets AS bs
        INNER JOIN psms ON ppsm.psm_id=psms.psm_id
        INNER JOIN peptide_sequences AS ps ON psms.pep_id=ps.pep_id
        INNER JOIN gene_tables AS gt ON gt.set_id=bs.set_id
        INNER JOIN proteins AS p ON p.protein_acc=ppsm.protein_acc
        INNER JOIN genename_proteins AS gnp ON gnp.pacc_id=p.pacc_id
        INNER JOIN associated_ids AS gn ON gn.gn_id=gnp.gn_id
        LEFT OUTER JOIN assoc_fdr AS gf ON gf.genetable_id=gt.genetable_id AND
        gf.gn_id=gn.gn_id
        LEFT OUTER JOIN assoc_precur_quanted AS gpq ON gpq.genetable_id=gt.genetable_id AND
        gpq.gn_id=gn.gn_id
        LEFT OUTER JOIN genequant_channels AS gqc ON gqc.genetable_id=gt.genetable_id
        LEFT OUTER JOIN assoc_iso_quanted AS giq ON giq.channel_id=gqc.channel_id AND
        giq.gn_id=gn.gn_id
        LEFT OUTER JOIN (
            SELECT psmg.pep_id AS pep_id FROM (
                SELECT psms.pep_id AS pep_id, COUNT (DISTINCT gn.assoc_id) AS nrpep
                FROM protein_psm AS ppsm INNER JOIN psms USING(psm_id)
                INNER JOIN proteins AS p ON p.protein_acc=ppsm.protein_acc
                INNER JOIN genename_proteins AS gnp ON gnp.pacc_id=p.pacc_id
                INNER JOIN associated_ids AS gn ON gn.gn_id=gnp.gn_id
                GROUP BY psms.pep_id
                ) AS psmg WHERE psmg.nrpep==1
            ) AS uniq ON uniq.pep_id=ps.pep_id
        GROUP BY gn.gn_id, bs.set_id
        """
        cur = self.get_cursor()
        cur.execute(sql)
        return cur
|
|
# Modified works:
# --------------------------------------------------------
# Copyright (c) 2017 - 2018 Kentaro Wada.
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Original works:
# --------------------------------------------------------
# expand_boxes, segm_results
# Copyright (c) 2017-present, Facebook, Inc.
# Licensed under The Apache License [see LICENSE for details]
# https://github.com/facebookresearch/Detectron
# --------------------------------------------------------
# Copyright (c) 2017 Preferred Networks, Inc.
# Licensed under The MIT License [see LICENSE for details]
# https://github.com/chainer/chainercv
# --------------------------------------------------------
# Faster R-CNN implementation by Chainer
# Copyright (c) 2016 Shunta Saito
# Licensed under The MIT License [see LICENSE for details]
# https://github.com/mitmul/chainer-faster-rcnn
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# https://github.com/rbgirshick/py-faster-rcnn
# --------------------------------------------------------
from __future__ import division
import cv2
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
from chainercv.links.model.faster_rcnn.utils.loc2bbox import loc2bbox
from chainercv.utils import non_maximum_suppression
from chainer_mask_rcnn.datasets import concat_examples
def expand_boxes(boxes, scale):
    """Scale each (x1, y1, x2, y2) box about its own center by *scale*."""
    x_centers = (boxes[:, 2] + boxes[:, 0]) * .5
    y_centers = (boxes[:, 3] + boxes[:, 1]) * .5
    half_w = (boxes[:, 2] - boxes[:, 0]) * .5 * scale
    half_h = (boxes[:, 3] - boxes[:, 1]) * .5 * scale
    expanded = np.zeros(boxes.shape)
    expanded[:, 0] = x_centers - half_w
    expanded[:, 1] = y_centers - half_h
    expanded[:, 2] = x_centers + half_w
    expanded[:, 3] = y_centers + half_h
    return expanded
def segm_results(bbox, label, roi_mask, im_h, im_w):
    """Paste per-RoI mask probabilities back into full-image boolean masks.

    Args:
        bbox: (R, 4) boxes; the [1, 0, 3, 2] permutation below converts them
            from (y1, x1, y2, x2) to (x1, y1, x2, y2) order.
        label: (R,) predicted class index per box.
        roi_mask: (R, n_class, M, M) mask probabilities.
        im_h, im_w: output image height and width.

    Returns:
        (R, im_h, im_w) boolean array of full-image instance masks.
    """
    if len(bbox) == 0:
        return np.zeros((0, im_h, im_w), dtype=bool)
    mask_size = roi_mask.shape[2]
    assert roi_mask.shape[3] == mask_size
    ref_boxes = bbox[:, [1, 0, 3, 2]]
    all_masks = []
    M = mask_size
    # Expand boxes so the 1px zero border of padded_mask maps just outside the
    # box, which lets cv2.resize interpolate cleanly at the box edge.
    scale = (M + 2.0) / M
    ref_boxes = expand_boxes(ref_boxes, scale)
    ref_boxes = ref_boxes.astype(np.int32)
    padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
    for mask_ind in range(len(ref_boxes)):
        label_i = label[mask_ind]
        # Only the interior is rewritten each iteration; the border stays 0.
        padded_mask[1:-1, 1:-1] = roi_mask[mask_ind, label_i, :, :]
        ref_box = ref_boxes[mask_ind, :]
        w = np.maximum(ref_box[2] - ref_box[0] + 1, 1)
        h = np.maximum(ref_box[3] - ref_box[1] + 1, 1)
        mask = cv2.resize(padded_mask, (w, h))
        mask = np.array(mask > 0.5, dtype=np.uint8)
        im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
        # Clip the pasted region to the image bounds.
        x_0 = max(ref_box[0], 0)
        x_1 = min(ref_box[2] + 1, im_w)
        y_0 = max(ref_box[1], 0)
        y_1 = min(ref_box[3] + 1, im_h)
        im_mask[y_0:y_1, x_0:x_1] = mask[
            (y_0 - ref_box[1]):(y_1 - ref_box[1]),
            (x_0 - ref_box[0]):(x_1 - ref_box[0])]
        all_masks.append(im_mask.astype(bool))
    return np.asarray(all_masks)
class MaskRCNN(chainer.Chain):
    def __init__(
            self, extractor, rpn, head,
            mean,
            min_size=600,
            max_size=1000,
            loc_normalize_mean=(0., 0., 0., 0.),
            loc_normalize_std=(0.1, 0.1, 0.2, 0.2),
            detections_per_im=100):
        """Assemble a Mask R-CNN from its three stages.

        Args:
            extractor: backbone mapping images to feature maps.
            rpn: region proposal network run on the feature maps.
            head: RoI head producing class locs, scores and masks.
            mean: per-channel value subtracted from images in prepare().
            min_size, max_size: resize bounds used by prepare().
            loc_normalize_mean, loc_normalize_std: de-normalization constants
                applied to predicted box offsets in _to_bboxes().
            detections_per_im: keep at most this many top-scoring detections
                per image in _to_bboxes(); 0 or less disables the cap.
        """
        super(MaskRCNN, self).__init__()
        with self.init_scope():
            self.extractor = extractor
            self.rpn = rpn
            self.head = head
        self.mean = mean
        self.min_size = min_size
        self.max_size = max_size
        self.loc_normalize_mean = loc_normalize_mean
        self.loc_normalize_std = loc_normalize_std
        # Post-processing thresholds used by _suppress().
        self.nms_thresh = 0.5
        self.score_thresh = 0.05
        self._detections_per_im = detections_per_im
    @property
    def n_class(self):
        """Total number of classes including the background."""
        return self.head.n_class
def __call__(self, x, scales):
img_size = x.shape[2:]
h = self.extractor(x)
rpn_locs, rpn_scores, rois, roi_indices, anchor =\
self.rpn(h, img_size, scales)
roi_cls_locs, roi_scores, roi_masks = self.head(
h, rois, roi_indices)
return roi_cls_locs, roi_scores, rois, roi_indices, roi_masks
def prepare(self, imgs):
prepared_imgs = []
sizes = []
scales = []
for img in imgs:
_, H, W = img.shape
scale = 1.
if self.min_size:
scale = self.min_size / min(H, W)
if self.max_size and scale * max(H, W) > self.max_size:
scale = self.max_size / max(H, W)
img = img.transpose(1, 2, 0)
img = cv2.resize(img, None, fx=scale, fy=scale)
img = img.transpose(2, 0, 1)
img = (img - self.mean).astype(np.float32, copy=False)
prepared_imgs.append(img)
sizes.append((H, W))
scales.append(scale)
return prepared_imgs, sizes, scales
def _suppress(self, raw_cls_bbox, raw_prob):
bbox = list()
label = list()
score = list()
assign = list()
raw_assign = np.arange(len(raw_cls_bbox))
# skip cls_id = 0 because it is the background class
for l in range(1, self.n_class):
cls_bbox_l = raw_cls_bbox.reshape((-1, self.n_class, 4))[:, l, :]
prob_l = raw_prob[:, l]
raw_assign_l = raw_assign
# thresholding by score
keep = prob_l > self.score_thresh
cls_bbox_l = cls_bbox_l[keep]
prob_l = prob_l[keep]
assign_l = raw_assign_l[keep]
# thresholding by nms
keep = non_maximum_suppression(
cls_bbox_l, self.nms_thresh, prob_l)
bbox.append(cls_bbox_l[keep])
# The labels are in [0, self.n_class - 2].
label.append((l - 1) * np.ones((len(keep),)))
score.append(prob_l[keep])
assign.append(assign_l[keep])
bbox = np.concatenate(bbox, axis=0).astype(np.float32)
label = np.concatenate(label, axis=0).astype(np.int32)
score = np.concatenate(score, axis=0).astype(np.float32)
assign = np.concatenate(assign, axis=0).astype(np.int32)
return bbox, label, score, assign
    def _to_bboxes(self, roi_cls_locs, roi_scores, rois, roi_indices, sizes,
                   scales):
        """Decode head outputs into per-image bboxes, labels and scores.

        For each image in the batch: denormalize the predicted offsets,
        convert them to boxes in original-image coordinates, clip to the
        image, then apply score thresholding and NMS via ``_suppress``.
        Zero-area boxes are dropped, and at most ``_detections_per_im``
        top-scoring detections are kept per image (when that limit is > 0).

        Returns lists (one entry per image) of bboxes, labels, scores and
        ROI-row assignments.
        """
        if isinstance(roi_cls_locs, chainer.Variable):
            roi_cls_locs = roi_cls_locs.array
        probs = F.softmax(roi_scores).array
        del roi_scores
        bboxes = []
        labels = []
        scores = []
        assigns = []
        for index in range(len(sizes)):
            scale = scales[index]
            size = sizes[index]
            # Select the rows belonging to this image of the batch.
            keep = roi_indices == index
            roi_cls_loc = roi_cls_locs[keep]
            prob = probs[keep]
            # Undo the preprocessing scale so boxes are in original coords.
            roi = rois[keep] / scale
            # Convert predictions to bounding boxes in image coordinates.
            # Bounding boxes are scaled to the scale of the input images.
            mean = self.xp.tile(self.xp.asarray(self.loc_normalize_mean),
                                self.n_class)
            std = self.xp.tile(self.xp.asarray(self.loc_normalize_std),
                               self.n_class)
            roi_cls_loc = (roi_cls_loc * std + mean).astype(np.float32)
            roi_cls_loc = roi_cls_loc.reshape((-1, self.n_class, 4))
            roi_cls = self.xp.broadcast_to(roi[:, None], roi_cls_loc.shape)
            cls_bbox = loc2bbox(roi_cls.reshape((-1, 4)),
                                roi_cls_loc.reshape((-1, 4)))
            cls_bbox = cls_bbox.reshape((-1, self.n_class * 4))
            # clip bounding box (y coords to height, x coords to width)
            cls_bbox[:, 0::2] = self.xp.clip(cls_bbox[:, 0::2], 0, size[0])
            cls_bbox[:, 1::2] = self.xp.clip(cls_bbox[:, 1::2], 0, size[1])
            # clip roi
            roi[:, 0::2] = self.xp.clip(roi[:, 0::2], 0, size[0])
            roi[:, 1::2] = self.xp.clip(roi[:, 1::2], 0, size[1])
            raw_cls_bbox = cuda.to_cpu(cls_bbox)
            raw_prob = cuda.to_cpu(prob)
            bbox, label, score, assign = self._suppress(raw_cls_bbox, raw_prob)
            # Drop degenerate boxes whose integer area collapses to zero.
            bbox_int = bbox.astype(np.int32)
            bbox_sizes = ((bbox_int[:, 2] - bbox_int[:, 0]) *
                          (bbox_int[:, 3] - bbox_int[:, 1]))
            keep = bbox_sizes > 0
            bbox = bbox[keep]
            label = label[keep]
            score = score[keep]
            assign = assign[keep]
            if self._detections_per_im > 0:
                # Keep only the highest-scoring detections for this image.
                indices = np.argsort(score)
                keep = indices >= (len(indices) - self._detections_per_im)
                bbox = bbox[keep]
                label = label[keep]
                score = score[keep]
                assign = assign[keep]
            bboxes.append(bbox)
            labels.append(label)
            scores.append(score)
            assigns.append(assign)
        return bboxes, labels, scores, assigns
    def _to_roi_masks(self, h, bboxes, roi_indices, scales):
        """Run the mask branch of the head on the final detections.

        The already-decided bounding boxes (in original-image coordinates)
        are rescaled back to feature-map coordinates and fed to the head as
        ROIs with ``pred_bbox=False`` so only masks are computed.

        Returns a list of per-image mask score arrays (one per batch item).
        """
        batch_size = h.shape[0]
        bboxes = np.concatenate(bboxes, axis=0)
        if bboxes.size == 0:
            # No detections anywhere in the batch: return empty mask arrays
            # with the shape downstream code expects.
            n_fg_class = self.n_class - 1
            mask_size = self.head.mask_size
            return [
                np.zeros((0, n_fg_class, mask_size, mask_size),
                         dtype=np.float32)
                for _ in range(batch_size)
            ]
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            # use predicted bbox as rois
            rois = bboxes * scales[roi_indices][:, None]
            rois = self.xp.asarray(rois, dtype=np.float32)
            _, _, roi_masks = self.head(
                x=h,
                rois=rois,
                roi_indices=self.xp.asarray(roi_indices),
                pred_bbox=False,
            )
            roi_masks = cuda.to_cpu(roi_masks.array)
        # Split the flat mask array back into one entry per batch image.
        return [roi_masks[roi_indices == i] for i in range(batch_size)]
    def _to_masks(self, bboxes, labels, scores, roi_masks, sizes):
        """Paste fixed-size ROI mask scores into full-image label masks.

        For each detection, the ROI mask channels for its predicted class
        are resized to the box's pixel extent and argmax'd over the mask
        classes; the result is written into an int32 image-sized canvas.

        Returns a list of ``(n_instance, H, W)`` int32 arrays, one per image.
        """
        masks = []
        for bbox, label, score, roi_mask, size in \
                zip(bboxes, labels, scores, roi_masks, sizes):
            n_fg_class = self.n_class - 1
            n_instance, n_fg_class_x_n_mask_class, roi_H, roi_W = \
                roi_mask.shape
            # Channel dim packs (mask class x foreground class); recover it.
            n_mask_class = n_fg_class_x_n_mask_class // n_fg_class
            roi_mask = roi_mask.reshape(
                (n_instance, n_mask_class, n_fg_class, roi_H, roi_W)
            )
            # Pick each instance's own predicted class channel.
            roi_mask = roi_mask[np.arange(n_instance), :, label, :, :]
            # -> (n_instance, roi_H, roi_W, n_mask_class) for cv2.resize.
            roi_mask = roi_mask.transpose(0, 2, 3, 1)
            mask = np.zeros((n_instance, size[0], size[1]), dtype=np.int32)
            bbox_int = bbox.astype(np.int32)
            for i in range(n_instance):
                y1, x1, y2, x2 = bbox_int[i]
                roi_H, roi_W = y2 - y1, x2 - x1
                # cv2.resize takes (width, height).
                mask_score = cv2.resize(roi_mask[i], (roi_W, roi_H))
                mask[i, y1:y2, x1:x2] = np.argmax(mask_score, axis=2)
            masks.append(mask)
        return masks
    def predict(self, imgs):
        """Detect objects and instance masks in raw images.

        Runs the whole inference pipeline: preprocessing, feature
        extraction, RPN, bbox head, suppression, then a second head pass
        that computes masks only for the surviving detections.

        :param imgs: iterable of CHW images.
        :return: ``(bboxes, masks, labels, scores)`` lists, one entry per
            input image.
        """
        imgs, sizes, scales = self.prepare(imgs)
        batch = list(zip(imgs, scales))
        x, scales = concat_examples(batch, padding=0)
        x = self.xp.asarray(x)
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            h = self.extractor(x)
            rpn_locs, rpn_scores, rois, roi_indices, anchor = self.rpn(
                h, x.shape[2:], scales,
            )
            # First head pass: bboxes/scores only (masks deferred).
            roi_cls_locs, roi_scores, _ = self.head(
                h, rois, roi_indices, pred_mask=False,
            )
        bboxes, labels, scores, assigns = self._to_bboxes(
            roi_cls_locs, roi_scores, rois, roi_indices, sizes, scales,
        )
        # Re-derive the batch index of each surviving detection from the
        # per-image assignment indices produced by _suppress.
        roi_indices = cuda.to_cpu(roi_indices)
        roi_indices = np.concatenate([
            roi_indices[roi_indices == i][assign]
            for i, assign in enumerate(assigns)
        ], axis=0)
        # Second head pass computes masks only for the final detections.
        roi_masks = self._to_roi_masks(h, bboxes, roi_indices, scales)
        masks = self._to_masks(bboxes, labels, scores, roi_masks, sizes)
        return bboxes, masks, labels, scores
|
|
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Proxy
"""
import json
from oslo_log import log
from oslo_service import loopingcall
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume.drivers.zfssa import restclient
from cinder.volume.drivers.zfssa import webdavclient
LOG = log.getLogger(__name__)
def factory_restclient(url, **kwargs):
    """Construct and return a ``RestClientURL`` for *url*.

    Extra keyword arguments (e.g. ``timeout``) are forwarded unchanged.
    """
    return restclient.RestClientURL(url, **kwargs)
class ZFSSAApi(object):
    """ZFSSA API proxy class.

    Thin wrapper around the appliance REST API: every method issues one
    or more REST calls via ``self.rclient`` and raises
    ``exception.VolumeBackendAPIException`` (or a more specific Cinder
    exception) on unexpected status codes.
    """
    def __init__(self):
        self.host = None
        self.url = None
        self.rclient = None
    def __del__(self):
        # Best-effort logout when the proxy is garbage collected.
        if self.rclient and self.rclient.islogin():
            self.rclient.logout()
    def _is_pool_owned(self, pdata):
        """Returns True if the pool's owner is the same as the host."""
        svc = '/api/system/v1/version'
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error getting version: '
                               'svc: %(svc)s.'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'svc': svc,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        vdata = json.loads(ret.data)
        return vdata['version']['asn'] == pdata['pool']['asn'] and \
            vdata['version']['nodename'] == pdata['pool']['owner']
    def set_host(self, host, timeout=None):
        """Bind this proxy to an appliance host and build its REST client."""
        self.host = host
        self.url = "https://" + self.host + ":215"
        self.rclient = factory_restclient(self.url, timeout=timeout)
    def login(self, auth_str):
        """Login to the appliance"""
        if self.rclient and not self.rclient.islogin():
            self.rclient.login(auth_str)
    def logout(self):
        """Log out of the appliance."""
        self.rclient.logout()
    def verify_service(self, service, status='online'):
        """Checks whether a service is online or not"""
        svc = '/api/service/v1/services/' + service
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Service: %(service)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'service': service,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        data = json.loads(ret.data)['service']
        # NOTE: the appliance reports the service state under the literal
        # key '<status>' in its JSON payload.
        if data['<status>'] != status:
            exception_msg = (_('%(service)s Service is not %(status)s '
                               'on storage appliance: %(host)s')
                             % {'service': service,
                                'status': status,
                                'host': self.host})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def get_asn(self):
        """Returns appliance asn."""
        svc = '/api/system/v1/version'
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error getting appliance version details. '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val['version']['asn']
    def get_replication_targets(self):
        """Returns all replication targets configured on the appliance."""
        svc = '/api/storage/v1/replication/targets'
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error getting replication target details. '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val
    def edit_inherit_replication_flag(self, pool, project, volume, set=True):
        """Edit the inherit replication flag for volume."""
        svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s'
               '/filesystems/%(volume)s/replication'
               % {'pool': pool,
                  'project': project,
                  'volume': volume})
        arg = {'inherited': set}
        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error setting replication inheritance '
                               'to %(set)s '
                               'for volume: %(vol)s '
                               'project %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'set': set,
                                'project': project,
                                'vol': volume,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def create_replication_action(self, host_pool, host_project, tgt_name,
                                  tgt_pool, volume):
        """Create a replication action."""
        arg = {'pool': host_pool,
               'project': host_project,
               'target_pool': tgt_pool,
               'target': tgt_name}
        if volume is not None:
            arg.update({'share': volume})
        svc = '/api/storage/v1/replication/actions'
        ret = self.rclient.post(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Creating replication action on: '
                               'pool: %(pool)s '
                               'Project: %(proj)s '
                               'volume: %(vol)s '
                               'for target: %(tgt)s and pool: %(tgt_pool)s'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'pool': host_pool,
                                'proj': host_project,
                                'vol': volume,
                                'tgt': tgt_name,
                                'tgt_pool': tgt_pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val['action']['id']
    def delete_replication_action(self, action_id):
        """Delete a replication action."""
        svc = '/api/storage/v1/replication/actions/%s' % action_id
        ret = self.rclient.delete(svc)
        if ret.status != restclient.Status.NO_CONTENT:
            exception_msg = (_('Error Deleting '
                               'replication action: %(id)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'id': action_id,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def send_repl_update(self, action_id):
        """Send replication update

        Send replication update to the target appliance and then wait for
        it to complete.
        """
        svc = '/api/storage/v1/replication/actions/%s/sendupdate' % action_id
        ret = self.rclient.put(svc)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error sending replication update '
                               'for action id: %(id)s . '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'id': action_id,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        def _loop_func():
            # Poll the action until 'last_result' is success; raising
            # LoopingCallDone stops the looping call cleanly.
            svc = '/api/storage/v1/replication/actions/%s' % action_id
            ret = self.rclient.get(svc)
            if ret.status != restclient.Status.OK:
                exception_msg = (_('Error getting replication action: %(id)s. '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'id': action_id,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
            val = json.loads(ret.data)
            if val['action']['last_result'] == 'success':
                raise loopingcall.LoopingCallDone()
            elif (val['action']['last_result'] == '<unknown>' and
                    val['action']['state'] == 'sending'):
                # Still in progress; keep polling.
                pass
            else:
                exception_msg = (_('Error sending replication update. '
                                   'Returned error: %(err)s. '
                                   'Action: %(id)s.')
                                 % {'err': val['action']['last_result'],
                                    'id': action_id})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
        timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
        timer.start(interval=5).wait()
    def get_replication_source(self, asn):
        """Return the replication source json which has a matching asn."""
        svc = "/api/storage/v1/replication/sources"
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error getting replication source details. '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        for source in val['sources']:
            if source['asn'] == asn:
                return source
        return None
    def sever_replication(self, package, src_name, project=None):
        """Sever Replication at the destination.

        This method will sever the package and move the volume to a project,
        if project name is not passed in then the package name is selected
        as the project name
        """
        svc = ('/api/storage/v1/replication/sources/%(src)s/packages/%(pkg)s'
               '/sever' % {'src': src_name, 'pkg': package})
        if not project:
            project = package
        arg = {'projname': project}
        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error severing the package: %(package)s '
                               'from source: %(src)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'package': package,
                                'src': src_name,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def move_volume(self, pool, project, volume, tgt_project):
        """Move a LUN from one project to another within the same pool."""
        svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s'
               '/filesystems/%(volume)s' % {'pool': pool,
                                            'project': project,
                                            'volume': volume})
        arg = {'project': tgt_project}
        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error moving volume: %(vol)s '
                               'from source project: %(src)s '
                               'to target project: %(tgt)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'vol': volume,
                                'src': project,
                                'tgt': tgt_project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def delete_project(self, pool, project):
        """Delete a project."""
        svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' %
               {'pool': pool,
                'project': project})
        ret = self.rclient.delete(svc)
        if ret.status != restclient.Status.NO_CONTENT:
            exception_msg = (_('Error Deleting '
                               'project: %(project)s '
                               'on pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'project': project,
                                'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def get_pool_stats(self, pool):
        """Get pool stats.

        Get space available and total properties of a pool
        returns (avail, total).
        """
        svc = '/api/storage/v1/pools/' + pool
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting Pool Stats: '
                               'Pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.InvalidVolume(reason=exception_msg)
        val = json.loads(ret.data)
        if not self._is_pool_owned(val):
            LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
                          'by %(host)s.'),
                      {'pool': pool, 'host': self.host})
            raise exception.InvalidInput(reason=pool)
        avail = val['pool']['usage']['available']
        total = val['pool']['usage']['total']
        return avail, total
    def create_project(self, pool, project, compression=None, logbias=None):
        """Create a project on a pool.

        Check first whether the pool exists.
        """
        self.verify_pool(pool)
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # Project does not exist yet; create it.
            svc = '/api/storage/v1/pools/' + pool + '/projects'
            arg = {
                'name': project
            }
            if compression and compression != '':
                arg.update({'compression': compression})
            if logbias and logbias != '':
                arg.update({'logbias': logbias})
            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating Project: '
                                   '%(project)s on '
                                   'Pool: %(pool)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'project': project,
                                    'pool': pool,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
    def create_initiator(self, initiator, alias, chapuser=None,
                         chapsecret=None):
        """Create an iSCSI initiator."""
        svc = '/api/san/v1/iscsi/initiators/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # Initiator alias not found; create it.
            svc = '/api/san/v1/iscsi/initiators'
            arg = {
                'initiator': initiator,
                'alias': alias
            }
            if chapuser and chapuser != '' and chapsecret and chapsecret != '':
                arg.update({'chapuser': chapuser,
                            'chapsecret': chapsecret})
            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating Initiator: '
                                   '%(initiator)s on '
                                   'Alias: %(alias)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiator': initiator,
                                    'alias': alias,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
    def add_to_initiatorgroup(self, initiator, initiatorgroup):
        """Add an iSCSI initiator to initiatorgroup"""
        svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # Group does not exist: create it with this initiator.
            svc = '/api/san/v1/iscsi/initiator-groups'
            arg = {
                'name': initiatorgroup,
                'initiators': [initiator]
            }
            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Adding Initiator: '
                                   '%(initiator)s on group'
                                   'InitiatorGroup: %(initiatorgroup)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiator': initiator,
                                    'initiatorgroup': initiatorgroup,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
        else:
            # Group exists: append the initiator unless already present.
            val = json.loads(ret.data)
            inits = val['group']['initiators']
            if inits is None:
                exception_msg = (_('Error Getting Initiators: '
                                   'InitiatorGroup: %(initiatorgroup)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiatorgroup': initiatorgroup,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
            if initiator in inits:
                return
            inits.append(initiator)
            svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
            arg = {
                'initiators': inits
            }
            ret = self.rclient.put(svc, arg)
            if ret.status != restclient.Status.ACCEPTED:
                exception_msg = (_('Error Adding Initiator: '
                                   '%(initiator)s on group'
                                   'InitiatorGroup: %(initiatorgroup)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiator': initiator,
                                    'initiatorgroup': initiatorgroup,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
    def create_target(self, alias, interfaces=None, tchapuser=None,
                      tchapsecret=None):
        """Create an iSCSI target.

        :param interfaces: an array with network interfaces
        :param tchapuser, tchapsecret: target's chapuser and chapsecret
        :returns: target iqn
        """
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # Target alias not found; create it.
            svc = '/api/san/v1/iscsi/targets'
            arg = {
                'alias': alias
            }
            if tchapuser and tchapuser != '' and tchapsecret and \
               tchapsecret != '':
                arg.update({'targetchapuser': tchapuser,
                            'targetchapsecret': tchapsecret,
                            'auth': 'chap'})
            if interfaces is not None and len(interfaces) > 0:
                arg.update({'interfaces': interfaces})
            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating Target: '
                                   '%(alias)s'
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'alias': alias,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val['target']['iqn']
    def get_target(self, alias):
        """Get an iSCSI target iqn."""
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting Target: '
                               '%(alias)s'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'alias': alias,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val['target']['iqn']
    def add_to_targetgroup(self, iqn, targetgroup):
        """Add an iSCSI target to targetgroup."""
        svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # Group does not exist: create it with this target.
            svccrt = '/api/san/v1/iscsi/target-groups'
            arg = {
                'name': targetgroup,
                'targets': [iqn]
            }
            ret = self.rclient.post(svccrt, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating TargetGroup: '
                                   '%(targetgroup)s with'
                                   'IQN: %(iqn)s'
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'targetgroup': targetgroup,
                                    'iqn': iqn,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
            return
        arg = {
            'targets': [iqn]
        }
        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error Adding to TargetGroup: '
                               '%(targetgroup)s with'
                               'IQN: %(iqn)s'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'targetgroup': targetgroup,
                                'iqn': iqn,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def verify_pool(self, pool):
        """Checks whether pool exists."""
        svc = '/api/storage/v1/pools/' + pool
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying Pool: '
                               '%(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def verify_project(self, pool, project):
        """Checks whether project exists."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Project: %(project)s on '
                               'Pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'project': project,
                                'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def verify_initiator(self, iqn):
        """Check whether initiator iqn exists."""
        svc = '/api/san/v1/iscsi/initiators/' + iqn
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # BUGFIX: mapping key must be 'iqn' to match %(iqn)s in the
            # format string; 'initiator' raised KeyError on the error path.
            exception_msg = (_('Error Verifying '
                               'Initiator: %(iqn)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'iqn': iqn,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def verify_target(self, alias):
        """Check whether target alias exists."""
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Target: %(alias)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'alias': alias,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
        """Create a LUN.

        specs - contains volume properties (e.g blocksize, compression).
        """
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns'
        arg = {
            'name': lun,
            'volsize': volsize,
            'targetgroup': targetgroup,
            'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'
        }
        if specs:
            arg.update(specs)
        ret = self.rclient.post(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Creating '
                               'Volume: %(lun)s '
                               'Size: %(size)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'lun': lun,
                                'size': volsize,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val
    def get_lun(self, pool, project, lun):
        """return iscsi lun properties."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + "/luns/" + lun
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting '
                               'Volume: %(lun)s on '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        ret = {
            'guid': val['lun']['lunguid'],
            'number': val['lun']['assignednumber'],
            'initiatorgroup': val['lun']['initiatorgroup'],
            'size': val['lun']['volsize'],
            'nodestroy': val['lun']['nodestroy'],
            'targetgroup': val['lun']['targetgroup']
        }
        if 'origin' in val['lun']:
            ret.update({'origin': val['lun']['origin']})
        return ret
    def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
        """Set the initiatorgroup property of a LUN."""
        if initiatorgroup == '':
            # Empty group means "mask from all initiators".
            initiatorgroup = 'com.sun.ms.vss.hg.maskAll'
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun
        arg = {
            'initiatorgroup': initiatorgroup
        }
        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            # Logged but intentionally not raised: best-effort operation.
            LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
                          '%(initiatorgroup)s Pool: %(pool)s Project: '
                          '%(project)s Return code: %(ret.status)d Message: '
                          '%(ret.data)s.'),
                      {'lun': lun,
                       'initiatorgroup': initiatorgroup,
                       'pool': pool,
                       'project': project,
                       'ret.status': ret.status,
                       'ret.data': ret.data})
    def delete_lun(self, pool, project, lun):
        """delete iscsi lun."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun
        ret = self.rclient.delete(svc)
        if ret.status != restclient.Status.NO_CONTENT:
            # Logged but intentionally not raised: best-effort operation.
            LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
                          'Project: %(project)s Return code: %(ret.status)d '
                          'Message: %(ret.data)s.'),
                      {'lun': lun,
                       'pool': pool,
                       'project': project,
                       'ret.status': ret.status,
                       'ret.data': ret.data})
    def create_snapshot(self, pool, project, lun, snapshot):
        """create snapshot."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun + '/snapshots'
        arg = {
            'name': snapshot
        }
        ret = self.rclient.post(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Creating '
                               'Snapshot: %(snapshot)s on'
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def delete_snapshot(self, pool, project, lun, snapshot):
        """delete snapshot."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun + '/snapshots/' + snapshot
        ret = self.rclient.delete(svc)
        if ret.status != restclient.Status.NO_CONTENT:
            exception_msg = (_('Error Deleting '
                               'Snapshot: %(snapshot)s on '
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def clone_snapshot(self, pool, project, lun, snapshot, clone):
        """clone snapshot."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
        arg = {
            'project': project,
            'share': clone,
            'nodestroy': True
        }
        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Cloning '
                               'Snapshot: %(snapshot)s on '
                               'Volume: %(lun)s of '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def set_lun_props(self, pool, project, lun, **kargs):
        """set lun properties."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun
        if kargs is None:
            return
        ret = self.rclient.put(svc, kargs)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error Setting props '
                               'Props: %(props)s on '
                               'Volume: %(lun)s of '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'props': kargs,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    def has_clones(self, pool, project, lun, snapshot):
        """Checks whether snapshot has clones or not."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
              project + '/luns/' + lun + '/snapshots/' + snapshot
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting '
                               'Snapshot: %(snapshot)s on '
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
        val = json.loads(ret.data)
        return val['snapshot']['numclones'] != 0
    def get_initiator_initiatorgroup(self, initiator):
        """Returns the initiator group of the initiator."""
        groups = []
        svc = "/api/san/v1/iscsi/initiator-groups"
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            msg = _('Error getting initiator groups.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        val = json.loads(ret.data)
        for initiator_group in val['groups']:
            if initiator in initiator_group['initiators']:
                groups.append(initiator_group["name"])
        if len(groups) == 0:
            LOG.debug("Initiator group not found. Attaching volume to "
                      "default initiator group.")
            groups.append('default')
        return groups
class ZFSSANfsApi(ZFSSAApi):
    """ZFSSA API proxy class for NFS driver"""
    # REST path templates for filesystem (share) resources; filled with
    # pool / project / share / snapshot names via the % operator.
    projects_path = '/api/storage/v1/pools/%s/projects'
    project_path = projects_path + '/%s'
    shares_path = project_path + '/filesystems'
    share_path = shares_path + '/%s'
    share_snapshots_path = share_path + '/snapshots'
    share_snapshot_path = share_snapshots_path + '/%s'
    # Base path for appliance service state changes.
    services_path = '/api/service/v1/services/'
    def __init__(self, *args, **kwargs):
        """Initialize the NFS proxy; WebDAV client is set later
        via ``set_webdav``."""
        super(ZFSSANfsApi, self).__init__(*args, **kwargs)
        self.webdavclient = None
    def set_webdav(self, https_path, auth_str):
        """Create the WebDAV client used for share file operations."""
        self.webdavclient = webdavclient.ZFSSAWebDAVClient(https_path,
                                                           auth_str)
def verify_share(self, pool, project, share):
"""Checks whether the share exists"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'share: %(share)s on '
'Project: %(project)s and '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'share': share,
'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot(self, pool, project, share, snapshot):
"""create snapshot of a share"""
svc = self.share_snapshots_path % (pool, project, share)
arg = {
'name': snapshot
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Snapshot: %(snapshot)s on'
'share: %(share)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def delete_snapshot(self, pool, project, share, snapshot):
"""delete snapshot of a share"""
svc = self.share_snapshot_path % (pool, project, share, snapshot)
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_('Error Deleting '
'Snapshot: %(snapshot)s on '
'Share: %(share)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot_of_volume_file(self, src_file="", dst_file=""):
src_file = '.zfs/snapshot/' + src_file
return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
method='COPY')
def delete_snapshot_of_volume_file(self, src_file=""):
return self.webdavclient.request(src_file=src_file, method='DELETE')
def create_volume_from_snapshot_file(self, src_file="", dst_file="",
method='COPY'):
return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
method=method)
def _change_service_state(self, service, state=''):
svc = self.services_path + service + '/' + state
ret = self.rclient.put(svc)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Verifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('%(service)s service state: %(data)s',
{'service': service, 'data': data})
status = 'online' if state == 'enable' else 'disabled'
if data['<status>'] != status:
exception_msg = (_('%(service)s Service is not %(status)s '
'on storage appliance: %(host)s')
% {'service': service,
'status': status,
'host': self.host})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def enable_service(self, service):
self._change_service_state(service, state='enable')
self.verify_service(service)
def disable_service(self, service):
self._change_service_state(service, state='disable')
self.verify_service(service, status='offline')
def modify_service(self, service, edit_args=None):
"""Edit service properties"""
if edit_args is None:
edit_args = {}
svc = self.services_path + service
ret = self.rclient.put(svc, edit_args)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error modifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('Modify %(service)s service '
'return data: %(data)s',
{'service': service,
'data': data})
def create_share(self, pool, project, share, args):
"""Create a share in the specified pool and project"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = self.shares_path % (pool, project)
args.update({'name': share})
ret = self.rclient.post(svc, args)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Share: %(name)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'name': share,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
else:
LOG.debug('Editing properties of a pre-existing share')
ret = self.rclient.put(svc, args)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error editing share: '
'%(share)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'share': share,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def get_share(self, pool, project, share):
"""return share properties"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Share: %(share)s on '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['filesystem']
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService, quorum
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.transactional_message_copier import TransactionalMessageCopier
from kafkatest.utils import is_int
from ducktape.tests.test import Test
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
import time
class TransactionsTest(Test):
    """Tests transactions by transactionally copying data from a source topic to
    a destination topic and killing the copy process as well as the broker
    randomly through the process. In the end we verify that the final output
    topic contains exactly one committed copy of each message in the input
    topic.
    """
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(TransactionsTest, self).__init__(test_context=test_context)
        self.input_topic = "input-topic"
        self.output_topic = "output-topic"
        self.num_brokers = 3
        # Test parameters
        self.num_input_partitions = 2
        self.num_output_partitions = 3
        self.num_seed_messages = 100000
        self.transaction_size = 750
        # The transaction timeout should be lower than the progress timeout, but at
        # least as high as the request timeout (which is 30s by default). When the
        # client is hard-bounced, progress may depend on the previous transaction
        # being aborted. When the broker is hard-bounced, we may have to wait as
        # long as the request timeout to get a `Produce` response and we do not
        # want the coordinator timing out the transaction.
        self.transaction_timeout = 40000
        self.progress_timeout_sec = 60
        self.consumer_group = "transactions-test-consumer-group"
        # ZooKeeper is only launched for the ZK quorum mode; under KRaft
        # self.zk stays None and the controller runs inside Kafka itself.
        self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None
        self.kafka = KafkaService(test_context,
                                  num_nodes=self.num_brokers,
                                  zk=self.zk,
                                  controller_num_nodes_override=1)
    def setUp(self):
        # Kafka is started later (after topic configs are set in the test
        # body); only ZooKeeper, when present, starts here.
        if self.zk:
            self.zk.start()
    def seed_messages(self, topic, num_seed_messages):
        """Produce num_seed_messages idempotent messages into *topic*.

        Returns the list of acknowledged messages.
        """
        seed_timeout_sec = 10000
        seed_producer = VerifiableProducer(context=self.test_context,
                                           num_nodes=1,
                                           kafka=self.kafka,
                                           topic=topic,
                                           message_validator=is_int,
                                           max_messages=num_seed_messages,
                                           enable_idempotence=True)
        seed_producer.start()
        # NOTE(review): the err_msg reports self.num_seed_messages while the
        # wait condition uses the num_seed_messages parameter — confirm call
        # sites always pass self.num_seed_messages so the two agree.
        wait_until(lambda: seed_producer.num_acked >= num_seed_messages,
                   timeout_sec=seed_timeout_sec,
                   err_msg="Producer failed to produce messages %d in %ds." %\
                   (self.num_seed_messages, seed_timeout_sec))
        return seed_producer.acked
    def get_messages_from_topic(self, topic, num_messages):
        """Consume and return num_messages committed messages from *topic*."""
        consumer = self.start_consumer(topic, group_id="verifying_consumer")
        return self.drain_consumer(consumer, num_messages)
    def bounce_brokers(self, clean_shutdown):
        """Restart every broker once, either gracefully or with a hard kill.

        After a hard kill we wait for the broker's session to lapse before
        restarting, so the cluster actually observes the failure.
        """
        for node in self.kafka.nodes:
            if clean_shutdown:
                self.kafka.restart_node(node, clean_shutdown = True)
            else:
                self.kafka.stop_node(node, clean_shutdown = False)
                gracePeriodSecs = 5
                if self.zk:
                    # ZK mode: deregistration is observable via the ZK ephemeral node.
                    wait_until(lambda: len(self.kafka.pids(node)) == 0 and not self.kafka.is_registered(node),
                               timeout_sec=self.kafka.zk_session_timeout + gracePeriodSecs,
                               err_msg="Failed to see timely deregistration of hard-killed broker %s" % str(node.account))
                else:
                    # KRaft mode: no ZK registration to check; sleep out the
                    # broker session timeout instead.
                    brokerSessionTimeoutSecs = 18
                    wait_until(lambda: len(self.kafka.pids(node)) == 0,
                               timeout_sec=brokerSessionTimeoutSecs + gracePeriodSecs,
                               err_msg="Failed to see timely disappearance of process for hard-killed broker %s" % str(node.account))
                    time.sleep(brokerSessionTimeoutSecs + gracePeriodSecs)
                self.kafka.start_node(node)
    def create_and_start_message_copier(self, input_topic, input_partition, output_topic, transactional_id, use_group_metadata):
        """Start one transactional copier for a single input partition."""
        message_copier = TransactionalMessageCopier(
            context=self.test_context,
            num_nodes=1,
            kafka=self.kafka,
            transactional_id=transactional_id,
            consumer_group=self.consumer_group,
            input_topic=input_topic,
            input_partition=input_partition,
            output_topic=output_topic,
            max_messages=-1,
            transaction_size=self.transaction_size,
            transaction_timeout=self.transaction_timeout,
            use_group_metadata=use_group_metadata
        )
        message_copier.start()
        wait_until(lambda: message_copier.alive(message_copier.nodes[0]),
                   timeout_sec=10,
                   err_msg="Message copier failed to start after 10 s")
        return message_copier
    def bounce_copiers(self, copiers, clean_shutdown):
        """Restart each copier three times, requiring >=20% progress first.

        The progress gate ensures a bounce actually interrupts in-flight
        transactions rather than restarting an idle process.
        """
        for _ in range(3):
            for copier in copiers:
                wait_until(lambda: copier.progress_percent() >= 20.0,
                           timeout_sec=self.progress_timeout_sec,
                           err_msg="%s : Message copier didn't make enough progress in %ds. Current progress: %s" \
                           % (copier.transactional_id, self.progress_timeout_sec, str(copier.progress_percent())))
                self.logger.info("%s - progress: %s" % (copier.transactional_id,
                                                        str(copier.progress_percent())))
                copier.restart(clean_shutdown)
    def create_and_start_copiers(self, input_topic, output_topic, num_copiers, use_group_metadata):
        """Start one copier per input partition (copier i owns partition i)."""
        copiers = []
        for i in range(0, num_copiers):
            copiers.append(self.create_and_start_message_copier(
                input_topic=input_topic,
                output_topic=output_topic,
                input_partition=i,
                transactional_id="copier-" + str(i),
                use_group_metadata=use_group_metadata
            ))
        return copiers
    def start_consumer(self, topic_to_read, group_id):
        """Start a read_committed console consumer and wait for first data."""
        consumer = ConsoleConsumer(context=self.test_context,
                                   num_nodes=1,
                                   kafka=self.kafka,
                                   topic=topic_to_read,
                                   group_id=group_id,
                                   message_validator=is_int,
                                   from_beginning=True,
                                   isolation_level="read_committed")
        consumer.start()
        # ensure that the consumer is up.
        wait_until(lambda: (len(consumer.messages_consumed[1]) > 0) == True,
                   timeout_sec=60,
                   err_msg="Consumer failed to consume any messages for %ds" %\
                   60)
        return consumer
    def drain_consumer(self, consumer, num_messages):
        """Wait for *num_messages* to arrive, stop the consumer, return them."""
        # wait until we read at least the expected number of messages.
        # This is a safe check because both failure modes will be caught:
        # 1. If we have 'num_seed_messages' but there are duplicates, then
        #    this is checked for later.
        #
        # 2. If we never reach 'num_seed_messages', then this will cause the
        #    test to fail.
        wait_until(lambda: len(consumer.messages_consumed[1]) >= num_messages,
                   timeout_sec=90,
                   err_msg="Consumer consumed only %d out of %d messages in %ds" %\
                   (len(consumer.messages_consumed[1]), num_messages, 90))
        consumer.stop()
        return consumer.messages_consumed[1]
    def copy_messages_transactionally(self, failure_mode, bounce_target,
                                      input_topic, output_topic,
                                      num_copiers, num_messages_to_copy,
                                      use_group_metadata):
        """Copies messages transactionally from the seeded input topic to the
        output topic, either bouncing brokers or clients in a hard and soft
        way as it goes.
        This method also consumes messages in read_committed mode from the
        output topic while the bounces and copy is going on.
        It returns the concurrently consumed messages.
        """
        copiers = self.create_and_start_copiers(input_topic=input_topic,
                                                output_topic=output_topic,
                                                num_copiers=num_copiers,
                                                use_group_metadata=use_group_metadata)
        concurrent_consumer = self.start_consumer(output_topic,
                                                  group_id="concurrent_consumer")
        clean_shutdown = False
        if failure_mode == "clean_bounce":
            clean_shutdown = True
        if bounce_target == "brokers":
            self.bounce_brokers(clean_shutdown)
        elif bounce_target == "clients":
            self.bounce_copiers(copiers, clean_shutdown)
        copier_timeout_sec = 120
        for copier in copiers:
            wait_until(lambda: copier.is_done,
                       timeout_sec=copier_timeout_sec,
                       err_msg="%s - Failed to copy all messages in %ds." %\
                       (copier.transactional_id, copier_timeout_sec))
        self.logger.info("finished copying messages")
        return self.drain_consumer(concurrent_consumer, num_messages_to_copy)
    def setup_topics(self):
        """Declare input/output topics (RF=3, min.isr=2) on the Kafka service."""
        self.kafka.topics = {
            self.input_topic: {
                "partitions": self.num_input_partitions,
                "replication-factor": 3,
                "configs": {
                    "min.insync.replicas": 2
                }
            },
            self.output_topic: {
                "partitions": self.num_output_partitions,
                "replication-factor": 3,
                "configs": {
                    "min.insync.replicas": 2
                }
            }
        }
    @cluster(num_nodes=9)
    @matrix(failure_mode=["hard_bounce", "clean_bounce"],
            bounce_target=["brokers", "clients"],
            check_order=[True, False],
            use_group_metadata=[True, False])
    def test_transactions(self, failure_mode, bounce_target, check_order, use_group_metadata, metadata_quorum=quorum.all):
        security_protocol = 'PLAINTEXT'
        self.kafka.security_protocol = security_protocol
        self.kafka.interbroker_security_protocol = security_protocol
        self.kafka.logs["kafka_data_1"]["collect_default"] = True
        self.kafka.logs["kafka_data_2"]["collect_default"] = True
        self.kafka.logs["kafka_operational_logs_debug"]["collect_default"] = True
        if check_order:
            # To check ordering, we simply create input and output topics
            # with a single partition.
            # We reduce the number of seed messages to copy to account for the fewer output
            # partitions, and thus lower parallelism. This helps keep the test
            # time shorter.
            self.num_seed_messages = self.num_seed_messages // 3
            self.num_input_partitions = 1
            self.num_output_partitions = 1
        self.setup_topics()
        self.kafka.start()
        input_messages = self.seed_messages(self.input_topic, self.num_seed_messages)
        concurrently_consumed_messages = self.copy_messages_transactionally(
            failure_mode, bounce_target, input_topic=self.input_topic,
            output_topic=self.output_topic, num_copiers=self.num_input_partitions,
            num_messages_to_copy=self.num_seed_messages, use_group_metadata=use_group_metadata)
        output_messages = self.get_messages_from_topic(self.output_topic, self.num_seed_messages)
        # Exactly-once check: sets must match and no duplicates may exist in
        # either the final drain or the concurrently-consumed stream.
        concurrently_consumed_message_set = set(concurrently_consumed_messages)
        output_message_set = set(output_messages)
        input_message_set = set(input_messages)
        num_dups = abs(len(output_messages) - len(output_message_set))
        num_dups_in_concurrent_consumer = abs(len(concurrently_consumed_messages)
                                              - len(concurrently_consumed_message_set))
        assert num_dups == 0, "Detected %d duplicates in the output stream" % num_dups
        assert input_message_set == output_message_set, "Input and output message sets are not equal. Num input messages %d. Num output messages %d" %\
            (len(input_message_set), len(output_message_set))
        assert num_dups_in_concurrent_consumer == 0, "Detected %d dups in concurrently consumed messages" % num_dups_in_concurrent_consumer
        assert input_message_set == concurrently_consumed_message_set, \
            "Input and concurrently consumed output message sets are not equal. Num input messages: %d. Num concurrently_consumed_messages: %d" %\
            (len(input_message_set), len(concurrently_consumed_message_set))
        if check_order:
            assert input_messages == sorted(input_messages), "The seed messages themselves were not in order"
            assert output_messages == input_messages, "Output messages are not in order"
            assert concurrently_consumed_messages == output_messages, "Concurrently consumed messages are not in order"
|
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.scenarios.storage.storperf.StorPerf
import mock
import unittest
import requests
import json
from yardstick.benchmark.scenarios.storage import storperf
def mocked_requests_config_post(*args, **kwargs):
    """Fake requests.post for the configurations API: stack not yet created."""
    class MockResponseConfigPost:
        def __init__(self, json_data, status_code):
            self.content = json_data
            self.status_code = status_code
    body = ('{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622"'
            ',"stack_created": "false"}')
    return MockResponseConfigPost(body, 200)
def mocked_requests_config_get(*args, **kwargs):
    """Fake requests.get for the configurations API: stack is created."""
    class MockResponseConfigGet:
        def __init__(self, json_data, status_code):
            self.content = json_data
            self.status_code = status_code
    body = ('{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622"'
            ',"stack_created": "true"}')
    return MockResponseConfigGet(body, 200)
def mocked_requests_job_get(*args, **kwargs):
    """Fake requests.get for the jobs API: returns one metric sample."""
    class MockResponseJobGet:
        def __init__(self, json_data, status_code):
            self.content = json_data
            self.status_code = status_code
    metrics = '{"_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
    return MockResponseJobGet(metrics, 200)
def mocked_requests_job_post(*args, **kwargs):
    """Fake requests.post for the jobs API: returns a new job id."""
    class MockResponseJobPost:
        def __init__(self, json_data, status_code):
            self.content = json_data
            self.status_code = status_code
    # The backslash continuation keeps the following line's leading spaces
    # inside the string literal; harmless since the JSON parser ignores them.
    return MockResponseJobPost('{"job_id": \
             "d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
def mocked_requests_job_delete(*args, **kwargs):
    """Fake requests.delete for the jobs API: empty success body."""
    class MockResponseJobDelete:
        def __init__(self, json_data, status_code):
            self.content = json_data
            self.status_code = status_code
    empty_body = '{}'
    return MockResponseJobDelete(empty_body, 200)
def mocked_requests_delete(*args, **kwargs):
    """Fake requests.delete that always succeeds with an empty body.

    Note: unlike the job mocks above, this one stores the body on
    .json_data rather than .content.
    """
    class MockResponseDelete:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code
    empty_body = '{}'
    return MockResponseDelete(empty_body, 200)
def mocked_requests_delete_failed(*args, **kwargs):
    """Fake requests.delete simulating a failed teardown.

    Returns 400 for the configurations endpoint and 404 for anything else.
    """
    class MockResponseDeleteFailed:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code
    is_config_endpoint = (
        args[0] == "http://172.16.0.137:5000/api/v1.0/configurations")
    if is_config_endpoint:
        return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
    return MockResponseDeleteFailed('{}', 404)
class StorPerfTestCase(unittest.TestCase):
    """Unit tests for the StorPerf scenario driver.

    All HTTP traffic to the StorPerf service is replaced by the
    mocked_requests_* fakes above, so no network access occurs.
    """
    def setUp(self):
        # Minimal scenario context; only the host entry is consulted.
        self.ctx = {
            'host': {
                'ip': '172.16.0.137',
                'user': 'cirros',
                'key_filename': "mykey.key"
            }
        }
        self.result = {}
    def _make_scenario(self):
        """Build a StorPerf scenario with the option set shared by all tests.

        Previously this options/args boilerplate was duplicated in every
        test method.
        """
        options = {
            "agent_count": 8,
            "public_network": 'ext-net',
            "volume_size": 10,
            "block_sizes": 4096,
            "queue_depths": 4,
            "workload": "rs",
            "StorPerf_ip": "192.168.23.2",
            "query_interval": 10,
            "timeout": 60
        }
        args = {
            "options": options
        }
        return storperf.StorPerf(args, self.ctx)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
                side_effect=mocked_requests_config_post)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
                side_effect=mocked_requests_config_get)
    def test_successful_setup(self, mock_post, mock_get):
        s = self._make_scenario()
        s.setup()
        self.assertTrue(s.setup_done)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
                side_effect=mocked_requests_job_post)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
                side_effect=mocked_requests_job_get)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.delete',
                side_effect=mocked_requests_job_delete)
    def test_successful_run(self, mock_post, mock_get, mock_delete):
        s = self._make_scenario()
        s.setup_done = True
        sample_output = '{"_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
        expected_result = json.loads(sample_output)
        s.run(self.result)
        self.assertEqual(self.result, expected_result)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.delete', side_effect=mocked_requests_delete)
    def test_successful_teardown(self, mock_delete):
        s = self._make_scenario()
        s.teardown()
        self.assertFalse(s.setup_done)
    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.delete', side_effect=mocked_requests_delete_failed)
    def test_failed_teardown(self, mock_delete):
        s = self._make_scenario()
        # Bug fix: the original wrote assertRaises(AssertionError,
        # s.teardown(), self.result), which invoked teardown() eagerly —
        # any AssertionError escaped before assertRaises could catch it,
        # and the extra self.result argument was meaningless.  Pass the
        # bound method itself so assertRaises performs the call.
        self.assertRaises(AssertionError, s.teardown)
def main():
    """Discover and run this module's test cases via unittest."""
    unittest.main()
if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python
import sys
sys.path.append("../build/")
import phisSchema
import pyxb
import warnings
# Strategy:
# Perhaps cleanest would be to build a separate interface for data that may vary from VFB.
# This also allows separation of Jython code
# OTOH - this gives another layer of mappings to maintain.
# Sketch of interface:
# minimal vars to set (for now):
# image_id, image URL, source links; expressed feature (+ its type - gene or transgene); classification of struc & overlapped region
# Generator functions live outside the classes. They generate objects that must then be bound.
def gen_OntologyTerm(id_name, ID):
    """Build a phisSchema.OntologyTerm from a term lookup.

    id_name -- dict mapping ontology-term IDs to their labels
    ID      -- the term ID to resolve; raises KeyError if unknown
    """
    term = phisSchema.OntologyTerm()
    term.termId = ID
    term.termLabel = id_name[ID]
    return term
def gen_Link(display_name, url):
    """Takes display_name and URI as args and returns a phisSchema.Link object"""
    # Renamed the local from 'gen_Link' to 'link': the original shadowed
    # the function's own name, which is confusing (and would break any
    # future recursive/self-referential use).
    link = phisSchema.Link()
    link.display_name = display_name
    link.url = url
    return link
def gen_Annotation(ot, text, mode):
    """Build a phisSchema.Annotation.

    ot   -- phisSchema.OntologyTerm to attach
    text -- free-text annotation
    mode -- annotation mode string ('Manual'/'Automated')
    """
    ann = phisSchema.Annotation()
    ann.ontology_term = ot
    ann.annotation_freetext = text
    ann.annotationMode = mode
    return ann
def gen_roi_Coordinates(x, y, z):
    """Generate a phisSchema.Coordinates object for an roi.

    Each of x, y, z specifies a percent range as a 2-element list or
    tuple.  Ranges of the wrong length only trigger a warning (matching
    the original behaviour), not an error.
    """
    # Bug fix: the original used a bare 'except:' around assert statements,
    # which swallowed *every* exception (and asserts vanish under -O).
    # Check the lengths explicitly instead.
    if not (len(x) == 2 and len(y) == 2 and len(z) == 2):
        warnings.warn("Percent arrays should have only 2 members - specifying a range.")
    coord = phisSchema.Coordinates()
    coord.x_coordinates = _gen_PercentArray(*x)
    coord.y_coordinates = _gen_PercentArray(*y)
    coord.z_coordinates = _gen_PercentArray(*z)
    return coord
def _gen_PercentArray(a, b):
    """Wrap the (a, b) percent range in a phisSchema.PercentArray."""
    pa = phisSchema.PercentArray()
    pa.extend((a, b))
    return pa
def gen_GenotypeComponent(gf_symbol=False, gf_id=False, gene_symbol=False, gene_id=False, gf_ensembl_id=False):
    ## How to specify channel. Use defaults? ###
    """Generate a phisSchema.GenotypeComponent object.
    All args are strings. Please specify each arg with a keyword.
    Only truthy arguments are copied onto the component.
    """
    gc = phisSchema.GenotypeComponent()
    # Table-driven attribute assignment: (target attribute, value) pairs.
    field_values = (
        ("gene_id", gene_id),
        ("gene_symbol", gene_symbol),
        ("genetic_feature_symbol", gf_symbol),
        ("genetic_feature_id", gf_id),
        ("genetic_feature_ensembl_id", gf_ensembl_id),
    )
    for attr, value in field_values:
        if value:
            setattr(gc, attr, value)
    return gc
class imageDataSet():
    """Shared metadata for a set of images from a common source.

    Holds the source link, the background-channel marker and the
    visualisation methods common to every image in the set; all fields
    are populated via the setter/adder methods below rather than
    constructor kwargs.
    """
    # May not be worth bothering with a class here
    def __init__(self, ont_dict):
        ### Do we have a way to distinguish general source from specific source links?
        self.doc = phisSchema.Doc()
        self.source = ''
        self.background_channel_marker = ''
        self.signal_channel_visualisation_methods = []
        self.background_channel_visualisation_methods = []
        self.ont_dict = ont_dict
    def set_source(self, source_name, source_url):
        """Record the data source as a phisSchema.Link (both args strings)."""
        self.source = gen_Link(source_name, source_url)
    def set_background_channel_marker(self, genotype_component):
        """genotype_component: a phisSchema.GenotypeComponent object."""
        self.background_channel_marker = genotype_component
    def add_signal_channel_visualisation_method(self, sfid):
        """sfid: shortFormId of an FBbi visualisation method term."""
        term = gen_OntologyTerm(self.ont_dict, sfid)
        self.signal_channel_visualisation_methods.append(term)
    def add_background_channel_visualisation_method(self, sfid):
        """sfid: shortFormId of an FBbi visualisation method term."""
        term = gen_OntologyTerm(self.ont_dict, sfid)
        self.background_channel_visualisation_methods.append(term)
class VfbImage():
"""Interface class for loading VFB data.
Assumes 3D confocal image with 2 channels -
a background stain channel and a signal channel
depicting some interesting expression/anatomy"""
# Define constants here: Or should this just jump straight to populating model?
host = gen_Link("Virtual Fly Brain", "http://www.virtualflybrain.org") # for image_description.host
def __init__(self, ont, image_dataset):
"""ont: an ID:name dict of ontology terms used in XML to be produced
d: A image_dataset object
"""
self.ont = ont
self._initialise_image()
self._unpack_image_dataset(image_dataset)
self.image.image_description.host = self.host
def _unpack_image_dataset(self, image_dataset):
self.set_source(image_dataset.source)
# self.set_signal_channel_visualisation_method(image_dataset.) # Needs extend rather than append?
# self.set_background_channel_visualisation_method(image_dataset.) # Needs extend rather than append?
self.set_expressed_feature_for_background_channel(image_dataset.background_channel_marker)
def set_organism(self, stage, sex):
"""stage must be a phisSchema.ontologyTerm object; sex must be the string 'Male' or 'Female'"""
organism = phisSchema.Organism()
organism.taxon = "Drosophila melanogaster"
organism.sex = sex
organism.ncbi_taxon_id = "NCBItaxon_7227"
organism.stage=stage
self.image.organism = organism
def _initialise_image(self):
"""Assume 2 channels each with an associated ROI at 100%.
All objects generated by multiple iterations appended to common doc.
Generate IDs for two channels and corresponding ROIs according to the scheme:
image_id-a/b roi_id-a/b; channel_id-a/b - where id = self.vfb_image_id.
channel1/roi1 = background. channel2/roi2 = signal."""
# Generate Root objects
self.image = phisSchema.Image()
self.channel1 = phisSchema.Channel()
self.channel2 = phisSchema.Channel()
self.roi1 = phisSchema.Roi()
self.roi2 = phisSchema.Roi()
# bind root objects to doc
# Which pattern??
# This doesn't work for multiple images rois: self.doc.append(image)
# Need to work on checking the more obvious self.doc.image.append(self.image)
self.doc.image.append(self.image)
self.doc.channel.append(self.channel1)
self.doc.channel.append(self.channel2)
self.doc.roi.append(self.roi1)
self.doc.roi.append(self.roi2)
# Populate IDs
self.image.id = "image_" + self.vfb_image_id
self.channel1.id = "channel_" + self.vfb_image_id + "-a"
self.channel2.id = "channel_" + self.vfb_image_id + "-b"
self.roi1.id = "roi_" + self.vfb_image_id + "-a"
self.roi2.id = "roi_" + self.vfb_image_id + "-b"
self.image.associated_roi = pyxb.BIND() # Special magic
self.image.associated_roi.el.append(self.roi1.id) # Is this correct, or should I be populating a string array and appending that?
self.image.associated_roi.el.append(self.roi2.id)
self.image.associated_channel = pyxb.BIND()
self.image.associated_channel.el.append(self.channel1.id)
self.image.associated_channel.el.append(self.channel2.id)
self.channel1.associated_image = self.image.id
self.channel2.associated_image = self.image.id
self.roi1.associated_image = self.image.id
self.roi2.associated_image = self.image.id
self.roi1.associated_channel = pyxb.BIND()
self.roi1.associated_channel.el.append(self.channel1.id)
self.roi2.associated_channel = pyxb.BIND()
self.roi2.associated_channel.el.append(self.channel2.id)
self.channel1.associated_roi = pyxb.BIND()
self.channel1.associated_roi.el.append(self.roi1.id)
self.channel2.associated_roi = pyxb.BIND()
self.channel2.associated_roi.el.append(self.roi2.id)
# both ROIs cover whole image:
self.roi1.coordinates = gen_roi_Coordinates((0,100), (0,100), (0,100))
self.roi2.coordinates = gen_roi_Coordinates((0,100), (0,100), (0,100))
self.depicted_anatomy_background = phisSchema.AnnotationArray()
self.roi1.depicted_anatomical_structure = self.depicted_anatomy_background
self.depicted_anatomy_exp_channel = phisSchema.AnnotationArray()
self.roi2.depicted_anatomical_structure = self.depicted_anatomy_exp_channel
# Expansions. Add more here as needed.
self.image_description = phisSchema.ImageDescription()
self.image.image_description = self.image_description
self.image.image_description.sample_preparation = pyxb.BIND()
self.image.image_description.imaging_method = pyxb.BIND()
# Method 1 - intermediate node and directly bind
imaging_methods = phisSchema.OntologyTermArray()
self.image.image_description.imaging_method = imaging_methods # But remember - this is only possible because of an earlier pyxB expansion
imaging_methods.append(gen_OntologyTerm(self.ont, "FBbi_00000251"))
# Method 2 - pyxB.BIND() expansion
self.image.image_description.sample_preparation = pyxb.BIND()
self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000024")) # whole mount tissue
self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000002")) # chemically fixed
# Set methods generate the relevant object and bind it.
def set_dimensions(self, x, y, z=0):
"""x, y and z are dimensions in pixels. Z is optional (default 0)"""
dimensions = phisSchema.Dimensions()
dimensions.image_width = x
dimensions.image_height = y
dimensions.image_depth = z
self.image_description.image_dimensions = dimensions
def set_image_and_sample_type(self, wt_or_mut, exp_anat_phen):
self.image.image_description.sample_type = "wild type"
ita = phisSchema.ImageTypeArray()
ita.append("expression") # Use Expression if depicts expression pattern - otherwise use anatomy/phenotype. Don't this there is any case for using both.
self.image.image_description.image_type = ita
def set_source(self, source):
"""source must be a phisSchema.Link object.
Assumes source of image and organism are the same."""
self.image.image_description.image_generated_by = source
self.image.image_description.organism_generated_by = source
def set_background_channel_visualisation_method(self, sfid):
self.channel2.visualisation_method = pyxb.BIND()
self.channel2.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))
def set_signal_channel_visualisation_method(self, sfid):
self.channel2.visualisation_method = pyxb.BIND()
self.channel2.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))
def add_background_depicted_entity(self, sfid, text, mode):
    """Append an anatomy annotation for the background channel ROI.

    By convention, background channel is always roi1.
    """
    term = gen_OntologyTerm(self.ont, sfid)
    self.depicted_anatomy_background.append(gen_Annotation(term, text, mode))
def add_depicted_anatomy_for_expressed_feature(self, sfid, text, mode):
    # Append an anatomy annotation to the expression-channel ROI list.
    # NOTE(review): the comment copied here from add_background_depicted_entity
    # claimed "background channel is always roi1", but this list is for the
    # expression channel (depicted_anatomy_exp_channel) -- confirm the ROI
    # convention for this channel.
    annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
    self.depicted_anatomy_exp_channel.append(annotation)
def set_is_expression_pattern(self, s=True):
    """Mark whether the signal channel (channel2 by convention) depicts an
    expression pattern.  The schema field is the string "Yes"/"No" rather
    than a boolean."""
    self.channel2.is_expression_pattern = "Yes" if s else "No"
def set_expressed_feature_for_signal_channel(self, genotype_component):
    """genotype_component: a phisSchema.GenotypeComponent object."""
    # Signal channel is channel2 by convention (see set_is_expression_pattern).
    self.channel2.depicts_expression_of = genotype_component
def set_expressed_feature_for_background_channel(self, genotype_component):
    """genotype_component: a phisSchema.GenotypeComponent object."""
    # Background channel is channel1 by convention.
    self.channel1.depicts_expression_of = genotype_component
def set_image_context_url(self, url):
    # Store the URL (string) under image_context_url on the image description.
    self.image.image_description.image_context_url = url
class VfbWtAdultBrainImage(VfbImage):
    """Args:
    - ont is a name_id dict lookup for ontology terms.
    - image_dataset is an imageDataSet object
    - vfb_image_id is an id string for the image
    - image_url is also a string
    Compulsory fields to set in order to generate XML:
    - set_sex("Male/Female")
    - set_is_expression_pattern(True/False)
    - add_depicted_anatomy_for_expressed_feature(ont_term)
    Other necessary fields to set for usable XML:
    - set_expressed_feature
    - set_visualisation_method
    Set by default:
    - sample prep: chemically fixed; whole mount tissue
    - imaging methods: confocal microscopy
    - image has 2 channels - one background, and one signal.
    - organism: Dmel
    - stage: adult
    - Background channel anatomy: adult brain
    - Dimensions = 512,512,512
    """
    # Consider ditching this subclass if don't find a bunch of more specific things to say. Might be better to have subclasses for neuron, clone and expression pattern
    # One doc for all images.

    def __init__(self, ont, image_dataset, vfb_image_id, image_url):
        self.ont = ont
        # all images in a dataset share one pyxb document
        self.doc = image_dataset.doc
        self.vfb_image_id = vfb_image_id
        self._initialise_image()
        self.image.image_description.image_url = image_url
        self.set_source(image_dataset.source)
        # FBdv_00005369: adult stage (per the "stage: adult" default above)
        self.stage = gen_OntologyTerm(ont, "FBdv_00005369")  # Hmmmm - global!
        self.image.image_description.host = self.host
        self.set_dimensions(512, 512, 512)
        # FBbt_00003624: adult brain (per the class docstring defaults)
        self.add_background_depicted_entity("FBbt_00003624", "background channel", "Manual")
        ita = phisSchema.ImageTypeArray()
        ita.append("expression")  # Use Expression if depicts expression pattern - otherwise use anatomy/phenotype. Don't think there is any case for using both.
        self.image.image_description.image_type = ita
        self.image.image_description.sample_type = "wild type"

    def set_sex(self, sex):
        """sex = string "Male"/"Female". Automatically sets doc.image.organism"""
        self.set_organism(self.stage, sex)
# Test
# For testing purposes. Will be autogenerated from ontology files in full run)
# Notes
# Assignment is simple - once you get all the way out to a node.
#depicted.termId = "FBbi_1234567"
#depicted.termLabel = "fubar"
#Append an instance of depicted to the list (el)
#image.depicted_anatomical_structure = pyxb.BIND()
#image.depicted_anatomical_structure.append(depicted)
# Testing
#print image.depicted_anatomical_structure.toxml()
# '<?xml version="1.0" ?><depicted_anatomical_structure><el><anatomy_ontology_id>FBbi_1234567</anatomy_ontology_id><anatomy_ontology_term>fubar</anatomy_ontology_term></el></depicted_anatomical_structure>'
# But all this feels quite verbose - can I make use of the Factory methods on some nodes to make this easier?
|
|
# pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
import numpy as np
from numpy.testing import *
from numpy.ma.testutils import *
from numpy.ma.core import *
class SubArray(np.ndarray):
    """Generic ndarray subclass that carries a metadata dict ``info``."""

    def __new__(cls, arr, info={}):
        # View the input as this class and attach a private copy of the
        # metadata so instances never share the caller's dict.
        view = np.asanyarray(arr).view(cls)
        view.info = info.copy()
        return view

    def __array_finalize__(self, obj):
        # Chain to the parent finalizer when one exists, then copy the
        # metadata from the template object (empty dict when absent).
        parent_finalize = getattr(super(SubArray, self),
                                  '__array_finalize__', None)
        if callable(parent_finalize):
            parent_finalize(obj)
        self.info = getattr(obj, 'info', {}).copy()

    def __add__(self, other):
        # Count additions in the metadata so tests can observe propagation.
        out = super(SubArray, self).__add__(other)
        out.info['added'] = out.info.get('added', 0) + 1
        return out

    def __iadd__(self, other):
        # Same bookkeeping for the in-place variant.
        out = super(SubArray, self).__iadd__(other)
        out.info['iadded'] = out.info.get('iadded', 0) + 1
        return out

subarray = SubArray
class MSubArray(SubArray, MaskedArray):
    # Masked flavour of SubArray: the underlying data is a SubArray, so the
    # `info` metadata dict is preserved through masking.

    def __new__(cls, data, info={}, mask=nomask):
        subarr = SubArray(data, info)
        _data = MaskedArray.__new__(cls, data=subarr, mask=mask)
        _data.info = subarr.info
        return _data

    def _get_series(self):
        # Plain MaskedArray view of the same data.
        # NOTE(review): _sharedmask=False appears intended to let mask
        # assignment through the view update this array in place (see
        # test_attributepropagation) -- confirm against numpy.ma internals.
        _view = self.view(MaskedArray)
        _view._sharedmask = False
        return _view

    _series = property(fget=_get_series)

msubarray = MSubArray
class MMatrix(MaskedArray, np.matrix,):
    # Masked matrix: combines MaskedArray behaviour with np.matrix semantics.

    def __new__(cls, data, mask=nomask):
        mat = np.matrix(data)
        _data = MaskedArray.__new__(cls, data=mat, mask=mask)
        return _data

    def __array_finalize__(self, obj):
        # Run both base-class finalizers explicitly.
        # NOTE(review): matrix's finalizer is invoked before MaskedArray's;
        # the ordering is presumably deliberate -- confirm before changing.
        np.matrix.__array_finalize__(self, obj)
        MaskedArray.__array_finalize__(self, obj)
        return

    def _get_series(self):
        # Plain MaskedArray view of the same data (mask not shared).
        _view = self.view(MaskedArray)
        _view._sharedmask = False
        return _view

    _series = property(fget=_get_series)

mmatrix = MMatrix
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
class CSAIterator(object):
    """Flat iterator that routes element access through the owning subclass.

    Works around ``ndarray.flat`` not propagating subclass setters/getters
    (see https://github.com/numpy/numpy/issues/4564); roughly follows
    MaskedIterator.
    """

    def __init__(self, a):
        self._original = a
        # iterate the raw buffer; wrapping back happens on access
        self._dataiter = a.view(np.ndarray).flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        item = self._dataiter.__getitem__(indx)
        if not isinstance(item, np.ndarray):
            # promote scalars to 0-d arrays before re-wrapping
            item = item.__array__()
        return item.view(type(self._original))

    def __setitem__(self, index, value):
        # delegate validation to the owning class before writing through
        self._dataiter[index] = self._original._validate_input(value)

    def __next__(self):
        return next(self._dataiter).__array__().view(type(self._original))

    next = __next__
class ComplicatedSubArray(SubArray):
    """SubArray variant that overrides str/repr/item access and only accepts
    values of its own class on assignment (so masked_print_option writes
    fail), plus an __array_wrap__ that records multiplications in ``info``."""

    def __str__(self):
        return 'myprefix {0} mypostfix'.format(self.view(SubArray))

    def __repr__(self):
        # Return a repr that does not start with 'name('
        return '<{0} {1}>'.format(self.__class__.__name__, self)

    def _validate_input(self, value):
        # Reject anything that is not already an instance of this class.
        if isinstance(value, ComplicatedSubArray):
            return value
        raise ValueError("Can only set to MySubArray values")

    def __setitem__(self, item, value):
        # validation ensures direct assignment with ndarray or
        # masked_print_option will fail
        checked = self._validate_input(value)
        super(ComplicatedSubArray, self).__setitem__(item, checked)

    def __getitem__(self, item):
        # ensure getter returns our own class, also for scalars
        result = super(ComplicatedSubArray, self).__getitem__(item)
        if isinstance(result, np.ndarray):
            return result
        return result.__array__().view(ComplicatedSubArray)

    @property
    def flat(self):
        return CSAIterator(self)

    @flat.setter
    def flat(self, value):
        flattened = self.ravel()
        flattened[:] = value

    def __array_wrap__(self, obj, context=None):
        obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context)
        if context is not None and context[0] is np.multiply:
            obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
        return obj
class TestSubclassing(TestCase):
    # Test suite for masked subclasses of ndarray.

    def setUp(self):
        # shared fixture: a plain array and its masked-matrix counterpart
        x = np.arange(5)
        mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
        self.data = (x, mx)

    def test_data_subclassing(self):
        # Tests whether the subclass is kept.
        x = np.arange(5)
        m = [0, 0, 1, 0, 0]
        xsub = SubArray(x)
        xmsub = masked_array(xsub, mask=m)
        self.assertTrue(isinstance(xmsub, MaskedArray))
        assert_equal(xmsub._data, xsub)
        self.assertTrue(isinstance(xmsub._data, SubArray))

    def test_maskedarray_subclassing(self):
        # Tests subclassing MaskedArray
        (x, mx) = self.data
        self.assertTrue(isinstance(mx._data, np.matrix))

    def test_masked_unary_operations(self):
        # Tests masked_unary_operation
        (x, mx) = self.data
        with np.errstate(divide='ignore'):
            self.assertTrue(isinstance(log(mx), mmatrix))
            assert_equal(log(x), np.log(x))

    def test_masked_binary_operations(self):
        # Tests masked_binary_operation
        (x, mx) = self.data
        # Result should be a mmatrix
        self.assertTrue(isinstance(add(mx, mx), mmatrix))
        self.assertTrue(isinstance(add(mx, x), mmatrix))
        # Result should work
        assert_equal(add(mx, x), mx+x)
        self.assertTrue(isinstance(add(mx, mx)._data, np.matrix))
        self.assertTrue(isinstance(add.outer(mx, mx), mmatrix))
        self.assertTrue(isinstance(hypot(mx, mx), mmatrix))
        self.assertTrue(isinstance(hypot(mx, x), mmatrix))

    def test_masked_binary_operations2(self):
        # Tests domained_masked_binary_operation
        (x, mx) = self.data
        xmx = masked_array(mx.data.__array__(), mask=mx.mask)
        self.assertTrue(isinstance(divide(mx, mx), mmatrix))
        self.assertTrue(isinstance(divide(mx, x), mmatrix))
        assert_equal(divide(mx, mx), divide(xmx, xmx))

    def test_attributepropagation(self):
        # Tests that the `info` metadata propagates (or not) through
        # arithmetic, in-place operations and mask assignment via views.
        x = array(arange(5), mask=[0]+[1]*4)
        my = masked_array(subarray(x))
        ym = msubarray(x)
        #
        z = (my+1)
        self.assertTrue(isinstance(z, MaskedArray))
        self.assertTrue(not isinstance(z, MSubArray))
        self.assertTrue(isinstance(z._data, SubArray))
        assert_equal(z._data.info, {})
        #
        z = (ym+1)
        self.assertTrue(isinstance(z, MaskedArray))
        self.assertTrue(isinstance(z, MSubArray))
        self.assertTrue(isinstance(z._data, SubArray))
        self.assertTrue(z._data.info['added'] > 0)
        # Test that inplace methods from data get used (gh-4617)
        ym += 1
        self.assertTrue(isinstance(ym, MaskedArray))
        self.assertTrue(isinstance(ym, MSubArray))
        self.assertTrue(isinstance(ym._data, SubArray))
        self.assertTrue(ym._data.info['iadded'] > 0)
        #
        ym._set_mask([1, 0, 0, 0, 1])
        assert_equal(ym._mask, [1, 0, 0, 0, 1])
        ym._series._set_mask([0, 0, 0, 0, 1])
        assert_equal(ym._mask, [0, 0, 0, 0, 1])
        #
        xsub = subarray(x, info={'name':'x'})
        mxsub = masked_array(xsub)
        self.assertTrue(hasattr(mxsub, 'info'))
        assert_equal(mxsub.info, xsub.info)

    def test_subclasspreservation(self):
        # Checks that masked_array(...,subok=True) preserves the class.
        x = np.arange(5)
        m = [0, 0, 1, 0, 0]
        xinfo = [(i, j) for (i, j) in zip(x, m)]
        xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
        #
        mxsub = masked_array(xsub, subok=False)
        self.assertTrue(not isinstance(mxsub, MSubArray))
        self.assertTrue(isinstance(mxsub, MaskedArray))
        assert_equal(mxsub._mask, m)
        #
        mxsub = asarray(xsub)
        self.assertTrue(not isinstance(mxsub, MSubArray))
        self.assertTrue(isinstance(mxsub, MaskedArray))
        assert_equal(mxsub._mask, m)
        #
        mxsub = masked_array(xsub, subok=True)
        self.assertTrue(isinstance(mxsub, MSubArray))
        assert_equal(mxsub.info, xsub.info)
        assert_equal(mxsub._mask, xsub._mask)
        #
        mxsub = asanyarray(xsub)
        self.assertTrue(isinstance(mxsub, MSubArray))
        assert_equal(mxsub.info, xsub.info)
        assert_equal(mxsub._mask, m)

    def test_subclass_items(self):
        """test that getter and setter go via baseclass"""
        x = np.arange(5)
        xcsub = ComplicatedSubArray(x)
        mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
        # getter should return a ComplicatedSubArray, even for single item
        # first check we wrote ComplicatedSubArray correctly
        self.assertTrue(isinstance(xcsub[1], ComplicatedSubArray))
        self.assertTrue(isinstance(xcsub[1:4], ComplicatedSubArray))
        # now that it propagates inside the MaskedArray
        self.assertTrue(isinstance(mxcsub[1], ComplicatedSubArray))
        self.assertTrue(mxcsub[0] is masked)
        self.assertTrue(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
        # also for flattened version (which goes via MaskedIterator)
        self.assertTrue(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
        self.assertTrue(mxcsub[0] is masked)
        self.assertTrue(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
        # setter should only work with ComplicatedSubArray input
        # first check we wrote ComplicatedSubArray correctly
        assert_raises(ValueError, xcsub.__setitem__, 1, x[4])
        # now that it propagates inside the MaskedArray
        assert_raises(ValueError, mxcsub.__setitem__, 1, x[4])
        assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4])
        mxcsub[1] = xcsub[4]
        mxcsub[1:4] = xcsub[1:4]
        # also for flattened version (which goes via MaskedIterator)
        assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4])
        assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4])
        mxcsub.flat[1] = xcsub[4]
        mxcsub.flat[1:4] = xcsub[1:4]

    def test_subclass_repr(self):
        """test that repr uses the name of the subclass
        and 'array' for np.ndarray"""
        x = np.arange(5)
        mx = masked_array(x, mask=[True, False, True, False, False])
        self.assertTrue(repr(mx).startswith('masked_array'))
        xsub = SubArray(x)
        mxsub = masked_array(xsub, mask=[True, False, True, False, False])
        self.assertTrue(repr(mxsub).startswith(
            'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__)))

    def test_subclass_str(self):
        """test str with subclass that has overridden str, setitem"""
        # first without override
        x = np.arange(5)
        xsub = SubArray(x)
        mxsub = masked_array(xsub, mask=[True, False, True, False, False])
        self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]')
        xcsub = ComplicatedSubArray(x)
        assert_raises(ValueError, xcsub.__setitem__, 0,
                      np.ma.core.masked_print_option)
        mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
        self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')
###############################################################################
# Allow running this test module directly.
if __name__ == '__main__':
    run_module_suite()
|
|
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import inspect
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
import pytest
from scipy import linalg
import scipy.io
import mne
from mne import pick_types, Epochs, find_events, read_events
from mne.datasets.testing import requires_testing_data
from mne.transforms import apply_trans
from mne.utils import run_tests_if_main, assert_dig_allclose
from mne.io import read_raw_fif, read_raw_kit, read_epochs_kit
from mne.io.constants import FIFF
from mne.io.kit.coreg import read_sns
from mne.io.kit.constants import KIT
from mne.io.tests.test_raw import _test_raw_reader
from mne.surface import _get_ico_surface
FILE = inspect.getfile(inspect.currentframe())
parent_dir = op.dirname(op.abspath(FILE))
data_dir = op.join(parent_dir, 'data')
sqd_path = op.join(data_dir, 'test.sqd')
sqd_umd_path = op.join(data_dir, 'test_umd-raw.sqd')
epochs_path = op.join(data_dir, 'test-epoch.raw')
events_path = op.join(data_dir, 'test-eve.txt')
mrk_path = op.join(data_dir, 'test_mrk.sqd')
mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd')
mrk3_path = op.join(data_dir, 'test_mrk_post.sqd')
elp_txt_path = op.join(data_dir, 'test_elp.txt')
hsp_txt_path = op.join(data_dir, 'test_hsp.txt')
elp_path = op.join(data_dir, 'test.elp')
hsp_path = op.join(data_dir, 'test.hsp')
data_path = mne.datasets.testing.data_path(download=False)
sqd_as_path = op.join(data_path, 'KIT', 'test_as-raw.con')
@requires_testing_data
def test_data():
    """Test reading raw kit files."""
    # wrong file type / invalid argument combinations must fail early
    pytest.raises(TypeError, read_raw_kit, epochs_path)
    pytest.raises(TypeError, read_epochs_kit, sqd_path)
    pytest.raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_txt_path)
    pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None,
                  list(range(200, 190, -1)))
    pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None,
                  list(range(167, 159, -1)), '*', 1, True)
    # check functionality
    raw_mrk = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_txt_path,
                           hsp_txt_path)
    raw_py = _test_raw_reader(read_raw_kit, input_fname=sqd_path, mrk=mrk_path,
                              elp=elp_txt_path, hsp=hsp_txt_path,
                              stim=list(range(167, 159, -1)), slope='+',
                              stimthresh=1)
    assert 'RawKIT' in repr(raw_py)
    assert_equal(raw_mrk.info['kit_system_id'], KIT.SYSTEM_NYU_2010)
    # check number/kind of channels
    assert_equal(len(raw_py.info['chs']), 193)
    kit_channels = (('kind', {FIFF.FIFFV_MEG_CH: 157, FIFF.FIFFV_REF_MEG_CH: 3,
                              FIFF.FIFFV_MISC_CH: 32, FIFF.FIFFV_STIM_CH: 1}),
                    ('coil_type', {FIFF.FIFFV_COIL_KIT_GRAD: 157,
                                   FIFF.FIFFV_COIL_KIT_REF_MAG: 3,
                                   FIFF.FIFFV_COIL_NONE: 33}))
    for label, target in kit_channels:
        # histogram of channel kinds/coil types must match the expected counts
        actual = {id_: sum(ch[label] == id_ for ch in raw_py.info['chs']) for
                  id_ in target.keys()}
        assert_equal(actual, target)
    # Test stim channel
    raw_stim = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path,
                            stim='<', preload=False)
    for raw in [raw_py, raw_stim, raw_mrk]:
        # synthesized stim channel must agree with read_stim_ch()
        stim_pick = pick_types(raw.info, meg=False, ref_meg=False,
                               stim=True, exclude='bads')
        stim1, _ = raw[stim_pick]
        stim2 = np.array(raw.read_stim_ch(), ndmin=2)
        assert_array_equal(stim1, stim2)
    # Binary file only stores the sensor channels
    py_picks = pick_types(raw_py.info, exclude='bads')
    raw_bin = op.join(data_dir, 'test_bin_raw.fif')
    raw_bin = read_raw_fif(raw_bin, preload=True)
    bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
    data_bin, _ = raw_bin[bin_picks]
    data_py, _ = raw_py[py_picks]
    # this .mat was generated using the Yokogawa MEG Reader
    data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
    data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
    data_Ykgw = data_Ykgw[py_picks]
    assert_array_almost_equal(data_py, data_Ykgw)
    py_picks = pick_types(raw_py.info, stim=True, ref_meg=False,
                          exclude='bads')
    data_py, _ = raw_py[py_picks]
    assert_array_almost_equal(data_py, data_bin)
    # KIT-UMD data
    _test_raw_reader(read_raw_kit, input_fname=sqd_umd_path)
    raw = read_raw_kit(sqd_umd_path)
    assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_UMD_2014_12)
    # check number/kind of channels
    assert_equal(len(raw.info['chs']), 193)
    for label, target in kit_channels:
        actual = {id_: sum(ch[label] == id_ for ch in raw.info['chs']) for
                  id_ in target.keys()}
        assert_equal(actual, target)
    # KIT Academia Sinica
    raw = read_raw_kit(sqd_as_path, slope='+')
    assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_AS_2008)
    assert_equal(raw.info['chs'][100]['ch_name'], 'MEG 101')
    assert_equal(raw.info['chs'][100]['kind'], FIFF.FIFFV_MEG_CH)
    assert_equal(raw.info['chs'][100]['coil_type'], FIFF.FIFFV_COIL_KIT_GRAD)
    assert_equal(raw.info['chs'][157]['ch_name'], 'MEG 158')
    assert_equal(raw.info['chs'][157]['kind'], FIFF.FIFFV_REF_MEG_CH)
    assert_equal(raw.info['chs'][157]['coil_type'],
                 FIFF.FIFFV_COIL_KIT_REF_MAG)
    assert_equal(raw.info['chs'][160]['ch_name'], 'EEG 001')
    assert_equal(raw.info['chs'][160]['kind'], FIFF.FIFFV_EEG_CH)
    assert_equal(raw.info['chs'][160]['coil_type'], FIFF.FIFFV_COIL_EEG)
    assert_array_equal(find_events(raw), [[91, 0, 2]])
def test_epochs():
    """Epochs built from a raw SQD file must match an epoched SQD file."""
    raw = read_raw_kit(sqd_path, stim=None)
    events = read_events(events_path)
    data_from_raw = Epochs(raw, events, None, tmin=0, tmax=.099,
                           baseline=None).get_data()
    data_from_file = read_epochs_kit(epochs_path, events_path).get_data()
    assert_array_equal(data_from_raw, data_from_file)
def test_raw_events():
    """Test creating stim channel from raw SQD file."""
    def expected(a, b, c, d, e, f=None):
        # Step events occur at fixed sample onsets; the trailing event at
        # sample 2000 only appears for the default (slope '-') reading.
        onsets = [269, 281, 1552, 1564]
        transitions = [(a, b), (b, c), (c, d), (d, e)]
        events = [[onset, prev, new]
                  for onset, (prev, new) in zip(onsets, transitions)]
        if f is not None:
            events.append([2000, e, f])
        return events

    # each reader configuration maps to a known sequence of stim codes
    cases = [
        (dict(), (255, 254, 255, 254, 255, 0)),
        (dict(slope='+'), (0, 1, 0, 1, 0)),
        (dict(stim='<', slope='+'), (0, 128, 0, 128, 0)),
        (dict(stim='<', slope='+', stim_code='channel'),
         (0, 160, 0, 160, 0)),
        (dict(stim=range(160, 162), slope='+', stim_code='channel'),
         (0, 160, 0, 160, 0)),
    ]
    for kwargs, codes in cases:
        raw = read_raw_kit(sqd_path, **kwargs)
        assert_array_equal(find_events(raw, output='step', consecutive=True),
                           expected(*codes))
def test_ch_loc():
    """Test raw kit loc."""
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path,
                          stim='<')
    raw_bin = read_raw_fif(op.join(data_dir, 'test_bin_raw.fif'))
    ch_py = np.array([ch['loc'] for ch in
                      raw_py._raw_extras[0]['channels'][:160]])
    # ch locs stored as m, not mm
    ch_py[:, :3] *= 1e3
    ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
    assert_array_almost_equal(ch_py, ch_sns, 2)
    # device-to-head transform must match the binary reference file
    assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
                              raw_bin.info['dev_head_t']['trans'], 4)
    for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
        if bin_ch['ch_name'].startswith('MEG'):
            # the stored ch locs have more precision than the sns.txt
            assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)
    # test when more than one marker file provided
    mrks = [mrk_path, mrk2_path, mrk3_path]
    read_raw_kit(sqd_path, mrks, elp_txt_path, hsp_txt_path, preload=False)
    # this dataset does not have the equivalent set of points :(
    # (truncate both dig lists to the shared first 8 points before comparing)
    raw_bin.info['dig'] = raw_bin.info['dig'][:8]
    raw_py.info['dig'] = raw_py.info['dig'][:8]
    assert_dig_allclose(raw_py.info, raw_bin.info)
def test_hsp_elp():
    """Test KIT usage of *.elp and *.hsp files against *.txt files."""
    raw_txt = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path)
    raw_elp = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)

    def dig_points(raw):
        # all digitizer locations stacked as an (n, 3) array
        return np.array([point['r'] for point in raw.info['dig']])

    # head points must agree between the two file formats
    pts_txt = dig_points(raw_txt)
    pts_elp = dig_points(raw_elp)
    assert_array_almost_equal(pts_elp, pts_txt, decimal=5)
    # ... and so must the device-to-head transforms
    trans_txt = raw_txt.info['dev_head_t']['trans']
    trans_elp = raw_elp.info['dev_head_t']['trans']
    assert_array_almost_equal(trans_elp, trans_txt, decimal=5)
    # ... and the head points mapped back into device space
    assert_array_almost_equal(apply_trans(linalg.inv(trans_elp), pts_elp),
                              apply_trans(linalg.inv(trans_txt), pts_txt),
                              decimal=5)
def test_decimate(tmpdir):
    """Test decimation of digitizer headshapes with too many points."""
    # build a dense icosahedral head shape; the file on disk is in mm
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.
    sphere_hsp_path = op.join(str(tmpdir), 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)
    # reading must warn that the headshape was decimated
    with pytest.warns(RuntimeWarning,
                      match='was automatically downsampled .* FastScan'):
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    # head shape recovered from raw (in m); skip the 8 fiducial/cardinal points
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]
    # with 10242 points and _decimate_points set to resolution of 5 mm,
    # hsp_dec should be a bit over 5000 points. If not, something is wrong
    # or decimation resolution has been purposefully changed
    assert len(hsp_dec) > 5000

    def mean_radius(points):
        # mean distance of each point from the cloud centroid
        offsets = points - np.mean(points, axis=0)
        return np.mean(np.sqrt(np.sum(offsets ** 2, axis=1)))

    # decimated shape should keep roughly the same radius as the original
    assert_array_almost_equal(mean_radius(hsp_m), mean_radius(hsp_dec),
                              decimal=3)
run_tests_if_main()
|
|
#
# Builder.py -- build schedule plugin
#
# Eric Jeschke (eric@naoj.org)
#
import os
from collections import OrderedDict
from datetime import timedelta, datetime
# ginga imports
from ginga.misc.Bunch import Bunch
from ginga.gw import Widgets
# local imports
from qplan.plugins import PlBase
from qplan import entity
# Gen2 imports
have_gen2 = False
try:
from g2cam.status.client import StatusClient
from g2cam.status.common import STATNONE, STATERROR
have_gen2 = True
except ImportError:
pass
class Builder(PlBase.Plugin):
def __init__(self, controller):
    """Set up plugin state and load the Gen2 status-connection settings."""
    super(Builder, self).__init__(controller)
    # whether OBs that cannot execute in the slot are included in the table
    self.show_bad = False
    # the entity.Slot built by find_executable_obs_cb (None until then)
    self.slot = None
    # Gen2 StatusClient; stays None until create_status_client() succeeds
    self.stobj = None
    # widget registry populated by build_gui()
    self.w = Bunch()
    prefs = self.controller.get_preferences()
    self.settings = prefs.create_category('plugin_Builder')
    self.settings.add_defaults(gen2_status_host='localhost',
                               gen2_status_user=None,
                               gen2_status_pass=None)
    self.settings.load(onError='silent')
def build_gui(self, container):
    """Construct the plugin UI.

    Layout: a grid of labeled text entries (date, time, interval length
    and current observing conditions), an 'Update' button, and a table of
    candidate OBs with a 'Get OBs' button beside it.
    """
    vbox = Widgets.VBox()
    vbox.set_border_width(4)
    vbox.set_spacing(2)

    gr = Widgets.GridBox()
    gr.set_column_spacing(4)
    row = 0

    def add_entry(name, label, length, tooltip, text=None):
        # Helper: one labeled TextEntry per grid row, registered in self.w
        # under `name`.  Factors out the stanza previously repeated for
        # each of the eight fields below.
        nonlocal row
        entry = Widgets.TextEntry()
        entry.set_length(length)
        gr.add_widget(Widgets.Label(label), 0, row)
        gr.add_widget(entry, 1, row)
        entry.set_tooltip(tooltip)
        if text is not None:
            entry.set_text(text)
        setattr(self.w, name, entry)
        row += 1

    add_entry('date', 'Local date', 12,
              "Local date at beginning of interval")
    add_entry('start_time', 'Start Time', 8, "Local time for interval")
    add_entry('len_time', 'Length (min)', 8,
              "Length of interval in MINUTES", text='70')
    add_entry('az', 'Az', 8, "Current azimuth of telescope", text='-90.0')
    add_entry('el', 'El', 8, "Current elevation of telescope", text='89.9')
    add_entry('filter', 'Filter', 5,
              "Currently installed filter for HSC", text='g')
    add_entry('seeing', 'Seeing', 5,
              "Current best estimate of seeing", text='1.0')
    add_entry('trans', 'Transparency', 5,
              "Current best estimate of sky transparency", text='0.85')

    btn = Widgets.Button('Update')
    btn.add_callback('activated', self.update_cb)
    btn.set_tooltip("Update time, current pointing and active filter")
    gr.add_widget(btn, 1, row)
    row += 1
    gr.add_widget(Widgets.Label(''), 1, row, stretch=4)
    vbox.add_widget(gr)

    hbox = Widgets.HBox()
    fr = Widgets.Frame("Possible OBs")
    self.tree1 = Widgets.TreeView(sortable=True,
                                  use_alt_row_color=True,
                                  selection='single')
    self.tree1.add_callback('selected', self.select_ob_cb)
    fr.set_widget(self.tree1)
    # NOTE(review): the tree itself (not the frame `fr`) is added to the
    # hbox, so the "Possible OBs" frame is never shown -- confirm intent.
    hbox.add_widget(self.tree1, stretch=1)

    vbx2 = Widgets.VBox()
    vbx2.set_border_width(4)
    vbx2.set_spacing(6)
    ## cb = Widgets.CheckBox("Show bad")
    ## cb.set_tooltip("Include OBs that cannot execute now")
    ## cb.add_callback('activated', self._toggle_show_bad)
    ## vbx2.add_widget(cb, stretch=0)
    btn = Widgets.Button("Get OBs")
    btn.set_tooltip("Find OBs that can execute within the period")
    btn.add_callback('activated', self.find_executable_obs_cb)
    vbx2.add_widget(btn, stretch=0)
    # add stretch spacer
    vbx2.add_widget(Widgets.Label(''), stretch=1)
    hbox.add_widget(vbx2, stretch=0)
    vbox.add_widget(hbox, stretch=1)
    container.add_widget(vbox, stretch=1)
def start(self):
    # Connect to the Gen2 status service, but only when a password has been
    # configured; otherwise run without live telescope status.
    gen2_pass = self.settings.get('gen2_status_pass')
    if gen2_pass is not None:
        self.create_status_client()
def find_executable_obs_cb(self, widget):
    """Callback for the 'Get OBs' button.

    Builds an entity.Slot from the date/time/conditions entered in the
    GUI, asks the scheduler which OBs could execute within it, and fills
    the table with the results (optionally including OBs that cannot
    execute, when self.show_bad is set).
    """
    self.tree1.clear()
    self.view.update_pending()
    # get a handle to the control panel plugin
    cp = self.view.get_plugin('cp')
    use_db = cp.w.use_qdb.get_state()
    cp.update_scheduler(use_db=use_db)
    sdlr = self.model.get_scheduler()

    date_s = self.w.date.get_text().strip()
    time_b = self.w.start_time.get_text().strip()
    try:
        time_start = sdlr.site.get_date("%s %s" % (date_s, time_b))
    except Exception as e:
        errmsg = 'Error parsing start date/time:: {}\n'.format(str(e))
        errmsg += "\n".join([e.__class__.__name__, str(e)])
        self.logger.error(errmsg)
        self.controller.gui_do(self.controller.show_error, errmsg,
                               raisetab=True)
        return

    # get the string for the date of observation in HST, which is what
    # is used in the Schedule table (times before 09:00 local belong to
    # the previous observing night's calendar date)
    if time_start.hour < 9:
        date_obs_local = (time_start - timedelta(hours=10)).strftime("%Y-%m-%d")
    else:
        date_obs_local = time_start.strftime("%Y-%m-%d")
    self.logger.info("observation date (local) is '{}'".format(date_obs_local))

    # find the record in the schedule table that matches our date;
    # we need to get the list of filters and so on from it
    schedule_rec = None
    for _rec in sdlr.schedule_recs:
        if _rec.date == date_obs_local:
            schedule_rec = _rec
            break
    if schedule_rec is None:
        errmsg = "Can't find a record in the Schedule table matching '{}'".format(date_obs_local)
        self.logger.error(errmsg)
        self.controller.gui_do(self.controller.show_error, errmsg,
                               raisetab=True)
        return

    len_s = self.w.len_time.get_text().strip()
    # GUI length is in minutes; Slot wants seconds (never negative)
    slot_length = max(0.0, float(len_s) * 60.0)

    data = Bunch(schedule_rec.data)
    # override some items from Schedule table with current GUI values
    data.cur_filter = self.w.filter.get_text().strip()
    data.cur_az = float(self.w.az.get_text().strip())
    data.cur_el = float(self.w.el.get_text().strip())
    data.seeing = float(self.w.seeing.get_text().strip())
    data.transparency = float(self.w.trans.get_text().strip())

    slot = entity.Slot(time_start, slot_length, data=data)
    self.slot = slot
    good, bad = sdlr.find_executable_obs(slot)

    # Table header with units
    columns = [('Best', 'index'),
               ('Program', 'program'),
               ('OB Code', 'ob_code'),
               ('Priority', 'priority'),
               ('Grade', 'grade'),
               ('Prep', 'prep'),
               ('On Source', 'time'),
               ('Target', 'target'),
               ('Filter', 'filter'),
               ('Delay', 'delay'),
               ('Reason', 'reason')]
    self.tree1.setup_table(columns, 1, 'index')

    # This is to get around table widget not sorting numbers properly
    i_fmt = '{{0:0{0}d}}'.format(len(str(len(good))))

    def make_row(i_str, qrec, group, reason):
        # One table row (Bunch) for an OB evaluation record; previously
        # duplicated for the good and bad lists.
        return Bunch(index=i_str,
                     program=qrec.ob.program.proposal,
                     ob_code=qrec.ob.name,
                     priority=qrec.ob.priority,
                     grade=qrec.ob.program.grade,
                     prep="%.2f" % (qrec.prep_sec / 60.0),
                     time="%.2f" % (qrec.ob.total_time / 60.0),
                     target=qrec.ob.target.name,
                     filter=qrec.ob.inscfg.filter,
                     delay=qrec.delay_sec / 60.0,
                     _group=group,
                     _rec=qrec,
                     reason=reason)

    # Table contents
    tree_dict = OrderedDict()
    i = 1
    for qrec in good:
        i_str = i_fmt.format(i)
        tree_dict[i_str] = make_row(i_str, qrec, good, 'OK')
        i += 1
    if self.show_bad:
        for qrec in bad:
            i_str = i_fmt.format(i)
            tree_dict[i_str] = make_row(i_str, qrec, bad,
                                        'NG: ' + qrec.reason)
            i += 1

    self.tree1.set_tree(tree_dict)
    self.tree1.set_optimal_column_widths()
def select_ob_cb(self, widget, s_dct):
    """Callback for selection changes in the OB table: turn the selected
    OB into a schedule for the current slot, or clear schedules when the
    selection is emptied."""
    sdlr = self.model.get_scheduler()
    dcts = list(s_dct.values())
    if len(dcts) == 0:
        # selection cleared
        sdlr.clear_schedules()
        return
    # single-selection tree: the first (only) entry carries the OB record
    info = dcts[0]['_rec']
    #print(info)
    schedule = sdlr.slot_to_schedule(self.slot, info)
    # set a name into the schedule to be retrieved in Report plugin
    name = "{}.{}".format(info.ob.program.proposal, info.ob.name)
    schedule.data.ope_name = name
    self.model.select_schedule(schedule)
def _toggle_show_bad(self, w, tf):
    """Checkbox callback: remember whether rejected ("bad") OBs should be
    included the next time the results table is rebuilt.

    :param w: the toggle widget (unused)
    :param tf: new boolean checkbox state
    """
    self.show_bad = tf
def update_cb(self, w):
    """Refresh the date/start-time fields to "now" (scheduler timezone)
    and, when a Gen2 status connection is configured, pull current
    telescope status into the UI.
    """
    scheduler = self.model.get_scheduler()
    current = datetime.now(tz=scheduler.timezone)
    self.w.date.set_text(current.strftime('%Y-%m-%d'))
    self.w.start_time.set_text(current.strftime('%H:%M:%S'))
    if not (have_gen2 and self.stobj is not None):
        return
    # Only fetch when a Gen2 status password was actually configured.
    if self.settings.get('gen2_status_pass') is not None:
        self.fetch_gen2_status()
def fetch_gen2_status(self):
    """Fetch current filter, Az, El from the Gen2 status service and
    update the corresponding UI fields; also update the start time info.

    When both Az and El readings are valid, pushes the telescope
    position to the SlewChart plugin.
    """
    try:
        result = {'FITS.HSC.FILTER': 0, 'TSCS.AZ': 0, 'TSCS.EL': 0,
                  'FITS.HSC.SEEING': 0, 'FITS.HSC.TRANSPARENCY': 0}
        self.stobj.fetch(result)
    except Exception as e:
        # BUGFIX: message used to name the wrong callback
        # ('update_current_conditions_cb'), which made logs misleading.
        self.logger.error('Unexpected error in fetch_gen2_status: %s' % str(e))
        return
    self.logger.info('From Gen2 current HSC filter %s Az %f El %f' % (
        result['FITS.HSC.FILTER'], result['TSCS.AZ'], result['TSCS.EL']))
    cur_filter = result['FITS.HSC.FILTER']
    if cur_filter not in (STATNONE, STATERROR, '0'):
        # Filter name special cases: strip the instrument prefix and
        # normalize band names to the scheduler's convention.
        if 'HSC-' in cur_filter:
            cur_filter = cur_filter.replace('HSC-', '')
        if cur_filter == 'Y':
            cur_filter = 'y'
        if 'NB0' in cur_filter:
            cur_filter = cur_filter.replace('NB0', 'NB')
        if 'IB0' in cur_filter:
            cur_filter = cur_filter.replace('IB0', 'IB')
        # Set all 'NB' and 'IB' filters to lower-case
        if ('NB' in cur_filter) or ('IB' in cur_filter):
            cur_filter = cur_filter.lower()
        self.logger.info('Current filter %s' % (cur_filter))
        self.w.filter.set_text(cur_filter)
    # BUGFIX: cur_az/cur_el were previously referenced unconditionally
    # below even when the corresponding status value was invalid, which
    # raised NameError.  Initialize and guard explicitly.
    cur_az = None
    cur_el = None
    if result['TSCS.AZ'] not in (STATNONE, STATERROR):
        cur_az = ('%8.2f' % result['TSCS.AZ']).strip()
        self.w.az.set_text(cur_az)
    if result['TSCS.EL'] not in (STATNONE, STATERROR):
        cur_el = ('%8.2f' % result['TSCS.EL']).strip()
        self.w.el.set_text(cur_el)
    if cur_az is not None and cur_el is not None:
        # Compensate for Subaru's funky az reading
        az, el = (float(cur_az) - 180.0) % 360.0, float(cur_el)
        slew_plt = self.view.get_plugin('SlewChart')
        slew_plt.set_telescope_position(az, el)
def create_status_client(self):
    """Instantiate the Gen2 StatusClient from plugin settings and connect."""
    host = self.settings.get('gen2_status_host')
    user = self.settings.get('gen2_status_user')
    passwd = self.settings.get('gen2_status_pass')
    self.stobj = StatusClient(host, username=user, password=passwd)
    self.stobj.reconnect()
#END
|
|
# -*- coding: utf-8 -*-
"""Helper functions and classes for Houdini.
The name of this module is inspired from the Maya module.
"""
import hou
def get_network_pane():
    """Return the first NetworkEditor pane tab in the Houdini UI."""
    pane_type = hou.paneTabType.NetworkEditor
    return hou.ui.paneTabOfType(pane_type)
def get_scene_viewer():
    """Return the first SceneViewer pane tab in the Houdini UI."""
    viewer_type = hou.paneTabType.SceneViewer
    return hou.ui.paneTabOfType(viewer_type)
def create_spare_input(node, value=""):
    """Add a ``spare_input0`` node-reference parameter to *node* and set it.

    :param hou.Node node: The node to insert the spare input to.
    :param str value: The value of the parameter.
    """
    template = hou.StringParmTemplate(
        "spare_input0",
        "Spare Input 0",
        1,
        string_type=hou.stringParmType.NodeReference,
    )
    group = node.parmTemplateGroup()
    group.append(template)
    node.setParmTemplateGroup(group)
    node.parm("spare_input0").set(value)
def very_nice_camera_rig(
    focal_length=35, horizontal_film_aperture=36, vertical_film_aperture=24
):
    """Create a camera rig with Heading, Pitch and Roll on separate nodes,
    allowing more control on the camera movement.

    Hierarchy (parent -> child):
        main_ctrl -> heading_ctrl -> pitch_ctrl -> roll_ctrl -> camera

    All user-facing parameters (focal, aperture, DOF, offsets, rotations)
    are promoted onto ``main_ctrl`` and drive the camera / nulls through
    channel expressions, so animators only touch one node.

    :param focal_length: camera focal length in mm.
    :param horizontal_film_aperture: film back width in mm.
    :param vertical_film_aperture: film back height in mm (currently
        unused -- kept for interface compatibility; TODO confirm intent).
    """
    obj_context = hou.node("/obj")
    camera = obj_context.createNode("cam")
    # set camera attributes
    camera.parm("focal").set(focal_length)
    # set the film back in millimeters (yeah)
    camera.parm("aperture").set(horizontal_film_aperture)
    main_ctrl = obj_context.createNode("null", "main_ctrl1")
    heading_ctrl = obj_context.createNode("null", "heading_ctrl1")
    pitch_ctrl = obj_context.createNode("null", "pitch_ctrl1")
    roll_ctrl = obj_context.createNode("null", "roll_ctrl1")
    # create DAG hierarchy
    camera.setInput(0, roll_ctrl)
    roll_ctrl.setInput(0, pitch_ctrl)
    pitch_ctrl.setInput(0, heading_ctrl)
    heading_ctrl.setInput(0, main_ctrl)
    # -----------------------------------------------------------
    # Promote all control parameters onto main_ctrl.
    ptg = main_ctrl.parmTemplateGroup()
    ptg.append(
        hou.FloatParmTemplate(
            "focal",
            "Focal Length",
            1,
            default_value=[focal_length],
            min=1.0,
            min_is_strict=True,
        )
    )
    ptg.append(
        hou.FloatParmTemplate(
            "aperture",
            "Aperture",
            1,
            default_value=[horizontal_film_aperture],
            min=1.0,
            min_is_strict=True,
        )
    )
    ptg.append(
        hou.ToggleParmTemplate(
            "useDepthOfField", "Use Depth Of Field", default_value=False
        )
    )
    ptg.append(
        hou.FloatParmTemplate(
            "fstop", "f-stop", 1, default_value=[2.8], min=0.01, min_is_strict=True
        )
    )
    ptg.append(
        hou.FloatParmTemplate("focusOffset", "Focus Offset", 1, default_value=[0])
    )
    ptg.append(
        hou.FloatParmTemplate("offsetX", "Offset X (PanH)", 1, default_value=[0])
    )
    ptg.append(
        hou.FloatParmTemplate("offsetY", "Offset Y (PanV)", 1, default_value=[0])
    )
    ptg.append(
        hou.FloatParmTemplate("offsetZ", "Offset Z (Depth)", 1, default_value=[0])
    )
    ptg.append(hou.FloatParmTemplate("roll1", "Roll", 1, default_value=[0]))
    ptg.append(hou.FloatParmTemplate("pitch", "Pitch", 1, default_value=[0]))
    ptg.append(hou.FloatParmTemplate("heading", "Heading", 1, default_value=[0]))
    ptg.append(hou.FloatParmTemplate("camerar", "Camera Rotation", 3))
    main_ctrl.setParmTemplateGroup(ptg)
    # NOTE: removed dead code here (an unused ``ptg.find("t")`` lookup and
    # a bare no-op ``ptg`` expression statement).
    main_ctrl.parm("focal").set(focal_length)
    main_ctrl.parm("aperture").set(horizontal_film_aperture)
    main_ctrl_name = main_ctrl.name()
    # Drive camera optics from main_ctrl.
    camera.parm("focal").setExpression('ch("../%s/focal")' % main_ctrl_name)
    camera.parm("aperture").setExpression('ch("../%s/aperture")' % main_ctrl_name)
    # Depth Of Field (presumably a Redshift camera toggle -- TODO confirm)
    camera.parm("RS_campro_dofEnable").setExpression(
        'ch("../%s/useDepthOfField")' % main_ctrl_name
    )
    # F-Stop
    camera.parm("fstop").setExpression('ch("../%s/fstop")' % main_ctrl_name)
    # Camera Local Position and Offsets
    camera.parm("tx").setExpression('ch("../%s/offsetX")' % main_ctrl_name)
    camera.parm("ty").setExpression('ch("../%s/offsetY")' % main_ctrl_name)
    camera.parm("tz").setExpression('ch("../%s/offsetZ")' % main_ctrl_name)
    # Focus distance follows the depth offset plus the user focus offset.
    camera.parm("focus").setExpression(
        'ch("../{main_ctrl}/offsetZ") + ch("../{main_ctrl}/focusOffset")'.format(
            main_ctrl=main_ctrl_name
        )
    )
    # -----------------------------------------------------------
    # Camera Orientation: one axis per intermediate null.
    roll_ctrl.parm("rz").setExpression('ch("../%s/roll1")' % main_ctrl_name)
    pitch_ctrl.parm("rx").setExpression('ch("../%s/pitch")' % main_ctrl_name)
    heading_ctrl.parm("ry").setExpression('ch("../%s/heading")' % main_ctrl_name)
    camera.parm("rx").setExpression('ch("../%s/camerarx")' % main_ctrl_name)
    camera.parm("ry").setExpression('ch("../%s/camerary")' % main_ctrl_name)
    camera.parm("rz").setExpression('ch("../%s/camerarz")' % main_ctrl_name)
    # Lock every driven transform parm so users can only animate main_ctrl
    # (replaces 36 repeated .lock(True) calls).
    for rig_node in (heading_ctrl, pitch_ctrl, roll_ctrl, camera):
        for parm_name in ("tx", "ty", "tz", "rx", "ry", "rz", "sx", "sy", "sz"):
            rig_node.parm(parm_name).lock(True)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import locale
import argparse
import json
import csv
import uuid
import os
import datetime
from filters import valid_json_filter
from functools import partial
from pyspark import SparkContext, SparkConf
def dump(x):
    """Serialize *x* to its JSON string representation.

    Thin wrapper used as the final Spark ``map`` step before saving.
    """
    serialized = json.dumps(x)
    return serialized
def make_csv(emails):
    """Write one CSV row per extracted currency entity to a fresh file.

    Each row is ``[first sender, str(recipient list), currency entity]``.
    Output goes to a uniquely named file under ``tmp/`` (the directory
    must already exist).  NOTE: the file is opened in binary mode, which
    is the correct csv idiom for Python 2 only; this function returns
    nothing, so the generated filename is not reported to the caller.

    :param emails: iterable of email dicts carrying ``senders``, ``tos``
        and ``currency_entities`` keys.
    """
    filename = "tmp/"+str(uuid.uuid4())+".csv"
    with open(filename, 'wb') as csv_file:
        csv_writer=csv.writer(csv_file)
        for email in emails:
            rows=[]
            for currency in email["currency_entities"]:
                rows.append([email["senders"][0], str(email["tos"]), currency])
            # print str(rows)
            csv_writer.writerows (rows)
# This the regex side of the extractor:
def run_regex_extraction(sample_text_str):
    """Scan *sample_text_str* for dollar amounts and return their sum.

    Delegates to :func:`scan_str_for_currency_symbols` with the ``$``
    symbol.  The earlier locale-based symbol discovery was disabled and
    its leftover ``locales`` tuple was never used, so that dead code has
    been removed.

    :param sample_text_str: text to scan.
    :returns: float total of all matched amounts.
    """
    symbol = '$'
    trans_amount_l = scan_str_for_currency_symbols(symbol, sample_text_str)
    # print-as-function works identically for a single argument on both
    # Python 2 and 3.
    print(trans_amount_l)
    return trans_amount_l
# Currently working regex (without spaces between $ and digits):
# Matches a decimal amount such as 1,234,567.89 -- exactly ONE capturing
# group (the whole amount); the inner groups are non-capturing.
_REGEX = ur'((?:0|[1-9]\d{0,3}(?:,?\d{3})*)(?:\.\d+)?)'
# _REGEX_ORIG = ur'([$])((?:0|[1-9]\d{0,3}(?:,?\d{3})*)(?:\.\d+)?)'
# Character windows searched around a numeric match: a currency symbol
# must appear within 6 chars, a transaction keyword within 25.
CURRENCY_CHAR_WINDOW = 6
TRANSACTION_CHAR_WINDOW = 25
# TODO do something better
transaction_keyword_list = ['balance', 'paid','deposit', 'withdraw']
# TODO LOAD from localization api etc
currency_key_list = ['$',"usd","us$"]
def contains_transaction_key(match, text):
    """Check whether a transaction keyword appears near a regex match.

    Examines a window of TRANSACTION_CHAR_WINDOW characters on either
    side of *match* and tests it case-insensitively against
    ``transaction_keyword_list``.

    :param match: an ``re`` match object positioned in *text*.
    :param text: the full text that was searched.
    :returns: tuple ``(found, excerpt)``.
    """
    excerpt_start = max(match.start() - TRANSACTION_CHAR_WINDOW, 0)
    # BUGFIX: was min(..., len(text)-1), which silently dropped the last
    # character of the text from the excerpt window.  Also removed four
    # unused locals (prefix/suffix/value offsets).
    excerpt_stop = min(match.end() + TRANSACTION_CHAR_WINDOW, len(text))
    excerpt = text[excerpt_start:excerpt_stop]
    # If the excerpt contains *any* of the keywords return True
    lowered = excerpt.lower()
    found = any(keyword in lowered for keyword in transaction_keyword_list)
    return (found, excerpt)
# TODO make return tuple (SYMBOL, DISTANCE) or None
# TODO distance from the currency symbol to number NOT TRUE|FALSE
def contains_currency_symbol(match, text):
    """Check whether a currency symbol/code appears near a regex match.

    Mirrors :func:`contains_transaction_key` but with the tighter
    CURRENCY_CHAR_WINDOW and ``currency_key_list``.

    :param match: an ``re`` match object positioned in *text*.
    :param text: the full text that was searched.
    :returns: tuple ``(found, excerpt)``.
    """
    excerpt_start = max(match.start() - CURRENCY_CHAR_WINDOW, 0)
    # BUGFIX: was min(..., len(text)-1), which dropped the final character
    # of the text from the excerpt window.  Unused copy-paste locals
    # removed as well.
    excerpt_stop = min(match.end() + CURRENCY_CHAR_WINDOW, len(text))
    excerpt = text[excerpt_start:excerpt_stop]
    # If the excerpt contains *any* of the keys return True
    lowered = excerpt.lower()
    found = any(key in lowered for key in currency_key_list)
    return (found, excerpt)
def currency(full_text_str):
    """Extract candidate currency amounts from *full_text_str*.

    For every numeric match of ``_REGEX``, record the match when either
    a currency symbol or a transaction keyword occurs nearby.

    :returns: list of dicts with key ``value`` plus, when present,
        ``symbol_ex`` / ``trans_ex`` context excerpts.
    """
    tagged_currency_entities = []
    text = full_text_str
    for match in re.finditer(_REGEX, text, re.MULTILINE):
        # NOTE: a redundant ``re.search(_REGEX, text)`` per iteration was
        # removed here -- its result was never used and it made the loop
        # accidentally quadratic.  An unused ``total`` counter was also
        # dropped.
        value = text[match.start():match.end()]
        found_symbol = contains_currency_symbol(match, text)
        found_transaction = contains_transaction_key(match, text)
        # TODO should NOT be OR here -- but get it working for now
        if found_symbol[0] or found_transaction[0]:
            c = {"value": value}
            if found_symbol[0]:
                c["symbol_ex"] = found_symbol[1]
            if found_transaction[0]:
                c["trans_ex"] = found_transaction[1]
            tagged_currency_entities.append(c)
    return tagged_currency_entities
def scan_str_for_currency_symbols(symbols, full_text_str):
    """Sum every numeric amount matched by ``_REGEX`` in the text.

    Repeatedly searches, accumulates float values (commas removed) and
    advances one character past each match start.

    :param symbols: currency symbol(s); currently unused -- the active
        regex matches bare numbers (TODO confirm intended behavior).
    :param full_text_str: text to scan.
    :returns: float total of all matched amounts.
    """
    text = full_text_str
    total = 0
    while True:
        regex_result = re.search(_REGEX, text)
        if regex_result is None:
            break
        start_ind = regex_result.start()
        groups = regex_result.groups()
        # BUGFIX: _REGEX has exactly one capturing group, so the previous
        # ``groups[1]`` raised IndexError on the first match; the amount
        # is in groups[0].  Debug prints removed.
        total += float(groups[0].replace(',', ''))
        text = text[start_ind + 1:]
    return total
def process_email(email):
    """Tag one email dict with its extracted currency entities.

    Concatenates body, subject and every attachment's contents, runs
    :func:`currency` over the combined text, and stores the result under
    the ``currency_entities`` key (mutating and returning *email*).
    """
    attachment_text = " ".join(attch.get("contents", "")
                               for attch in email["attachments"])
    all_the_text = (email.get("body", "") + " " +
                    email.get("subject", "") + " " + attachment_text)
    # TODO extract attachment numbers
    email["currency_entities"] = currency(all_the_text)
    return email
def process_patition(emails):
    """Lazily process every email in a Spark partition.

    (The 'patition' spelling is kept: the main block calls it by name.)
    """
    for message in emails:
        yield process_email(message)
def test():
    """Return sample text covering the extractor's edge cases.

    Includes symbol-prefixed amounts, keyword-adjacent amounts,
    thousands separators, trailing currency codes, and bare numbers
    that should be ignored.
    """
    return """I have sent $200.10 to you. That is 10 $-20.
Subtotal $1000
Total $6000
1000.23
Deposit $15,000.05
Withdraw 2,000,000,000.00
We have 2,000 lbs of limestone.
When will the 1,000,000.00 USD arrive?
Until then, we will rely on the 20,000 US$ we have remaining.
"""
if __name__ == '__main__':
    # currency(test())
    # print "DONE."
    # Command-line entry point: runs the currency extractor over a Spark
    # RDD of newline-delimited JSON emails (Python 2 / PySpark script).
    desc='currency extraction'
    parser = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=desc)
    # SPARK
    #
    parser.add_argument("input_content_path", help="input email or attachment content path")
    parser.add_argument("output_content_currency", help="output text body enriched with currency tags and possibly text locations.")
    parser.add_argument("-v", "--validate_json", action="store_true", help="Filter broken json. Test each json object and output broken objects to tmp/failed.")
    args = parser.parse_args()
    # Timestamp used to tag records emitted by the JSON-validity filter.
    lex_date = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
    print "INFO: Running with json filter {}.".format("enabled" if args.validate_json else "disabled")
    filter_fn = partial(valid_json_filter, os.path.basename(__file__), lex_date, not args.validate_json)
    conf = SparkConf().setAppName("Newman extract currency")
    sc = SparkContext(conf=conf)
    # Parse each line as JSON, tag currency entities per partition, then
    # serialize each record back to JSON text.
    rdd_emails = sc.textFile(args.input_content_path).filter(filter_fn).map(lambda x: json.loads(x))
    rdd_emails.mapPartitions(process_patition).map(dump).saveAsTextFile(args.output_content_currency)
|
|
"""
SAS distributions for polydispersity.
"""
# TODO: include dispersion docs with the disperser models
from __future__ import division, print_function
from math import sqrt # type: ignore
from collections import OrderedDict
import numpy as np # type: ignore
from scipy.special import gammaln # type: ignore
# pylint: disable=unused-import
try:
from typing import Tuple, List
from .modelinfo import ModelInfo
except ImportError:
pass
# pylint: enable=unused-import
# TODO: include dispersion docs with the disperser models
class Dispersion(object):
    """
    Base dispersion object.

    Subclasses should define *_weights(center, sigma, lb, ub)*
    which returns the x points and their corresponding weights.
    """
    type = "base disperser"
    default = dict(npts=35, width=0, nsigmas=3)

    def __init__(self, npts=None, width=None, nsigmas=None):
        defaults = self.default
        self.npts = defaults['npts'] if npts is None else int(npts)
        self.width = defaults['width'] if width is None else width
        self.nsigmas = defaults['nsigmas'] if nsigmas is None else nsigmas

    def get_pars(self):
        """
        Return the parameters to the disperser as a dictionary.
        """
        pars = {'type': self.type}
        pars.update(self.__dict__)
        return pars

    # pylint: disable=no-self-use
    def set_weights(self, values, weights):
        """
        Set the weights on the disperser if it is :class:`ArrayDispersion`.
        """
        raise RuntimeError("set_weights is only available for ArrayDispersion")

    def get_weights(self, center, lb, ub, relative):
        """
        Return the weights for the distribution.

        *center* is the center of the distribution

        *lb*, *ub* are the min and max allowed values

        *relative* is True if the distribution width is proportional to the
        center value instead of absolute.  For polydispersity use relative.
        For orientation parameters use absolute.
        """
        sigma = self.width * center if relative else self.width
        if not relative:
            # Orientation jitter is centered on zero, not on the angle.
            center = 0
        # Degenerate (delta-function) distribution: a single point, or
        # no points at all if the center falls outside the bounds.
        if sigma == 0 or self.npts < 2:
            if lb <= center <= ub:
                return np.array([center], 'd'), np.array([1.], 'd')
            return np.array([], 'd'), np.array([], 'd')
        return self._weights(center, sigma, lb, ub)

    def _weights(self, center, sigma, lb, ub):
        """actual work of computing the weights"""
        raise NotImplementedError

    def _linspace(self, center, sigma, lb, ub):
        """helper function to provide linear spaced weight points within range"""
        span = self.nsigmas * sigma
        x = center + np.linspace(-span, +span, self.npts)
        return x[(x >= lb) & (x <= ub)]
class GaussianDispersion(Dispersion):
    r"""
    Gaussian dispersion, with 1-$\sigma$ width.

    .. math::

        w = \exp\left(-\tfrac12 (x - c)^2/\sigma^2\right)
    """
    type = "gaussian"
    default = dict(npts=35, width=0, nsigmas=3)

    def _weights(self, center, sigma, lb, ub):
        # TODO: sample high probability regions more densely
        # i.e., step uniformly in cumulative density rather than x value
        # so weight = 1/Npts for all weights, but values are unevenly spaced
        points = self._linspace(center, sigma, lb, ub)
        weights = np.exp((points - center)**2 / (-2.0 * sigma * sigma))
        return points, weights
class UniformDispersion(Dispersion):
    r"""
    Uniform dispersion, with width $\sigma$.

    .. math::

        w = 1
    """
    type = "uniform"
    default = dict(npts=35, width=0, nsigmas=None)

    def _weights(self, center, sigma, lb, ub):
        points = np.linspace(center - sigma, center + sigma, self.npts)
        in_range = (points >= lb) & (points <= ub)
        x = points[in_range]
        return x, np.ones_like(x)
class RectangleDispersion(Dispersion):
    r"""
    Uniform dispersion, with width $\sqrt{3}\sigma$.

    .. math::

        w = 1
    """
    type = "rectangle"
    default = dict(npts=35, width=0, nsigmas=1.73205)

    def _weights(self, center, sigma, lb, ub):
        candidates = self._linspace(center, sigma, lb, ub)
        # Clip to the exact rectangle half-width sqrt(3)*sigma.
        half_width = np.fabs(sigma) * sqrt(3.0)
        x = candidates[np.fabs(candidates - center) <= half_width]
        return x, np.ones_like(x)
class LogNormalDispersion(Dispersion):
    r"""
    log Gaussian dispersion, with 1-$\sigma$ width.

    .. math::

        w = \frac{\exp\left(-\tfrac12 (\ln x - c)^2/\sigma^2\right)}{x\sigma}
    """
    type = "lognormal"
    default = dict(npts=80, width=0, nsigmas=8)

    def _weights(self, center, sigma, lb, ub):
        # Lognormal support is x > 0, so never sample at or below zero.
        x = self._linspace(center, sigma, max(lb, 1e-8), max(ub, 1e-8))
        # sigma in the lognormal function is in ln(R) space, thus needs converting
        log_sigma = np.fabs(sigma/center)
        z = (np.log(x) - np.log(center)) / log_sigma
        px = np.exp(-0.5 * z**2) / (x * log_sigma)
        return x, px
class SchulzDispersion(Dispersion):
    r"""
    Schultz dispersion, with 1-$\sigma$ width.

    .. math::

        w = \frac{z^z\,R^{z-1}}{e^{Rz}\,c \Gamma(z)}

    where $c$ is the center of the distribution, $R = x/c$ and $z=(c/\sigma)^2$.

    This is evaluated using logarithms as

    .. math::

        w = \exp\left(z \ln z + (z-1)\ln R - Rz - \ln c - \ln \Gamma(z) \right)
    """
    type = "schulz"
    default = dict(npts=80, width=0, nsigmas=8)

    def _weights(self, center, sigma, lb, ub):
        # Schulz support is x > 0, so never sample at or below zero.
        x = self._linspace(center, sigma, max(lb, 1e-8), max(ub, 1e-8))
        ratio = x/center
        shape = (center/sigma)**2
        # Evaluate in log space to avoid overflow in z**z and Gamma(z).
        log_w = (shape*np.log(shape) + (shape-1)*np.log(ratio)
                 - ratio*shape - np.log(center) - gammaln(shape))
        return x, np.exp(log_w)
class ArrayDispersion(Dispersion):
    r"""
    Empirical dispersion curve.

    Use :meth:`set_weights` to set $w = f(x)$.
    """
    type = "array"
    default = dict(npts=35, width=0, nsigmas=1)

    def __init__(self, npts=None, width=None, nsigmas=None):
        Dispersion.__init__(self, npts, width, nsigmas)
        # Degenerate single-point distribution until set_weights() is called.
        self.values = np.array([0.], 'd')
        self.weights = np.array([1.], 'd')

    def set_weights(self, values, weights):
        """
        Set the weights for the given x values.
        """
        self.values = np.ascontiguousarray(values, 'd')
        self.weights = np.ascontiguousarray(weights, 'd')
        self.npts = len(values)

    def _weights(self, center, sigma, lb, ub):
        # TODO: rebin the array dispersion using npts
        # TODO: use a distribution that can be recentered and scaled
        in_range = (self.values >= lb) & (self.values <= ub)
        return self.values[in_range], self.weights[in_range]
class BoltzmannDispersion(Dispersion):
    r"""
    Boltzmann dispersion, with $\sigma=k T/E$.

    .. math::

        w = \exp\left( -|x - c|/\sigma\right)
    """
    type = "boltzmann"
    default = dict(npts=35, width=0, nsigmas=3)

    def _weights(self, center, sigma, lb, ub):
        points = self._linspace(center, sigma, lb, ub)
        weights = np.exp(-np.abs(points - center) / np.abs(sigma))
        return points, weights
# dispersion name -> disperser lookup table.
# Maintain order since this is used by sasview GUI to order the options in
# the dispersion type combobox.
DISTRIBUTIONS = OrderedDict((d.type, d) for d in (
    RectangleDispersion,
    UniformDispersion,
    ArrayDispersion,
    LogNormalDispersion,
    GaussianDispersion,
    SchulzDispersion,
    BoltzmannDispersion
))
# CRUFT: deprecated old name
MODELS = DISTRIBUTIONS
# Default on-disk location for user-defined dispersion modules; can be
# overridden by the SAS_WEIGHTS_PATH environment variable (see
# load_weights below).
SAS_WEIGHTS_PATH = "~/.sasview/weights"
def load_weights(pattern=None):
    # type: (str) -> None
    """
    Load dispersion distributions matching the given glob pattern.

    Each matched file is imported as a custom kernel module and its
    ``Dispersion`` class is registered in :data:`DISTRIBUTIONS` under its
    ``type`` name.  When *pattern* is None, ``*.py`` files under
    ``$SAS_WEIGHTS_PATH`` (default ``~/.sasview/weights``) are used.
    Load failures are logged and skipped, not raised.
    """
    import logging
    import os
    import os.path
    import glob
    import traceback
    from .custom import load_custom_kernel_module
    if pattern is None:
        path = os.environ.get("SAS_WEIGHTS_PATH", SAS_WEIGHTS_PATH)
        pattern = os.path.join(path, "*.py")
    for filename in sorted(glob.glob(os.path.expanduser(pattern))):
        try:
            #print("loading weights from", filename)
            module = load_custom_kernel_module(filename)
            DISTRIBUTIONS[module.Dispersion.type] = module.Dispersion
        except Exception:
            # BUGFIX: traceback.format_exc() takes a traceback *limit*,
            # not the exception instance; passing exc was incorrect.
            logging.error(traceback.format_exc())
def get_weights(disperser, n, width, nsigmas, value, limits, relative):
    """
    Return the set of values and weights for a polydisperse parameter.

    *disperser* is the name of the disperser.

    *n* is the number of points in the weight vector.

    *width* is the width of the disperser distribution.

    *nsigmas* is the number of sigmas to span for the dispersion convolution.

    *value* is the value of the parameter in the model.

    *limits* is [lb, ub], the lower and upper bound on the possible values.

    *relative* is true if *width* is defined in proportion to the value
    of the parameter, and false if it is an absolute width.

    Returns *(value, weight)*, where *value* and *weight* are vectors.
    """
    if disperser == "array":
        raise NotImplementedError("Don't handle arrays through get_weights;"
                                  " use values and weights directly")
    lb, ub = limits
    disperser_class = DISTRIBUTIONS[disperser]
    values, weights = disperser_class(n, width, nsigmas).get_weights(
        value, lb, ub, relative)
    # Normalize so the weights sum to one.
    return values, weights/np.sum(weights)
def plot_weights(model_info, mesh):
    # type: (ModelInfo, List[Tuple[float, np.ndarray, np.ndarray]]) -> None
    """
    Plot the weights returned by :func:`get_weights`.

    *model_info* defines model parameters, etc.

    *mesh* is a list of tuples containing (*value*, *dispersity*, *weights*)
    for each parameter, where (*dispersity*, *weights*) pairs are the
    distributions to be plotted.

    Does nothing unless at least one parameter is actually dispersed.
    """
    import pylab

    has_dispersity = any(len(dispersity) > 1
                         for _, dispersity, _ in mesh)
    if not has_dispersity:
        return
    labels = [p.name for p in model_info.parameters.call_parameters]
    pylab.figure()
    for (_, x, w), label in zip(mesh, labels):
        if len(x) > 1:
            pylab.plot(x, w, '-o', label=label)
    pylab.grid(True)
    pylab.legend()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RouteFilterRulesOperations(object):
"""RouteFilterRulesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Auto-generated wiring: hold references to the service client,
    # (de)serializers and client configuration; the API version is fixed
    # for every operation in this group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self.api_version = "2017-06-01"
    self.config = config
def _delete_initial(
        self, resource_group_name, route_filter_name, rule_name, custom_headers=None, raw=False, **operation_config):
    """Issue the initial DELETE request for a route filter rule.

    Auto-generated helper used by :meth:`delete`; the long-running
    operation is polled by the caller.  Raises CloudError on any status
    other than 200/202/204; returns a ClientRawResponse only when
    *raw* is True (otherwise returns None implicitly).
    """
    # Construct URL
    url = self.delete.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id correlates client logs with service-side request logs.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200, 202, 204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def delete(
        self, resource_group_name, route_filter_name, rule_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Deletes the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the long-running delete; raw=True so the poller gets the
    # initial HTTP response to track operation status from.
    raw_result = self._delete_initial(
        resource_group_name=resource_group_name,
        route_filter_name=route_filter_name,
        rule_name=rule_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Delete returns no body; surface the raw response only on request.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, none, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}
def get(
        self, resource_group_name, route_filter_name, rule_name, custom_headers=None, raw=False, **operation_config):
    """Gets the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: RouteFilterRule or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2017_06_01.models.RouteFilterRule or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id correlates client logs with service-side request logs.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilterRule', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}
def _create_or_update_initial(
        self, resource_group_name, route_filter_name, rule_name, route_filter_rule_parameters, custom_headers=None, raw=False, **operation_config):
    # Initial PUT of the long-running create-or-update operation; the
    # public create_or_update() wraps this call in an LRO poller.
    # Construct URL
    url = self.create_or_update.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Client-generated correlation id for tracing the request.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')

    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # Any status other than 200/201 carries an ARM error payload.
    if response.status_code not in [200, 201]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilterRule', response)
    if response.status_code == 201:
        deserialized = self._deserialize('RouteFilterRule', response)

    if raw:
        # Caller asked for the raw pipeline response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def create_or_update(
        self, resource_group_name, route_filter_name, rule_name, route_filter_rule_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Creates or updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the create
     or update route filter rule operation.
    :type route_filter_rule_parameters:
     ~azure.mgmt.network.v2017_06_01.models.RouteFilterRule
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns RouteFilterRule or
     ClientRawResponse<RouteFilterRule> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.RouteFilterRule]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_06_01.models.RouteFilterRule]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial PUT; raw=True so the poller gets the live response.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        route_filter_name=route_filter_name,
        rule_name=rule_name,
        route_filter_rule_parameters=route_filter_rule_parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the terminal response of the LRO into the model.
        deserialized = self._deserialize('RouteFilterRule', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    # Poll interval: per-call override, falling back to client config.
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}
def _update_initial(
        self, resource_group_name, route_filter_name, rule_name, route_filter_rule_parameters, custom_headers=None, raw=False, **operation_config):
    # Initial PATCH of the long-running update operation; the public
    # update() wraps this call in an LRO poller.
    # Construct URL
    url = self.update.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Client-generated correlation id for tracing the request.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body — note the PATCH uses the PatchRouteFilterRule shape.
    body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')

    # Construct and send request
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # Any status other than 200 carries an ARM error payload.
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilterRule', response)

    if raw:
        # Caller asked for the raw pipeline response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def update(
        self, resource_group_name, route_filter_name, rule_name, route_filter_rule_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the update
     route filter rule operation.
    :type route_filter_rule_parameters:
     ~azure.mgmt.network.v2017_06_01.models.PatchRouteFilterRule
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns RouteFilterRule or
     ClientRawResponse<RouteFilterRule> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.RouteFilterRule]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_06_01.models.RouteFilterRule]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial PATCH; raw=True so the poller gets the live response.
    raw_result = self._update_initial(
        resource_group_name=resource_group_name,
        route_filter_name=route_filter_name,
        rule_name=rule_name,
        route_filter_rule_parameters=route_filter_rule_parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the terminal response of the LRO into the model.
        deserialized = self._deserialize('RouteFilterRule', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    # Poll interval: per-call override, falling back to client config.
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}
def list_by_route_filter(
        self, resource_group_name, route_filter_name, custom_headers=None, raw=False, **operation_config):
    """Gets all RouteFilterRules in a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of RouteFilterRule
    :rtype:
     ~azure.mgmt.network.v2017_06_01.models.RouteFilterRulePaged[~azure.mgmt.network.v2017_06_01.models.RouteFilterRule]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # Fetch one page: the first request is built from the operation
        # URL; subsequent pages simply follow the service's next_link.
        if not next_link:
            # Construct URL
            url = self.list_by_route_filter.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            # next_link already embeds all query parameters.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response — the Paged class drives internal_paging lazily.
    deserialized = models.RouteFilterRulePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.RouteFilterRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
# Copyright (c) 2016 OpenIO SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from xml.sax import saxutils
from swift.common.request_helpers import get_listing_content_type
from swift.common.utils import public, Timestamp, json
from swift.common.constraints import check_metadata
from swift.common import constraints
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.request_helpers import get_param, is_sys_or_user_meta
from swift.common.swob import HTTPNoContent, HTTPOk, HTTPPreconditionFailed, \
HTTPNotFound, HTTPCreated, HTTPAccepted
from swift.proxy.controllers.account import AccountController \
as SwiftAccountController
from swift.proxy.controllers.base import set_info_cache, clear_info_cache
from oio.common import exceptions
from oioswift.utils import handle_service_busy
def get_response_headers(info):
    """Build account GET/HEAD response headers from an account info dict.

    :param info: mapping with 'containers', 'objects', 'bytes', 'ctime'
        and a 'metadata' dict of extra header name/value pairs.
    :returns: dict of response headers for the account listing response.
    """
    resp_headers = {
        'X-Account-Container-Count': info['containers'],
        'X-Account-Object-Count': info['objects'],
        'X-Account-Bytes-Used': info['bytes'],
        'X-Timestamp': Timestamp(info['ctime']).normal,
    }
    # iteritems() is Python-2-only and breaks under Python 3; items()
    # behaves identically here on both versions.
    for key, value in info['metadata'].items():
        # Empty metadata values mean "deleted" — don't emit them.
        if value != '':
            resp_headers[key] = value
    return resp_headers
def account_listing_response(account, req, response_content_type,
                             info=None, listing=None):
    """Render an account listing as JSON, XML or plain text.

    :param account: account name, used in the XML envelope.
    :param req: the incoming swob request.
    :param response_content_type: negotiated content type for the body.
    :param info: account info dict; a zeroed placeholder is used if None.
    :param listing: iterable of (name, object_count, bytes_used, is_subdir)
        tuples; defaults to an empty listing.
    :returns: an HTTPOk response with the rendered body, or HTTPNoContent
        for an empty plain-text listing.
    """
    if info is None:
        # Synthesize an empty account created "now".
        info = {'containers': 0,
                'objects': 0,
                'bytes': 0,
                'metadata': {},
                'ctime': Timestamp(time.time()).internal}
    if listing is None:
        listing = []

    resp_headers = get_response_headers(info)

    if response_content_type == 'application/json':
        entries = []
        for (name, object_count, bytes_used, is_subdir) in listing:
            if is_subdir:
                entry = {'subdir': name}
            else:
                entry = {'name': name, 'count': object_count,
                         'bytes': bytes_used}
            entries.append(entry)
        account_list = json.dumps(entries)
    elif response_content_type.endswith('/xml'):
        fragments = ['<?xml version="1.0" encoding="UTF-8"?>',
                     '<account name=%s>' % saxutils.quoteattr(account)]
        for (name, object_count, bytes_used, is_subdir) in listing:
            if is_subdir:
                fragments.append(
                    '<subdir name=%s />' % saxutils.quoteattr(name))
            else:
                fragments.append(
                    '<container><name>%s</name><count>%s</count>'
                    '<bytes>%s</bytes></container>' %
                    (saxutils.escape(name), object_count, bytes_used))
        fragments.append('</account>')
        account_list = '\n'.join(fragments)
    else:
        # Plain text: an empty listing yields 204 rather than an empty body.
        if not listing:
            resp = HTTPNoContent(request=req, headers=resp_headers)
            resp.content_type = response_content_type
            resp.charset = 'utf-8'
            return resp
        account_list = '\n'.join(entry[0] for entry in listing) + '\n'

    ret = HTTPOk(body=account_list, request=req, headers=resp_headers)
    ret.content_type = response_content_type
    ret.charset = 'utf-8'
    return ret
class AccountController(SwiftAccountController):
    """Swift account controller backed by the OpenIO account service."""

    @public
    @handle_service_busy
    def GET(self, req):
        """Handler for HTTP GET requests."""
        if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name),
                         constraints.MAX_ACCOUNT_NAME_LENGTH)
            return resp

        resp = self.get_account_listing_resp(req)
        set_info_cache(self.app, req.environ, self.account_name, None, resp)

        if req.environ.get('swift_owner'):
            self.add_acls_from_sys_metadata(resp)
        else:
            # Hide owner-only headers from non-owner requests.
            for header in self.app.swift_owner_headers:
                resp.headers.pop(header, None)
        return resp

    def get_account_listing_resp(self, req):
        """Build the container listing response for this account.

        Validates the listing query parameters, fetches the listing from
        the account backend and renders it via account_listing_response().
        """
        prefix = get_param(req, 'prefix')
        # BUG FIX: the delimiter must come from the 'delimiter' query
        # parameter, not from 'prefix' (copy-paste error).
        delimiter = get_param(req, 'delimiter')
        if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
            return HTTPPreconditionFailed(body='Bad delimiter')
        limit = constraints.ACCOUNT_LISTING_LIMIT
        given_limit = get_param(req, 'limit')
        if given_limit and given_limit.isdigit():
            limit = int(given_limit)
            if limit > constraints.ACCOUNT_LISTING_LIMIT:
                return HTTPPreconditionFailed(
                    request=req,
                    body='Maximum limit is %d' %
                         constraints.ACCOUNT_LISTING_LIMIT)
        marker = get_param(req, 'marker')
        end_marker = get_param(req, 'end_marker')

        try:
            info = None
            if hasattr(self.app.storage, 'account'):
                # Call directly AccountClient.container_list()
                info = self.app.storage.account.container_list(
                    self.account_name, limit=limit, marker=marker,
                    end_marker=end_marker, prefix=prefix,
                    delimiter=delimiter)
                listing = info.pop('listing')
            else:
                # Legacy call to account service
                listing, info = self.app.storage.container_list(
                    self.account_name, limit=limit, marker=marker,
                    end_marker=end_marker, prefix=prefix,
                    delimiter=delimiter)
            resp = account_listing_response(self.account_name, req,
                                            get_listing_content_type(req),
                                            info=info,
                                            listing=listing)
        except (exceptions.NotFound, exceptions.NoSuchAccount):
            if self.app.account_autocreate:
                # Pretend the (empty) account exists.
                resp = account_listing_response(self.account_name, req,
                                                get_listing_content_type(req))
            else:
                resp = HTTPNotFound(request=req)
        return resp

    @public
    @handle_service_busy
    def HEAD(self, req):
        """HTTP HEAD request handler."""
        if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name),
                         constraints.MAX_ACCOUNT_NAME_LENGTH)
            return resp

        resp = self.get_account_head_resp(req)
        set_info_cache(self.app, req.environ, self.account_name, None, resp)

        if req.environ.get('swift_owner'):
            self.add_acls_from_sys_metadata(resp)
        else:
            # Hide owner-only headers from non-owner requests.
            for header in self.app.swift_owner_headers:
                resp.headers.pop(header, None)
        return resp

    def get_account_head_resp(self, req):
        """Build the (body-less) account metadata response for HEAD."""
        try:
            info = self.app.storage.account_show(self.account_name)
            resp = account_listing_response(self.account_name, req,
                                            get_listing_content_type(req),
                                            info=info)
        except (exceptions.NotFound, exceptions.NoSuchAccount):
            if self.app.account_autocreate:
                resp = account_listing_response(self.account_name, req,
                                                get_listing_content_type(req))
            else:
                resp = HTTPNotFound(request=req)
        return resp

    @public
    @handle_service_busy
    def PUT(self, req):
        """HTTP PUT request handler."""
        if not self.app.allow_account_management:
            return HTTPMethodNotAllowed(
                request=req,
                headers={'Allow': ', '.join(self.allowed_methods)})
        error_response = check_metadata(req, 'account')
        if error_response:
            return error_response
        if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name),
                         constraints.MAX_ACCOUNT_NAME_LENGTH)
            return resp
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.get_account_put_resp(req, headers)
        self.add_acls_from_sys_metadata(resp)
        return resp

    def get_account_put_resp(self, req, headers):
        """Create the account (201) or accept it if it already exists (202)."""
        created = self.app.storage.account_create(self.account_name)
        metadata = {}
        metadata.update((key, value)
                        for key, value in req.headers.items()
                        if is_sys_or_user_meta('account', key))
        if metadata:
            self.app.storage.account_update(self.account_name, metadata)

        if created:
            resp = HTTPCreated(request=req)
        else:
            resp = HTTPAccepted(request=req)
        return resp

    @public
    @handle_service_busy
    def POST(self, req):
        """HTTP POST request handler."""
        if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name),
                         constraints.MAX_ACCOUNT_NAME_LENGTH)
            return resp
        error_response = check_metadata(req, 'account')
        if error_response:
            return error_response
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.get_account_post_resp(req, headers)
        self.add_acls_from_sys_metadata(resp)
        return resp

    def get_account_post_resp(self, req, headers):
        """Apply account metadata updates, auto-creating if configured."""
        metadata = {}
        metadata.update((key, value)
                        for key, value in req.headers.items()
                        if is_sys_or_user_meta('account', key))
        try:
            self.app.storage.account_update(self.account_name, metadata)
            return HTTPNoContent(request=req)
        except (exceptions.NotFound, exceptions.NoSuchAccount):
            if self.app.account_autocreate:
                self.autocreate_account(req, self.account_name)
                if metadata:
                    self.app.storage.account_update(
                        self.account_name, metadata, headers=headers)
                resp = HTTPNoContent(request=req)
            else:
                resp = HTTPNotFound(request=req)
        self.add_acls_from_sys_metadata(resp)
        return resp

    @public
    @handle_service_busy
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        if req.query_string:
            return HTTPBadRequest(request=req)
        if not self.app.allow_account_management:
            return HTTPMethodNotAllowed(
                request=req,
                headers={'Allow': ', '.join(self.allowed_methods)})
        headers = self.generate_request_headers(req)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.get_account_delete_resp(req, headers)
        return resp

    def get_account_delete_resp(self, req, headers):
        # TODO perform delete — deletion is not yet implemented; this
        # always reports success without touching the backend.
        return HTTPNoContent(request=req)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Tracking the state of Amazon EKS Clusters, Amazon EKS managed node groups, and AWS Fargate profiles."""
import warnings
from typing import TYPE_CHECKING, Optional, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.eks import (
ClusterStates,
EksHook,
FargateProfileStates,
NodegroupStates,
)
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
# Default Airflow connection id used for AWS credentials.
DEFAULT_CONN_ID = "aws_default"

# States from which the resource will not transition on its own; once one
# of these is reached, reaching any *other* target state is impossible and
# the sensors below raise instead of polling forever.
CLUSTER_TERMINAL_STATES = frozenset({ClusterStates.ACTIVE, ClusterStates.FAILED, ClusterStates.NONEXISTENT})
FARGATE_TERMINAL_STATES = frozenset(
    {
        FargateProfileStates.ACTIVE,
        FargateProfileStates.CREATE_FAILED,
        FargateProfileStates.DELETE_FAILED,
        FargateProfileStates.NONEXISTENT,
    }
)
NODEGROUP_TERMINAL_STATES = frozenset(
    {
        NodegroupStates.ACTIVE,
        NodegroupStates.CREATE_FAILED,
        NodegroupStates.DELETE_FAILED,
        NodegroupStates.NONEXISTENT,
    }
)

# Error message used when a terminal state other than the target is reached.
UNEXPECTED_TERMINAL_STATE_MSG = (
    "Terminal state reached. Current state: {current_state}, Expected state: {target_state}"
)
class EksClusterStateSensor(BaseSensorOperator):
    """
    Poll an Amazon EKS Cluster until it reaches the target state.

    Raises if a terminal state other than the target is reached, since the
    cluster can then never get to the target on its own.

    :param cluster_name: The name of the Cluster to watch. (templated)
    :param target_state: Target state of the Cluster. (templated)
    :param region: Which AWS region the connection should use. (templated)
        If this is None or empty then the default boto3 behaviour is used.
    :param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
        If this is None or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then the default boto3 configuration would be used (and must be
        maintained on each worker node).
    """

    template_fields: Sequence[str] = ("cluster_name", "target_state", "aws_conn_id", "region")
    ui_color = "#ff9900"
    ui_fgcolor = "#232F3E"

    def __init__(
        self,
        *,
        cluster_name: str,
        target_state: ClusterStates = ClusterStates.ACTIVE,
        aws_conn_id: str = DEFAULT_CONN_ID,
        region: Optional[str] = None,
        **kwargs,
    ):
        self.cluster_name = cluster_name
        # Accept either a ClusterStates member or its (case-insensitive) name.
        if isinstance(target_state, ClusterStates):
            self.target_state = target_state
        else:
            self.target_state = ClusterStates(str(target_state).upper())
        self.aws_conn_id = aws_conn_id
        self.region = region
        super().__init__(**kwargs)

    def poke(self, context: 'Context'):
        hook = EksHook(
            aws_conn_id=self.aws_conn_id,
            region_name=self.region,
        )
        current_state = hook.get_cluster_state(clusterName=self.cluster_name)
        self.log.info("Cluster state: %s", current_state)

        # A terminal state that isn't the target can never become the target.
        if current_state in (CLUSTER_TERMINAL_STATES - {self.target_state}):
            raise AirflowException(
                UNEXPECTED_TERMINAL_STATE_MSG.format(
                    current_state=current_state, target_state=self.target_state
                )
            )
        return current_state == self.target_state
class EksFargateProfileStateSensor(BaseSensorOperator):
    """
    Poll an AWS Fargate profile until it reaches the target state.

    Raises if a terminal state other than the target is reached, since the
    profile can then never get to the target on its own.

    :param cluster_name: The name of the Cluster which the AWS Fargate profile is attached to. (templated)
    :param fargate_profile_name: The name of the Fargate profile to watch. (templated)
    :param target_state: Target state of the Fargate profile. (templated)
    :param region: Which AWS region the connection should use. (templated)
        If this is None or empty then the default boto3 behaviour is used.
    :param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
        If this is None or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then the default boto3 configuration would be used (and must be
        maintained on each worker node).
    """

    template_fields: Sequence[str] = (
        "cluster_name",
        "fargate_profile_name",
        "target_state",
        "aws_conn_id",
        "region",
    )
    ui_color = "#ff9900"
    ui_fgcolor = "#232F3E"

    def __init__(
        self,
        *,
        cluster_name: str,
        fargate_profile_name: str,
        target_state: FargateProfileStates = FargateProfileStates.ACTIVE,
        aws_conn_id: str = DEFAULT_CONN_ID,
        region: Optional[str] = None,
        **kwargs,
    ):
        self.cluster_name = cluster_name
        self.fargate_profile_name = fargate_profile_name
        # Accept either a FargateProfileStates member or its name.
        if isinstance(target_state, FargateProfileStates):
            self.target_state = target_state
        else:
            self.target_state = FargateProfileStates(str(target_state).upper())
        self.aws_conn_id = aws_conn_id
        self.region = region
        super().__init__(**kwargs)

    def poke(self, context: 'Context'):
        hook = EksHook(
            aws_conn_id=self.aws_conn_id,
            region_name=self.region,
        )
        current_state = hook.get_fargate_profile_state(
            clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
        )
        self.log.info("Fargate profile state: %s", current_state)

        # A terminal state that isn't the target can never become the target.
        if current_state in (FARGATE_TERMINAL_STATES - {self.target_state}):
            raise AirflowException(
                UNEXPECTED_TERMINAL_STATE_MSG.format(
                    current_state=current_state, target_state=self.target_state
                )
            )
        return current_state == self.target_state
class EksNodegroupStateSensor(BaseSensorOperator):
    """
    Poll an EKS managed node group until it reaches the target state.

    Raises if a terminal state other than the target is reached, since the
    node group can then never get to the target on its own.

    :param cluster_name: The name of the Cluster which the Nodegroup is attached to. (templated)
    :param nodegroup_name: The name of the Nodegroup to watch. (templated)
    :param target_state: Target state of the Nodegroup. (templated)
    :param region: Which AWS region the connection should use. (templated)
        If this is None or empty then the default boto3 behaviour is used.
    :param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
        If this is None or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then the default boto3 configuration would be used (and must be
        maintained on each worker node).
    """

    template_fields: Sequence[str] = (
        "cluster_name",
        "nodegroup_name",
        "target_state",
        "aws_conn_id",
        "region",
    )
    ui_color = "#ff9900"
    ui_fgcolor = "#232F3E"

    def __init__(
        self,
        *,
        cluster_name: str,
        nodegroup_name: str,
        target_state: NodegroupStates = NodegroupStates.ACTIVE,
        aws_conn_id: str = DEFAULT_CONN_ID,
        region: Optional[str] = None,
        **kwargs,
    ):
        self.cluster_name = cluster_name
        self.nodegroup_name = nodegroup_name
        # Accept either a NodegroupStates member or its name.
        if isinstance(target_state, NodegroupStates):
            self.target_state = target_state
        else:
            self.target_state = NodegroupStates(str(target_state).upper())
        self.aws_conn_id = aws_conn_id
        self.region = region
        super().__init__(**kwargs)

    def poke(self, context: 'Context'):
        hook = EksHook(
            aws_conn_id=self.aws_conn_id,
            region_name=self.region,
        )
        current_state = hook.get_nodegroup_state(
            clusterName=self.cluster_name, nodegroupName=self.nodegroup_name
        )
        self.log.info("Nodegroup state: %s", current_state)

        # A terminal state that isn't the target can never become the target.
        if current_state in (NODEGROUP_TERMINAL_STATES - {self.target_state}):
            raise AirflowException(
                UNEXPECTED_TERMINAL_STATE_MSG.format(
                    current_state=current_state, target_state=self.target_state
                )
            )
        return current_state == self.target_state
class EKSClusterStateSensor(EksClusterStateSensor):
    """
    Deprecated alias kept for backwards compatibility.

    Please use :class:`airflow.providers.amazon.aws.sensors.eks.EksClusterStateSensor`.
    """

    def __init__(self, *args, **kwargs):
        # Emit the deprecation warning at the caller's frame, then defer
        # everything else to the replacement sensor.
        message = (
            "This sensor is deprecated. "
            "Please use `airflow.providers.amazon.aws.sensors.eks.EksClusterStateSensor`."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class EKSFargateProfileStateSensor(EksFargateProfileStateSensor):
    """
    Deprecated alias kept for backwards compatibility.

    Please use :class:`airflow.providers.amazon.aws.sensors.eks.EksFargateProfileStateSensor`.
    """

    def __init__(self, *args, **kwargs):
        # Emit the deprecation warning at the caller's frame, then defer
        # everything else to the replacement sensor.
        message = (
            "This sensor is deprecated. "
            "Please use `airflow.providers.amazon.aws.sensors.eks.EksFargateProfileStateSensor`."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class EKSNodegroupStateSensor(EksNodegroupStateSensor):
    """
    Deprecated alias kept for backwards compatibility.

    Please use :class:`airflow.providers.amazon.aws.sensors.eks.EksNodegroupStateSensor`.
    """

    def __init__(self, *args, **kwargs):
        # Emit the deprecation warning at the caller's frame, then defer
        # everything else to the replacement sensor.
        message = (
            "This sensor is deprecated. "
            "Please use `airflow.providers.amazon.aws.sensors.eks.EksNodegroupStateSensor`."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
|
|
#!/usr/bin/env python
__version__ = '1.0'
import itertools
import random
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.core.audio import SoundLoader
# Pre-load the sound effects once at import time so playback is instant.
sfx_flap = SoundLoader.load('audio/flap.wav')
sfx_score = SoundLoader.load('audio/score.wav')
sfx_die = SoundLoader.load('audio/die.wav')
from config import config
import PID
class Sprite(Image):
    """An Image widget whose size is shrink-wrapped to its texture."""

    def __init__(self, **kwargs):
        super(Sprite, self).__init__(**kwargs)
        # Match the widget exactly to the loaded texture's dimensions.
        self.width, self.height = self.texture_size
class Menu(Widget):
    """Start screen: background, ground strip and a 'Tap to Start' label."""

    def __init__(self):
        super(Menu, self).__init__()
        backdrop = Sprite(source='images/background.png')
        self.add_widget(backdrop)
        # The menu covers exactly the background image.
        self.size = backdrop.size
        self.add_widget(Ground(source='images/ground.png'))
        self.add_widget(Label(center=self.center, text="Tap to Start"))

    def on_touch_down(self, *ignore):
        # Any tap swaps the menu out for a fresh game.
        parent = self.parent
        parent.remove_widget(self)
        parent.add_widget(Game())
class Bird(Sprite):
    """Player sprite whose vertical velocity is driven by a PID controller:
    tapping picks a target height and the controller corrects toward it."""

    def __init__(self, pos):
        super(Bird, self).__init__(source='atlas://images/bird_anim/wing-up', pos=pos)
        min_vel = config['min_velocity']
        max_vel = config['max_velocity']
        # Hand-tuned PID gains; corrections are clamped to the configured
        # velocity range so the bird can't accelerate without bound.
        self._pid = PID.PID(KP=13.197,
                            KI=0.0,
                            KD=0.256,
                            min_cor=min_vel,
                            max_cor=max_vel)
        self.velocity_y = 0
        self._gravity = config['gravity']
        # Endless wing-flap animation cycle: up -> mid -> down -> mid.
        self._flap_images = itertools.cycle(('atlas://images/bird_anim/wing-up',
                                             'atlas://images/bird_anim/wing-mid',
                                             'atlas://images/bird_anim/wing-down',
                                             'atlas://images/bird_anim/wing-mid'))
        self._glide_image = 'atlas://images/bird_anim/wing-mid'

    def update(self, dt):
        # Simple Euler integration of gravity into position.
        self.velocity_y += (self._gravity * dt)
        self.y += (self.velocity_y * dt)
        # Flap the wings only while moving fast; glide otherwise.
        # (Threshold of 3 is in velocity units — presumably px/s; confirm.)
        if abs(self.velocity_y) > 3:
            self.source = next(self._flap_images)
        else:
            self.source = self._glide_image

    def fly_to(self, height):
        # Aim the bird's *center* at the requested height.
        self._pid.target = height - (self.height / 2)

    def flap(self, passed_time):
        '''
        Increase, or decrease power of flapping of the flappy bird.
        @param passed_time - amount of time passed since last flap
        '''
        self.velocity_y = self._pid.make_correction(self.y, passed_time)

    def on_touch_down(self, touch):
        # A tap retargets the PID controller at the touch height.
        self.source = 'atlas://images/bird_anim/wing-down'
        sfx_flap.play()
        x, y = touch.pos
        self.fly_to(y)
class Background(Widget):
    """Endlessly scrolling backdrop made of two copies of one image."""
    def __init__(self, source):
        super(Background, self).__init__()
        self.image = Sprite(source=source)
        self.add_widget(self.image)
        self.size = self.image.size
        # Second copy starts exactly one image-width to the right.
        self.image_dupe = Sprite(source=source, x=self.width)
        self.add_widget(self.image_dupe)
    def update(self):
        # Slide both copies left in lockstep.
        for img in (self.image, self.image_dupe):
            img.x -= 2
        # Once the first copy has fully left the screen, snap the pair back
        # to their starting positions; the seam is invisible.
        if self.image.right <= 0:
            self.image.x = 0
            self.image_dupe.x = self.width
class Ground(Sprite):
    """Foreground strip that creeps left and wraps every 24 px."""
    def update(self):
        shifted = self.x - 2
        # The ground texture repeats every 24 px, so jumping right by one
        # period is invisible and keeps x bounded.
        self.x = shifted + 24 if shifted < -24 else shifted
class Pipe(Widget):
    """One obstacle: top and bottom pipe sprites with a fixed vertical gap."""
    def __init__(self, pos):
        super(Pipe, self).__init__(pos=pos)
        self.top_image = Sprite(source='images/pipe_top.png')
        # The top pipe sits 3.5 * 24 px above self.y, leaving an 84 px gap
        # between the two pipe mouths.
        self.top_image.pos = (self.x, self.y + 3.5 * 24)
        self.add_widget(self.top_image)
        self.bottom_image = Sprite(source='images/pipe_bottom.png')
        self.bottom_image.pos = (self.x, self.y - self.bottom_image.height)
        self.add_widget(self.bottom_image)
        self.width = self.top_image.width
        # Set once the bird has passed this pipe, so it scores only once.
        self.scored = False
    @property
    def mid_point(self):
        '''
        The opening in between the pipes - mid-point.
        '''
        top_bottom = self.top_image.y
        bottom_top = self.bottom_image.y + self.bottom_image.height
        return (top_bottom + bottom_top) / 2
    @property
    def x_pos(self):
        # Left edge of the pipe pair in window coordinates.
        return self.top_image.pos[0]
    def update(self):
        self.x -= 2
        self.top_image.x = self.bottom_image.x = self.x
        if self.right < 0:
            # Fully off screen: detach from the Pipes container.
            self.parent.remove_widget(self)
class Pipes(Widget):
    """Container that spawns, scrolls and expires Pipe obstacles."""
    # Countdown (seconds) until the next pipe is spawned.
    add_pipe = 0
    def update(self, dt):
        """Advance every pipe and spawn a new one every 1.5 seconds."""
        # Iterate over a copy: Pipe.update() may remove the pipe from
        # self.children when it scrolls off screen.
        for child in list(self.children):
            child.update()
        self.add_pipe -= dt
        if self.add_pipe < 0:
            # random.randint requires integer bounds; the original passed
            # float expressions (e.g. self.height - 50 - 3.5 * 24), which is
            # deprecated on Python 3.10 and a TypeError on 3.12+.
            lowest = int(self.y + 50)
            highest = int(self.height - 50 - 3.5 * 24)
            y = random.randint(lowest, highest)
            self.add_widget(Pipe(pos=(self.width, y)))
            self.add_pipe = 1.5
class Game(Widget):
    """The live game scene: scrolling world, PID-driven bird, pipes, score."""
    def __init__(self):
        super(Game, self).__init__()
        self.background = Background(source='images/background.png')
        self.size = self.background.size
        self.add_widget(self.background)
        # Transparent overlay whose canvas is cleared and redrawn each frame.
        self.painter = Widget()
        self.painter.size = self.background.size
        self.add_widget(self.painter)
        self.bird = Bird(pos=(50, self.height / 2))
        self.bird.fly_to(100)
        self.add_widget(self.bird)
        self.ground = Ground(source='images/ground.png')
        self.pipes = Pipes(pos=(0, self.ground.height), size=self.size)
        self.add_widget(self.pipes)
        self.add_widget(self.ground)
        self.score_label = Label(center_x=self.center_x, top=self.top - 30, text='0')
        self.add_widget(self.score_label)
        self.over_label = Label(center=self.center, opacity=0, text="Game over!")
        self.add_widget(self.over_label)
        Clock.schedule_interval(self.update, 1.0 / config['fps'])
        self.game_over = False
        self.score = 0
    def update(self, dt):
        """Advance the world one frame; dt is seconds since the last frame."""
        self.painter.canvas.clear()
        if self.game_over:
            # Stop the frame clock and bail out immediately.  Without the
            # early return, the remainder of this frame would run once more
            # after death, replaying sfx_die and re-binding the restart
            # handler a second time.
            Clock.unschedule(self.update)
            return
        self.background.update()
        self.bird.update(dt)
        self.bird.flap(dt)
        self.ground.update()
        self.pipes.update(dt)
        with self.painter.canvas:
            # Steer the bird towards the opening of any pipe within 100 px.
            for pipe in self.pipes.children:
                if (pipe.x_pos - self.bird.x) <= 100:
                    self.bird.fly_to(pipe.mid_point)
        if self.bird.collide_widget(self.ground):
            self.game_over = True
        for pipe in self.pipes.children:
            if pipe.top_image.collide_widget(self.bird):
                self.game_over = True
            elif pipe.bottom_image.collide_widget(self.bird):
                self.game_over = True
            elif not pipe.scored and pipe.right < self.bird.x:
                # Cleared the pipe: score exactly once per pipe.
                pipe.scored = True
                self.score += 1
                self.score_label.text = str(self.score)
                sfx_score.play()
        if self.game_over:
            sfx_die.play()
            self.over_label.opacity = 1
            self.bind(on_touch_down=self._on_touch_down)
    def _on_touch_down(self, *ignore):
        # After death, any touch returns to the menu.
        parent = self.parent
        parent.remove_widget(self)
        parent.add_widget(Menu())
class GameApp(App):
    """Application entry point: hosts the Menu (and later the Game)."""
    def build(self):
        root = Widget()
        root.add_widget(Menu())
        # Shrink the OS window to exactly fit the menu's artwork.
        Window.size = root.children[0].size
        return root
def main():
    """Launch the Kivy main loop (blocks until the window closes)."""
    GameApp().run()
if __name__ == "__main__":
    main()
|
|
# -*- test-case-name: twisted.python.test.test_util
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.util}.
"""
from __future__ import division, absolute_import
import errno
import os.path
import shutil
import sys
import warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
from twisted.trial import unittest
from twisted.trial.util import suppress as SUPPRESS
from twisted.python import util
from twisted.python.filepath import FilePath
from twisted.internet import reactor
from twisted.internet.interfaces import IReactorProcess
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
from twisted.internet.error import ProcessDone
from twisted.test.test_process import MockOS
pyExe = FilePath(sys.executable)._asBytesPath()
class UtilTests(unittest.TestCase):
    """
    Tests for assorted small helpers in L{twisted.python.util}.
    """
    def testUniq(self):
        """
        L{util.uniquify} drops duplicates while preserving first-seen order.
        """
        l = ["a", 1, "ab", "a", 3, 4, 1, 2, 2, 4, 6]
        self.assertEqual(util.uniquify(l), ["a", 1, "ab", 3, 4, 2, 6])
    def testRaises(self):
        """
        L{util.raises} reports whether calling the function raises the given
        exception type, and propagates unexpected exception types.
        """
        self.assertTrue(util.raises(ZeroDivisionError, divmod, 1, 0))
        self.assertFalse(util.raises(ZeroDivisionError, divmod, 0, 1))
        try:
            util.raises(TypeError, divmod, 1, 0)
        except ZeroDivisionError:
            # The mismatching exception must escape rather than be swallowed.
            pass
        else:
            raise unittest.FailTest("util.raises didn't raise when it should have")
    def test_uidFromNumericString(self):
        """
        When L{uidFromString} is called with a base-ten string representation
        of an integer, it returns the integer.
        """
        self.assertEqual(util.uidFromString("100"), 100)
    def test_uidFromUsernameString(self):
        """
        When L{uidFromString} is called with a username, it returns the uid
        of the user with that username.
        """
        pwent = pwd.getpwuid(os.getuid())
        self.assertEqual(util.uidFromString(pwent.pw_name), pwent.pw_uid)
    if pwd is None:
        # The module-level import of pwd/grp may have failed (non-POSIX).
        test_uidFromUsernameString.skip = (
            "Username/UID conversion requires the pwd module.")
    def test_gidFromNumericString(self):
        """
        When L{gidFromString} is called with a base-ten string representation
        of an integer, it returns the integer.
        """
        self.assertEqual(util.gidFromString("100"), 100)
    def test_gidFromGroupnameString(self):
        """
        When L{gidFromString} is called with a group name, it returns the gid
        of the group with that name.
        """
        grent = grp.getgrgid(os.getgid())
        self.assertEqual(util.gidFromString(grent.gr_name), grent.gr_gid)
    if grp is None:
        test_gidFromGroupnameString.skip = (
            "Group Name/GID conversion requires the grp module.")
class NameToLabelTests(unittest.TestCase):
    """
    Tests for L{nameToLabel}.
    """
    def test_nameToLabel(self):
        """
        Test the various kinds of inputs L{nameToLabel} supports.
        """
        cases = [
            ('f', 'F'),
            ('fo', 'Fo'),
            ('foo', 'Foo'),
            ('fooBar', 'Foo Bar'),
            ('fooBarBaz', 'Foo Bar Baz'),
        ]
        for name, expected in cases:
            actual = util.nameToLabel(name)
            self.assertEqual(
                actual, expected,
                "nameToLabel(%r) == %r != %r" % (name, actual, expected))
class UntilConcludesTests(unittest.TestCase):
    """
    Tests for L{untilConcludes}, an C{EINTR} helper.
    """
    def test_uninterruptably(self):
        """
        L{untilConcludes} calls the function passed to it until the function
        does not raise either L{OSError} or L{IOError} with C{errno} of
        C{EINTR}. It otherwise completes with the same result as the function
        passed to it.
        """
        def f(a, b):
            self.calls += 1
            # pop() takes from the *end* of the list, so the scripted
            # exceptions below are consumed in reverse order, ending at None.
            exc = self.exceptions.pop()
            if exc is not None:
                raise exc(errno.EINTR, "Interrupted system call!")
            return a + b
        # No exception scripted: f succeeds on the first call.
        self.exceptions = [None]
        self.calls = 0
        self.assertEqual(util.untilConcludes(f, 1, 2), 3)
        self.assertEqual(self.calls, 1)
        # Two EINTR failures before success: f must be retried three times.
        self.exceptions = [None, OSError, IOError]
        self.calls = 0
        self.assertEqual(util.untilConcludes(f, 2, 3), 5)
        self.assertEqual(self.calls, 3)
class SwitchUIDTests(unittest.TestCase):
    """
    Tests for L{util.switchUID}.
    """
    if getattr(os, "getuid", None) is None:
        # e.g. Windows: these tests only make sense on POSIX platforms.
        skip = "getuid/setuid not available"
    def setUp(self):
        # Replace the os module and initgroups as seen by util, so no real
        # privilege changes happen while the tests run.
        self.mockos = MockOS()
        self.patch(util, "os", self.mockos)
        self.patch(util, "initgroups", self.initgroups)
        self.initgroupsCalls = []
    def initgroups(self, uid, gid):
        """
        Save L{util.initgroups} calls in C{self.initgroupsCalls}.
        """
        self.initgroupsCalls.append((uid, gid))
    def test_uid(self):
        """
        L{util.switchUID} calls L{util.initgroups} and then C{os.setuid} with
        the given uid.
        """
        util.switchUID(12000, None)
        self.assertEqual(self.initgroupsCalls, [(12000, None)])
        self.assertEqual(self.mockos.actions, [("setuid", 12000)])
    def test_euid(self):
        """
        L{util.switchUID} calls L{util.initgroups} and then C{os.seteuid} with
        the given uid if the C{euid} parameter is set to C{True}.
        """
        util.switchUID(12000, None, True)
        self.assertEqual(self.initgroupsCalls, [(12000, None)])
        self.assertEqual(self.mockos.seteuidCalls, [12000])
    def test_currentUID(self):
        """
        If the current uid is the same as the uid passed to L{util.switchUID},
        then initgroups does not get called, but a warning is issued.
        """
        uid = self.mockos.getuid()
        util.switchUID(uid, None)
        self.assertEqual(self.initgroupsCalls, [])
        self.assertEqual(self.mockos.actions, [])
        currentWarnings = self.flushWarnings([util.switchUID])
        self.assertEqual(len(currentWarnings), 1)
        self.assertIn('tried to drop privileges and setuid %i' % uid,
                      currentWarnings[0]['message'])
        self.assertIn(
            'but uid is already %i' % uid, currentWarnings[0]['message'])
    def test_currentEUID(self):
        """
        If the current euid is the same as the euid passed to L{util.switchUID},
        then initgroups does not get called, but a warning is issued.
        """
        euid = self.mockos.geteuid()
        util.switchUID(euid, None, True)
        self.assertEqual(self.initgroupsCalls, [])
        self.assertEqual(self.mockos.seteuidCalls, [])
        currentWarnings = self.flushWarnings([util.switchUID])
        self.assertEqual(len(currentWarnings), 1)
        self.assertIn('tried to drop privileges and seteuid %i' % euid,
                      currentWarnings[0]['message'])
        self.assertIn(
            'but euid is already %i' % euid, currentWarnings[0]['message'])
class MergeFunctionMetadataTests(unittest.TestCase):
    """
    Tests for L{mergeFunctionMetadata}.
    """
    def test_mergedFunctionBehavesLikeMergeTarget(self):
        """
        After merging C{foo}'s data into C{bar}, the returned function behaves
        as if it is C{bar}.
        """
        foo_object = object()
        bar_object = object()
        def foo():
            return foo_object
        def bar(x, y, ab, c=10, *d, **e):
            (a, b) = ab
            return bar_object
        baz = util.mergeFunctionMetadata(foo, bar)
        # bar's signature (not foo's) must be in effect: extra positional
        # and keyword arguments are accepted, and bar's return value is used.
        self.assertIs(baz(1, 2, (3, 4), quux=10), bar_object)
    def test_moduleIsMerged(self):
        """
        Merging C{foo} into C{bar} returns a function with C{foo}'s
        C{__module__}.
        """
        def foo():
            pass
        def bar():
            pass
        bar.__module__ = 'somewhere.else'
        baz = util.mergeFunctionMetadata(foo, bar)
        self.assertEqual(baz.__module__, foo.__module__)
    def test_docstringIsMerged(self):
        """
        Merging C{foo} into C{bar} returns a function with C{foo}'s docstring.
        """
        def foo():
            """
            This is foo.
            """
        def bar():
            """
            This is bar.
            """
        baz = util.mergeFunctionMetadata(foo, bar)
        self.assertEqual(baz.__doc__, foo.__doc__)
    def test_nameIsMerged(self):
        """
        Merging C{foo} into C{bar} returns a function with C{foo}'s name.
        """
        def foo():
            pass
        def bar():
            pass
        baz = util.mergeFunctionMetadata(foo, bar)
        self.assertEqual(baz.__name__, foo.__name__)
    def test_instanceDictionaryIsMerged(self):
        """
        Merging C{foo} into C{bar} returns a function with C{bar}'s
        dictionary, updated by C{foo}'s.
        """
        def foo():
            pass
        foo.a = 1
        foo.b = 2
        def bar():
            pass
        bar.b = 3
        bar.c = 4
        baz = util.mergeFunctionMetadata(foo, bar)
        # foo's attributes win on collision ('b'); bar-only keys survive.
        self.assertEqual(foo.a, baz.a)
        self.assertEqual(foo.b, baz.b)
        self.assertEqual(bar.c, baz.c)
class OrderedDictTests(unittest.TestCase):
    """
    Tests for L{util.OrderedDict}.
    """
    def test_deprecated(self):
        """
        L{util.OrderedDict} is deprecated.
        """
        # Importing the name is what triggers the deprecation warning.
        from twisted.python.util import OrderedDict
        OrderedDict # Shh pyflakes
        currentWarnings = self.flushWarnings(offendingFunctions=[
            self.test_deprecated])
        self.assertEqual(
            currentWarnings[0]['message'],
            "twisted.python.util.OrderedDict was deprecated in Twisted "
            "15.5.0: Use collections.OrderedDict instead.")
        self.assertEqual(currentWarnings[0]['category'], DeprecationWarning)
        self.assertEqual(len(currentWarnings), 1)
class InsensitiveDictTests(unittest.TestCase):
    """
    Tests for L{util.InsensitiveDict}.
    """
    def test_preserve(self):
        """
        L{util.InsensitiveDict} preserves the case of keys if constructed with
        C{preserve=True}.
        """
        dct = util.InsensitiveDict({'Foo':'bar', 1:2, 'fnz':{1:2}}, preserve=1)
        self.assertEqual(dct['fnz'], {1:2})
        self.assertEqual(dct['foo'], 'bar')
        self.assertEqual(dct.copy(), dct)
        self.assertEqual(dct['foo'], dct.get('Foo'))
        self.assertIn(1, dct)
        self.assertIn('foo', dct)
        # repr() must round-trip through eval back to an equal dict.
        result = eval(repr(dct), {
            'dct': dct,
            'InsensitiveDict': util.InsensitiveDict,
            })
        self.assertEqual(result, dct)
        # With preserve=1 the original casing ('Foo') is kept in keys().
        keys=['Foo', 'fnz', 1]
        for x in keys:
            self.assertIn(x, dct.keys())
            self.assertIn((x, dct[x]), dct.items())
        self.assertEqual(len(keys), len(dct))
        del dct[1]
        del dct['foo']
        self.assertEqual(dct.keys(), ['fnz'])
    def test_noPreserve(self):
        """
        L{util.InsensitiveDict} does not preserves the case of keys if
        constructed with C{preserve=False}.
        """
        dct = util.InsensitiveDict({'Foo':'bar', 1:2, 'fnz':{1:2}}, preserve=0)
        # With preserve=0 keys are lower-cased ('foo', not 'Foo').
        keys=['foo', 'fnz', 1]
        for x in keys:
            self.assertIn(x, dct.keys())
            self.assertIn((x, dct[x]), dct.items())
        self.assertEqual(len(keys), len(dct))
        del dct[1]
        del dct['foo']
        self.assertEqual(dct.keys(), ['fnz'])
    def test_unicode(self):
        """
        Unicode keys are case insensitive.
        """
        d = util.InsensitiveDict(preserve=False)
        d[u"Foo"] = 1
        self.assertEqual(d[u"FOO"], 1)
        self.assertEqual(d.keys(), [u"foo"])
    def test_bytes(self):
        """
        Bytes keys are case insensitive.
        """
        d = util.InsensitiveDict(preserve=False)
        d[b"Foo"] = 1
        self.assertEqual(d[b"FOO"], 1)
        self.assertEqual(d.keys(), [b"foo"])
class PasswordTestingProcessProtocol(ProcessProtocol):
    """
    Write the string C{"secret\n"} to a subprocess and then collect all of
    its output and fire a Deferred with it when the process ends.
    """
    def connectionMade(self):
        # Feed the password to the child as soon as it is running.
        self.output = []
        self.transport.write(b'secret\n')
    def childDataReceived(self, fd, output):
        # Record (fd, data) pairs so stdout and stderr can be told apart.
        self.output.append((fd, output))
    def processEnded(self, reason):
        # self.finished is a Deferred attached by the test before spawning.
        self.finished.callback((reason, self.output))
class GetPasswordTests(unittest.TestCase):
    """
    Tests for L{util.getPassword}.
    """
    if not IReactorProcess.providedBy(reactor):
        skip = "Process support required to test getPassword"
    def test_stdin(self):
        """
        Making sure getPassword accepts a password from standard input by
        running a child process which uses getPassword to read in a string
        which it then writes it out again. Write a string to the child
        process and then read one and make sure it is the right string.
        """
        p = PasswordTestingProcessProtocol()
        p.finished = Deferred()
        reactor.spawnProcess(
            p, pyExe,
            [pyExe,
             b'-c',
             (b'import sys\n'
             b'from twisted.python.util import getPassword\n'
             b'sys.stdout.write(getPassword())\n'
             b'sys.stdout.flush()\n')],
            # Propagate sys.path so the child can import twisted.
            env={b'PYTHONPATH': os.pathsep.join(sys.path).encode("utf8")})
        def processFinished(result):
            (reason, output) = result
            reason.trap(ProcessDone)
            # fd 1 is the child's stdout; the echoed password appears there.
            self.assertIn((1, b'secret'), output)
        return p.finished.addCallback(processFinished)
class SearchUpwardsTests(unittest.TestCase):
    """
    Tests for L{util.searchupwards}.
    """
    def testSearchupwards(self):
        """
        L{util.searchupwards} walks upward from a start directory looking for
        an ancestor containing all the requested files and directories,
        returning its absolute path with a trailing separator, or C{None}
        when no such ancestor exists.
        """
        # Ensure the fixture tree is removed even if an assertion fails
        # part-way through; the original only cleaned up on the success path,
        # leaking 'searchupwards/' and breaking re-runs.
        self.addCleanup(shutil.rmtree, 'searchupwards', ignore_errors=True)
        os.makedirs('searchupwards/a/b/c')
        open('searchupwards/foo.txt', 'w').close()
        open('searchupwards/a/foo.txt', 'w').close()
        open('searchupwards/a/b/c/foo.txt', 'w').close()
        os.mkdir('searchupwards/bar')
        os.mkdir('searchupwards/bam')
        os.mkdir('searchupwards/a/bar')
        os.mkdir('searchupwards/a/b/bam')
        actual = util.searchupwards('searchupwards/a/b/c',
                                    files=['foo.txt'],
                                    dirs=['bar', 'bam'])
        expected = os.path.abspath('searchupwards') + os.sep
        self.assertEqual(actual, expected)
        # Remove the tree: the same search must now come up empty.
        shutil.rmtree('searchupwards')
        actual = util.searchupwards('searchupwards/a/b/c',
                                    files=['foo.txt'],
                                    dirs=['bar', 'bam'])
        expected = None
        self.assertEqual(actual, expected)
class IntervalDifferentialTests(unittest.TestCase):
    """
    Tests for L{util.IntervalDifferential}: an iterator producing
    (time-to-wait, interval-index) tuples for a set of recurring intervals,
    or (default, None) when no intervals are registered.
    """
    def testDefault(self):
        # No intervals: every step waits the default (10) and fires nothing.
        d = iter(util.IntervalDifferential([], 10))
        for i in range(100):
            self.assertEqual(next(d), (10, None))
    def testSingle(self):
        # One interval of 5: it fires (index 0) every 5 units.
        d = iter(util.IntervalDifferential([5], 10))
        for i in range(100):
            self.assertEqual(next(d), (5, 0))
    def testPair(self):
        # Intervals 5 and 7 interleave; the pattern repeats every 35 units.
        d = iter(util.IntervalDifferential([5, 7], 10))
        for i in range(100):
            self.assertEqual(next(d), (5, 0))
            self.assertEqual(next(d), (2, 1))
            self.assertEqual(next(d), (3, 0))
            self.assertEqual(next(d), (4, 1))
            self.assertEqual(next(d), (1, 0))
            self.assertEqual(next(d), (5, 0))
            self.assertEqual(next(d), (1, 1))
            self.assertEqual(next(d), (4, 0))
            self.assertEqual(next(d), (3, 1))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (5, 0))
            self.assertEqual(next(d), (0, 1))
    def testTriple(self):
        # Three intervals (2, 4, 5); full pattern repeats every 20 units.
        d = iter(util.IntervalDifferential([2, 4, 5], 10))
        for i in range(100):
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (0, 1))
            self.assertEqual(next(d), (1, 2))
            self.assertEqual(next(d), (1, 0))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (0, 1))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (0, 2))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (0, 1))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (1, 2))
            self.assertEqual(next(d), (1, 0))
            self.assertEqual(next(d), (0, 1))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (2, 0))
            self.assertEqual(next(d), (0, 1))
            self.assertEqual(next(d), (0, 2))
    def testInsert(self):
        # Intervals can be added while iterating; indexes reflect the set.
        d = iter(util.IntervalDifferential([], 10))
        self.assertEqual(next(d), (10, None))
        d.addInterval(3)
        self.assertEqual(next(d), (3, 0))
        self.assertEqual(next(d), (3, 0))
        d.addInterval(6)
        self.assertEqual(next(d), (3, 0))
        self.assertEqual(next(d), (3, 0))
        self.assertEqual(next(d), (0, 1))
        self.assertEqual(next(d), (3, 0))
        self.assertEqual(next(d), (3, 0))
        self.assertEqual(next(d), (0, 1))
    def testRemove(self):
        # Intervals can be removed; removing an absent one raises ValueError.
        d = iter(util.IntervalDifferential([3, 5], 10))
        self.assertEqual(next(d), (3, 0))
        self.assertEqual(next(d), (2, 1))
        self.assertEqual(next(d), (1, 0))
        d.removeInterval(3)
        self.assertEqual(next(d), (4, 0))
        self.assertEqual(next(d), (5, 0))
        d.removeInterval(5)
        self.assertEqual(next(d), (10, None))
        self.assertRaises(ValueError, d.removeInterval, 10)
class Record(util.FancyEqMixin):
    """
    Trivial user of L{FancyEqMixin} used by tests.
    """
    # The attributes FancyEqMixin compares for (in)equality.
    compareAttributes = ('a', 'b')
    def __init__(self, a, b):
        self.a = a
        self.b = b
class DifferentRecord(util.FancyEqMixin):
    """
    Trivial user of L{FancyEqMixin} which is not related to L{Record},
    despite having the same attributes.
    """
    compareAttributes = ('a', 'b')
    def __init__(self, a, b):
        self.a = a
        self.b = b
class DerivedRecord(Record):
    """
    A class with an inheritance relationship to L{Record}; it inherits
    L{Record}'s C{compareAttributes} unchanged.
    """
class EqualToEverything(object):
    """
    A class the instances of which consider themselves equal to everything.
    """
    def __eq__(self, other):
        # Unconditionally equal, regardless of the other operand.
        return True
    def __ne__(self, other):
        return False
class EqualToNothing(object):
    """
    A class the instances of which consider themselves equal to nothing.
    """
    def __eq__(self, other):
        # Unconditionally unequal, even to themselves.
        return False
    def __ne__(self, other):
        return True
class EqualityTests(unittest.TestCase):
    """
    Tests for L{FancyEqMixin}.
    """
    def test_identity(self):
        """
        Instances of a class which mixes in L{FancyEqMixin} but which
        defines no comparison attributes compare by identity.
        """
        class Empty(util.FancyEqMixin):
            pass
        # Distinct instances are unequal; an instance equals itself.
        self.assertFalse(Empty() == Empty())
        self.assertTrue(Empty() != Empty())
        empty = Empty()
        self.assertTrue(empty == empty)
        self.assertFalse(empty != empty)
    def test_equality(self):
        """
        Instances of a class which mixes in L{FancyEqMixin} should compare
        equal if all of their attributes compare equal.  They should not
        compare equal if any of their attributes do not compare equal.
        """
        self.assertTrue(Record(1, 2) == Record(1, 2))
        self.assertFalse(Record(1, 2) == Record(1, 3))
        self.assertFalse(Record(1, 2) == Record(2, 2))
        self.assertFalse(Record(1, 2) == Record(3, 4))
    def test_unequality(self):
        """
        Inequality between instances of a particular L{record} should be
        defined as the negation of equality.
        """
        self.assertFalse(Record(1, 2) != Record(1, 2))
        self.assertTrue(Record(1, 2) != Record(1, 3))
        self.assertTrue(Record(1, 2) != Record(2, 2))
        self.assertTrue(Record(1, 2) != Record(3, 4))
    def test_differentClassesEquality(self):
        """
        Instances of different classes which mix in L{FancyEqMixin} should not
        compare equal.
        """
        # Same attribute values, unrelated classes: not equal.
        self.assertFalse(Record(1, 2) == DifferentRecord(1, 2))
    def test_differentClassesInequality(self):
        """
        Instances of different classes which mix in L{FancyEqMixin} should
        compare unequal.
        """
        self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))
    def test_inheritedClassesEquality(self):
        """
        An instance of a class which derives from a class which mixes in
        L{FancyEqMixin} should compare equal to an instance of the base class
        if and only if all of their attributes compare equal.
        """
        self.assertTrue(Record(1, 2) == DerivedRecord(1, 2))
        self.assertFalse(Record(1, 2) == DerivedRecord(1, 3))
        self.assertFalse(Record(1, 2) == DerivedRecord(2, 2))
        self.assertFalse(Record(1, 2) == DerivedRecord(3, 4))
    def test_inheritedClassesInequality(self):
        """
        An instance of a class which derives from a class which mixes in
        L{FancyEqMixin} should compare unequal to an instance of the base
        class if any of their attributes compare unequal.
        """
        self.assertFalse(Record(1, 2) != DerivedRecord(1, 2))
        self.assertTrue(Record(1, 2) != DerivedRecord(1, 3))
        self.assertTrue(Record(1, 2) != DerivedRecord(2, 2))
        self.assertTrue(Record(1, 2) != DerivedRecord(3, 4))
    def test_rightHandArgumentImplementsEquality(self):
        """
        The right-hand argument to the equality operator is given a chance
        to determine the result of the operation if it is of a type
        unrelated to the L{FancyEqMixin}-based instance on the left-hand
        side.
        """
        self.assertTrue(Record(1, 2) == EqualToEverything())
        self.assertFalse(Record(1, 2) == EqualToNothing())
    def test_rightHandArgumentImplementsUnequality(self):
        """
        The right-hand argument to the non-equality operator is given a
        chance to determine the result of the operation if it is of a type
        unrelated to the L{FancyEqMixin}-based instance on the left-hand
        side.
        """
        self.assertFalse(Record(1, 2) != EqualToEverything())
        self.assertTrue(Record(1, 2) != EqualToNothing())
class RunAsEffectiveUserTests(unittest.TestCase):
    """
    Test for the L{util.runAsEffectiveUser} function.
    """
    if getattr(os, "geteuid", None) is None:
        skip = "geteuid/seteuid not available"
    def setUp(self):
        # Patch the real os functions with MockOS recorders so no actual
        # effective uid/gid changes occur.
        self.mockos = MockOS()
        self.patch(os, "geteuid", self.mockos.geteuid)
        self.patch(os, "getegid", self.mockos.getegid)
        self.patch(os, "seteuid", self.mockos.seteuid)
        self.patch(os, "setegid", self.mockos.setegid)
    def _securedFunction(self, startUID, startGID, wantUID, wantGID):
        """
        Check if wanted UID/GID matched start or saved ones.
        """
        self.assertTrue(wantUID == startUID or
                        wantUID == self.mockos.seteuidCalls[-1])
        self.assertTrue(wantGID == startGID or
                        wantGID == self.mockos.setegidCalls[-1])
    def test_forwardResult(self):
        """
        L{util.runAsEffectiveUser} forwards the result obtained by calling the
        given function
        """
        result = util.runAsEffectiveUser(0, 0, lambda: 1)
        self.assertEqual(result, 1)
    def test_takeParameters(self):
        """
        L{util.runAsEffectiveUser} pass the given parameters to the given
        function.
        """
        result = util.runAsEffectiveUser(0, 0, lambda x: 2*x, 3)
        self.assertEqual(result, 6)
    def test_takesKeyworkArguments(self):
        """
        L{util.runAsEffectiveUser} pass the keyword parameters to the given
        function.
        """
        result = util.runAsEffectiveUser(0, 0, lambda x, y=1, z=1: x*y*z, 2, z=3)
        self.assertEqual(result, 6)
    def _testUIDGIDSwitch(self, startUID, startGID, wantUID, wantGID,
                          expectedUIDSwitches, expectedGIDSwitches):
        """
        Helper method checking the calls to C{os.seteuid} and C{os.setegid}
        made by L{util.runAsEffectiveUser}, when switching from startUID to
        wantUID and from startGID to wantGID.

        The expected*Switches lists contain every seteuid/setegid argument in
        order, including the switches back to the starting ids.
        """
        self.mockos.euid = startUID
        self.mockos.egid = startGID
        util.runAsEffectiveUser(
            wantUID, wantGID,
            self._securedFunction, startUID, startGID, wantUID, wantGID)
        self.assertEqual(self.mockos.seteuidCalls, expectedUIDSwitches)
        self.assertEqual(self.mockos.setegidCalls, expectedGIDSwitches)
        # Reset the recorders so the helper can be called repeatedly.
        self.mockos.seteuidCalls = []
        self.mockos.setegidCalls = []
    def test_root(self):
        """
        Check UID/GID switches when current effective UID is root.
        """
        self._testUIDGIDSwitch(0, 0, 0, 0, [], [])
        self._testUIDGIDSwitch(0, 0, 1, 0, [1, 0], [])
        self._testUIDGIDSwitch(0, 0, 0, 1, [], [1, 0])
        self._testUIDGIDSwitch(0, 0, 1, 1, [1, 0], [1, 0])
    def test_UID(self):
        """
        Check UID/GID switches when current effective UID is non-root.
        """
        self._testUIDGIDSwitch(1, 0, 0, 0, [0, 1], [])
        self._testUIDGIDSwitch(1, 0, 1, 0, [], [])
        self._testUIDGIDSwitch(1, 0, 1, 1, [0, 1, 0, 1], [1, 0])
        self._testUIDGIDSwitch(1, 0, 2, 1, [0, 2, 0, 1], [1, 0])
    def test_GID(self):
        """
        Check UID/GID switches when current effective GID is non-root.
        """
        self._testUIDGIDSwitch(0, 1, 0, 0, [], [0, 1])
        self._testUIDGIDSwitch(0, 1, 0, 1, [], [])
        self._testUIDGIDSwitch(0, 1, 1, 1, [1, 0], [])
        self._testUIDGIDSwitch(0, 1, 1, 2, [1, 0], [2, 1])
    def test_UIDGID(self):
        """
        Check UID/GID switches when current effective UID/GID is non-root.
        """
        self._testUIDGIDSwitch(1, 1, 0, 0, [0, 1], [0, 1])
        self._testUIDGIDSwitch(1, 1, 0, 1, [0, 1], [])
        self._testUIDGIDSwitch(1, 1, 1, 0, [0, 1, 0, 1], [0, 1])
        self._testUIDGIDSwitch(1, 1, 1, 1, [], [])
        self._testUIDGIDSwitch(1, 1, 2, 1, [0, 2, 0, 1], [])
        self._testUIDGIDSwitch(1, 1, 1, 2, [0, 1, 0, 1], [2, 1])
        self._testUIDGIDSwitch(1, 1, 2, 2, [0, 2, 0, 1], [2, 1])
class InitGroupsTests(unittest.TestCase):
    """
    Tests for L{util.initgroups}.
    """
    def setUp(self):
        # Restore the patched module attributes after each test; the tests
        # below overwrite them directly instead of using self.patch.
        self.addCleanup(setattr, util, "_initgroups", util._initgroups)
        self.addCleanup(setattr, util, "setgroups", util.setgroups)
    def test_initgroupsInStdlib(self):
        """
        Calling L{util.initgroups} will call the underlying stdlib
        implmentation.
        """
        calls = []
        util._initgroups = lambda x, y: calls.append((x, y))
        setgroupsCalls = []
        util.setgroups = setgroupsCalls.append
        util.initgroups(os.getuid(), 4)
        # The stdlib path is used (with the username, not the uid) and the
        # manual setgroups fallback is not.
        self.assertEqual(calls, [(pwd.getpwuid(os.getuid())[0], 4)])
        self.assertFalse(setgroupsCalls)
    if util._initgroups is None:
        test_initgroupsInStdlib.skip = ("stdlib support for initgroups is not "
                                        "available")
class DeprecationTests(unittest.TestCase):
    """
    Tests for deprecations in C{twisted.python.util}.
    """
    def test_getPluginDirs(self):
        """
        L{util.getPluginDirs} is deprecated.
        """
        util.getPluginDirs()
        currentWarnings = self.flushWarnings(offendingFunctions=[
            self.test_getPluginDirs])
        self.assertEqual(
            currentWarnings[0]['message'],
            "twisted.python.util.getPluginDirs is deprecated since Twisted "
            "12.2.")
        self.assertEqual(currentWarnings[0]['category'], DeprecationWarning)
        self.assertEqual(len(currentWarnings), 1)
    def test_addPluginDir(self):
        """
        L{util.addPluginDir} is deprecated.
        """
        util.addPluginDir()
        currentWarnings = self.flushWarnings(offendingFunctions=[
            self.test_addPluginDir])
        self.assertEqual(
            currentWarnings[0]['message'],
            "twisted.python.util.addPluginDir is deprecated since Twisted "
            "12.2.")
        self.assertEqual(currentWarnings[0]['category'], DeprecationWarning)
        self.assertEqual(len(currentWarnings), 1)
    # addPluginDir calls getPluginDirs internally; suppress that secondary
    # deprecation warning so only addPluginDir's own warning is flushed.
    test_addPluginDir.suppress = [
        SUPPRESS(category=DeprecationWarning,
                 message="twisted.python.util.getPluginDirs is deprecated")
        ]
class SuppressedWarningsTests(unittest.TestCase):
    """
    Tests for L{util.runWithWarningsSuppressed}.
    """
    # staticmethod keeps the function from being bound to the test instance.
    runWithWarningsSuppressed = staticmethod(util.runWithWarningsSuppressed)
    def test_runWithWarningsSuppressedFiltered(self):
        """
        Warnings from the function called by C{runWithWarningsSuppressed} are
        suppressed if they match the passed in filter.
        """
        filters = [(("ignore", ".*foo.*"), {}),
                   (("ignore", ".*bar.*"), {})]
        self.runWithWarningsSuppressed(filters, warnings.warn, "ignore foo")
        self.runWithWarningsSuppressed(filters, warnings.warn, "ignore bar")
        self.assertEqual([], self.flushWarnings())
    def test_runWithWarningsSuppressedUnfiltered(self):
        """
        Warnings from the function called by C{runWithWarningsSuppressed} are
        not suppressed if they do not match the passed in filter.
        """
        filters = [(("ignore", ".*foo.*"), {}),
                   (("ignore", ".*bar.*"), {})]
        self.runWithWarningsSuppressed(filters, warnings.warn, "don't ignore")
        self.assertEqual(
            ["don't ignore"], [w['message'] for w in self.flushWarnings()])
    def test_passThrough(self):
        """
        C{runWithWarningsSuppressed} returns the result of the function it
        called.
        """
        self.assertEqual(self.runWithWarningsSuppressed([], lambda: 4), 4)
    def test_noSideEffects(self):
        """
        Once C{runWithWarningsSuppressed} has returned, it no longer
        suppresses warnings.
        """
        filters = [(("ignore", ".*foo.*"), {}),
                   (("ignore", ".*bar.*"), {})]
        self.runWithWarningsSuppressed(filters, lambda: None)
        # Warned *after* the call: the filter must no longer be in effect.
        warnings.warn("ignore foo")
        self.assertEqual(
            ["ignore foo"], [w['message'] for w in self.flushWarnings()])
class FancyStrMixinTests(unittest.TestCase):
    """
    Tests for L{util.FancyStrMixin}.
    """
    def test_sequenceOfStrings(self):
        """
        If C{showAttributes} is set to a sequence of strings, C{__str__}
        renders using those by looking them up as attributes on the object.
        """
        class Foo(util.FancyStrMixin):
            showAttributes = ("first", "second")
            first = 1
            second = "hello"
        self.assertEqual(str(Foo()), "<Foo first=1 second='hello'>")
    def test_formatter(self):
        """
        If C{showAttributes} has an item that is a 2-tuple, C{__str__} renders
        the first item in the tuple as a key and the result of calling the
        second item with the value of the attribute named by the first item as
        the value.
        """
        class Foo(util.FancyStrMixin):
            showAttributes = (
                "first",
                ("second", lambda value: repr(value[::-1])))
            first = "hello"
            second = "world"
        # "world" reversed by the formatter callable -> 'dlrow'.
        self.assertEqual("<Foo first='hello' second='dlrow'>", str(Foo()))
    def test_override(self):
        """
        If C{showAttributes} has an item that is a 3-tuple, C{__str__} renders
        the second item in the tuple as a key, and the contents of the
        attribute named in the first item are rendered as the value. The value
        is formatted using the third item in the tuple.
        """
        class Foo(util.FancyStrMixin):
            showAttributes = ("first", ("second", "2nd", "%.1f"))
            first = 1
            second = 2.111
        self.assertEqual(str(Foo()), "<Foo first=1 2nd=2.1>")
    def test_fancybasename(self):
        """
        If C{fancybasename} is present, C{__str__} uses it instead of the class name.
        """
        class Foo(util.FancyStrMixin):
            fancybasename = "Bar"
        self.assertEqual(str(Foo()), "<Bar>")
    def test_repr(self):
        """
        C{__repr__} outputs the same content as C{__str__}.
        """
        class Foo(util.FancyStrMixin):
            showAttributes = ("first", "second")
            first = 1
            second = "hello"
        obj = Foo()
        self.assertEqual(str(obj), repr(obj))
class PadToTests(unittest.TestCase):
    """
    Tests for L{util.padTo}, which returns a padded *copy* of its input list.
    """
    def test_default(self):
        """
        L{None} values can be added to a list to cause it to have a certain
        length.
        """
        padded = util.padTo(3, [])
        self.assertEqual([None] * 3, padded)
    def test_specificDefaultValue(self):
        """
        A specific value can be added to a list to cause it to have a certain
        length.
        """
        padded = util.padTo(4, [], "x")
        self.assertEqual(["x"] * 4, padded)
    def test_padNonEmptyList(self):
        """
        A list which already has some items has the padding value added after
        those items.
        """
        padded = util.padTo(3, [1, 2], "z")
        self.assertEqual([1, 2, "z"], padded)
    def test_padToSmallerSize(self):
        """
        L{util.padTo} can't pad a list if the size requested is smaller than
        the size of the list to pad.
        """
        self.assertRaises(ValueError, util.padTo, 1, [1, 2])
    def test_alreadyPadded(self):
        """
        If the list is already the length indicated by the padding argument
        then a list with the same value is returned.
        """
        items = [1, 2]
        padded = util.padTo(len(items), items)
        self.assertEqual(items, padded)
    def test_alreadyPaddedCopies(self):
        """
        If the list is already the length indicated by the padding argument
        then the return value is a copy of the input.
        """
        items = [1, 2]
        padded = util.padTo(len(items), items)
        self.assertIsNot(padded, items)
    def test_makeCopy(self):
        """
        L{util.padTo} doesn't modify the input list but makes a copy.
        """
        items = []
        util.padTo(4, items)
        self.assertEqual([], items)
class ReplaceIfTests(unittest.TestCase):
    """
    Tests for L{util._replaceIf}.
    """
    def test_replacesIfTrue(self):
        """
        L{util._replaceIf} swaps out the body of a function if the conditional
        is C{True}.
        """
        @util._replaceIf(True, lambda: "hi")
        def test():
            return "bye"
        self.assertEqual(test(), "hi")
        # The replacement keeps the decorated function's metadata.
        self.assertEqual(test.__name__, "test")
        self.assertEqual(test.__module__, "twisted.python.test.test_util")
    def test_keepsIfFalse(self):
        """
        L{util._replaceIf} keeps the original body of the function if the
        conditional is C{False}.
        """
        @util._replaceIf(False, lambda: "hi")
        def test():
            return "bye"
        self.assertEqual(test(), "bye")
    def test_multipleReplace(self):
        """
        In the case that multiple conditions are true, the first one
        (to the reader) is chosen by L{util._replaceIf}
        """
        # Decorators apply bottom-up, so the topmost true condition wins.
        @util._replaceIf(True, lambda: "hi")
        @util._replaceIf(False, lambda: "bar")
        @util._replaceIf(True, lambda: "baz")
        def test():
            return "bye"
        self.assertEqual(test(), "hi")
    def test_boolsOnly(self):
        """
        L{util._replaceIf}'s condition argument only accepts bools.
        """
        with self.assertRaises(ValueError) as e:
            @util._replaceIf("hi", "there")
            def test():
                """
                Some test function.
                """
        self.assertEqual(e.exception.args[0],
                         ("condition argument to _replaceIf requires a bool, "
                          "not 'hi'"))
|
|
from collections import defaultdict
from itertools import chain
import logging
from flask import session
from cla_public.apps.checker.constants import YES, NO
from cla_public.apps.checker.means_test import MeansTest
from cla_public.libs.money_interval import MoneyInterval
from cla_public.apps.base.tests import FlaskAppTestCase
logging.getLogger("MARKDOWN").setLevel(logging.WARNING)
def post_money_interval(amount=None, interval="per_month"):
    """
    Build the POST payload for a money-interval form field.

    :param amount: value entered for the amount field (default ``None``).
    :param interval: interval period string (default ``"per_month"``).
    :return: dict with ``per_interval_value`` and ``interval_period`` keys.
    """
    payload = {
        "per_interval_value": amount,
        "interval_period": interval,
    }
    return payload
def about_you_post_data(**kwargs):
    """
    Default POST data for the "About You" form: every yes/no question
    answered NO and zero children/dependants.

    Keyword arguments override individual fields.
    """
    defaults = {
        "have_partner": NO,
        "in_dispute": NO,
        "on_benefits": NO,
        "have_children": NO,
        "num_children": "0",
        "have_dependants": NO,
        "num_dependants": "0",
        "have_savings": NO,
        "have_valuables": NO,
        "own_property": NO,
        "is_employed": NO,
        "partner_is_employed": NO,
        "is_self_employed": NO,
        "partner_is_self_employed": NO,
        "aged_60_or_over": NO,
    }
    return dict(defaults, **kwargs)
def flatten(dict_, prefix=None):
    """
    Flatten a nested dict into a list of (key, value) pairs, joining nested
    key components with hyphens (e.g. ``{"a": {"b": 1}}`` -> ``[("a-b", 1)]``).

    :param dict_: possibly nested dict to flatten.
    :param prefix: list of ancestor key components (internal recursion
        argument; defaults to no prefix).
    :return: list of ("a-b-c", value) tuples in insertion order.
    """
    # Fix: the original used a mutable default argument (prefix=[]); use a
    # None sentinel so every call gets a fresh list.
    if prefix is None:
        prefix = []
    pairs = []
    for key, val in dict_.items():
        if isinstance(val, dict):
            # Recurse with this key appended to the prefix path.
            pairs.extend(flatten(val, prefix + [key]))
        else:
            pairs.append(("-".join(prefix + [key]), val))
    return pairs
def flatten_prop(prop):
    """
    Flatten one ``(index, property_dict)`` pair into form-field tuples keyed
    ``properties-<index>-...``.
    """
    index, fields = prop
    return flatten(fields, ["properties", str(index)])
def properties_post_data(*properties):
    """
    Build flat POST data for the properties form from one or more property
    dicts, numbering them in the order given.
    """
    flattened = map(flatten_prop, enumerate(properties))
    return dict(chain.from_iterable(flattened))
# Fixture: owner-occupied, mortgaged property worth £10,000 (values are
# form-input strings; ``in_dispute`` here refers to the property itself).
first_property = {
    "is_main_home": YES,
    "other_shareholders": NO,
    "property_value": "10,000.00",
    "mortgage_remaining": "9,000.00",
    "mortgage_payments": "800.00",
    "is_rented": NO,
    "rent_amount": post_money_interval(""),
    "in_dispute": NO,
}
# Fixture: a second property worth £20,000 with £10,000 mortgage remaining.
second_property = {
    "is_main_home": YES,
    "other_shareholders": NO,
    "property_value": "20,000.00",
    "mortgage_remaining": "10,000.00",
    "mortgage_payments": "700.00",
    "is_rented": NO,
    "rent_amount": post_money_interval(""),
    "in_dispute": NO,
}
def rented(prop, rent):
    """
    Return a copy of property dict ``prop`` marked as rented out for
    ``rent`` (a money-interval payload). The input dict is not modified.
    """
    rented_prop = dict(prop)
    rented_prop.update(is_rented=YES, rent_amount=rent)
    return rented_prop
def update_session(form, **kwargs):
    # Merge ``kwargs`` into the per-form dict stored on the checker session,
    # creating the form entry if it does not exist yet.
    # NOTE(review): the explicit re-assignment (rather than setdefault) looks
    # deliberate — presumably the __setitem__ marks the session as modified;
    # confirm before simplifying.
    session.checker[form] = session.checker.get(form, {})
    session.checker[form].update(**kwargs)
class TestMeansTest(FlaskAppTestCase):
    """
    Tests that MeansTest payloads are assembled correctly from checker form
    data held in the Flask session.
    """
    def setUp(self):
        super(TestMeansTest, self).setUp()
        self.client = self.app.test_client()
        session.clear()
    def assertDictValues(self, expected, actual):
        # ``expected`` may be a defaultdict: keys not explicitly overridden
        # compare against its default; only keys present in ``actual`` are
        # checked.
        for key, val in actual.items():
            self.assertEqual(expected[key], val, "%s is %r, not %r" % (key, val, expected[key]))
    def assertIncome(self, income, default=None, **override):
        # Assert the income section has exactly the expected keys, then
        # compare values (``override`` pins named fields, ``default`` covers
        # the rest).
        expected = set(
            [
                "earnings",
                "benefits",
                "tax_credits",
                "child_benefits",
                "other_income",
                "self_employment_drawings",
                "maintenance_received",
                "pension",
                "self_employed",
            ]
        )
        self.assertSetEqual(expected, set(income.keys()))
        expected = defaultdict(lambda: default)
        expected["total"] = 0
        expected["self_employed"] = NO
        expected.update(override)
        self.assertDictValues(expected, income)
    def assertOutgoings(self, outgoings, default=None, **override):
        # Same pattern as assertIncome, for the deductions section.
        expected = set(
            [
                "income_tax",
                "mortgage",
                "childcare",
                "rent",
                "maintenance",
                "national_insurance",
                "criminal_legalaid_contributions",
            ]
        )
        self.assertSetEqual(expected, set(outgoings.keys()))
        expected = defaultdict(lambda: default)
        expected["criminal_legalaid_contributions"] = 0
        expected.update(override)
        self.assertDictValues(expected, outgoings)
    def assertSavings(self, savings, default=None, **override):
        # Same pattern as assertIncome, for the savings section.
        expected = set(["credit_balance", "investment_balance", "asset_balance", "bank_balance"])
        self.assertSetEqual(expected, set(savings.keys()))
        expected = defaultdict(lambda: default)
        expected.update(override)
        self.assertDictValues(expected, savings)
    def assertMeansTestInitialized(self, mt, partner=False):
        # A freshly constructed MeansTest starts with zeroed counts and
        # zero finances for "you" (and for "partner" when requested).
        self.assertEqual(0, mt["dependants_young"])
        self.assertEqual(0, mt["dependants_old"])
        self.assertEqual(NO, mt["on_passported_benefits"])
        self.assertEqual(NO, mt["on_nass_benefits"])
        self.assertEqual({}, mt["specific_benefits"])
        expected = set(["income", "savings", "deductions"])
        self.assertSetEqual(expected, set(mt["you"].keys()))
        self.assertIncome(mt["you"]["income"], default=MoneyInterval(0))
        self.assertOutgoings(mt["you"]["deductions"], default=MoneyInterval(0))
        self.assertSavings(mt["you"]["savings"], default=0)
        if partner:
            self.assertIncome(mt["partner"]["income"], default=MoneyInterval(0))
            self.assertOutgoings(mt["partner"]["deductions"], default=MoneyInterval(0))
            self.assertSavings(mt["partner"]["savings"], default=0)
    def assertNullFinances(self, person, income_overrides={}, outgoings_overrides={}, savings_overrides={}):
        # Fields the user still has to fill in are expected to be null
        # (empty MoneyInterval); everything else zero.
        # NOTE(review): mutable default arguments — safe here because they
        # are only read, but worth replacing with None sentinels.
        self.assertIncome(
            person["income"],
            default=MoneyInterval(0),
            earnings=MoneyInterval(),
            pension=MoneyInterval(),
            maintenance_received=MoneyInterval(),
            other_income=MoneyInterval(),
            **income_overrides
        )
        self.assertOutgoings(
            person["deductions"],
            default=MoneyInterval(),
            mortgage=MoneyInterval(0),
            criminal_legalaid_contributions=None,
            **outgoings_overrides
        )
        self.assertSavings(person["savings"], default=0, **savings_overrides)
    def test_initialization(self):
        mt = MeansTest()
        self.assertMeansTestInitialized(mt)
        update_session("AboutYouForm", have_partner=YES, in_dispute=NO)
        mt = MeansTest()
        self.assertMeansTestInitialized(mt)
    def test_about_you_all_no(self):
        update_session("AboutYouForm", is_employed=YES, have_children=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data(is_employed=YES))
        self.assertEqual(NO, mt["on_passported_benefits"])
        self.assertEqual(NO, mt["on_nass_benefits"])
        self.assertEqual({}, mt["specific_benefits"])
        self.assertEqual(0, mt["dependants_young"])
        self.assertEqual(0, mt["dependants_old"])
        self.assertEqual(NO, mt["is_you_or_your_partner_over_60"])
        self.assertEqual(NO, mt["has_partner"])
        self.assertEqual(NO, mt["you"]["income"]["self_employed"])
        # fields that will need to be filled in must be set to null
        self.assertNullFinances(mt["you"])
        self.assertNotIn("partner", mt)
        self.assertEqual([], mt["property_set"])
    def test_about_you_have_partner(self):
        update_session("AboutYouForm", have_partner=YES, in_dispute=NO)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data(have_partner=YES, in_dispute=NO))
        self.assertEqual(YES, mt["has_partner"])
        self.assertEqual(NO, mt["partner"]["income"]["self_employed"])
        update_session("AboutYouForm", have_partner=YES, in_dispute=NO, partner_is_self_employed=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form(
            "AboutYouForm", about_you_post_data(have_partner=YES, in_dispute=NO, partner_is_self_employed=YES)
        )
        self.assertEqual(YES, mt["partner"]["income"]["self_employed"])
        # A partner in dispute is excluded from the means test entirely.
        update_session("AboutYouForm", have_partner=YES, in_dispute=YES, partner_is_self_employed=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form(
            "AboutYouForm", about_you_post_data(have_partner=YES, in_dispute=YES, partner_is_self_employed=YES)
        )
        self.assertNotIn("partner", mt)
        self.assertEqual([], mt["property_set"])
    def test_benefits_passported(self):
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data(on_benefits=YES))
        mt.update_from_form("YourBenefitsForm", {"benefits": ["income_support"]})
        self.assertTrue(mt["on_passported_benefits"])
        expected = {
            "income_support": True,
            "job_seekers_allowance": False,
            "pension_credit": False,
            "universal_credit": False,
            "employment_support": False,
        }
        self.assertEqual(expected, mt["specific_benefits"])
        self.assertIncome(mt["you"]["income"], default=MoneyInterval(0))
        self.assertOutgoings(mt["you"]["deductions"], default=MoneyInterval(0))
        self.assertSavings(mt["you"]["savings"], default=0)
        self.assertEqual([], mt["property_set"])
    def test_benefits_not_passported(self):
        update_session("AboutYouForm", is_employed=YES, have_children=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data(on_benefits=YES))
        mt.update_from_form("YourBenefitsForm", {"benefits": "other-benefit"})
        self.assertFalse(mt["on_passported_benefits"])
        expected = {
            "income_support": False,
            "job_seekers_allowance": False,
            "pension_credit": False,
            "universal_credit": False,
            "employment_support": False,
        }
        self.assertEqual(expected, mt["specific_benefits"])
        self.assertNullFinances(mt["you"], income_overrides={"child_benefits": MoneyInterval()})
        self.assertEqual([], mt["property_set"])
    def test_child_benefits(self):
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data())
        post_data = dict(
            flatten({"child_benefit": post_money_interval("12", "per_week"), "benefits": ["child_benefit"]})
        )
        mt.update_from_form("YourBenefitsForm", post_data)
        self.assertFalse(mt["on_passported_benefits"])
        # Money amounts are held in pence: "12" (£12/week) -> 1200.
        self.assertEqual(MoneyInterval(1200, "per_week"), mt["you"]["income"]["child_benefits"])
    def test_property(self):
        update_session("AboutYouForm", is_employed=YES, have_children=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", dict(own_property=YES))
        mt.update_from_form("PropertiesForm", properties_post_data(first_property))
        self.assertIncome(
            mt["you"]["income"],
            default=MoneyInterval(0),
            earnings=MoneyInterval(),
            pension=MoneyInterval(),
            maintenance_received=MoneyInterval(),
            self_employed=None,
        )
        self.assertOutgoings(
            mt["you"]["deductions"],
            default=MoneyInterval(),
            criminal_legalaid_contributions=None,
            mortgage=MoneyInterval("800.00", "per_month"),
        )
        self.assertSavings(mt["you"]["savings"], default=0)
        # Property values/mortgage are converted from pounds strings to pence.
        expected = [
            {
                "value": 1000000,
                "mortgage_left": 900000,
                "share": 100,
                "disputed": NO,
                "rent": MoneyInterval(0),
                "main": YES,
            }
        ]
        self.assertDictEqual(expected[0], mt["property_set"][0])
    def test_multiple_property(self):
        update_session("AboutYouForm", is_employed=YES, have_children=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", dict(own_property=YES))
        mt.update_from_form("PropertiesForm", properties_post_data(first_property, second_property))
        self.assertIncome(
            mt["you"]["income"],
            default=MoneyInterval(0),
            earnings=MoneyInterval(),
            pension=MoneyInterval(),
            maintenance_received=MoneyInterval(),
            self_employed=None,
        )
        # Mortgage payments from both properties are summed (800 + 700).
        self.assertOutgoings(
            mt["you"]["deductions"],
            default=MoneyInterval(),
            criminal_legalaid_contributions=None,
            mortgage=MoneyInterval("1500.00", "per_month"),
        )
        self.assertSavings(mt["you"]["savings"], default=0)
        expected = [
            {
                "value": 1000000,
                "mortgage_left": 900000,
                "share": 100,
                "disputed": NO,
                "rent": MoneyInterval(0),
                "main": YES,
            },
            {
                "value": 2000000,
                "mortgage_left": 1000000,
                "share": 100,
                "disputed": NO,
                "rent": MoneyInterval(0),
                "main": YES,
            },
        ]
        self.assertDictEqual(expected[0], mt["property_set"][0])
        self.assertDictEqual(expected[1], mt["property_set"][1])
    def test_rent(self):
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", dict(own_property=YES))
        session.checker["AboutYouForm"] = {"have_partner": NO, "own_property": YES}
        prop = rented(first_property, post_money_interval("100.00"))
        mt.update_from_form("PropertiesForm", properties_post_data(prop))
        session.checker["PropertiesForm"] = {"properties": [prop]}
        # Rent received shows up as other_income.
        self.assertIncome(
            mt["you"]["income"],
            default=MoneyInterval(0),
            earnings=MoneyInterval(0),
            pension=MoneyInterval(),
            maintenance_received=MoneyInterval(),
            other_income=MoneyInterval("100.00"),
            self_employed=None,
        )
    def test_multiple_rents(self):
        update_session("AboutYouForm", is_employed=YES, have_children=YES)
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", dict(own_property=YES))
        session.checker["AboutYouForm"] = {"own_property": YES}
        prop1 = rented(first_property, post_money_interval("100.00"))
        prop2 = rented(second_property, post_money_interval("50.00"))
        mt.update_from_form("PropertiesForm", properties_post_data(prop1, prop2))
        session.checker["PropertiesForm"] = {"properties": [prop1, prop2]}
        # Rents from both properties are summed into other_income.
        self.assertIncome(
            mt["you"]["income"],
            default=MoneyInterval(0),
            earnings=MoneyInterval(),
            pension=MoneyInterval(),
            maintenance_received=MoneyInterval(),
            other_income=MoneyInterval("150.00"),
            self_employed=None,
        )
    def test_savings(self):
        session.checker["category"] = "debt"
        mt = MeansTest()
        about_data = {"have_savings": YES, "have_valuables": YES}
        mt.update_from_form("AboutYouForm", about_data)
        session.checker["AboutYouForm"] = about_data
        mt.update_from_form("SavingsForm", {"savings": "1,000.00", "investments": "0.00", "valuables": "500.00"})
        # Pounds strings are converted to pence.
        self.assertEqual(100000, mt["you"]["savings"]["bank_balance"])
        self.assertEqual(0, mt["you"]["savings"]["investment_balance"])
        self.assertEqual(50000, mt["you"]["savings"]["asset_balance"])
    def test_additional_benefits(self):
        session.checker["category"] = "debt"
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data())
        post_data = dict(
            flatten(
                {"benefits": [], "other_benefits": YES, "total_other_benefit": post_money_interval("3", "per_week")}
            )
        )
        mt.update_from_form("AdditionalBenefitsForm", post_data)
        self.assertFalse(mt["on_nass_benefits"])
        self.assertEqual(MoneyInterval(300, "per_week"), mt["you"]["income"]["benefits"])
    def test_nass_benefits(self):
        # NB: asylum support is no longer available in the benefits list
        mt = MeansTest()
        mt.update_from_form(
            "AdditionalBenefitsForm",
            dict(
                flatten(
                    {
                        "benefits": ["asylum-support"],
                        "other_benefits": NO,
                        "total_other_benefit": post_money_interval("0"),
                    }
                )
            ),
        )
        self.assertTrue(mt["on_nass_benefits"])
    def test_child_tax_credits_and_working_tax_credits(self):
        update_session("AboutYouForm", is_employed=YES, have_children=YES)
        mt = MeansTest()
        mt.update_from_form(
            "IncomeForm",
            dict(
                flatten(
                    {
                        "your_income": {
                            "earnings": post_money_interval("0"),
                            "income_tax": post_money_interval("0"),
                            "national_insurance": post_money_interval("0"),
                            "child_tax_credit": post_money_interval("10.00", "per_week"),
                            "working_tax_credit": post_money_interval("10.00"),
                            "maintenance": post_money_interval("0"),
                            "pension": post_money_interval("0"),
                            "other_income": post_money_interval("0"),
                        }
                    }
                )
            ),
        )
        # £10.00/week normalises to £43.33/month; plus £10.00/month working
        # tax credit gives £53.33 = 5333 pence per month.
        self.assertEqual(MoneyInterval(5333), mt["you"]["income"]["tax_credits"])
    def test_income_self_employed(self):
        session.checker["AboutYouForm"] = {"is_self_employed": YES, "is_employed": NO}
        mt = MeansTest()
        mt.update_from_form("AboutYouForm", about_you_post_data(is_self_employed=YES, is_employed=NO))
        mt.update_from_form(
            "IncomeForm",
            dict(
                flatten(
                    {
                        "your_income": {
                            "earnings": post_money_interval("1"),
                            "income_tax": post_money_interval("2"),
                            "national_insurance": post_money_interval("3"),
                            "working_tax_credit": post_money_interval("4"),
                            "maintenance": post_money_interval("5"),
                            "pension": post_money_interval("6"),
                            "other_income": post_money_interval("7"),
                        }
                    }
                )
            ),
        )
        # For the self-employed, reported earnings are recorded as
        # self-employment drawings, not earnings.
        self.assertEqual(MoneyInterval(0), mt["you"]["income"]["earnings"])
        self.assertEqual(MoneyInterval(100), mt["you"]["income"]["self_employment_drawings"])
        self.assertEqual(MoneyInterval(200), mt["you"]["deductions"]["income_tax"])
        self.assertEqual(MoneyInterval(300), mt["you"]["deductions"]["national_insurance"])
        self.assertEqual(MoneyInterval(400), mt["you"]["income"]["tax_credits"])
        self.assertEqual(MoneyInterval(500), mt["you"]["income"]["maintenance_received"])
        self.assertEqual(MoneyInterval(600), mt["you"]["income"]["pension"])
        self.assertEqual(MoneyInterval(700), mt["you"]["income"]["other_income"])
    def test_partner_income(self):
        session.checker["AboutYouForm"] = {"have_partner": YES, "partner_is_employed": YES}
        mt = MeansTest()
        mt.update_from_form(
            "IncomeForm",
            dict(
                flatten(
                    {
                        "your_income": {
                            "earnings": post_money_interval("0"),
                            "income_tax": post_money_interval("0"),
                            "national_insurance": post_money_interval("0"),
                            "working_tax_credit": post_money_interval("0"),
                            "maintenance": post_money_interval("0"),
                            "pension": post_money_interval("0"),
                            "other_income": post_money_interval("0"),
                        },
                        "partner_income": {
                            "earnings": post_money_interval("1"),
                            "income_tax": post_money_interval("2"),
                            "national_insurance": post_money_interval("3"),
                            "working_tax_credit": post_money_interval("4"),
                            "maintenance": post_money_interval("5"),
                            "pension": post_money_interval("6"),
                            "other_income": post_money_interval("7"),
                        },
                    }
                )
            ),
        )
        self.assertEqual(MoneyInterval(100), mt["partner"]["income"]["earnings"])
        self.assertEqual(MoneyInterval(200), mt["partner"]["deductions"]["income_tax"])
        self.assertEqual(MoneyInterval(300), mt["partner"]["deductions"]["national_insurance"])
        self.assertEqual(MoneyInterval(400), mt["partner"]["income"]["tax_credits"])
        self.assertEqual(MoneyInterval(500), mt["partner"]["income"]["maintenance_received"])
        self.assertEqual(MoneyInterval(600), mt["partner"]["income"]["pension"])
        self.assertEqual(MoneyInterval(700), mt["partner"]["income"]["other_income"])
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
import abc
import unittest
# test_interfaces is referenced from specification in this module.
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _receiver
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class TestCase(test_coverage.Coverage, unittest.TestCase):
  """A test of the Face layer of RPC Framework.
  Concrete subclasses must have an "implementation" attribute of type
  test_interfaces.Implementation and an "invoker_constructor" attribute of type
  _invocation.InvokerConstructor.
  """
  # NOTE(review): this module uses Python 2 idioms (__metaclass__,
  # dict.iteritems) and so targets Python 2.
  __metaclass__ = abc.ABCMeta
  # NOTE(review): NAME appears unused within this class; presumably it is
  # consumed by the surrounding test harness — confirm before removing.
  NAME = 'EventInvocationSynchronousEventServiceTest'
  def setUp(self):
    """See unittest.TestCase.setUp for full specification.
    Overriding implementations must call this implementation.
    """
    # Build a digest of the stock test service, instantiate the
    # implementation under test, and wrap it in an invoker.
    self._control = test_control.PauseFailControl()
    self._digest = _digest.digest(
        _stock_service.STOCK_TEST_SERVICE, self._control, None)
    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
        self._digest.methods, self._digest.event_method_implementations, None)
    self._invoker = self.invoker_constructor.construct_invoker(
        generic_stub, dynamic_stubs, self._digest.methods)
  def tearDown(self):
    """See unittest.TestCase.tearDown for full specification.
    Overriding implementations must call this implementation.
    """
    self._invoker = None
    self.implementation.destantiate(self._memo)
  # Each test below drives the event-style API: the RPC is started with a
  # Receiver for callbacks, then the test blocks until termination and
  # verifies the observed responses (or the abortion kind).
  def testSuccessfulUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        self._invoker.event(group, method)(
            request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        receiver.block_until_terminated()
        response = receiver.unary_response()
        test_messages.verify(request, response, self)
  def testSuccessfulUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        self._invoker.event(group, method)(
            request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        receiver.block_until_terminated()
        responses = receiver.stream_responses()
        test_messages.verify(request, responses, self)
  def testSuccessfulStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        receiver = _receiver.Receiver()
        call_consumer = self._invoker.event(group, method)(
            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        for request in requests:
          call_consumer.consume(request)
        call_consumer.terminate()
        receiver.block_until_terminated()
        response = receiver.unary_response()
        test_messages.verify(requests, response, self)
  def testSuccessfulStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        receiver = _receiver.Receiver()
        call_consumer = self._invoker.event(group, method)(
            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        for request in requests:
          call_consumer.consume(request)
        call_consumer.terminate()
        receiver.block_until_terminated()
        responses = receiver.stream_responses()
        test_messages.verify(requests, responses, self)
  def testSequentialInvocations(self):
    # pylint: disable=cell-var-from-loop
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        second_request = test_messages.request()
        second_receiver = _receiver.Receiver()
        def make_second_invocation():
          self._invoker.event(group, method)(
              second_request, second_receiver, second_receiver.abort,
              test_constants.LONG_TIMEOUT)
        # The second RPC is only started from the completion callback of
        # the first, guaranteeing strictly sequential invocations.
        class FirstReceiver(_receiver.Receiver):
          def complete(self, terminal_metadata, code, details):
            super(FirstReceiver, self).complete(
                terminal_metadata, code, details)
            make_second_invocation()
        first_receiver = FirstReceiver()
        self._invoker.event(group, method)(
            first_request, first_receiver, first_receiver.abort,
            test_constants.LONG_TIMEOUT)
        second_receiver.block_until_terminated()
        first_response = first_receiver.unary_response()
        second_response = second_receiver.unary_response()
        test_messages.verify(first_request, first_response, self)
        test_messages.verify(second_request, second_response, self)
  def testParallelInvocations(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        first_receiver = _receiver.Receiver()
        second_request = test_messages.request()
        second_receiver = _receiver.Receiver()
        self._invoker.event(group, method)(
            first_request, first_receiver, first_receiver.abort,
            test_constants.LONG_TIMEOUT)
        self._invoker.event(group, method)(
            second_request, second_receiver, second_receiver.abort,
            test_constants.LONG_TIMEOUT)
        first_receiver.block_until_terminated()
        second_receiver.block_until_terminated()
        first_response = first_receiver.unary_response()
        second_response = second_receiver.unary_response()
        test_messages.verify(first_request, first_response, self)
        test_messages.verify(second_request, second_response, self)
  @unittest.skip('TODO(nathaniel): implement.')
  def testWaitingForSomeButNotAllParallelInvocations(self):
    raise NotImplementedError()
  # Cancellation tests: cancel the call and expect a CANCELLED abortion.
  def testCancelledUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        # Pause the service so the call cannot complete before cancel().
        with self._control.pause():
          call = self._invoker.event(group, method)(
              request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
          call.cancel()
          receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
  def testCancelledUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        call = self._invoker.event(group, method)(
            request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        call.cancel()
        receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
  def testCancelledStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        receiver = _receiver.Receiver()
        call_consumer = self._invoker.event(group, method)(
            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        for request in requests:
          call_consumer.consume(request)
        call_consumer.cancel()
        receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
  def testCancelledStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for unused_test_messages in test_messages_sequence:
        receiver = _receiver.Receiver()
        call_consumer = self._invoker.event(group, method)(
            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
        call_consumer.cancel()
        receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
  # Expiration tests: use a very short timeout and expect EXPIRED.
  def testExpiredUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        with self._control.pause():
          self._invoker.event(group, method)(
              request, receiver, receiver.abort,
              _3069_test_constant.REALLY_SHORT_TIMEOUT)
          receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
  def testExpiredUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        with self._control.pause():
          self._invoker.event(group, method)(
              request, receiver, receiver.abort,
              _3069_test_constant.REALLY_SHORT_TIMEOUT)
          receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
  def testExpiredStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for unused_test_messages in test_messages_sequence:
        receiver = _receiver.Receiver()
        self._invoker.event(group, method)(
            receiver, receiver.abort, _3069_test_constant.REALLY_SHORT_TIMEOUT)
        receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
  def testExpiredStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        receiver = _receiver.Receiver()
        call_consumer = self._invoker.event(group, method)(
            receiver, receiver.abort, _3069_test_constant.REALLY_SHORT_TIMEOUT)
        for request in requests:
          call_consumer.consume(request)
        receiver.block_until_terminated()
        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
  # Failure tests: make the service raise and expect REMOTE_FAILURE.
  def testFailedUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        with self._control.fail():
          self._invoker.event(group, method)(
              request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
          receiver.block_until_terminated()
        self.assertIs(
            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
  def testFailedUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        receiver = _receiver.Receiver()
        with self._control.fail():
          self._invoker.event(group, method)(
              request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
          receiver.block_until_terminated()
        self.assertIs(
            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
  def testFailedStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        receiver = _receiver.Receiver()
        with self._control.fail():
          call_consumer = self._invoker.event(group, method)(
              receiver, receiver.abort, test_constants.LONG_TIMEOUT)
          for request in requests:
            call_consumer.consume(request)
          call_consumer.terminate()
          receiver.block_until_terminated()
        self.assertIs(
            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
  def testFailedStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        receiver = _receiver.Receiver()
        with self._control.fail():
          call_consumer = self._invoker.event(group, method)(
              receiver, receiver.abort, test_constants.LONG_TIMEOUT)
          for request in requests:
            call_consumer.consume(request)
          call_consumer.terminate()
          receiver.block_until_terminated()
        self.assertIs(
            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
|
|
"""
Basic IIR Bilinear Transform-Based Digital Filter Design Helper
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop,
            fs = 1.00, ftype = 'butter', status = True):
    """
    Design an IIR lowpass filter using scipy.signal.iirdesign.
    The filter order is determined based on
    f_pass Hz, f_stop Hz, and the desired stopband attenuation
    d_stop in dB, all relative to a sampling rate of fs Hz.
    Parameters
    ----------
    f_pass : Passband critical frequency in Hz
    f_stop : Stopband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass
    Atten_stop : Filter attenuation in dB at f_stop
    fs : Sampling rate in Hz
    ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
            'ellip', and 'bessel'
    status : If True, log the resulting filter order
    Returns
    -------
    b : ndarray of the numerator coefficients
    a : ndarray of the denominator coefficients
    sos : 2D ndarray of second-order section coefficients
    Notes
    -----
    Additionally a text string telling the user the filter order is
    written to the console, e.g., IIR cheby1 order = 8.
    Examples
    --------
    >>> fs = 48000
    >>> f_pass = 5000
    >>> f_stop = 8000
    >>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')
    >>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')
    >>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')
    >>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')
    Mark Wickert October 2016
    """
    # Run the design once in zpk form, then convert to both requested
    # output formats.  scipy designs internally in zpk and converts with
    # zpk2tf/zpk2sos, so this matches two separate iirdesign calls with
    # output='ba' and output='sos' while doing the design work only once.
    z, p, k = signal.iirdesign(2 * float(f_pass) / fs, 2 * float(f_stop) / fs,
                               Ripple_pass, Atten_stop,
                               ftype=ftype, output='zpk')
    b, a = signal.zpk2tf(z, p, k)
    sos = signal.zpk2sos(z, p, k)
    if status:
        # Filter order is the denominator degree; lazy %-args keep logging cheap.
        log.info('IIR %s order = %d.', ftype, len(a) - 1)
    return b, a, sos
def IIR_hpf(f_stop, f_pass, Ripple_pass, Atten_stop,
            fs = 1.00, ftype = 'butter', status = True):
    """
    Design an IIR highpass filter using scipy.signal.iirdesign.
    The filter order is determined based on
    f_pass Hz, f_stop Hz, and the desired stopband attenuation
    d_stop in dB, all relative to a sampling rate of fs Hz.
    Parameters
    ----------
    f_stop : Stopband critical frequency in Hz
    f_pass : Passband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass
    Atten_stop : Filter attenuation in dB at f_stop
    fs : sampling rate in Hz
    ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
            'ellip', and 'bessel'
    status : If True, log the resulting filter order
    Returns
    -------
    b : ndarray of the numerator coefficients
    a : ndarray of the denominator coefficients
    sos : 2D ndarray of second-order section coefficients
    Examples
    --------
    >>> fs = 48000
    >>> f_pass = 8000
    >>> f_stop = 5000
    >>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')
    >>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')
    >>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')
    >>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')
    Mark Wickert October 2016
    """
    # Single zpk design converted to both forms (see IIR_lpf); avoids
    # running the full iirdesign optimization twice.
    z, p, k = signal.iirdesign(2 * float(f_pass) / fs, 2 * float(f_stop) / fs,
                               Ripple_pass, Atten_stop,
                               ftype=ftype, output='zpk')
    b, a = signal.zpk2tf(z, p, k)
    sos = signal.zpk2sos(z, p, k)
    if status:
        log.info('IIR %s order = %d.', ftype, len(a) - 1)
    return b, a, sos
def IIR_bpf(f_stop1, f_pass1, f_pass2, f_stop2, Ripple_pass, Atten_stop,
            fs = 1.00, ftype = 'butter', status = True):
    """
    Design an IIR bandpass filter using scipy.signal.iirdesign.
    The filter order is determined based on
    f_pass Hz, f_stop Hz, and the desired stopband attenuation
    d_stop in dB, all relative to a sampling rate of fs Hz.
    Parameters
    ----------
    f_stop1 : Lower stopband critical frequency in Hz
    f_pass1 : Lower passband critical frequency in Hz
    f_pass2 : Upper passband critical frequency in Hz
    f_stop2 : Upper stopband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass1/f_pass2
    Atten_stop : Filter attenuation in dB at f_stop1/f_stop2
    fs : sampling rate in Hz
    ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
            'ellip', and 'bessel'
    status : If True, log the resulting filter order
    Returns
    -------
    b : ndarray of the numerator coefficients
    a : ndarray of the denominator coefficients
    sos : 2D ndarray of second-order section coefficients
    Examples
    --------
    >>> fs = 48000
    >>> b_but,a_but,sos_but = IIR_bpf(4000,5000,9000,10000,0.5,60,fs,'butter')
    >>> b_elli,a_elli,sos_elli = IIR_bpf(4000,5000,9000,10000,0.5,60,fs,'ellip')
    Mark Wickert October 2016
    """
    # Single zpk design converted to both forms (see IIR_lpf).
    z, p, k = signal.iirdesign([2 * float(f_pass1) / fs, 2 * float(f_pass2) / fs],
                               [2 * float(f_stop1) / fs, 2 * float(f_stop2) / fs],
                               Ripple_pass, Atten_stop,
                               ftype=ftype, output='zpk')
    b, a = signal.zpk2tf(z, p, k)
    sos = signal.zpk2sos(z, p, k)
    if status:
        log.info('IIR %s order = %d.', ftype, len(a) - 1)
    return b, a, sos
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop,
            fs = 1.00, ftype = 'butter', status = True):
    """
    Design an IIR bandstop filter using scipy.signal.iirdesign.
    The filter order is determined based on
    f_pass Hz, f_stop Hz, and the desired stopband attenuation
    d_stop in dB, all relative to a sampling rate of fs Hz.
    Parameters
    ----------
    f_pass1 : Lower passband critical frequency in Hz
    f_stop1 : Lower stopband critical frequency in Hz
    f_stop2 : Upper stopband critical frequency in Hz
    f_pass2 : Upper passband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass1/f_pass2
    Atten_stop : Filter attenuation in dB at f_stop1/f_stop2
    fs : sampling rate in Hz
    ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
            'ellip', and 'bessel'
    status : If True, log the resulting filter order
    Returns
    -------
    b : ndarray of the numerator coefficients
    a : ndarray of the denominator coefficients
    sos : 2D ndarray of second-order section coefficients
    Mark Wickert October 2016
    """
    # Single zpk design converted to both forms (see IIR_lpf).
    z, p, k = signal.iirdesign([2 * float(f_pass1) / fs, 2 * float(f_pass2) / fs],
                               [2 * float(f_stop1) / fs, 2 * float(f_stop2) / fs],
                               Ripple_pass, Atten_stop,
                               ftype=ftype, output='zpk')
    b, a = signal.zpk2tf(z, p, k)
    sos = signal.zpk2sos(z, p, k)
    if status:
        log.info('IIR %s order = %d.', ftype, len(a) - 1)
    return b, a, sos
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
    """
    Display frequency response magnitude, phase, or group delay for a
    LIST of digital filters, overlaid on a single matplotlib figure.

    freqz_resp_list(b, a=[1], mode='dB', fs=1.0, Npts=1024, fsize=(6,4))

        b = list of ndarrays of numerator coefficients (one per filter)
        a = list of ndarrays of denominator coefficients
     mode = display mode: 'dB' magnitude, 'phase' in radians, or
            'groupdelay_s' in samples and 'groupdelay_t' in sec,
            all versus frequency in Hz
     Npts = number of frequency points to plot; default is 1024
    fsize = figure size; default is (6,4) inches

    Returns nothing; plots into the current matplotlib figure.

    Mark Wickert, January 2015
    """
    if type(b) == list:
        # We have a list of filters
        N_filt = len(b)
    # NOTE(review): if b is NOT a list, N_filt is never bound and the
    # for-loop below raises NameError — this function only supports lists.
    # Frequencies from 0 to fs/2 (normalized 0..0.5), Npts samples.
    f = np.arange(0,Npts)/(2.0*Npts)
    for n in range(N_filt):
        w,H = signal.freqz(b[n],a[n],2*np.pi*f)
        if n == 0:
            # Create the shared figure on the first filter only.
            plt.figure(figsize=fsize)
        if mode.lower() == 'db':
            plt.plot(f*fs,20*np.log10(np.abs(H)))
            if n == N_filt-1:
                # Label once, after the last curve is drawn.
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Gain (dB)')
                plt.title('Frequency Response - Magnitude')
        elif mode.lower() == 'phase':
            plt.plot(f*fs,np.angle(H))
            if n == N_filt-1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Phase (rad)')
                plt.title('Frequency Response - Phase')
        elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
            """
            Notes
            -----
            Since this calculation involves finding the derivative of the
            phase response, care must be taken at phase wrapping points
            and when the phase jumps by +/-pi, which occurs when the
            amplitude response changes sign. Since the amplitude response
            is zero when the sign changes, the jumps do not alter the group
            delay results.
            """
            theta = np.unwrap(np.angle(H))
            # Since theta for an FIR filter is likely to have many pi phase
            # jumps too, we unwrap a second time 2*theta and divide by 2
            theta2 = np.unwrap(2*theta)/2.
            theta_dif = np.diff(theta2)  # NOTE(review): unused
            f_diff = np.diff(f)  # NOTE(review): unused
            # Group delay = -d(phase)/d(omega), discrete approximation.
            Tg = -np.diff(theta2)/np.diff(w)
            # For gain almost zero set groupdelay = 0
            # NOTE(review): H is complex here; np.log10 of a complex array is
            # complex and comparing it with a real threshold may warn or raise
            # on modern numpy — np.abs(H[:-1]) was probably intended.
            idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
            Tg[idx] = np.zeros(len(idx))
            max_Tg = np.max(Tg)
            #print(max_Tg)
            if mode.lower() == 'groupdelay_t':
                # Convert from samples to seconds for the plot and limits.
                max_Tg /= fs
                plt.plot(f[:-1]*fs,Tg/fs)
                plt.ylim([0,1.2*max_Tg])
            else:
                plt.plot(f[:-1]*fs,Tg)
                plt.ylim([0,1.2*max_Tg])
            if n == N_filt-1:
                plt.xlabel('Frequency (Hz)')
                if mode.lower() == 'groupdelay_t':
                    plt.ylabel('Group Delay (s)')
                else:
                    plt.ylabel('Group Delay (samples)')
                plt.title('Frequency Response - Group Delay')
        else:
            # Unknown mode: report and draw nothing for this filter.
            s1 = 'Error, mode must be "dB", "phase, '
            s2 = '"groupdelay_s", or "groupdelay_t"'
            log.info(s1 + s2)
def freqz_cas(sos, w):
    """
    Frequency response of a cascade of second-order sections.

    Each row of ``sos`` holds one biquad as [b0, b1, b2, a0, a1, a2];
    the overall response is the product of the per-section responses
    evaluated at the radian frequencies in ``w``.

    Returns the (w, H) pair in the same form as scipy.signal.freqz.

    Mark Wickert October 2016
    """
    n_sections = sos.shape[0]
    # Seed the running product with the first section's response.
    w, h_total = signal.freqz(sos[0, :3], sos[0, 3:], w)
    for idx in range(1, n_sections):
        w, h_sec = signal.freqz(sos[idx, :3], sos[idx, 3:], w)
        h_total = h_total * h_sec
    return w, h_total
def freqz_resp_cas_list(sos, mode = 'dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
    """
    Display frequency response magnitude, phase, or group delay for a
    LIST of cascade (sos) digital filters, overlaid on one figure.

    freqz_resp_cas_list(sos, mode='dB', fs=1.0, n_pts=1024, fsize=(6,4))

      sos = list of 2D ndarrays of second-order-section coefficients
     mode = display mode: 'dB' magnitude, 'phase' in radians, or
            'groupdelay_s' in samples and 'groupdelay_t' in sec,
            all versus frequency in Hz
    n_pts = number of frequency points to plot; default is 1024
    fsize = figure size; default is (6,4) inches

    Returns nothing; plots into the current matplotlib figure.

    Mark Wickert, January 2015
    """
    if type(sos) == list:
        # We have a list of filters
        N_filt = len(sos)
    # NOTE(review): if sos is NOT a list, N_filt is never bound and the
    # for-loop below raises NameError — this function only supports lists.
    f = np.arange(0, n_pts) / (2.0 * n_pts)
    for n in range(N_filt):
        # Response of the n-th cascade, via per-section product.
        w,H = freqz_cas(sos[n],2*np.pi*f)
        if n == 0:
            plt.figure(figsize=fsize)
        if mode.lower() == 'db':
            plt.plot(f*fs,20*np.log10(np.abs(H)))
            if n == N_filt-1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Gain (dB)')
                plt.title('Frequency Response - Magnitude')
        elif mode.lower() == 'phase':
            plt.plot(f*fs,np.angle(H))
            if n == N_filt-1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Phase (rad)')
                plt.title('Frequency Response - Phase')
        elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
            """
            Notes
            -----
            Since this calculation involves finding the derivative of the
            phase response, care must be taken at phase wrapping points
            and when the phase jumps by +/-pi, which occurs when the
            amplitude response changes sign. Since the amplitude response
            is zero when the sign changes, the jumps do not alter the group
            delay results.
            """
            theta = np.unwrap(np.angle(H))
            # Since theta for an FIR filter is likely to have many pi phase
            # jumps too, we unwrap a second time 2*theta and divide by 2
            theta2 = np.unwrap(2*theta)/2.
            theta_dif = np.diff(theta2)  # NOTE(review): unused
            f_diff = np.diff(f)  # NOTE(review): unused
            Tg = -np.diff(theta2)/np.diff(w)
            # For gain almost zero set groupdelay = 0
            # NOTE(review): complex H in np.log10 — np.abs(H[:-1]) was
            # probably intended (see freqz_resp_list).
            idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
            Tg[idx] = np.zeros(len(idx))
            max_Tg = np.max(Tg)
            #print(max_Tg)
            if mode.lower() == 'groupdelay_t':
                max_Tg /= fs
                plt.plot(f[:-1]*fs,Tg/fs)
                plt.ylim([0,1.2*max_Tg])
            else:
                plt.plot(f[:-1]*fs,Tg)
                plt.ylim([0,1.2*max_Tg])
            if n == N_filt-1:
                plt.xlabel('Frequency (Hz)')
                if mode.lower() == 'groupdelay_t':
                    plt.ylabel('Group Delay (s)')
                else:
                    plt.ylabel('Group Delay (samples)')
                plt.title('Frequency Response - Group Delay')
        else:
            s1 = 'Error, mode must be "dB", "phase, '
            s2 = '"groupdelay_s", or "groupdelay_t"'
            log.info(s1 + s2)
def unique_cpx_roots(rlist, tol=0.001):
    """
    Collapse a list of (complex) roots into unique roots + multiplicities.

    Roots closer than ``tol`` are treated as the same root; the stored
    value is the running average of all members of the cluster, so the
    average of the root values is used when multiplicity is greater
    than one.

    Parameters
    ----------
    rlist : sequence of (complex) roots; must be non-empty
    tol : absolute distance below which two roots are merged

    Returns
    -------
    uniq : ndarray of unique (averaged) root values
    mult : ndarray of integer multiplicities, aligned with uniq

    Mark Wickert October 2016
    """
    uniq = [rlist[0]]
    mult = [1]
    for root in rlist[1:]:
        for m in range(len(uniq)):
            if abs(root - uniq[m]) <= tol:
                # Merge into the existing cluster: bump the count and
                # update the running average of the cluster's members.
                mult[m] += 1
                uniq[m] = (uniq[m] * (mult[m] - 1) + root) / float(mult[m])
                break
        else:
            # No existing root within tol: start a new cluster.
            # (Bug fix: the original appended unconditionally, so roots
            # that had just been merged above were also duplicated in
            # the output list.)
            uniq.append(root)
            mult.append(1)
    return np.array(uniq), np.array(mult)
def sos_cascade(sos1, sos2):
    """
    Cascade two filters given as second-order-section arrays.

    The rows of ``sos2`` are stacked below the rows of ``sos1``, giving
    a single (Ns1+Ns2, 6) sos array whose response is the product of
    the two cascades.

    Mark Wickert October 2016
    """
    stacked = np.vstack((sos1, sos2))
    return stacked
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001):
    """
    Create an z-plane pole-zero plot.
    Create an z-plane pole-zero plot using the numerator
    and denominator z-domain system function coefficient
    ndarrays b and a respectively. Assume descending powers of z.
    Parameters
    ----------
    sos : ndarray of the sos coefficients
    auto_scale : bool (default True)
    size : plot radius maximum when scale = False
    tol : tolerance used when grouping repeated roots (see
          unique_cpx_roots)
    Returns
    -------
    (M,N) : tuple of zero and pole counts + plot window
    Notes
    -----
    This function tries to identify repeated poles and zeros and will
    place the multiplicity number above and to the right of the pole or zero.
    The difficulty is setting the tolerance for this detection. Currently it
    is set at 1e-3 via the function signal.unique_roots.
    Examples
    --------
    >>> # Here the plot is generated using auto_scale
    >>> sos_zplane(sos)
    >>> # Here the plot is generated using manual scaling
    >>> sos_zplane(sos,False,1.5)
    """
    Ns,Mcol = sos.shape
    # Extract roots from sos num and den removing z = 0
    # roots due to first-order sections
    # NOTE(review): indexing roots_tmp[1] assumes each 3-coefficient
    # section yields at least two roots; a section like [b0, 0, 0]
    # would break this — confirm upstream sections are well formed.
    N_roots = []
    for k in range(Ns):
        N_roots_tmp = np.roots(sos[k,:3])
        if N_roots_tmp[1] == 0.:
            # First-order section: drop the artificial z = 0 root.
            N_roots = np.hstack((N_roots,N_roots_tmp[0]))
        else:
            N_roots = np.hstack((N_roots,N_roots_tmp))
    D_roots = []
    for k in range(Ns):
        D_roots_tmp = np.roots(sos[k,3:])
        if D_roots_tmp[1] == 0.:
            D_roots = np.hstack((D_roots,D_roots_tmp[0]))
        else:
            D_roots = np.hstack((D_roots,D_roots_tmp))
    # Plot labels if multiplicity greater than 1
    # NOTE(review): x_scale/y_scale are computed from the INITIAL size
    # argument, before auto_scale may overwrite size below — so label
    # offsets do not track the auto-scaled axis.  Possibly intentional.
    x_scale = 1.5*size
    y_scale = 1.5*size
    x_off = 0.02
    y_off = 0.01
    M = len(N_roots)
    N = len(D_roots)
    if auto_scale:
        # Grow the plot radius to just contain the largest root.
        if M > 0 and N > 0:
            size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
        elif M > 0:
            size = max(np.max(np.abs(N_roots)),1.0)+.1
        elif N > 0:
            size = max(1.0,np.max(np.abs(D_roots)))+.1
        else:
            size = 1.1
    # Draw the unit circle and the real/imaginary axes.
    plt.figure(figsize=(5,5))
    plt.axis('equal')
    r = np.linspace(0,2*np.pi,200)
    plt.plot(np.cos(r),np.sin(r),'r--')
    plt.plot([-size,size],[0,0],'k-.')
    plt.plot([0,0],[-size,size],'k-.')
    if M > 0:
        #N_roots = np.roots(b)
        # Zeros: plot unique locations, annotate multiplicities > 1.
        N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol)
        plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
        idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
        for k in range(len(idx_N_mult)):
            x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
            y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
            plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),
                ha='center',va='bottom',fontsize=10)
    if N > 0:
        #D_roots = np.roots(a)
        # Poles: same treatment, marked with 'x'.
        D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
        plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
        idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
        for k in range(len(idx_D_mult)):
            x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
            y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
            plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),
                ha='center',va='bottom',fontsize=10)
    # A degree mismatch between numerator and denominator implies
    # additional zeros (M < N) or poles (M > N) at the origin.
    if M - N < 0:
        plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
    elif M - N > 0:
        plt.plot(0.0,0.0,'kx',ms=8)
    if abs(M - N) > 1:
        plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),
            ha='center',va='bottom',fontsize=10)
    plt.xlabel('Real Part')
    plt.ylabel('Imaginary Part')
    plt.title('Pole-Zero Plot')
    #plt.grid()
    plt.axis([-size,size,-size,size])
    return M,N
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from google.appengine.ext import ndb
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import alert_group
from dashboard.models import anomaly
from dashboard.models import sheriff
from dashboard.models import stoppage_alert
class AnomalyGroupingTest(testing_common.TestCase):
  """Test case for the behavior of updating anomaly groups."""
  def _AddAnomalies(self):
    """Adds a set of sample data used in the tests below."""
    # Fixture: three Anomaly entities on one test path.  The first two
    # share one AlertGroup and bug 12345; the third is alone in a second
    # group with no bug.
    testing_common.AddTests(
        ['ChromiumGPU'], ['linux-release'],
        {'scrolling_benchmark': {'first_paint': {}}})
    first_paint_key = utils.TestKey(
        'ChromiumGPU/linux-release/scrolling_benchmark/first_paint')
    first_paint_test = first_paint_key.get()
    first_paint_test.improvement_direction = anomaly.DOWN
    first_paint_test.put()
    group_keys = [
        alert_group.AlertGroup(
            start_revision=3000,
            end_revision=4000,
            alert_kind='Anomaly',
            test_suites=['scrolling_benchmark']).put(),
        alert_group.AlertGroup(
            start_revision=6000,
            end_revision=8000,
            alert_kind='Anomaly',
            test_suites=['scrolling_benchmark']).put(),
    ]
    anomaly_keys = [
        anomaly.Anomaly(
            start_revision=2000,
            end_revision=4000,
            bug_id=12345,
            test=first_paint_key).put(),
        anomaly.Anomaly(
            start_revision=3000,
            end_revision=5000,
            bug_id=12345,
            test=first_paint_key).put(),
        anomaly.Anomaly(
            start_revision=6000,
            end_revision=8000,
            bug_id=None,
            test=first_paint_key).put(),
    ]
    anomalies = ndb.get_multi(anomaly_keys)
    # Add these anomalies to groups and put them again.
    anomalies[0].group = group_keys[0]
    anomalies[0].put()
    anomalies[1].group = group_keys[0]
    anomalies[1].put()
    anomalies[2].group = group_keys[1]
    anomalies[2].put()
    # Note that after these anomalies are added, the state of the two groups
    # is updated. Also, the first two anomalies are in the same group.
    self.assertEqual(anomalies[0].group, anomalies[1].group)
    self.assertNotEqual(anomalies[0].group, anomalies[2].group)
    return anomalies
  def testUpdateAnomalyBugId_UpdatesGroupOfAnomaly(self):
    """Setting an Anomaly's bug_id re-groups it on put()."""
    anomalies = self._AddAnomalies()
    # At first, two anomalies are in separate groups, and the second anomaly
    # has not been assigned a bug ID.
    self.assertNotEqual(anomalies[1].group, anomalies[2].group)
    self.assertEqual(12345, anomalies[1].bug_id)
    self.assertIsNone(anomalies[2].bug_id)
    # Test setting bug_id. Anomaly should be moved to new group.
    anomalies[2].bug_id = 12345
    anomalies[2].put()
    self.assertEqual(anomalies[1].bug_id, anomalies[2].bug_id)
  def testMarkAnomalyInvalid_AnomalyIsRemovedFromGroup(self):
    anomalies = self._AddAnomalies()
    # At first, two anomalies are in the same group.
    self.assertEqual(anomalies[0].group, anomalies[1].group)
    # Mark one of the alerts as invalid.
    self.assertEqual(12345, anomalies[1].bug_id)
    alert_group.ModifyAlertsAndAssociatedGroups([anomalies[1]], bug_id=-1)
    # Now, the alert marked as invalid has no group.
    # Also, the group's revision range has been updated accordingly.
    self.assertNotEqual(anomalies[0].group, anomalies[1].group)
    self.assertIsNone(anomalies[1].group)
    group = anomalies[0].group.get()
    self.assertEqual(2000, group.start_revision)
    self.assertEqual(4000, group.end_revision)
  def testUpdateAnomalyRevisionRange_UpdatesGroupRevisionRange(self):
    anomalies = self._AddAnomalies()
    # Add another anomaly to the same group as the first two anomalies,
    # but with a non-overlapping revision range.
    new_anomaly = anomaly.Anomaly(
        start_revision=3000,
        end_revision=4000,
        group=anomalies[0].group,
        test=utils.TestKey('master/bot/benchmark/metric'))
    new_anomaly.put()
    # Associate it with a group; alert_group.ModifyAlertsAndAssociatedGroups
    # will update the group's revision range here.
    alert_group.ModifyAlertsAndAssociatedGroups(
        [new_anomaly], start_revision=3010, end_revision=3020)
    # Now the group's revision range is updated.
    group = anomalies[0].group.get()
    self.assertEqual(3010, group.start_revision)
    self.assertEqual(3020, group.end_revision)
  def testUpdateAnomalyGroup_BugIDValid_GroupUpdated(self):
    anomalies = self._AddAnomalies()
    group1_key = anomalies[0].group
    group2_key = anomalies[2].group
    alert_group.ModifyAlertsAndAssociatedGroups(anomalies, bug_id=11111)
    # Both groups should have their bug_id's updated
    self.assertEqual(11111, group1_key.get().bug_id)
    self.assertEqual(11111, group2_key.get().bug_id)
  def testUpdateAnomalyGroup_BugIDInvalid_GroupDeleted(self):
    anomalies = self._AddAnomalies()
    group1_key = anomalies[0].group
    group2_key = anomalies[2].group
    # bug_id=-1 marks the alerts invalid (see the test above).
    alert_group.ModifyAlertsAndAssociatedGroups(anomalies, bug_id=-1)
    # Both groups should have been deleted
    self.assertIsNone(group1_key.get())
    self.assertIsNone(group2_key.get())
  def testUpdateAnomalyGroup_BugIDUntriaged_GroupRetainsBugID(self):
    anomalies = self._AddAnomalies()
    group1_key = anomalies[0].group
    # Groups aren't assigned a bug_id until after an alert is placed in the
    # group with a bug_id.
    group = group1_key.get()
    group.bug_id = 12345
    group.put()
    # Un-triaging only the first alert leaves the group's bug_id alone,
    # since another alert in the group still carries bug 12345.
    alert_group.ModifyAlertsAndAssociatedGroups(anomalies[:1], bug_id=None)
    self.assertEqual(12345, group1_key.get().bug_id)
  def testUpdateAnomalyGroup_BugIDUntriaged_GroupIsNone(self):
    anomalies = self._AddAnomalies()
    group2_key = anomalies[2].group
    # First give that group an actual bug_id
    alert_group.ModifyAlertsAndAssociatedGroups(anomalies[2:], bug_id=11111)
    self.assertEqual(11111, group2_key.get().bug_id)
    # Now un-triage the bug; since it's the only one in the group the group's
    # bug_id should also be None
    alert_group.ModifyAlertsAndAssociatedGroups(anomalies[2:], bug_id=None)
    # The group's bug_id should have been cleared.
    self.assertIsNone(group2_key.get().bug_id)
  def testUpdateGroup_InvalidRange_PropertiesAreUpdated(self):
    anomalies = self._AddAnomalies()
    # Add another anomaly to the same group as the first two anomalies
    # by setting its bug ID to match that of an existing group.
    new_anomaly = anomaly.Anomaly(
        start_revision=3000,
        end_revision=4000,
        group=anomalies[0].group,
        test=utils.TestKey('master/bot/benchmark/metric'))
    new_anomaly_key = new_anomaly.put()
    # Change the anomaly revision to invalid range.
    alert_group.ModifyAlertsAndAssociatedGroups(
        [new_anomaly], start_revision=10, end_revision=20)
    # After adding this new anomaly, it belongs to the group, and the group
    # no longer has a minimum revision range.
    group = anomalies[0].group.get()
    self.assertEqual(anomalies[0].group, new_anomaly_key.get().group)
    self.assertIsNone(group.start_revision)
    self.assertIsNone(group.end_revision)
    # Remove the new anomaly from the group by marking it invalid.
    alert_group.ModifyAlertsAndAssociatedGroups(
        [new_anomaly_key.get()], bug_id=-1)
    # Now, the anomaly group's revision range is valid again.
    group = anomalies[0].group.get()
    self.assertEqual(3000, group.start_revision)
    self.assertEqual(4000, group.end_revision)
class StoppageAlertGroupingTest(testing_common.TestCase):
  """Test case for the behavior of updating StoppageAlert groups."""
  def _AddStoppageAlerts(self):
    """Creates two StoppageAlerts on sibling test paths and returns them."""
    testing_common.AddTests(
        ['ChromiumGPU'], ['linux-release'],
        {
            'scrolling_benchmark': {
                'dropped_foo': {},
                'dropped_bar': {},
            }
        })
    foo_path = 'ChromiumGPU/linux-release/scrolling_benchmark/dropped_foo'
    bar_path = 'ChromiumGPU/linux-release/scrolling_benchmark/dropped_bar'
    foo_test = utils.TestKey(foo_path).get()
    bar_test = utils.TestKey(bar_path).get()
    # One Row at revision 200 per test gives each alert a concrete point.
    foo_row = testing_common.AddRows(foo_path, {200})[0]
    bar_row = testing_common.AddRows(bar_path, {200})[0]
    foo_alert_key = stoppage_alert.CreateStoppageAlert(foo_test, foo_row).put()
    bar_alert_key = stoppage_alert.CreateStoppageAlert(bar_test, bar_row).put()
    return [foo_alert_key.get(), bar_alert_key.get()]
  def testStoppageAlertGroup_GroupAssignedUponCreation(self):
    """Newly created StoppageAlerts are grouped as kind 'StoppageAlert'."""
    foo_test, bar_test = self._AddStoppageAlerts()
    self.assertIsNotNone(foo_test.group)
    self.assertIsNotNone(bar_test.group)
    self.assertEqual('StoppageAlert', foo_test.group.get().alert_kind)
class GroupAlertsTest(testing_common.TestCase):
  """Tests for alert_group.GroupAlerts, which assigns alerts to groups."""
  def _CreateAnomalyForTests(
      self, revision_range, test, sheriff_key, bug_id, is_improvement):
    """Returns a sample anomaly with some default properties."""
    anomaly_entity = anomaly.Anomaly(
        start_revision=revision_range[0], end_revision=revision_range[1],
        test=test, median_before_anomaly=100, median_after_anomaly=200,
        sheriff=sheriff_key, bug_id=bug_id, is_improvement=is_improvement)
    return anomaly_entity
  def _AddSheriffs(self):
    """Puts two Sheriff entities and returns their keys."""
    sheriff1 = sheriff.Sheriff(
        id='Chromium Perf Sheriff', email='chrisphan@google.com').put()
    sheriff2 = sheriff.Sheriff(
        id='QA Perf Sheriff', email='chrisphan@google.com').put()
    return [sheriff1, sheriff2]
  def _AddTests(self):
    """Adds two test suites on two masters; returns the ChromiumGPU keys."""
    test_data = {
        'scrolling_benchmark': {
            'first_paint': {},
        },
        'tab_capture': {
            'capture': {},
        }
    }
    testing_common.AddTests(['ChromiumGPU'], ['linux-release'], test_data)
    testing_common.AddTests(['QAPerf'], ['linux-release'], test_data)
    scrolling_test = utils.TestKey(
        'ChromiumGPU/linux-release/scrolling_benchmark/first_paint')
    tab_capture_test = utils.TestKey(
        'ChromiumGPU/linux-release/tab_capture/capture')
    for test_key in [scrolling_test, tab_capture_test]:
      test = test_key.get()
      test.improvement_direction = anomaly.DOWN
      test.put()
    return [scrolling_test, tab_capture_test]
  def testGroupAlerts_WithNoAssociation_MakesNewGroup(self):
    sheriffs = self._AddSheriffs()
    tests = self._AddTests()
    # Add some anomaly groups.
    alert_group.AlertGroup(
        bug_id=None,
        start_revision=3000,
        end_revision=6000,
        alert_kind='Anomaly',
        test_suites=['scrolling_benchmark']).put()
    alert_group.AlertGroup(
        bug_id=104,
        start_revision=7000,
        end_revision=9000,
        alert_kind='Anomaly',
        test_suites=['tab_capture']).put()
    # The new alerts' range (1000, 2000) overlaps neither existing group.
    improvement_anomaly = self._CreateAnomalyForTests(
        revision_range=(1000, 2000), test=tests[0], sheriff_key=sheriffs[0],
        bug_id=None, is_improvement=True)
    regression_anomaly = self._CreateAnomalyForTests(
        revision_range=(1000, 2000), test=tests[0], sheriff_key=sheriffs[0],
        bug_id=None, is_improvement=False)
    test_suite = 'scrolling_benchmark'
    alert_group.GroupAlerts(
        [regression_anomaly, improvement_anomaly], test_suite, 'Anomaly')
    # The regression Anomaly was not grouped with a group that has a bug ID,
    # so the bug ID is not changed.
    self.assertIsNone(regression_anomaly.bug_id)
    # Improvement Anomaly should not be auto-triaged.
    self.assertIsNone(improvement_anomaly.group)
    alert_groups = alert_group.AlertGroup.query().fetch()
    # A third, brand-new group was created for the regression.
    self.assertEqual(3, len(alert_groups))
    self.assertEqual(
        (1000, 2000),
        (alert_groups[2].start_revision, alert_groups[2].end_revision))
    self.assertIsNone(alert_groups[2].bug_id)
    self.assertEqual(alert_groups[2].test_suites, [test_suite])
  def testGroupAlerts_WithExistingGroup(self):
    sheriffs = self._AddSheriffs()
    tests = self._AddTests()
    # Add some anomaly groups.
    alert_group.AlertGroup(
        bug_id=None,
        start_revision=3000,
        end_revision=6000,
        alert_kind='Anomaly',
        test_suites=['scrolling_benchmark']).put()
    tab_capture_group = alert_group.AlertGroup(
        bug_id=104,
        start_revision=7000,
        end_revision=9000,
        alert_kind='Anomaly',
        test_suites=['tab_capture']).put()
    # Range (6000, 8000) overlaps the tab_capture group's (7000, 9000).
    improvement_anomaly = self._CreateAnomalyForTests(
        revision_range=(6000, 8000), test=tests[1], sheriff_key=sheriffs[0],
        bug_id=None, is_improvement=True)
    regression_anomaly = self._CreateAnomalyForTests(
        revision_range=(6000, 8000), test=tests[1], sheriff_key=sheriffs[0],
        bug_id=None, is_improvement=False)
    alert_group.GroupAlerts(
        [regression_anomaly, improvement_anomaly], 'tab_capture', 'Anomaly')
    # The regression Anomaly's bug ID is changed because it has been grouped.
    self.assertEqual(104, regression_anomaly.bug_id)
    self.assertEqual(tab_capture_group, regression_anomaly.group)
    # Improvement Anomaly should not be grouped.
    self.assertIsNone(improvement_anomaly.group)
    alert_groups = alert_group.AlertGroup.query().fetch()
    self.assertEqual(2, len(alert_groups))
    # The group's range narrowed to the intersection with the new alert.
    self.assertEqual(
        (7000, 8000),
        (alert_groups[1].start_revision, alert_groups[1].end_revision))
  def testGroupAlerts_WithExistingGroupThatHasDifferentKind_DoesntGroup(self):
    sheriffs = self._AddSheriffs()
    tests = self._AddTests()
    group_key = alert_group.AlertGroup(
        bug_id=None,
        start_revision=3000,
        end_revision=6000,
        alert_kind='OtherAlert',
        test_suites=['tab_capture']).put()
    my_alert = self._CreateAnomalyForTests(
        revision_range=(4000, 5000), test=tests[1], sheriff_key=sheriffs[0],
        bug_id=None, is_improvement=False)
    # Kind mismatch ('Anomaly' vs 'OtherAlert'): a new group is created.
    alert_group.GroupAlerts([my_alert], 'tab_capture', 'Anomaly')
    self.assertNotEqual(group_key, my_alert.group)
    self.assertEqual('Anomaly', my_alert.group.get().alert_kind)
    # If the alert kind that's passed when calling GroupAlerts matches
    # the alert kind of the existing group, then it will be grouped.
    alert_group.GroupAlerts([my_alert], 'tab_capture', 'OtherAlert')
    self.assertEqual(group_key, my_alert.group)
    self.assertEqual('OtherAlert', my_alert.group.get().alert_kind)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
|
"""Classes to work with sam and bam files"""
import struct, zlib, sys, re, itertools
from collections import namedtuple
import seqtools.format.sam
from seqtools.format.sam.header import SAMHeader
#from seqtools.sequence import rc
from cStringIO import StringIO
from string import maketrans
# Translation table mapping BAM numeric CIGAR op codes '0'-'8' to the SAM
# CIGAR operation characters (used when decoding binary CIGAR data).
_bam_ops = maketrans('012345678','MIDNSHP=X')
# Translation table for sequence decoding: chars 'a'-'p' map to base codes
# '=ACMGRSVTWYHKDBN' — presumably the 16 values of BAM's 4-bit base
# encoding shifted into the ASCII range; verify against the decoder.
_bam_char = maketrans('abcdefghijklmnop','=ACMGRSVTWYHKDBN')
# Optional-field value types: BAM type code -> [byte width, struct format].
_bam_value_type = {'c':[1,'<b'],'C':[1,'<B'],'s':[2,'<h'],'S':[2,'<H'],'i':[4,'<i'],'I':[4,'<I']}
from seqtools.format.sam import TagDatum, CIGARDatum, check_flag
class BAMEntries:
   """BAM entry data in same format as SAM entries tuple.

   Wraps the decoded fixed-width fields of one BAM record (a BAMFields
   tuple) and lazily decodes the cigar, seq, qual and optional-field
   byte blobs on first access, caching the results.  Fields that are
   absent are reported as None, mirroring the SAM '*'/0/255 sentinels.
   """
   def __init__(self,binary_data):
      # binary_data is expected to be a BAMFields namedtuple (see below).
      self._bentries = binary_data
      # Lazily-decoded caches; None means "not decoded yet".
      self._cigar_string = None
      self._cigar_array = None
      self._seq = None
      self._qual = None
      self._optional_fields = None
   def is_aligned(self):
      # SAM flag bit 0x4 means "segment unmapped".
      return not check_flag(self.flag,0x4)
   ### The getters for all the fields
   @property
   def qname(self): return self._bentries.qname
   @property
   def flag(self): return self._bentries.flag
   @property
   def rname(self):
      # Unaligned reads have no meaningful reference name.
      if not self.is_aligned(): return '*'
      return self._bentries.rname
   @property
   def pos(self):
      # Position 0 is the SAM "unavailable" sentinel.
      if self._bentries.pos == 0: return None
      return self._bentries.pos
   @property
   def mapq(self):
      # MAPQ 255 is the SAM "unavailable" sentinel.
      if self._bentries.mapq == 255: return None
      return self._bentries.mapq
   @property
   def cigar(self):
      # Decode the binary CIGAR on first access; '*' caches "no cigar".
      if self._cigar_string:
         if self._cigar_string == '*': return None
         return self._cigar_string
      # _bin_to_cigar is defined elsewhere in this module (out of view);
      # it appears to return (array form, string form).
      v1,v2 = _bin_to_cigar(self._bentries.cigar_bytes)
      if not v2: self._cigar_string = '*'
      else: self._cigar_string = v2
      self._cigar_array = v1
      # Re-enter through the property so the cached value is returned.
      return self.cigar
   def get_cigar_array(self):
      # Touch .cigar first so _cigar_array is populated as a side effect.
      c = self.cigar
      return self._cigar_array
   @property
   def rnext(self): return self._bentries.rnext
   @property
   def pnext(self): return self._bentries.pnext
   @property
   def tlen(self): return self._bentries.tlen
   @property
   def seq(self):
      # Decode the packed sequence bytes once; '*' caches "no sequence".
      if self._seq:
         if self._seq == '*': return None
         return self._seq
      self._seq = _bin_to_seq(self._bentries.seq_bytes)
      if not self._seq: self._seq = '*'
      return self.seq
      #if self._seq == '*': return None
      #return self._seq
   @property
   def qual(self):
      # Decode the quality bytes once; '*' caches "no qualities".
      if self._qual:
         if self._qual == '*': return None
         return self._qual
      self._qual = _bin_to_qual(self._bentries.qual_bytes)
      if not self._qual: self._qual = '*'
      return self.qual
      #if self._qual == '*': return None
      #return self._qual
   @property
   def optional_fields(self):
      # Decode the optional-field bytes once; also populates self._tags.
      # NOTE(review): when decoding produces '' the empty-string cache is
      # falsy, so the blob is re-decoded on every access for empty fields.
      if self._optional_fields:
         if self._optional_fields == '': return None
         return self._optional_fields
      self._tags, self._optional_fields = _bin_to_extra(self._bentries.extra_bytes)
      if not self._optional_fields: self._optional_fields = ''
      return self.optional_fields
   def get_tags(self):
      # Touch .optional_fields first so _tags is populated as a side effect.
      o = self.optional_fields
      return self._tags
"""BAM entry (much like a sam line)"""
BAMOptions = namedtuple('BAMOptions',
['reference', # reference dictionary
'header',
'blockStart',
'innerStart',
'payload'])
BAMFields = namedtuple('BAMFields',
['qname',
'flag',
'rname',
'pos',
'mapq',
'cigar_bytes',
'rnext',
'pnext',
'tlen',
'seq_bytes',
'qual_bytes',
'extra_bytes'])
class BAM(seqtools.format.sam.SAM):
"""Very much like a sam entry but optimized for access from a bam
Slows down for accessing things that need more decoding like
sequence, quality, cigar string, and tags
.. warning:: Having the reference names and the reference sizes we may save some time by not reading the header each time we access the file. Theres probably a more efficent coarse to go by defining a bamfile object and having a BAMline entry being the extension of sam, and drawing most of this stuff from the bam file
:param bin_data: byte data for just a single bam entry (seems unnecessary since we have the file)
:param ref_names: array of refernece names
:param fileName: the bam file name
:param blockStart:
:param innerStart:
:param ref_lengths: seems unncessary to take the reference lengths because we can always get that from the header
:param reference:
:param line_number:
:type bin_data: bytes
:type ref_names: list of names
:type blockStart: where to begin in the file
:type innerStart: where to begin in the decompressed block
:type ref_lengths: dict()
:type reference: dict()
:type line_number: int
"""
   def __init__(self,bin_data,ref_names,options=None):
      # bin_data: the raw bytes of a single BAM record; ref_names: the
      # reference-name list from the BAM header; options: a BAMOptions
      # tuple (defaults created on demand).
      if not options: options = BAM.Options()
      self._options = options
      self._bin_data = bin_data
      self._ref_names = ref_names
      self.bentries = _parse_bam_data_block(self._bin_data,self._ref_names) #the binary data and the header is enough to parse
      self._entries = BAMEntries(self.bentries)
      # Lazily-computed state (see base class SAM for usage).
      self._line = None
      self._target_range = None
      self._alignment_ranges = None #should be accessed by method because of BAM
      return
@property
def entries(self): return self._entries
#Alignment Ranges are calculated in SAM
@staticmethod
def Options(**kwargs):
""" A method for declaring options for the class"""
construct = BAMOptions #IMPORTANT! Set this
names = construct._fields
d = {}
for name in names: d[name] = None #default values
for k,v in kwargs.iteritems():
if k in names: d[k] = v
else: raise ValueError('Error '+k+' is not a property of these options')
"""Create a set of options based on the inputs"""
return construct(**d)
def get_coord(self):
"""get the current coordinate
:return: [blockStart, innerStart]
:rtype: list is a pair [int, int]
"""
return [self._options.blockStart,self._options.innerStart]
@property
def blockStart(self):
"""Maybe none if not set"""
return self._options.blockStart
@property
def innerStart(self):
"""Maybe none if not set"""
return self._options.innerStart
def __str__(self):
return self.sam_line
@property
def sam_line(self):
out = self.entries.qname + "\t"
out += str(self.entries.flag) + "\t"
if self.entries.rname is None:
out += '*' + "\t"
else:
out += self.entries.rname + "\t"
if self.entries.pos is None:
out += '0' + "\t"
else:
out += str(self.entries.pos) + "\t"
if self.entries.mapq is None:
out += '255'+"\t"
else:
out += str(self.entries.mapq) + "\t"
if self.entries.cigar is None:
out += '*'+"\t"
else:
out += self.entries.cigar+"\t"
if self.entries.rnext is None:
out += '*'+"\t"
else:
out += self.entries.rnext+"\t"
if self.entries.pnext is None:
out += '0'+"\t"
else:
out += str(self.entries.pnext)+"\t"
if self.entries.tlen is None:
out += '0'+"\t"
else:
out += str(self.entries.tlen)+"\t"
if self.entries.seq is None:
out += '*'+"\t"
else:
out += self.entries.seq+"\t"
if self.entries.qual is None:
out += '*'
else:
out += self.entries.qual
if self.entries.optional_fields:
out += "\t"+self.entries.optional_fields
return out
@property
def cigar_array(self):
"""produce the cigar in list form
:return: Cigar list of [value (int), type (char)] pairs
:rtype: list
"""
return self.entries.get_cigar_array()
@property
def tags(self):
return self.entries.get_tags()
def _parse_bam_data_block(bin_in,ref_names):
   """Unpack one raw BAM alignment record into a BAMFields namedtuple.

   Decodes the fixed-width leading fields per the BAM specification; the
   cigar, sequence, quality and auxiliary-tag payloads are returned as raw
   bytes for lazy decoding elsewhere.

   :param bin_in: bytes of a single alignment record (without the leading block_size int)
   :param ref_names: list of reference sequence names, indexed by refID
   :return: the decoded record
   :rtype: BAMFields
   """
   data = StringIO(bin_in)
   rname_num = struct.unpack('<i',data.read(4))[0]
   v_rname = ref_names[rname_num] #refID to check in ref names
   v_pos = struct.unpack('<i',data.read(4))[0] + 1 #POS: BAM is 0-based, SAM is 1-based
   bin_mq_nl = struct.unpack('<I',data.read(4))[0]
   # renamed from 'bin' so the builtin is not shadowed; the index bin is
   # currently unused but kept for clarity of the field layout
   v_bin = bin_mq_nl >> 16
   v_mapq = (bin_mq_nl & 0xFF00) >> 8 #mapq
   l_read_name = bin_mq_nl & 0xFF #length of qname (includes trailing NUL)
   flag_nc = struct.unpack('<I',data.read(4))[0] #flag and n_cigar_op
   v_flag = flag_nc >> 16
   n_cigar_op = flag_nc & 0xFFFF
   l_seq = struct.unpack('<i',data.read(4))[0]
   rnext_num = struct.unpack('<i',data.read(4))[0]
   if rnext_num == -1:
      v_rnext = '*'
   else:
      v_rnext = ref_names[rnext_num] #next_refID in ref_names
   v_pnext = struct.unpack('<i',data.read(4))[0]+1 #pnext
   tlen = struct.unpack('<i',data.read(4))[0]
   v_tlen = tlen
   v_qname = data.read(l_read_name).rstrip('\0') #read_name or qname
   v_cigar_bytes = data.read(n_cigar_op*4) #4 bytes per cigar operation
   v_seq_bytes = data.read((l_seq+1)/2) #4-bit packed bases (Python 2 integer division)
   v_qual_bytes = data.read(l_seq)
   v_extra_bytes = data.read() #everything left is the auxiliary tag data
   #last second tweak: a mate on the same reference is written '=' in SAM
   if v_rnext == v_rname: v_rnext = '='
   return BAMFields(
      v_qname,
      v_flag,
      v_rname,
      v_pos,
      v_mapq,
      v_cigar_bytes,
      v_rnext,
      v_pnext,
      v_tlen,
      v_seq_bytes,
      v_qual_bytes,
      v_extra_bytes)
def _bin_to_qual(qual_bytes):
   """Convert raw BAM quality bytes to a SAM phred+33 quality string.

   Returns '*' when qualities are absent: either no bytes at all, or the
   BAM sentinel where the first byte is 0xFF.
   """
   if len(qual_bytes) == 0: return '*'
   if struct.unpack('<B',qual_bytes[0])[0] == 0xFF: return '*'
   # shift each raw phred score by 33 into the printable SAM encoding
   qual = ''.join([chr(struct.unpack('<B',x)[0]+33) for x in qual_bytes])
   return qual
def _bin_to_seq(seq_bytes):
   """Expand 4-bit packed BAM bases into a sequence string.

   Each byte holds two bases (high nibble first); nibbles are mapped to
   base letters via the module-level ``_bam_char`` translation table, and
   the trailing '=' padding from odd-length reads is stripped.  Returns
   None when there is no sequence data.
   """
   if len(seq_bytes) == 0: return None
   seq = ''.join([''.join(
     [''.join([chr(z+97).translate(_bam_char) for z in [y>>4,y&0xF]]) for y in struct.unpack('<B',x)]) for x in seq_bytes]).rstrip('=')
   return seq
def _bin_to_cigar(cigar_bytes):
   """Decode packed BAM cigar bytes.

   Each 4-byte little-endian op packs length<<4 | op-code; op-codes are
   mapped to cigar characters via the module-level ``_bam_ops`` table.

   :return: [cigar_array, cigar_string] where cigar_array holds
            CIGARDatum(length, op) entries and cigar_string is the SAM
            text form ('*' with an empty array when there is no cigar)
   :rtype: list
   """
   if len(cigar_bytes) == 0: return [[],'*']
   cigar_packed = [struct.unpack('<I',x)[0] for x in \
      [cigar_bytes[i:i+4] for i in range(0,len(cigar_bytes),4)]]
   cigar_array = [CIGARDatum(c >> 4, str(c &0xF).translate(_bam_ops)) for c in cigar_packed]
   cigar_seq = ''.join([''.join([str(x[0]),x[1]]) for x in cigar_array])
   return [cigar_array,cigar_seq]
def _bin_to_extra(extra_bytes):
   """Parse the auxiliary (tag) bytes at the end of a BAM record.

   :param extra_bytes: all the remaining bytes of an entry after qual
   :return: an array of
            1. A dict keyed by tag holding TagDatum(type, value), where
               value is a string unless the type is numeric
            2. A string of the remainder (tab-separated SAM text tags)
   :rtype: list
   """
   extra = StringIO(extra_bytes)
   tags = {}
   rem = ''
   while extra.tell() < len(extra_bytes):
      tag = extra.read(2)
      val_type = extra.read(1)
      if val_type == 'Z': #NUL-terminated printable string
         rem += tag+':'
         rem += val_type+':'
         p = re.compile('([!-~])')
         m = p.match(extra.read(1))
         vre = ''
         while m: #consume printable characters until the terminator
            vre += m.group(1)
            c = extra.read(1)
            m = p.match(c)
         rem += vre+"\t"
         tags[tag] = TagDatum(val_type, vre)
      elif val_type == 'A': #single printable character
         rem += tag+':'
         rem += val_type+':'
         vre = extra.read(1)
         rem += vre+"\t"
         tags[tag] = TagDatum(val_type, vre)
      elif val_type in _bam_value_type: #fixed-width numeric types
         rem += tag+':'
         rem += 'i'+':' #numeric tags are rendered with SAM type 'i'
         val = struct.unpack(_bam_value_type[val_type][1],extra.read(_bam_value_type[val_type][0]))[0]
         rem += str(val)+"\t"
         tags[tag] = TagDatum(val_type, val)
      elif val_type == 'B': #array tags are not implemented
         # BUG FIX: was sys.sterr.write, which raised AttributeError on any
         # 'B' tag instead of emitting the intended warning.
         sys.stderr.write("WARNING array not implmented\n")
         continue
         # NOTE(review): unreachable draft implementation below; the
         # 'continue' above also leaves the array payload unconsumed, so
         # any tags after a 'B' tag will be misparsed.
         rem += tag+':'
         rem += val_type+':'
         array_type = _bam_value_type[extra.read(1)]
         element_count = struct.unpack('<I',extra.read(4))[0]
         array_bytes = extra.read(element_count*_bam_value_type[array_type][0])
         for by in [array_bytes[i:i+_bam_value_type[array_type][0]] for i in range(0,len(array_bytes),_bam_value_type[array_type][0])]:
            aval = struct.unpack(_bam_value_type[array_type][1],by)
   return [tags,rem.rstrip("\t")]
# One decompressed BGZF block: its on-disk compressed size and inflated payload.
BGZFChunk = namedtuple('BGZFChunk',['block_size','data'])
class BGZF:
   """ Methods adapted from biopython's bgzf.py

   Reads a bgzf-compressed file (such as a BAM) block by block, exposing a
   file-like ``read`` over the decompressed stream.

   .. warning:: We already have a BGZF class, i wonder why we don't put this there

   :param filename:
   :param blockStart: compressed-file offset of the block to start at
   :param innerStart: offset within that block's decompressed data
   :type filename: string
   :type blockStart: int
   :type innerStart: int
   """
   def __init__(self,filename,blockStart=None,innerStart=None):
      self.path = filename
      self.fh = open(filename,'rb',1000000) #1MB read buffering
      # NOTE(review): truthiness treats blockStart/innerStart of 0 like
      # None; harmless here since 0 is also the default position, but
      # `is not None` would be clearer.
      if blockStart: self.fh.seek(blockStart)
      self._block_start = 0
      self._buffer = self._load_block()
      self._buffer_pos = 0
      if innerStart: self._buffer_pos = innerStart
   def close(self):
      """Close the underlying file handle."""
      self.fh.close()
   def get_block_start(self):
      """Compressed-file offset of the currently buffered block."""
      return self._block_start
   def get_inner_start(self):
      """Current position within the decompressed buffer."""
      return self._buffer_pos
   def seek(self,blockStart,innerStart):
      """Go to this position in the file: block offset plus intra-block offset."""
      self.fh.seek(blockStart)
      self._buffer_pos = 0
      self._buffer = self._load_block()
      self._buffer_pos = innerStart
      """go to this posiiton in the file"""
   def read(self,size):
      """Read this many bytes from where you currently are"""
      done = 0 #number of bytes that have been read so far
      v = ''
      while True:
         if size-done < len(self._buffer.data) - self._buffer_pos:
            # current buffer can satisfy the rest of the request
            v += self._buffer.data[self._buffer_pos:self._buffer_pos+(size-done)]
            self._buffer_pos += (size-done)
            return v
         else: # we need more buffer
            vpart = self._buffer.data[self._buffer_pos:]
            self._buffer = self._load_block()
            v += vpart
            self._buffer_pos = 0
            if len(self._buffer.data)==0: return v #EOF: return what we have
            done += len(vpart)
   def _load_block(self):
      """Read and inflate the next BGZF block; returns an empty chunk at EOF.

      Parses the gzip member header, finds the 'BC' extra subfield carrying
      the block size, inflates the raw-deflate payload, then verifies both
      the CRC32 and the stored uncompressed size (exiting on mismatch).
      """
      if not self.fh: return BGZFChunk(block_size=0,data='')
      self._block_start = self.fh.tell()
      magic = self.fh.read(4)
      if len(magic) < 4:
         return BGZFChunk(block_size=0,data='') #end of file
      gzip_mod_time, gzip_extra_flags, gzip_os,extra_len = struct.unpack("<LBBH",self.fh.read(8))
      pos = 0
      block_size = None
      #get block_size
      while pos < extra_len:
         subfield_id = self.fh.read(2)
         subfield_len = struct.unpack("<H",self.fh.read(2))[0]
         subfield_data = self.fh.read(subfield_len)
         pos += subfield_len+4
         if subfield_id == 'BC':
            block_size = struct.unpack("<H",subfield_data)[0]+1
      #block_size is determined; the deflate payload is what remains after
      #the 12-byte header, the extra field, and the 8-byte footer
      deflate_size = block_size - 1 - extra_len - 19
      d = zlib.decompressobj(-15) #negative wbits: raw deflate, no gzip wrapper
      data = d.decompress(self.fh.read(deflate_size))+d.flush()
      expected_crc = self.fh.read(4)
      expected_size = struct.unpack("<I",self.fh.read(4))[0]
      if expected_size != len(data):
         sys.stderr.write("ERROR unexpected size\n")
         sys.exit()
      crc = zlib.crc32(data)
      if crc < 0: crc = struct.pack("<i",crc) #zlib.crc32 can be signed on Python 2
      else: crc = struct.pack("<I",crc)
      if crc != expected_crc:
         sys.stderr.write("ERROR crc fail\n")
         sys.exit()
      return BGZFChunk(block_size=block_size,data=data)
|
|
# coding=UTF-8
"""Server stuff."""
from __future__ import print_function
from cfy import (create_server,
create_ssh_key,
attach_ssh_key,
wait_for_state,
wait_for_cond,
create_nic,
attach_nic,
get_resource,
get_server_status,
start_server,
stop_server,
delete_resource)
import socket
import errno
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cfy.helpers import (with_fco_api, with_exceptions_handled)
from resttypes import enums, cobjects
from paramiko import SSHClient, AutoAddPolicy
import spur
import spur.ssh
from time import sleep
from subprocess import call
from fabric.api import settings, run
import os
# Shorthand for the FCO resource-type enum used throughout this module.
RT = enums.ResourceType
# Node property keys (configuration supplied in the blueprint).
PROP_RESOURCE_ID = 'resource_id'
PROP_USE_EXISTING = 'use_existing'
PROP_IMAGE = 'image'
PROP_VDC = 'vdc'
PROP_NET = 'network'
PROP_SERVER_PO = 'server_type'
PROP_CPU_COUNT = 'cpu_count'
PROP_RAM_AMOUNT = 'ram_amount'
PROP_MANAGER_KEY = 'manager_key'
PROP_PRIVATE_KEYS = 'private_keys'
PROP_PUBLIC_KEYS = 'public_keys'
# Runtime property keys (state recorded on the node instance).
RPROP_UUID = 'uuid'
RPROP_DISKS = 'disks'
RPROP_NIC = 'nic'
RPROP_NICS = 'nics'
RPROP_IP = 'ip'
RPROP_USER = 'username'
RPROP_PASS = 'password'
@operation
@with_fco_api
@with_exceptions_handled
def create(fco_api, *args, **kwargs):
    """Create (or adopt) an FCO server and prepare it for use.

    Looks up the configured image/VDC/network/product offer, creates the
    server and a NIC, attaches the requested SSH keys, boots the machine,
    and copies the configured private keys into ``~/.ssh`` over SSH.

    Returns:
        tuple: (server_uuid, server_ip, username, password).

    Side effects:
        Stores uuid, disks, nic(s), ip, username and password in the
        instance runtime properties.
    """
    ctx.logger.info('starting server creation')
    # Ease of access
    _rp = ctx.instance.runtime_properties
    _np = ctx.node.properties
    # Check if existing server is to be used (adopt instead of create)
    if _np[PROP_USE_EXISTING]:
        # BUG FIX: was get_resource(fco_api, _np[PROP_RESOURCE_ID, RT.SERVER])
        # which indexed the properties dict with a tuple key instead of
        # passing the resource type to get_resource.
        server = get_resource(fco_api, _np[PROP_RESOURCE_ID], RT.SERVER)
        if not server.nics:
            raise Exception('No NICs attached to server')
        _rp[RPROP_UUID] = server.resourceUUID
        _rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
        _rp[RPROP_NIC] = server.nics[0].resourceUUID
        _rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
        _rp[RPROP_IP] = server.nics[0].ipAddresses[0].ipAddress
        _rp[RPROP_USER] = server.initialUser
        _rp[RPROP_PASS] = server.initialPassword
        return (_rp[RPROP_UUID], _rp[RPROP_IP], _rp[RPROP_USER],
                _rp[RPROP_PASS])
    # Get configuration
    image = get_resource(fco_api, _np[PROP_IMAGE], RT.IMAGE)
    # BUG FIX: this guard previously tested PROP_IMAGE (always set, since
    # the image is fetched unconditionally above).  The *optional* property
    # is the VDC; when absent we fall back to the image's cluster/VDC below.
    if _np[PROP_VDC]:
        vdc = get_resource(fco_api, _np[PROP_VDC], RT.VDC)
    else:
        vdc = None
    network = get_resource(fco_api, _np[PROP_NET], RT.NETWORK)
    server_po = get_resource(fco_api, _np[PROP_SERVER_PO], RT.PRODUCTOFFER)
    manager_key = get_resource(fco_api, _np[PROP_MANAGER_KEY], RT.SSHKEY)
    cpu_count = _np[PROP_CPU_COUNT]
    ram_amount = _np[PROP_RAM_AMOUNT]
    public_keys = _np[PROP_PUBLIC_KEYS] or []
    private_keys = _np[PROP_PRIVATE_KEYS] or []
    # Verify existence (and readability) of the private key blueprint files
    missing_keys = set()
    bad_permission_keys = set()
    key_contents = {}
    for key in private_keys:
        try:
            key_contents[key] = ctx.get_resource(os.path.expanduser(key))
        except NonRecoverableError as e:
            if 'HttpException: 404' in str(e):
                missing_keys.add(key)
            elif 'HttpException: 403' in str(e):
                bad_permission_keys.add(key)
            else:
                raise
    if missing_keys or bad_permission_keys:
        raise Exception('Missing private keys: {}\nBad permission keys: {}'
                        .format(missing_keys, bad_permission_keys))
    # Generate missing configuration
    image_uuid = image.resourceUUID
    if vdc is not None:
        cluster_uuid = vdc.clusterUUID
        vdc_uuid = vdc.resourceUUID
    else:
        cluster_uuid = image.clusterUUID
        vdc_uuid = image.vdcUUID
    network_uuid = network.resourceUUID
    network_type = network.networkType
    server_po_uuid = server_po.resourceUUID
    manager_key_uuid = manager_key.resourceUUID
    # TODO: better way of determining suitable disk
    boot_disk_po_uuid = get_resource(fco_api,
                                     '{} GB Storage Disk'.format(image.size),
                                     RT.PRODUCTOFFER).resourceUUID
    ctx.logger.info('Configuration: \n'
                    'image_uuid: %s\n'
                    'cluster_uuid: %s\n'
                    'vdc_uuid: %s\n'
                    'network_uuid: %s\n'
                    'server_po_uuid: %s\n'
                    'manager_key_uuid: %s\n'
                    'boot_disk_po_uuid: %s',
                    image_uuid, cluster_uuid, vdc_uuid, network_uuid,
                    server_po_uuid, manager_key_uuid, boot_disk_po_uuid)
    # Create server (idempotent: reuse the UUID recorded by a prior attempt)
    server_name = '{}{}_{}'.format(ctx.bootstrap_context.resources_prefix,
                                   ctx.deployment.id, ctx.instance.id)
    try:
        server_uuid = _rp[RPROP_UUID]
    except KeyError:
        # NOTE(review): create_server is handed the manager key *object*;
        # confirm whether it expects the UUID (manager_key_uuid) instead.
        server_uuid = create_server(fco_api, server_po_uuid, image_uuid,
                                    cluster_uuid, vdc_uuid, cpu_count,
                                    ram_amount, boot_disk_po_uuid,
                                    [manager_key], server_name)
        _rp[RPROP_UUID] = server_uuid
    ctx.logger.info('server_uuid: %s', server_uuid)
    server = get_resource(fco_api, server_uuid, RT.SERVER)
    server_nics = [nic.resourceUUID for nic in server.nics]
    server_keys = [key.resourceUUID for key in server.sshkeys]
    # Wait for server to be active
    if not wait_for_state(fco_api, server_uuid, enums.ResourceState.ACTIVE,
                          RT.SERVER):
        raise Exception('Server failed to prepare in time!')
    ctx.logger.info('Server ACTIVE')
    # Add any public keys not already attached
    new_keys = set()
    for key in public_keys:
        if key not in server_keys:
            key_uuid = create_ssh_key(fco_api, key, server_name + ' Key')
            attach_ssh_key(fco_api, server_uuid, key_uuid)
            new_keys.add(key_uuid)
    ctx.logger.info('Keys attached: %s', new_keys)
    # Create NIC (idempotent via runtime properties, as above)
    try:
        nic_uuid = _rp[RPROP_NIC]
    except KeyError:
        nic_uuid = create_nic(fco_api, cluster_uuid, network_type,
                              network_uuid, vdc_uuid, server_name + ' NIC')
        if not wait_for_state(fco_api, nic_uuid, enums.ResourceState.ACTIVE,
                              RT.NIC):
            raise Exception('NIC failed to create in time!')
        _rp[RPROP_NIC] = nic_uuid
    ctx.logger.info('nic_uuid: %s', nic_uuid)
    # Stop server if started; the NIC can only be attached while stopped
    if get_server_status(fco_api, server_uuid) != enums.ServerStatus.STOPPED:
        if not stop_server(fco_api, server_uuid):
            raise Exception('Stopping server failed to complete in time!')
    ctx.logger.info('Server STOPPED')
    # Attach NIC
    if nic_uuid not in server_nics:
        job_uuid = attach_nic(fco_api, server_uuid, nic_uuid, 1).resourceUUID
        cond = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
        if not wait_for_cond(fco_api, job_uuid, cond, RT.JOB):
            raise Exception('Attaching NIC failed to complete in time!')
        ctx.logger.info('NICs attached')
    else:
        ctx.logger.info('NICs already attached')
    # Start server if not started
    if get_server_status(fco_api, server_uuid) == enums.ServerStatus.STOPPED:
        if not start_server(fco_api, server_uuid):
            raise Exception('Running server failed to complete in time!')
    ctx.logger.info('Server RUNNING')
    nic = get_resource(fco_api, nic_uuid, RT.NIC)
    server_ip = nic.ipAddresses[0].ipAddress
    ctx.logger.info('Server READY')
    username = server.initialUser
    password = server.initialPassword
    # NOTE(review): -1 retries forever (the counter only grows more
    # negative); confirm whether a finite retry budget is intended.
    ssh_attempts = -1
    # Provision the private keys into ~/.ssh on the new server via Fabric.
    while ssh_attempts:
        ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
        try:
            # BUG FIX: host_string previously used server_po_uuid (the
            # product-offer UUID); connect to the server's IP address.
            with settings(host_string=server_ip, user=username,
                          password=password, disable_known_hosts=True,
                          abort_exception=Exception):
                run('mkdir ~/.ssh')
                run('chmod 0700 ~/.ssh')
                for key, key_content in key_contents.items():
                    remote = os.path.join('~', '.ssh', os.path.basename(key))
                    run('echo \'{}\' > {}'.format(key_content, remote))
                    run('chmod 0600 ' + remote)
            ctx.logger.info('Done')
            break
        except Exception as e:
            ctx.logger.info(e)
            ssh_attempts -= 1
    else:
        raise Exception('Failed to provision keys in time')
    # Record the final server facts on the instance
    _rp[RPROP_UUID] = server_uuid
    _rp[RPROP_IP] = server_ip
    _rp[RPROP_USER] = username
    _rp[RPROP_PASS] = password
    server = get_resource(fco_api, server_uuid, RT.SERVER)
    _rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
    _rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
    ctx.logger.info('Server IP: ' + server_ip)
    ctx.logger.info('Server User: ' + username)
    ctx.logger.info('Server Password: ' + password)
    return server_uuid, server_ip, username, password
@operation
@with_fco_api
@with_exceptions_handled
def delete(fco_api, *args, **kwargs):
    """Delete the server recorded in this instance's runtime properties.

    Issues a cascading delete and blocks until the FCO job reports
    SUCCESSFUL, raising when it does not finish in time.
    """
    uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
    job = delete_resource(fco_api, uuid, RT.SERVER, True)
    success = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
    if not wait_for_cond(fco_api, job.resourceUUID, success, RT.JOB):
        raise Exception('Failed to delete server')
@operation
@with_fco_api
@with_exceptions_handled
def start(fco_api, *args, **kwargs):
    """Power on the managed server unless it is already RUNNING."""
    uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
    running = get_server_status(fco_api, uuid) == enums.ServerStatus.RUNNING
    if not running and not start_server(fco_api, uuid):
        raise Exception('Could not start server!')
@operation
@with_fco_api
@with_exceptions_handled
def stop(fco_api, *args, **kwargs):
    """Power off the managed server unless it is already STOPPED."""
    uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
    stopped = get_server_status(fco_api, uuid) == enums.ServerStatus.STOPPED
    if not stopped and not stop_server(fco_api, uuid):
        raise Exception('Could not stop server!')
@operation
@with_fco_api
@with_exceptions_handled
def creation_validation(fco_api, *args, **kwargs):
    """Return True iff the recorded server UUID resolves to a real server."""
    uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
    try:
        get_resource(fco_api, uuid, RT.SERVER)
    except Exception:
        return False
    else:
        return True
|
|
#-*- coding: utf-8 -*-
#!/usr/bin/python
'''
Created on 2015/06/08
@author: Administrator
'''
from models import app_user,application
from utils import sftp2,ssh2_deploy
import logging
from datetime import datetime
from time import sleep
# Today's date as 'YYYY-MM-DD', captured once at import time; used to stamp
# create_date on application rows.
today = (datetime.now().date()).strftime('%Y-%m-%d')
def welogic_deploy(data_req):
    """Deploy WebLogic to the requested hosts.

    ``data_req`` is a list of dicts describing target hosts (host_id,
    host_name, username, area, ip, instance, console_port).  Hosts are
    split into 'web' and 'app' groups by hostname; in each group the
    '01'-area host acts as the primary: it receives the data file and
    deployment template, then drives the three install scripts
    (1_envinitall / 2_createdomain / 3_deployappall), polling the remote
    deploy.log until every host in the group reports done.

    Side effects: creates (and on failure deletes) rows in ``application``
    and runs remote shell commands over SSH/SFTP.
    """
    data_txt_web = []
    data_txt_app = []
    # Split hosts into the web and app groups, deriving the module name
    # (e.g. 'foo-web1' from 'fooweb1') and fetching each account password.
    for data in data_req:
        hostname = data['host_name']
        username = data['username']
        if('web' in hostname):
            web_index = username.index('web')
            module = username[0:web_index] + '-' + username[web_index:]
            password = app_user.objects.all().filter(host_id=data['host_id'],username=data['username']).values('password')[0]['password']
            data_txt_web.append({'host_id':data['host_id'],'hostname':hostname,'area':data['area'],'ip':data['ip'],'username':username,'password':password,'module':module,'instance':data['instance'],'primary':'','console_port':data['console_port']})
        elif('app' in hostname):
            app_index = username.index('app')
            module = username[0:app_index] + '-' + username[app_index:]
            password = app_user.objects.all().filter(host_id=data['host_id'],username=data['username']).values('password')[0]['password']
            data_txt_app.append({'host_id':data['host_id'],'hostname':hostname,'area':data['area'],'ip':data['ip'],'username':username,'password':password,'module':module,'instance':data['instance'],'primary':'','console_port':data['console_port']})
    '''
    deploy step:
    1.write text file
    2.copy file to deploy host
    3.excute shell scripts
    '''
    #web server
    web_server_count = len(data_txt_web)
    if (web_server_count > 0):
        for i in range(web_server_count):
            if('01' in data_txt_web[i]['area']):
                # Build data.txt describing every web host, one line each.
                cmd = r''
                for info in data_txt_web:
                    cmd = cmd + info['area'] + ' ' + info['ip'] + ' ' +info['username']+ ' ' + info['password'] + ' ' + info['module'] + ' /app/mw test.war ' + info['instance'] + ' '+ info['console_port'] +' \\n'
                cmd = cmd[:-2]
                cmd = r'cd /app/'+data_txt_web[i]['username']+'/;echo "'+ cmd + '" > data.txt'
                #set primary host for deploy
                data_txt_web[i]['primary'] = 'primary'
                #var
                host_ip = data_txt_web[i]['ip']
                username = data_txt_web[i]['username']
                password = data_txt_web[i]['password']
                module = data_txt_web[i]['module']
                application.objects.create(host_id=data_txt_web[i]['host_id'],area=data_txt_web[i]['area'],middleware='weblogic',module=module,username=username,console_port=data_txt_web[i]['console_port'],instance_num=data_txt_web[i]['instance'],package='test.war',primary='primary',create_date=today,status='deployed')
                try:
                    #write data text
                    ssh2_deploy(host_ip,username,password,cmd)
                    #put template file to target host
                    sftp2(host_ip,username,password,'/app/testweb/bak/TEMPLATE.tar','/app/'+username+'/TEMPLATE.tar')
                    #tar xvf file name and run step 1 of 3 scripts
                    cmd_deploy_step1 = r'cd /app/'+username+';tar xf TEMPLATE.tar;source .cshrc;sh APP.sh;'\
                        'cp test.war /app/'+username+'/deploy/applications/'+module+'/;'\
                        'cd deploy/scripts;sh 1_envinitall.sh >deploy.log;'
                    ssh2_deploy(host_ip,username,password,cmd_deploy_step1)
                    # Poll until every web host has logged setup1 completion.
                    while True:
                        result = ssh2_deploy(host_ip,username,password, r'cd /app/'+username+'/deploy/scripts;grep setup1 deploy.log')
                        logging.info(result)
                        logging.info(len(result))
                        if (len(result)==web_server_count):
                            break
                        else:
                            sleep(30)
                    # web-machinfo.txt version-web-machinfo.txt
                    cmd_web_machinfo = ''
                    cmd_version_web_machinfo = ''
                    for info in data_txt_web:
                        cmd_web_machinfo = cmd_web_machinfo + info['ip'] + ' ' + info['password'] + ' ' +info['username']+ ' /app/' + info['username'] + '/' + info['module'] +' \\n'
                        for i in range(int(info['instance'])):
                            cmd_version_web_machinfo = cmd_version_web_machinfo + info['ip'] + ':' + str(int(info['console_port']) + 1 + i)+' \\n'
                    cmd_web_machinfo = cmd_web_machinfo[:-2]
                    cmd_version_web_machinfo = cmd_version_web_machinfo[:-2]
                    # run DEP.sh for deploy programe
                    cmd_run_deploy = r'cd /app/'+username+'/; sh DEP.sh;cd deploy_all/;echo "' + cmd_web_machinfo +'" > web-machinfo.txt;echo "' + cmd_version_web_machinfo +'" > version-web-machinfo.txt'
                    ssh2_deploy(host_ip,username,password,cmd_run_deploy)
                    #run step 2 and step 3, polling deploy.log after each
                    ssh2_deploy(host_ip,username,password,r'cd /app/'+username+'/deploy/scripts;sh 2_createdomain.sh >>deploy.log;')
                    while True:
                        result = ssh2_deploy(host_ip,username,password, r'cd /app/'+username+'/deploy/scripts;grep setup2 deploy.log')
                        if (len(result)==web_server_count):
                            break
                        else:
                            sleep(30)
                    ssh2_deploy(host_ip,username,password,r'cd /app/'+username+'/deploy/scripts;sh 3_deployappall.sh >>deploy.log;')
                    while True:
                        result = ssh2_deploy(host_ip,username,password, r'cd /app/'+username+'/deploy/scripts;grep setup3 deploy.log')
                        if (len(result)==web_server_count):
                            break
                        else:
                            sleep(60)
                            continue
                except Exception:
                    # Was a bare except that silently swallowed all errors;
                    # log it and roll back the primary's application row.
                    logging.exception('web deploy failed for %s', username)
                    application.objects.all().filter(host_id=data_txt_web[i]['host_id'],username=username).delete()
    #app server
    app_server_count = len(data_txt_app)
    if ( app_server_count > 0):
        for i in range(app_server_count):
            if('01' in data_txt_app[i]['area']):
                cmd = ''
                for info in data_txt_app:
                    cmd = cmd + info['area'] + ' ' + info['ip'] + ' ' +info['username']+ ' ' + info['password'] + ' ' + info['module'] + ' /app/mw test.war ' + info['instance'] +' '+info['console_port'] +' \\n'
                # NOTE(review): unlike the web branch there is no cmd[:-2]
                # trim here and a trailing ' "' is written; confirm intent.
                cmd = r'cd /app/'+data_txt_app[i]['username']+';echo "'+ cmd + ' " > data.txt'
                #set primary host for deploy
                data_txt_app[i]['primary'] = 'primary'
                #var
                host_ip = data_txt_app[i]['ip']
                username = data_txt_app[i]['username']
                password = data_txt_app[i]['password']
                module = data_txt_app[i]['module']
                application.objects.create(host_id=data_txt_app[i]['host_id'],area=data_txt_app[i]['area'],middleware='weblogic',module=module,username=username,console_port=data_txt_app[i]['console_port'],instance_num=data_txt_app[i]['instance'],package='test.war',primary='primary',create_date=today,status='deployed')
                try:
                    #write data text
                    ssh2_deploy(host_ip,username,password,cmd)
                    #put template file to target host
                    sftp2(host_ip,username,password,'/app/testweb/bak/TEMPLATE.tar','/app/'+username+'/TEMPLATE.tar')
                    cmd_deploy = 'cd /app/'+username+';tar xf TEMPLATE.tar;source .cshrc;sh APP.sh;'\
                        'cp test.war /app/'+username+'/deploy/applications/'+module+'/;'\
                        'cd deploy/scripts;sh 1_envinitall.sh > deploy.log;'
                    ssh2_deploy(host_ip,username,password,cmd_deploy)
                    # Poll until every app host has logged setup1 completion.
                    while True:
                        result = ssh2_deploy(host_ip,username,password, r'cd /app/'+username+'/deploy/scripts;grep setup1 deploy.log')
                        logging.info(result)
                        logging.info(len(result))
                        if (len(result)==app_server_count):
                            break
                        else:
                            sleep(30)
                    # app-machinfo.txt version-app-machinfo.txt
                    cmd_app_machinfo = ''
                    cmd_version_app_machinfo = ''
                    # BUG FIX: this loop iterated data_txt_web (copy-paste
                    # from the web branch), so the app machinfo files were
                    # built from the wrong host group.
                    for info in data_txt_app:
                        cmd_app_machinfo = cmd_app_machinfo + info['ip'] + ' ' + info['password'] + ' ' +info['username']+ ' /app/' + info['username'] + '/' + info['module'] +' \\n'
                        for i in range(int(info['instance'])):
                            cmd_version_app_machinfo = cmd_version_app_machinfo + info['ip'] + ':' + str(int(info['console_port']) + 1 + i)+' \\n'
                    cmd_app_machinfo = cmd_app_machinfo[:-2]
                    cmd_version_app_machinfo = cmd_version_app_machinfo[:-2]
                    # run DEP.sh for deploy programe
                    cmd_run_deploy = r'cd /app/'+username+'/; sh DEP.sh;cd deploy_all/;echo "' + cmd_app_machinfo +'" > app-machinfo.txt;echo "' + cmd_version_app_machinfo +'" > version-app-machinfo.txt'
                    ssh2_deploy(host_ip,username,password,cmd_run_deploy)
                    #run step 2 and step 3, polling deploy.log after each
                    ssh2_deploy(host_ip,username,password,r'cd /app/'+username+'/deploy/scripts;sh 2_createdomain.sh >>deploy.log;')
                    while True:
                        result = ssh2_deploy(host_ip,username,password, r'cd /app/'+username+'/deploy/scripts;grep setup2 deploy.log')
                        if (len(result)==app_server_count):
                            break
                        else:
                            sleep(30)
                    ssh2_deploy(host_ip,username,password,r'cd /app/'+username+'/deploy/scripts;sh 3_deployappall.sh >>deploy.log;')
                    while True:
                        result = ssh2_deploy(host_ip,username,password, r'cd /app/'+username+'/deploy/scripts;grep setup3 deploy.log')
                        if (len(result)==app_server_count):
                            break
                        else:
                            sleep(60)
                            continue
                except Exception:
                    # Was a bare except; log and roll back as above.
                    logging.exception('app deploy failed for %s', username)
                    application.objects.all().filter(host_id=data_txt_app[i]['host_id'],username=username).delete()
    logging.info(data_txt_web)
    logging.info(data_txt_app)
    data_txt = data_txt_web + data_txt_app
    logging.info(data_txt)
    # Record the non-primary hosts in the application table in one bulk insert.
    app_data = []
    for deploy_info in data_txt:
        if(not deploy_info['primary']):
            app_data.append(application(host_id=deploy_info['host_id'],area=deploy_info['area'],middleware='weblogic',module=deploy_info['module'],username=deploy_info['username'],console_port=deploy_info['console_port'],instance_num=deploy_info['instance'],package='test.war',primary=deploy_info['primary'],create_date=today,status='deployed'))
    application.objects.bulk_create(app_data)
    #run startup scripts and check the deploy status
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Bernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class Bernoulli(distribution.Distribution):
"""Bernoulli distribution.
The Bernoulli distribution is parameterized by p, the probability of a
positive event.
"""
def __init__(self,
logits=None,
p=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Bernoulli"):
"""Construct Bernoulli distributions.
Args:
logits: An N-D `Tensor` representing the log-odds
of a positive event. Each entry in the `Tensor` parametrizes
an independent Bernoulli distribution where the probability of an event
is sigmoid(logits). Only one of `logits` or `p` should be passed in.
p: An N-D `Tensor` representing the probability of a positive
event. Each entry in the `Tensor` parameterizes an independent
Bernoulli distribution. Only one of `logits` or `p` should be passed
in.
dtype: dtype for samples.
validate_args: `Boolean`, default `False`. Whether to validate that
`0 <= p <= 1`. If `validate_args` is `False`, and the inputs are
invalid, methods like `log_pmf` may return `NaN` values.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution.
Raises:
ValueError: If p and logits are passed, or if neither are passed.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name) as ns:
self._logits, self._p = distribution_util.get_logits_and_prob(
logits=logits, p=p, validate_args=validate_args)
with ops.name_scope("q"):
self._q = 1. - self._p
super(Bernoulli, self).__init__(
dtype=dtype,
is_continuous=False,
is_reparameterized=False,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._p, self._q, self._logits],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def logits(self):
"""Log-odds of success."""
return self._logits
@property
def p(self):
"""Probability of success."""
return self._p
@property
def q(self):
"""1-p."""
return self._q
def _batch_shape(self):
return array_ops.shape(self._logits)
def _get_batch_shape(self):
return self._logits.get_shape()
def _event_shape(self):
return array_ops.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
new_shape = array_ops.concat(0, ([n], self.batch_shape()))
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.p.dtype)
sample = math_ops.less(uniform, self.p)
return math_ops.cast(sample, self.dtype)
def _log_prob(self, event):
    # log P(event) for a logits-parameterized Bernoulli is exactly the
    # negative sigmoid cross entropy between `logits` and `event`.
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = ops.convert_to_tensor(event, name="event")
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.
    # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
    # dynamic shapes are the same.
    if (not event.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        event.get_shape() != logits.get_shape()):
        # Multiplying by ones_like broadcasts both tensors to the
        # elementwise broadcast of the two shapes.
        logits = array_ops.ones_like(event) * logits
        event = array_ops.ones_like(logits) * event
    return -nn.sigmoid_cross_entropy_with_logits(logits, event)
def _prob(self, event):
    # P(event) = exp(log P(event)); delegate to `_log_prob` so the
    # numerically stable path is the single source of truth.
    return math_ops.exp(self._log_prob(event))
def _entropy(self):
    # Entropy of Bernoulli(p) with p = sigmoid(logits), written in logit
    # space for numerical stability:
    #   H = softplus(-logits) - logits * (sigmoid(logits) - 1)
    # which equals -p*log(p) - (1-p)*log(1-p).
    return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
            nn.softplus(-self.logits))
def _mean(self):
    # E[X] = p; `identity` returns a fresh tensor rather than aliasing
    # the parameter itself.
    return array_ops.identity(self.p)
def _variance(self):
    # Var[X] = p * (1 - p).
    return self.q * self.p
def _std(self):
    # Standard deviation: sqrt(p * (1 - p)).
    return math_ops.sqrt(self._variance())
def _mode(self):
    """Returns `1` if `p > 1-p` and `0` otherwise."""
    # The comparison is strict, so ties (p == 0.5) resolve to 0.
    return math_ops.cast(self.p > self.q, self.dtype)
class BernoulliWithSigmoidP(Bernoulli):
    """Bernoulli with `p = sigmoid(p)`."""

    def __init__(self,
                 p=None,
                 dtype=dtypes.int32,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="BernoulliWithSigmoidP"):
        # Capture constructor args (minus `self`) before any other locals
        # are introduced, for parameter bookkeeping.
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name) as ns:
            # Here `p` is an unconstrained real (a logit); the parent class
            # receives the squashed probability sigmoid(p).
            super(BernoulliWithSigmoidP, self).__init__(
                p=nn.sigmoid(p),
                dtype=dtype,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                name=ns)
        self._parameters = parameters
@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(a, b, name=None):
    """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

    Uses the logit-space identity
      KL = p_a * (log p_a - log p_b) + (1 - p_a) * (log(1-p_a) - log(1-p_b))
    with log p = -softplus(-logits) and log(1-p) = -softplus(logits),
    which avoids computing log of a saturated sigmoid.

    Args:
      a: instance of a Bernoulli distribution object.
      b: instance of a Bernoulli distribution object.
      name: (optional) Name to use for created operations.
        default is "kl_bernoulli_bernoulli".

    Returns:
      Batchwise KL(a || b)
    """
    with ops.name_scope(name, "kl_bernoulli_bernoulli", [a.logits, b.logits]):
        return (math_ops.sigmoid(a.logits) * (-nn.softplus(-a.logits) +
                                              nn.softplus(-b.logits)) +
                math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +
                                               nn.softplus(b.logits)))
|
|
# -*- coding: utf-8 -*-
import os
import random
import reversion
import six
import string
import unittest
from datetime import datetime, date
from easy_thumbnails.files import get_thumbnailer
from operator import itemgetter
from random import randint
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files import File as DjangoFile
from django.core.urlresolvers import reverse
from django.db import transaction
from django.test import TransactionTestCase
from django.utils.translation import activate, override, get_language
from aldryn_newsblog.search_indexes import ArticleIndex
from cms import api
from cms.utils import get_cms_setting
from filer.models.imagemodels import Image
from parler.tests.utils import override_parler_settings
from parler.utils.conf import add_default_language_settings
from parler.utils.context import switch_language, smart_override
from aldryn_categories.models import Category
from aldryn_categories.tests import CategoryTestCaseMixin
from aldryn_people.models import Person
from aldryn_search.helpers import get_request
from aldryn_newsblog.models import Article, NewsBlogConfig
from aldryn_newsblog.versioning import create_revision_with_placeholders
from . import TESTS_STATIC_ROOT
FEATURED_IMAGE_PATH = os.path.join(TESTS_STATIC_ROOT, 'featured_image.jpg')
def rand_str(prefix=u'', length=23, chars=string.ascii_letters):
    """Return *prefix* followed by *length* characters drawn from *chars*."""
    suffix = []
    for _ in range(length):
        suffix.append(random.choice(chars))
    return prefix + u''.join(suffix)
class NewsBlogTestsMixin(CategoryTestCaseMixin):
@staticmethod
def create_user():
    # Random username/names keep fixtures unique across repeated runs.
    return User.objects.create(username=rand_str(), first_name=rand_str(),
                               last_name=rand_str())
def create_person(self):
    """Create a Person backed by a fresh random user and random slug."""
    return Person.objects.create(user=self.create_user(), slug=rand_str())
def create_article(self, content=None, **kwargs):
    """Create a published Article, filling any field not given in kwargs.

    When no author/owner is supplied, a fresh Person is created and its
    user becomes the owner. If `content` is given it becomes the body of
    a TextPlugin attached to the article.
    """
    if 'author' in kwargs:
        author = kwargs['author']
    else:
        author = self.create_person()
    if 'owner' in kwargs:
        owner = kwargs['owner']
    else:
        owner = author.user
    defaults = {
        'title': rand_str(),
        'slug': rand_str(),
        'author': author,
        'owner': owner,
        'app_config': self.app_config,
        'publishing_date': datetime.now(),
        'is_published': True,
    }
    defaults.update(kwargs)
    article = Article.objects.create(**defaults)
    if content:
        api.add_plugin(article.content, 'TextPlugin',
                       self.language, body=content)
    return article
def create_tagged_articles(self, num_articles=3, tags=('tag1', 'tag2')):
    """Create num_articles Articles for each tag"""
    articles_by_slug = {}
    for tag_name in tags:
        batch = []
        for _ in range(num_articles):
            tagged = self.create_article()
            tagged.save()
            tagged.tags.add(tag_name)
            batch.append(tagged)
        # The tag's slug only exists once tagging happened, so read it
        # back from the first article of the batch.
        slug = batch[0].tags.slugs()[0]
        articles_by_slug[slug] = batch
    return articles_by_slug
def setup_categories(self):
    """Sets-up i18n categories (self.category_root, self.category1 and
    self.category2) for use in tests"""
    if not self.language:
        self.language = settings.LANGUAGES[0][0]
    categories = []
    # Set the default language, create the objects
    with override(self.language):
        code = "{0}-".format(self.language)
        self.category_root = Category.add_root(
            name=rand_str(prefix=code, length=8))
        categories.append(self.category_root)
        self.category1 = self.category_root.add_child(
            name=rand_str(prefix=code, length=8))
        categories.append(self.category1)
        self.category2 = self.category_root.add_child(
            name=rand_str(prefix=code, length=8))
        categories.append(self.category2)
        # We should reload category_root, since we modified its children.
        self.category_root = self.reload(self.category_root)
    # Setup the other language(s) translations for the categories.
    # Each translation gets a language-code prefix so it is
    # distinguishable in assertions.
    for language, _ in settings.LANGUAGES[1:]:
        for category in categories:
            with switch_language(category, language):
                code = "{0}-".format(language)
                category.name = rand_str(prefix=code, length=8)
                category.save()
def setUp(self):
    # Use the first configured CMS template and the project's default
    # language for all fixture pages.
    self.template = get_cms_setting('TEMPLATES')[0][0]
    self.language = settings.LANGUAGES[0][0]
    self.root_page = api.create_page(
        'root page', self.template, self.language, published=True)
    self.app_config = NewsBlogConfig.objects.create(namespace='NBNS')
    # The apphooked page exposes the NewsBlog URLs under the namespace.
    self.page = api.create_page(
        'page', self.template, self.language, published=True,
        parent=self.root_page,
        apphook='NewsBlogApp',
        apphook_namespace=self.app_config.namespace)
    self.placeholder = self.page.placeholders.all()[0]
    self.request = get_request('en')
    self.setup_categories()
    # Publish titles for every additional configured language so that
    # multilingual tests can resolve URLs in those languages too.
    for page in self.root_page, self.page:
        for language, _ in settings.LANGUAGES[1:]:
            api.create_title(language, page.get_slug(), page)
            page.publish(language)
class TestAldrynNewsBlog(NewsBlogTestsMixin, TransactionTestCase):
def test_create_article(self):
    """A freshly created article is visible on its detail page."""
    new_article = self.create_article()
    detail = self.client.get(new_article.get_absolute_url())
    self.assertContains(detail, new_article.title)
def test_delete_article(self):
    """Deleting an article turns its previously working URL into a 404."""
    victim = self.create_article()
    victim_pk = victim.pk
    victim_url = victim.get_absolute_url()
    # Sanity check: the detail page resolves before deletion.
    self.assertContains(self.client.get(victim_url), victim.title)
    Article.objects.get(pk=victim_pk).delete()
    self.assertEqual(self.client.get(victim_url).status_code, 404)
def test_article_not_published(self):
    """An unpublished article must not be reachable via its detail URL."""
    draft = self.create_article(is_published=False)
    status = self.client.get(draft.get_absolute_url()).status_code
    self.assertEqual(status, 404)
def test_articles_list(self):
    """The list view shows published articles and hides unpublished ones."""
    articles = [self.create_article() for _ in range(10)]
    hidden = articles[0]
    hidden.is_published = False
    hidden.save()
    listing = self.client.get(reverse('aldryn_newsblog:article-list'))
    for published in articles[1:]:
        self.assertContains(listing, published.title)
    self.assertNotContains(listing, hidden.title)
def test_articles_list_pagination(self):
    """Page 1 shows the newest PAGINATE_BY articles, page 2 the rest."""
    page_size = settings.ALDRYN_NEWSBLOG_PAGINATE_BY
    # Strictly descending years make list order match creation order.
    articles = [
        self.create_article(publishing_date=datetime(
            2000 - i, 1, 1, 1, 1)) for i in range(page_size + 5)]
    first_page = self.client.get(reverse('aldryn_newsblog:article-list'))
    for recent in articles[:page_size]:
        self.assertContains(first_page, recent.title)
    for older in articles[page_size:]:
        self.assertNotContains(first_page, older.title)
    second_page = self.client.get(
        reverse('aldryn_newsblog:article-list') + '?page=2')
    for recent in articles[:page_size]:
        self.assertNotContains(second_page, recent.title)
    for older in articles[page_size:]:
        self.assertContains(second_page, older.title)
def test_articles_by_author(self):
    """The by-author list contains every article of that author."""
    author1, author2 = self.create_person(), self.create_person()
    for writer in (author1, author2):
        authored = [
            self.create_article(author=writer) for _ in range(10)]
        listing = self.client.get(reverse(
            'aldryn_newsblog:article-list-by-author',
            kwargs={'author': writer.slug}))
        for entry in authored:
            self.assertContains(listing, entry.title)
def test_articles_by_category(self):
    """Tests that we can find articles by their categories, in ANY of the
    languages they are translated to"""
    author = self.create_person()
    for category in (self.category1, self.category2):
        articles = []
        code = "{0}-".format(self.language)
        for _ in range(10):
            article = Article.objects.create(
                title=rand_str(), slug=rand_str(prefix=code),
                app_config=self.app_config,
                author=author, owner=author.user,
                publishing_date=datetime.now())
            # Make sure there are translations in place for the articles.
            # NOTE(review): this inner loop clobbers `code`, so slugs of
            # later articles carry the last language's prefix -- harmless
            # since slugs are random, but worth confirming it is intended.
            for language, _ in settings.LANGUAGES[1:]:
                with switch_language(article, language):
                    code = "{0}-".format(language)
                    article.title = rand_str(prefix=code)
                    article.save()
            article.categories.add(category)
            articles.append(article)
        # Visit the category list view in every configured language:
        # articles translated to that language must appear, others not.
        for language, _ in settings.LANGUAGES:
            with switch_language(category, language):
                url = reverse('aldryn_newsblog:article-list-by-category',
                              kwargs={'category': category.slug})
                response = self.client.get(url)
                for article in articles:
                    if language in article.get_available_languages():
                        article.set_current_language(language)
                        self.assertContains(response, article.title)
                    else:
                        article.set_current_language(language)
                        self.assertNotContains(response, article.title)
def test_article_detail_not_translated_fallback(self):
    """
    If the fallback is configured, article is available in any (configured) language
    """
    author = self.create_person()
    code = "{0}-".format(self.language)
    # The article is only created in the default language.
    article = Article.objects.create(
        title=rand_str(), slug=rand_str(prefix=code),
        app_config=self.app_config,
        author=author, owner=author.user,
        publishing_date=datetime.now())
    article.save()
    article.categories.add(self.category1)
    # current language - it still exists
    article = Article.objects.get(pk=article.pk)
    language = settings.LANGUAGES[0][0]
    with switch_language(self.category1, language):
        url = reverse('aldryn_newsblog:article-detail',
                      kwargs={'slug': article.slug})
        response = self.client.get(url)
        self.assertContains(response, article.title)
    # non existing language - it still exists (parler falls back to the
    # default-language translation)
    language = settings.LANGUAGES[1][0]
    with switch_language(self.category1, language):
        url = reverse('aldryn_newsblog:article-detail',
                      kwargs={'slug': article.slug})
        response = self.client.get(url)
        self.assertContains(response, article.title)
def test_article_detail_not_translated_no_fallback(self):
    """
    If the fallback is disabled, article is available only in the
    language in which is translated
    """
    author = self.create_person()
    code = "{0}-".format(self.language)
    article = Article.objects.create(
        title=rand_str(), slug=rand_str(prefix=code),
        app_config=self.app_config,
        author=author, owner=author.user,
        publishing_date=datetime.now())
    article.save()
    article.categories.add(self.category1)
    # hide_untranslated=True disables parler's language fallback for the
    # duration of this test.
    PARLER_LANGUAGES = {
        1: (
            {'code': 'de'},
            {'code': 'fr'},
            {'code': 'en'},
        ),
        'default': {
            'hide_untranslated': True,
        }
    }
    LANGUAGES = add_default_language_settings(PARLER_LANGUAGES)
    with override_parler_settings(PARLER_LANGUAGES=LANGUAGES):
        # current language - it still exists
        article = Article.objects.get(pk=article.pk)
        language = settings.LANGUAGES[0][0]
        with switch_language(self.category1, language):
            url = reverse('aldryn_newsblog:article-detail',
                          kwargs={'slug': article.slug})
            response = self.client.get(url)
            self.assertContains(response, article.title)
        # non existing language - with fallbacks hidden the detail view
        # must now 404 instead of serving the default translation
        language = settings.LANGUAGES[1][0]
        with switch_language(self.category1, language):
            url = reverse('aldryn_newsblog:article-detail',
                          kwargs={'slug': article.slug})
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
def test_article_detail_show_featured_image(self):
    """Detail view renders the thumbnail URL of the featured image."""
    author = self.create_person()
    # The file handle must stay open while filer copies it into storage.
    with open(FEATURED_IMAGE_PATH, 'rb') as f:
        file_obj = DjangoFile(f, name='featured_image.jpg')
        image = Image.objects.create(owner=author.user,
                                     original_filename='featured_image.jpg',
                                     file=file_obj,
                                     subject_location='fooobar')
        article = self.create_article(author=author, featured_image=image)
    response = self.client.get(article.get_absolute_url())
    # Recreate the template's thumbnail options (800x300, cropped) to
    # predict the exact URL that should appear in the response.
    image_url = get_thumbnailer(article.featured_image).get_thumbnail({
        'size': (800, 300),
        'crop': True,
        'subject_location': article.featured_image.subject_location
    }).url
    self.assertContains(response, image_url)
def test_articles_by_tag(self):
    """
    Tests that TagArticleList view properly filters articles by their tags.
    This uses ANY of the languages articles are translated to.
    """
    untagged_articles = []
    for _ in range(5):
        article = self.create_article()
        untagged_articles.append(article)
    articles = self.create_tagged_articles(
        3, tags=(rand_str(), rand_str()))
    # tags are created in previous loop on demand, we need their slugs.
    # The unpacking order of the two slugs does not matter: the
    # assertions below are symmetric (queried tag's articles present,
    # the other tag's and untagged ones absent).
    tag_slug1, tag_slug2 = articles.keys()
    url = reverse('aldryn_newsblog:article-list-by-tag',
                  kwargs={'tag': tag_slug2})
    response = self.client.get(url)
    for article in articles[tag_slug2]:
        self.assertContains(response, article.title)
    for article in articles[tag_slug1]:
        self.assertNotContains(response, article.title)
    for article in untagged_articles:
        self.assertNotContains(response, article.title)
def test_articles_count_by_month(self):
    """get_months aggregates per-month article counts for the namespace."""
    months = [
        {'date': date(1914, 7, 3), 'num_entries': 1},
        {'date': date(1914, 8, 3), 'num_entries': 3},
        {'date': date(1945, 9, 3), 'num_entries': 5},
    ]
    for month in months:
        for _ in range(month['num_entries']):
            self.create_article(publishing_date=month['date'])
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(
        sorted(
            Article.objects.get_months(
                namespace=self.app_config.namespace),
            key=itemgetter('num_entries')),
        months)
def test_articles_count_by_author(self):
    """get_authors annotates each author with their article count."""
    authors = []
    for num_entries in [1, 3, 5]:
        person = self.create_person()
        person.num_entries = num_entries
        authors.append((person, num_entries))
    for i, data in enumerate(authors):
        for _ in range(data[1]):
            self.create_article(author=data[0])
        # replace author with its pk, as we need it to easily compare
        authors[i] = (data[0].pk, data[1])
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(
        sorted(
            Article.objects.get_authors(
                namespace=self.app_config.namespace).values_list(
                'pk', 'num_entries'),
            key=itemgetter(1)),
        authors)
def test_articles_count_by_tags(self):
    """get_tags annotates each tag with the number of tagged articles."""
    untagged_articles = []
    for _ in range(5):
        article = self.create_article()
        untagged_articles.append(article)
    # Tag objects are created on attaching tag name to Article,
    # so this looks not very DRY
    tag_names = ('tag foo', 'tag bar', 'tag buzz')
    # dict.keys() is a non-indexable view on Python 3; next(iter(...))
    # works on both Python 2 and 3.
    tag_slug1 = next(iter(self.create_tagged_articles(
        1, tags=(tag_names[0],)).keys()))
    tag_slug2 = next(iter(self.create_tagged_articles(
        3, tags=(tag_names[1],)).keys()))
    tag_slug3 = next(iter(self.create_tagged_articles(
        5, tags=(tag_names[2],)).keys()))
    tags_expected = [
        (tag_slug3, 5),
        (tag_slug2, 3),
        (tag_slug1, 1),
    ]
    tags = Article.objects.get_tags(namespace=self.app_config.namespace)
    # Materialize as a list: on Python 3 map() returns a lazy iterator,
    # which never compares equal to a list.
    tags = [(tag.slug, tag.num_entries) for tag in tags]
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(tags, tags_expected)
def test_articles_by_date(self):
    """Day archive lists only articles published on that exact day."""
    matching = [
        self.create_article(publishing_date=datetime(
            1914, 7, 28, randint(0, 23), randint(0, 59)))
        for _ in range(10)]
    non_matching = [
        self.create_article(publishing_date=datetime(
            1939, 9, 1, randint(0, 23), randint(0, 59)))
        for _ in range(10)]
    response = self.client.get(reverse(
        'aldryn_newsblog:article-list-by-day',
        kwargs={'year': '1914', 'month': '07', 'day': '28'}))
    for excluded in non_matching:
        self.assertNotContains(response, excluded.title)
    for included in matching:
        self.assertContains(response, included.title)
def test_articles_by_month(self):
    """Month archive lists only articles published in that month."""
    matching = [
        self.create_article(publishing_date=datetime(
            1914, 7, randint(1, 31), randint(0, 23), randint(0, 59)))
        for _ in range(10)]
    non_matching = [
        self.create_article(publishing_date=datetime(
            1939, 9, 1, randint(0, 23), randint(0, 59)))
        for _ in range(10)]
    response = self.client.get(reverse(
        'aldryn_newsblog:article-list-by-month',
        kwargs={'year': '1914', 'month': '07'}))
    for excluded in non_matching:
        self.assertNotContains(response, excluded.title)
    for included in matching:
        self.assertContains(response, included.title)
def test_articles_by_year(self):
    """Year archive lists only articles published in that year."""
    matching = [
        self.create_article(publishing_date=datetime(
            1914, randint(1, 12), randint(1, 28),
            randint(0, 23), randint(0, 59)))
        for _ in range(10)]
    non_matching = [
        self.create_article(publishing_date=datetime(
            1939, randint(1, 12), randint(1, 28),
            randint(0, 23), randint(0, 59)))
        for _ in range(10)]
    response = self.client.get(reverse(
        'aldryn_newsblog:article-list-by-year', kwargs={'year': '1914'}))
    for excluded in non_matching:
        self.assertNotContains(response, excluded.title)
    for included in matching:
        self.assertContains(response, included.title)
def test_has_content(self):
    """Detail page renders both the title and the plugin body text."""
    # Just make sure we have a known language
    activate(self.language)
    expected_title = rand_str()
    expected_body = rand_str()
    author = self.create_person()
    article = Article.objects.create(
        title=expected_title, slug=rand_str(), author=author,
        owner=author.user, app_config=self.app_config,
        publishing_date=datetime.now())
    article.save()
    api.add_plugin(article.content, 'TextPlugin', self.language)
    plugin = article.content.get_plugins()[0].get_plugin_instance()[0]
    plugin.body = expected_body
    plugin.save()
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, expected_title)
    self.assertContains(response, expected_body)
def test_unattached_namespace(self):
    """Articles in a namespace without an apphooked page are unreachable."""
    # create a new namespace that has no corresponding blog app page
    orphan_config = NewsBlogConfig.objects.create(namespace='another')
    orphans = [self.create_article(app_config=orphan_config)
               for _ in range(10)]
    detail_status = self.client.get(
        orphans[0].get_absolute_url()).status_code
    self.assertEqual(detail_status, 404)
    listing = self.client.get(reverse('aldryn_newsblog:article-list'))
    for orphan in orphans:
        self.assertNotContains(listing, orphan.title)
def test_auto_slugifies(self):
    """Slugs are auto-generated and de-duplicated with `_N` suffixes."""
    activate(self.language)
    title = u'This is a title'
    author = self.create_person()
    article = Article.objects.create(
        title=title, author=author, owner=author.user,
        app_config=self.app_config, publishing_date=datetime.now())
    article.save()
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(article.slug, 'this-is-a-title')
    # Now, let's try another with the same title
    article.id = None
    # Note, it cannot be the exact same title, else we'll fail the unique
    # constraint on the field.
    article.title = title.lower()
    article.save()
    # Note that this should be "incremented" slug here.
    self.assertEqual(article.slug, 'this-is-a-title_1')
    article.id = None
    article.title = title.upper()
    article.save()
    self.assertEqual(article.slug, 'this-is-a-title_2')
def test_auto_existing_author(self):
    """The owner's existing Person becomes the author unless auto-create
    is disabled via ALDRYN_NEWSBLOG_CREATE_AUTHOR."""
    author = self.create_person()
    article = Article.objects.create(
        title=rand_str(), owner=author.user,
        app_config=self.app_config, publishing_date=datetime.now())
    article.save()
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(article.author.user, article.owner)
    with self.settings(ALDRYN_NEWSBLOG_CREATE_AUTHOR=False):
        article = Article.objects.create(
            title=rand_str(), owner=author.user,
            app_config=self.app_config, publishing_date=datetime.now())
        self.assertEqual(article.author, None)
def test_auto_new_author(self):
    """A Person is auto-created from the owner's first/last name."""
    user = self.create_user()
    article = Article.objects.create(
        title=rand_str(), owner=user,
        app_config=self.app_config, publishing_date=datetime.now())
    article.save()
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(article.author.name,
                     u' '.join((user.first_name, user.last_name)))
def test_latest_entries_plugin(self):
    """LatestEntriesPlugin renders only articles of its own app_config."""
    plugin_page = api.create_page(
        'plugin page', self.template, self.language,
        parent=self.root_page, published=True)
    target_placeholder = plugin_page.placeholders.all()[0]
    api.add_plugin(target_placeholder, 'LatestEntriesPlugin', self.language,
                   app_config=self.app_config, latest_entries=7)
    plugin = target_placeholder.get_plugins()[0].get_plugin_instance()[0]
    plugin.save()
    plugin_page.publish(self.language)
    own_articles = [self.create_article() for _ in range(7)]
    other_config = NewsBlogConfig.objects.create(namespace='another')
    other_articles = [self.create_article(app_config=other_config)
                      for _ in range(3)]
    rendered = self.client.get(plugin_page.get_absolute_url())
    for article in own_articles:
        self.assertContains(rendered, article.title)
    for article in other_articles:
        self.assertNotContains(rendered, article.title)
def test_index_simple(self):
    """ArticleIndex exposes title, lead-in, content, tags and categories."""
    self.index = ArticleIndex()
    content0 = rand_str(prefix='content0_')
    self.setup_categories()
    article = self.create_article(content=content0, lead_in='lead in text',
                                  title='a title')
    # (Removed a no-op `article.categories.add()` call with no arguments.)
    for tag_name in ('tag 1', 'tag2'):
        article.tags.add(tag_name)
    for category in (self.category1, self.category2):
        article.categories.add(category)
    self.assertEqual(self.index.get_title(article), 'a title')
    self.assertEqual(self.index.get_description(article), 'lead in text')
    # Compute the search data once instead of four identical calls.
    search_data = self.index.get_search_data(article, 'en', self.request)
    self.assertTrue('lead in text' in search_data)
    self.assertTrue(content0 in search_data)
    self.assertTrue('tag 1' in search_data)
    self.assertTrue(self.category1.name in search_data)
def test_index_multilingual(self):
    """With untranslated entries hidden, the search index only returns
    articles translated to the indexed language."""
    self.index = ArticleIndex()
    content0 = rand_str(prefix='content0_')
    self.setup_categories()
    article_1 = self.create_article(content=content0, lead_in=u'lead in text',
                                    title=u'a title')
    article_2 = self.create_article(content=content0, lead_in=u'lead in text',
                                    title=u'second title')
    for article in (article_1, article_2):
        for tag_name in ('tag 1', 'tag2'):
            article.tags.add(tag_name)
        for category in (self.category1, self.category2):
            article.categories.add(category)
    # Only article_2 receives a German translation.
    with switch_language(article_2, 'de'):
        article_2.title = u'de title'
        article_2.lead_in = u'de lead in'
        article_2.save()
    # hide_untranslated=True disables parler's language fallback.
    PARLER_LANGUAGES = {
        1: (
            {'code': 'de', },
            {'code': 'fr', },
            {'code': 'en', },
        ),
        'default': {
            'hide_untranslated': True,
        }
    }
    LANGUAGES = add_default_language_settings(PARLER_LANGUAGES)
    with override_parler_settings(PARLER_LANGUAGES=LANGUAGES):
        with smart_override('de'):
            language = get_language()
            # english-only article is excluded
            qs = self.index.index_queryset(language)
            self.assertEqual(qs.count(), 1)
            self.assertEqual(qs.translated(language, title__icontains='title').count(), 1)
            # the language is correctly setup
            for article_de in qs:
                self.assertEqual(self.index.get_title(article_de), 'de title')
                self.assertEqual(self.index.get_description(article_de), 'de lead in')
class TestVersioning(NewsBlogTestsMixin, TransactionTestCase):
def create_revision(self, article, content=None, language=None, **kwargs):
    """Apply **kwargs (and optionally a new plugin body) to `article` and
    save it as a single django-reversion revision.

    NOTE(review): `language` is accepted but currently unused -- confirm
    whether callers rely on it.
    """
    with transaction.atomic():
        with reversion.create_revision():
            for k, v in six.iteritems(kwargs):
                setattr(article, k, v)
            if content:
                # Mutate the existing TextPlugin body in place.
                plugins = article.content.get_plugins()
                plugin = plugins[0].get_plugin_instance()[0]
                plugin.body = content
                plugin.save()
            # TODO: Cover both cases (plugin modification/recreation)
            # if content:
            #     article.content.get_plugins().delete()
            #     api.add_plugin(article.content, 'TextPlugin',
            #                    self.language, body=content)
            article.save()
def revert_to(self, article, revision):
    # Versions are ordered newest-first, so `revision` counts back from
    # the most recent one (1 == the previous revision) -- consistent with
    # how the tests below use it.
    reversion.get_for_object(article)[revision].revision.revert()
def test_revert_revision(self):
    """Reverting to an older revision restores both the article title and
    the plugin content of that revision."""
    title1 = rand_str(prefix='title1_')
    title2 = rand_str(prefix='title2_')
    content0 = rand_str(prefix='content0_')
    content1 = rand_str(prefix='content1_')
    content2 = rand_str(prefix='content2_')
    article = self.create_article(content=content0)
    # Revision 1
    self.create_revision(article, title=title1, content=content1)
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, title1)
    self.assertContains(response, content1)
    self.assertNotContains(response, content0)
    # Revision 2
    self.create_revision(article, title=title2, content=content2)
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, title2)
    self.assertContains(response, content2)
    self.assertNotContains(response, content1)
    # Revert to revision 1 (index 1 = one step back from the newest)
    self.revert_to(article, 1)
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, title1)
    self.assertContains(response, content1)
    self.assertNotContains(response, content0)
    self.assertNotContains(response, content2)
def test_revert_translated_revision(self):
    """Per-language edits create separate revisions; reverting steps back
    through them one translation change at a time."""
    title1_en = rand_str(prefix='title1_en_')
    title1_de = rand_str(prefix='title1_de_')
    title2_en = rand_str(prefix='title2_en_')
    title2_de = rand_str(prefix='title2_de_')
    article = self.create_article()
    # Revision 1
    article.set_current_language('en')
    self.create_revision(article, title=title1_en)
    article.set_current_language('de')
    self.create_revision(article, title=title1_de)
    with switch_language(article, 'en'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title1_en)
    with switch_language(article, 'de'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title1_de)
    # Revision 2a (modify just EN)
    article.set_current_language('en')
    self.create_revision(article, title=title2_en)
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, title2_en)
    with switch_language(article, 'de'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title1_de)
    # Revision 2b (modify just DE)
    article.set_current_language('de')
    self.create_revision(article, title=title2_de)
    with switch_language(article, 'en'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title2_en)
    with switch_language(article, 'de'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title2_de)
    # Revert to revision 2a (EN=2, DE=1): one step back undoes the DE edit
    self.revert_to(article, 1)
    with switch_language(article, 'en'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title2_en)
    with switch_language(article, 'de'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title1_de)
    # Revert to revision 1 (EN=1, DE=1): two steps back undoes the EN edit
    self.revert_to(article, 2)
    with switch_language(article, 'en'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title1_en)
    with switch_language(article, 'de'):
        response = self.client.get(article.get_absolute_url())
        self.assertContains(response, title1_de)
def test_edit_plugin_directly(self):
    """Editing a plugin inside reversion.create_revision only produces a
    revision when placeholders are explicitly captured."""
    content0 = rand_str(prefix='content0_')
    content1 = rand_str(prefix='content1_')
    content2 = rand_str(prefix='content2_')
    article = self.create_article(content=content0)
    # Revision 1
    self.create_revision(article, content=content1)
    self.assertEqual(
        len(reversion.get_for_object(article)), 1)
    # Revision 2: modify the plugin body directly, then capture the
    # article together with its placeholders.
    with transaction.atomic():
        with reversion.create_revision():
            plugins = article.content.get_plugins()
            plugin = plugins[0].get_plugin_instance()[0]
            plugin.body = content2
            plugin.save()
            create_revision_with_placeholders(article)
    self.assertEqual(
        len(reversion.get_for_object(article)), 2)
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, content2)
    self.assertNotContains(response, content1)
    # Revert to revision 1
    self.revert_to(article, 1)
    response = self.client.get(article.get_absolute_url())
    self.assertContains(response, content1)
    self.assertNotContains(response, content2)
if __name__ == '__main__':
    # Allow running this test module directly, outside the Django runner.
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.