| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
AloneRoad/Inforlearn | django/contrib/admin/widgets.py | 3 | 11700 | """
Form Widget classes specific to the Django admin site.
"""
import django.utils.copycompat as copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page.
"""
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
(name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
return mark_safe(u''.join(output))
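# Usage sketch (hedged; `Tag` and the form class are assumed examples, not
# part of this module). FilteredSelectMultiple is typically attached to a
# many-to-many form field from a ModelAdmin form:
#
#     from django import forms
#
#     class ArticleAdminForm(forms.ModelForm):
#         tags = forms.ModelMultipleChoiceField(
#             queryset=Tag.objects.all(),
#             widget=FilteredSelectMultiple('tags', is_stacked=False))
#
# The page rendering the form must also include the jsi18n catalog, as the
# class docstring notes.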
class AdminDateWidget(forms.TextInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
    def __init__(self, attrs=None):
        final_attrs = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=final_attrs)
class AdminTimeWidget(forms.TextInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
    def __init__(self, attrs=None):
        final_attrs = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=final_attrs)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
(_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'<ul%s>\n%s\n</ul>' % (
flatatt(self.attrs),
u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
)
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.FileInput):
"""
A FileField Widget that shows its current value if it has one.
"""
    def __init__(self, attrs=None):
super(AdminFileWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, "url"):
output.append('%s <a target="_blank" href="%s">%s</a> <br />%s ' % \
(_('Currently:'), value.url, value, _('Change:')))
output.append(super(AdminFileWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, attrs=None):
self.rel = rel
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
params = self.url_parameters()
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
url = ''
        if 'class' not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
(related_url, url, name))
output.append('<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
if value:
output.append(self.label_for_value(value))
return mark_safe(u''.join(output))
def base_url_parameters(self):
params = {}
if self.rel.limit_choices_to and hasattr(self.rel.limit_choices_to, 'items'):
items = []
for k, v in self.rel.limit_choices_to.items():
if isinstance(v, list):
v = ','.join([str(x) for x in v])
else:
v = str(v)
items.append((k, v))
params.update(dict(items))
return params
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.get(**{key: value})
except self.rel.to.DoesNotExist:
return ''
return ' <strong>%s</strong>' % escape(truncate_words(obj, 14))
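# Usage sketch (hedged; `EntryAdmin` and `Entry` are assumed examples): the
# admin selects this widget automatically for any foreign key named in a
# ModelAdmin's raw_id_fields:
#
#     class EntryAdmin(admin.ModelAdmin):
#         raw_id_fields = ('author',)  # rendered with ForeignKeyRawIdWidget
#
# Instantiating it directly requires the field's relation descriptor, e.g.
# ForeignKeyRawIdWidget(Entry._meta.get_field('author').rel).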
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def __init__(self, rel, attrs=None):
super(ManyToManyRawIdWidget, self).__init__(rel, attrs)
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join([str(v) for v in value])
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value and ',' in value:
return data[name].split(',')
if value:
return [value]
return None
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
for pk1, pk2 in zip(initial, data):
if force_unicode(pk1) != force_unicode(pk2):
return True
return False
class RelatedFieldWidgetWrapper(forms.Widget):
"""
    This class wraps a given widget to add the "add another" icon for the
    admin interface.
"""
def __init__(self, widget, rel, admin_site):
self.is_hidden = widget.is_hidden
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
def _media(self):
return self.widget.media
media = property(_media)
def render(self, name, value, *args, **kwargs):
rel_to = self.rel.to
info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
try:
related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
except NoReverseMatch:
info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
related_url = '%s%s/%s/add/' % info
self.widget.choices = self.choices
output = [self.widget.render(name, value, *args, **kwargs)]
if rel_to in self.admin_site._registry: # If the related object has an admin interface:
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(related_url, name))
output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
return mark_safe(u''.join(output))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
        self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def _has_changed(self, initial, data):
return self.widget._has_changed(initial, data)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
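# Construction sketch (hedged; the real call site lives in
# django.contrib.admin.options and may differ by version):
#
#     widget = forms.Select(choices=[])
#     wrapped = RelatedFieldWidgetWrapper(
#         widget,
#         rel=Entry._meta.get_field('author').rel,  # `Entry` is assumed
#         admin_site=admin.site)
#
# Rendering `wrapped` emits the underlying widget plus the "add another"
# popup link when the related model is registered with the given site.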
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
class AdminIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
| apache-2.0 |
coreos/peewee | playhouse/tests/test_query_results.py | 6 | 56317 | import sys
from peewee import ModelQueryResultWrapper
from peewee import NaiveQueryResultWrapper
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import test_db
from playhouse.tests.models import *
class TestQueryResultWrapper(ModelTestCase):
requires = [User, Blog, Comment]
def test_iteration(self):
User.create_users(10)
with self.assertQueryCount(1):
sq = User.select()
qr = sq.execute()
first_five = []
for i, u in enumerate(qr):
first_five.append(u.username)
if i == 4:
break
self.assertEqual(first_five, ['u1', 'u2', 'u3', 'u4', 'u5'])
another_iter = [u.username for u in qr]
self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
another_iter = [u.username for u in qr]
self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
def test_iteration_protocol(self):
User.create_users(3)
with self.assertQueryCount(1):
query = User.select().order_by(User.id)
qr = query.execute()
for user in qr:
pass
self.assertRaises(StopIteration, next, qr)
self.assertEqual([u.username for u in qr], ['u1', 'u2', 'u3'])
self.assertEqual(query[0].username, 'u1')
self.assertEqual(query[2].username, 'u3')
self.assertRaises(StopIteration, next, qr)
def test_iterator(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id).execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
self.assertTrue(qr._populated)
self.assertEqual(qr._result_cache, [])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
with self.assertQueryCount(1):
qr = User.select().where(User.username == 'xxx').execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, [])
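    # The behavior under test, restated as a stand-alone sketch: .iterator()
    # streams rows without populating the result cache, which keeps memory
    # flat on large result sets (`process` is a hypothetical callable):
    #
    #     for user in User.select().iterator():
    #         process(user)  # rows are not cached; iterating again re-queries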
def test_iterator_query_method(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id)
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
def test_iterator_extended(self):
User.create_users(10)
for i in range(1, 4):
for j in range(i):
Blog.create(
title='blog-%s-%s' % (i, j),
user=User.get(User.username == 'u%s' % i))
qr = (User
.select(
User.username,
fn.Count(Blog.pk).alias('ct'))
.join(Blog)
.where(User.username << ['u1', 'u2', 'u3'])
.group_by(User)
.order_by(User.id)
.naive())
accum = []
with self.assertQueryCount(1):
for user in qr.iterator():
accum.append((user.username, user.ct))
self.assertEqual(accum, [
('u1', 1),
('u2', 2),
('u3', 3)])
qr = (User
.select(fn.Count(User.id).alias('ct'))
.group_by(User.username << ['u1', 'u2', 'u3'])
.order_by(fn.Count(User.id).desc()))
accum = []
with self.assertQueryCount(1):
for ct, in qr.tuples().iterator():
accum.append(ct)
self.assertEqual(accum, [7, 3])
def test_fill_cache(self):
def assertUsernames(qr, n):
self.assertEqual([u.username for u in qr._result_cache], ['u%d' % i for i in range(1, n+1)])
User.create_users(20)
with self.assertQueryCount(1):
qr = User.select().execute()
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# a subsequent call will not "over-fill"
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# ask for one more and ye shall receive
qr.fill_cache(6)
self.assertFalse(qr._populated)
assertUsernames(qr, 6)
qr.fill_cache(21)
self.assertTrue(qr._populated)
assertUsernames(qr, 20)
self.assertRaises(StopIteration, next, qr)
def test_select_related(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
c11 = Comment.create(blog=b1, comment='c11')
c12 = Comment.create(blog=b1, comment='c12')
c21 = Comment.create(blog=b2, comment='c21')
c22 = Comment.create(blog=b2, comment='c22')
# missing comment.blog_id
comments = (Comment
.select(Comment.id, Comment.comment, Blog.pk, Blog.title)
.join(Blog)
.where(Blog.title == 'b1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
# missing blog.pk
comments = (Comment
.select(Comment.id, Comment.comment, Comment.blog, Blog.title)
.join(Blog)
.where(Blog.title == 'b2')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b2', 'b2'])
# both but going up 2 levels
comments = (Comment
.select(Comment, Blog, User)
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, ModelQueryResultWrapper))
comments = (Comment
.select()
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(5):
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, NaiveQueryResultWrapper))
# Go up two levels and use aliases for the joined instances.
comments = (Comment
.select(Comment, Blog, User)
.join(Blog, on=(Comment.blog == Blog.pk).alias('bx'))
.join(User, on=(Blog.user == User.id).alias('ux'))
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.bx.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.bx.ux.username for c in comments], ['u1', 'u1'])
def test_naive(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertTrue(isinstance(users._qr, NaiveQueryResultWrapper))
users = User.select(User, Blog).join(Blog).naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertEqual([u.title for u in users], ['b1', 'b2'])
query = Blog.select(Blog, User).join(User).order_by(Blog.title).naive()
self.assertEqual(query.get().user, User.get(User.username == 'u1'))
def test_tuples_dicts(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().tuples().order_by(User.id)
self.assertEqual([r for r in users], [
(u1.id, 'u1'),
(u2.id, 'u2'),
])
users = User.select().dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1'},
{'id': u2.id, 'username': 'u2'},
])
users = User.select(User, Blog).join(Blog).order_by(User.id).tuples()
self.assertEqual([r for r in users], [
(u1.id, 'u1', b1.pk, u1.id, 'b1', '', None),
(u2.id, 'u2', b2.pk, u2.id, 'b2', '', None),
])
users = User.select(User, Blog).join(Blog).order_by(User.id).dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1', 'pk': b1.pk, 'user': u1.id, 'title': 'b1', 'content': '', 'pub_date': None},
{'id': u2.id, 'username': 'u2', 'pk': b2.pk, 'user': u2.id, 'title': 'b2', 'content': '', 'pub_date': None},
])
def test_slicing_dicing(self):
def assertUsernames(users, nums):
self.assertEqual([u.username for u in users], ['u%d' % i for i in nums])
User.create_users(10)
with self.assertQueryCount(1):
uq = User.select().order_by(User.id)
for i in range(2):
res = uq[0]
self.assertEqual(res.username, 'u1')
with self.assertQueryCount(0):
for i in range(2):
res = uq[1]
self.assertEqual(res.username, 'u2')
with self.assertQueryCount(0):
for i in range(2):
res = uq[:3]
assertUsernames(res, [1, 2, 3])
with self.assertQueryCount(0):
for i in range(2):
res = uq[2:5]
assertUsernames(res, [3, 4, 5])
with self.assertQueryCount(0):
for i in range(2):
res = uq[5:]
assertUsernames(res, [6, 7, 8, 9, 10])
self.assertRaises(IndexError, uq.__getitem__, 10)
self.assertRaises(ValueError, uq.__getitem__, -1)
with self.assertQueryCount(0):
res = uq[10:]
self.assertEqual(res, [])
def test_indexing_fill_cache(self):
def assertUser(query_or_qr, idx):
self.assertEqual(query_or_qr[idx].username, 'u%d' % (idx + 1))
User.create_users(10)
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
# Ensure we can grab the first 5 users in 1 query.
for i in range(5):
assertUser(uq, i)
# Iterate in reverse and ensure only costs 1 query.
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
for i in reversed(range(10)):
assertUser(uq, i)
# Execute the query and get reference to result wrapper.
query = User.select().order_by(User.id)
query.execute()
qr = query._qr
# Getting the first user will populate the result cache with 1 obj.
assertUser(query, 0)
self.assertEqual(len(qr._result_cache), 1)
# Getting the last user will fill the cache.
assertUser(query, 9)
self.assertEqual(len(qr._result_cache), 10)
def test_prepared(self):
for i in range(2):
u = User.create(username='u%d' % i)
for j in range(2):
Blog.create(title='b%d-%d' % (i, j), user=u, content='')
for u in User.select():
# check prepared was called
self.assertEqual(u.foo, u.username)
for b in Blog.select(Blog, User).join(User):
# prepared is called for select-related instances
self.assertEqual(b.foo, b.title)
self.assertEqual(b.user.foo, b.user.username)
def test_aliasing_values(self):
User.create_users(2)
q = User.select(User.username.alias('xx')).order_by(User.username)
results = [row for row in q.dicts()]
self.assertEqual(results, [
{'xx': 'u1'},
{'xx': 'u2'}])
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Force ModelQueryResultWrapper.
q = (User
.select(User.username.alias('xx'), Blog.pk)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username))
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Use Model and Field aliases.
UA = User.alias()
q = (User
.select(
User.username.alias('x'),
UA.username.alias('y'))
.join(UA, on=(User.id == UA.id).alias('z'))
.order_by(User.username))
results = [(user.x, user.z.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
q = q.naive()
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
uq = User.select(User.id, User.username).alias('u2')
q = (User
.select(
User.username.alias('x'),
uq.c.username.alias('y'))
.join(uq, on=(User.id == uq.c.id))
.order_by(User.username))
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
class TestJoinedInstanceConstruction(ModelTestCase):
requires = [Blog, User]
def setUp(self):
super(TestJoinedInstanceConstruction, self).setUp()
u1 = User.create(username='u1')
u2 = User.create(username='u2')
Blog.create(user=u1, title='b1')
Blog.create(user=u2, title='b2')
def test_fk_missing_pk(self):
# Not enough information.
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username)
.join(User)
.order_by(Blog.title, User.username))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertIsNone(blog.user.id)
self.assertIsNone(blog.user_id)
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_fk_with_pk(self):
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username, User.id)
.join(User)
.order_by(Blog.title, User.username))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertIsNotNone(blog.user.id)
self.assertIsNotNone(blog.user_id)
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_backref_missing_pk(self):
with self.assertQueryCount(1):
q = (User
.select(User.username, Blog.title)
.join(Blog)
.order_by(User.username, Blog.title))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertIsNone(user.id)
self.assertIsNone(user.blog.pk)
self.assertIsNone(user.blog.user_id)
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
def test_fk_join_expr(self):
with self.assertQueryCount(1):
q = (User
.select(User.username, Blog.title)
.join(Blog, on=(User.id == Blog.user).alias('bx'))
.order_by(User.username))
results = []
for user in q:
results.append((user.username, user.bx.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username)
.join(User, on=(Blog.user == User.id).alias('ux'))
.order_by(Blog.title))
results = []
for blog in q:
results.append((blog.title, blog.ux.username))
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_aliases(self):
B = Blog.alias()
U = User.alias()
with self.assertQueryCount(1):
q = (U.select(U.username, B.title)
.join(B, on=(U.id == B.user))
.order_by(U.username))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
with self.assertQueryCount(1):
q = (B.select(B.title, U.username)
.join(U, on=(B.user == U.id))
.order_by(B.title))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_subqueries(self):
uq = User.select()
bq = Blog.select(Blog.title, Blog.user).alias('bq')
with self.assertQueryCount(1):
q = (User
.select(User, bq.c.title.bind_to(Blog))
.join(bq, on=(User.id == bq.c.user_id).alias('blog'))
.order_by(User.username))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
class TestQueryResultTypeConversion(ModelTestCase):
requires = [User]
def setUp(self):
super(TestQueryResultTypeConversion, self).setUp()
for i in range(3):
User.create(username='u%d' % i)
def assertNames(self, query, expected, attr='username'):
id_field = query.model_class.id
self.assertEqual(
[getattr(item, attr) for item in query.order_by(id_field)],
expected)
def test_simple_select(self):
query = UpperUser.select()
self.assertNames(query, ['U0', 'U1', 'U2'])
query = User.select()
self.assertNames(query, ['u0', 'u1', 'u2'])
def test_with_alias(self):
# Even when aliased to a different attr, the column is coerced.
query = UpperUser.select(UpperUser.username.alias('foo'))
self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')
def test_scalar(self):
max_username = (UpperUser
.select(fn.Max(UpperUser.username))
.scalar(convert=True))
self.assertEqual(max_username, 'U2')
max_username = (UpperUser
.select(fn.Max(UpperUser.username))
.scalar())
self.assertEqual(max_username, 'u2')
def test_function(self):
substr = fn.SubStr(UpperUser.username, 1, 3)
# Being the first parameter of the function, it meets the special-case
# criteria.
query = UpperUser.select(substr.alias('foo'))
self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')
query = UpperUser.select(substr.coerce(False).alias('foo'))
self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
query = UpperUser.select(substr.coerce(False).alias('username'))
self.assertNames(query, ['u0', 'u1', 'u2'])
query = UpperUser.select(fn.Lower(UpperUser.username).alias('username'))
self.assertNames(query, ['U0', 'U1', 'U2'])
query = UpperUser.select(
fn.Lower(UpperUser.username).alias('username').coerce(False))
self.assertNames(query, ['u0', 'u1', 'u2'])
# Since it is aliased to an existing column, we will use that column's
# coerce.
query = UpperUser.select(
fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('username'))
self.assertNames(query, ['U0', 'U1', 'U2'])
query = UpperUser.select(
fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('foo'))
self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
class TestModelQueryResultWrapper(ModelTestCase):
requires = [TestModelA, TestModelB, TestModelC, User, Blog]
data = (
(TestModelA, (
('pk1', 'a1'),
('pk2', 'a2'),
('pk3', 'a3'))),
(TestModelB, (
('pk1', 'b1'),
('pk2', 'b2'),
('pk3', 'b3'))),
(TestModelC, (
('pk1', 'c1'),
('pk2', 'c2'))),
)
def setUp(self):
super(TestModelQueryResultWrapper, self).setUp()
for model_class, model_data in self.data:
for pk, data in model_data:
model_class.create(field=pk, data=data)
def test_join_expr(self):
def get_query(join_type=JOIN.INNER):
sq = (TestModelA
.select(TestModelA, TestModelB, TestModelC)
.join(
TestModelB,
on=(TestModelA.field == TestModelB.field).alias('rel_b'))
.join(
TestModelC,
join_type=join_type,
on=(TestModelB.field == TestModelC.field))
.order_by(TestModelA.field))
return sq
sq = get_query()
self.assertEqual(sq.count(), 2)
with self.assertQueryCount(1):
results = list(sq)
expected = (('b1', 'c1'), ('b2', 'c2'))
for i, (b_data, c_data) in enumerate(expected):
self.assertEqual(results[i].rel_b.data, b_data)
self.assertEqual(results[i].rel_b.field.data, c_data)
sq = get_query(JOIN.LEFT_OUTER)
self.assertEqual(sq.count(), 3)
with self.assertQueryCount(1):
results = list(sq)
expected = (('b1', 'c1'), ('b2', 'c2'), ('b3', None))
for i, (b_data, c_data) in enumerate(expected):
self.assertEqual(results[i].rel_b.data, b_data)
self.assertEqual(results[i].rel_b.field.data, c_data)
def test_backward_join(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
for user in (u1, u2):
Blog.create(title='b-%s' % user.username, user=user)
# Create an additional blog for user 2.
Blog.create(title='b-u2-2', user=u2)
res = (User
.select(User.username, Blog.title)
.join(Blog)
.order_by(User.username.asc(), Blog.title.asc()))
self.assertEqual([(u.username, u.blog.title) for u in res], [
('u1', 'b-u1'),
('u2', 'b-u2'),
('u2', 'b-u2-2')])
def test_joins_with_aliases(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1_1 = Blog.create(user=u1, title='b1-1')
b1_2 = Blog.create(user=u1, title='b1-2')
b2_1 = Blog.create(user=u2, title='b2-1')
UserAlias = User.alias()
BlogAlias = Blog.alias()
def assertExpectedQuery(query, is_user_query):
accum = []
with self.assertQueryCount(1):
if is_user_query:
for user in query:
accum.append((user.username, user.blog.title))
else:
for blog in query:
accum.append((blog.user.username, blog.title))
self.assertEqual(accum, [
('u1', 'b1-1'),
('u1', 'b1-2'),
('u2', 'b2-1'),
])
combinations = [
(User, BlogAlias, User.id == BlogAlias.user, True),
(User, BlogAlias, BlogAlias.user == User.id, True),
(User, Blog, User.id == Blog.user, True),
(User, Blog, Blog.user == User.id, True),
(User, Blog, None, True),
(Blog, UserAlias, UserAlias.id == Blog.user, False),
(Blog, UserAlias, Blog.user == UserAlias.id, False),
(Blog, User, User.id == Blog.user, False),
(Blog, User, Blog.user == User.id, False),
(Blog, User, None, False),
]
for Src, JoinModel, predicate, is_user_query in combinations:
query = (Src
.select(Src, JoinModel)
.join(JoinModel, on=predicate)
.order_by(SQL('1, 2')))
assertExpectedQuery(query, is_user_query)
class TestModelQueryResultForeignKeys(ModelTestCase):
requires = [Parent, Child]
def test_foreign_key_assignment(self):
parent = Parent.create(data='p1')
child = Child.create(parent=parent, data='c1')
ParentAlias = Parent.alias()
query = Child.select(Child, ParentAlias)
ljoin = (ParentAlias.id == Child.parent)
rjoin = (Child.parent == ParentAlias.id)
lhs_alias = query.join(ParentAlias, on=ljoin)
rhs_alias = query.join(ParentAlias, on=rjoin)
self.assertJoins(lhs_alias, [
'INNER JOIN "parent" AS parent '
'ON ("parent"."id" = "child"."parent_id")'])
self.assertJoins(rhs_alias, [
'INNER JOIN "parent" AS parent '
'ON ("child"."parent_id" = "parent"."id")'])
with self.assertQueryCount(1):
lchild = lhs_alias.get()
self.assertEqual(lchild.id, child.id)
self.assertEqual(lchild.parent.id, parent.id)
with self.assertQueryCount(1):
rchild = rhs_alias.get()
self.assertEqual(rchild.id, child.id)
self.assertEqual(rchild.parent.id, parent.id)
class TestSelectRelatedForeignKeyToNonPrimaryKey(ModelTestCase):
requires = [Package, PackageItem]
def test_select_related(self):
p1 = Package.create(barcode='101')
p2 = Package.create(barcode='102')
pi11 = PackageItem.create(title='p11', package='101')
pi12 = PackageItem.create(title='p12', package='101')
pi21 = PackageItem.create(title='p21', package='102')
pi22 = PackageItem.create(title='p22', package='102')
# missing PackageItem.package_id.
with self.assertQueryCount(1):
items = (PackageItem
.select(
PackageItem.id, PackageItem.title, Package.barcode)
.join(Package)
.where(Package.barcode == '101')
.order_by(PackageItem.id))
self.assertEqual(
[i.package.barcode for i in items],
['101', '101'])
with self.assertQueryCount(1):
items = (PackageItem
.select(
PackageItem.id, PackageItem.title, PackageItem.package, Package.id)
.join(Package)
.where(Package.barcode == '101')
.order_by(PackageItem.id))
self.assertEqual([i.package.id for i in items], [p1.id, p1.id])
class BaseTestPrefetch(ModelTestCase):
requires = [
User,
Blog,
Comment,
Parent,
Child,
Orphan,
ChildPet,
OrphanPet,
Category,
Post,
Tag,
TagPostThrough,
TagPostThroughAlt,
Category,
UserCategory,
Relationship,
]
user_data = [
('u1', (('b1', ('b1-c1', 'b1-c2')), ('b2', ('b2-c1',)))),
('u2', ()),
('u3', (('b3', ('b3-c1', 'b3-c2')), ('b4', ()))),
('u4', (('b5', ('b5-c1', 'b5-c2')), ('b6', ('b6-c1',)))),
]
parent_data = [
('p1', (
# children
(
('c1', ('c1-p1', 'c1-p2')),
('c2', ('c2-p1',)),
('c3', ('c3-p1',)),
('c4', ()),
),
# orphans
(
('o1', ('o1-p1', 'o1-p2')),
('o2', ('o2-p1',)),
('o3', ('o3-p1',)),
('o4', ()),
),
)),
('p2', ((), ())),
('p3', (
# children
(
('c6', ()),
('c7', ('c7-p1',)),
),
# orphans
(
('o6', ('o6-p1', 'o6-p2')),
('o7', ('o7-p1',)),
),
)),
]
category_tree = [
['root', ['p1', 'p2']],
['p1', ['p1-1', 'p1-2']],
['p2', ['p2-1', 'p2-2']],
['p1-1', []],
['p1-2', []],
['p2-1', []],
['p2-2', []],
]
def setUp(self):
super(BaseTestPrefetch, self).setUp()
for parent, (children, orphans) in self.parent_data:
p = Parent.create(data=parent)
for child_pets in children:
child, pets = child_pets
c = Child.create(parent=p, data=child)
for pet in pets:
ChildPet.create(child=c, data=pet)
for orphan_pets in orphans:
orphan, pets = orphan_pets
o = Orphan.create(parent=p, data=orphan)
for pet in pets:
OrphanPet.create(orphan=o, data=pet)
for user, blog_comments in self.user_data:
u = User.create(username=user)
for blog, comments in blog_comments:
b = Blog.create(user=u, title=blog, content='')
for c in comments:
Comment.create(blog=b, comment=c)
def _build_category_tree(self):
def cc(name, parent=None):
return Category.create(name=name, parent=parent)
root = cc('root')
p1 = cc('p1', root)
p2 = cc('p2', root)
for p in (p1, p2):
for i in range(2):
cc('%s-%s' % (p.name, i + 1), p)
class TestPrefetch(BaseTestPrefetch):
def test_prefetch_simple(self):
sq = User.select().where(User.username != 'u3')
sq2 = Blog.select().where(Blog.title != 'b2')
sq3 = Comment.select()
with self.assertQueryCount(3):
prefetch_sq = prefetch(sq, sq2, sq3)
results = []
for user in prefetch_sq:
results.append(user.username)
for blog in user.blog_set_prefetch:
results.append(blog.title)
for comment in blog.comments_prefetch:
results.append(comment.comment)
self.assertEqual(results, [
'u1', 'b1', 'b1-c1', 'b1-c2',
'u2',
'u4', 'b5', 'b5-c1', 'b5-c2', 'b6', 'b6-c1',
])
with self.assertQueryCount(0):
results = []
for user in prefetch_sq:
for blog in user.blog_set_prefetch:
results.append(blog.user.username)
for comment in blog.comments_prefetch:
results.append(comment.blog.title)
self.assertEqual(results, [
'u1', 'b1', 'b1', 'u4', 'b5', 'b5', 'u4', 'b6',
])
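    # The prefetch() pattern exercised above, as a minimal sketch using the
    # same User/Blog/Comment models: one query is issued per query passed in,
    # and the rows are stitched together in Python, exposed on
    # `<related_name>_prefetch` attributes:
    #
    #     for user in prefetch(User.select(), Blog.select(), Comment.select()):
    #         for blog in user.blog_set_prefetch:
    #             comments = [c.comment for c in blog.comments_prefetch]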
def test_prefetch_reverse(self):
sq = User.select()
sq2 = Blog.select().where(Blog.title != 'b2').order_by(Blog.pk)
with self.assertQueryCount(2):
prefetch_sq = prefetch(sq2, sq)
results = []
for blog in prefetch_sq:
results.append(blog.title)
results.append(blog.user.username)
self.assertEqual(results, [
'b1', 'u1',
'b3', 'u3',
'b4', 'u3',
'b5', 'u4',
'b6', 'u4'])
def test_prefetch_up_and_down(self):
blogs = Blog.select(Blog, User).join(User).order_by(Blog.title)
comments = Comment.select().order_by(Comment.comment.desc())
with self.assertQueryCount(2):
query = prefetch(blogs, comments)
results = []
for blog in query:
results.append((
blog.user.username,
blog.title,
[comment.comment for comment in blog.comments_prefetch]))
self.assertEqual(results, [
('u1', 'b1', ['b1-c2', 'b1-c1']),
('u1', 'b2', ['b2-c1']),
('u3', 'b3', ['b3-c2', 'b3-c1']),
('u3', 'b4', []),
('u4', 'b5', ['b5-c2', 'b5-c1']),
('u4', 'b6', ['b6-c1']),
])
def test_prefetch_multi_depth(self):
sq = Parent.select()
sq2 = Child.select()
sq3 = Orphan.select()
sq4 = ChildPet.select()
sq5 = OrphanPet.select()
with self.assertQueryCount(5):
prefetch_sq = prefetch(sq, sq2, sq3, sq4, sq5)
results = []
for parent in prefetch_sq:
results.append(parent.data)
for child in parent.child_set_prefetch:
results.append(child.data)
for pet in child.childpet_set_prefetch:
results.append(pet.data)
for orphan in parent.orphan_set_prefetch:
results.append(orphan.data)
for pet in orphan.orphanpet_set_prefetch:
results.append(pet.data)
self.assertEqual(results, [
'p1', 'c1', 'c1-p1', 'c1-p2', 'c2', 'c2-p1', 'c3', 'c3-p1', 'c4',
'o1', 'o1-p1', 'o1-p2', 'o2', 'o2-p1', 'o3', 'o3-p1', 'o4',
'p2',
'p3', 'c6', 'c7', 'c7-p1', 'o6', 'o6-p1', 'o6-p2', 'o7', 'o7-p1',
])
def test_prefetch_no_aggregate(self):
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username, Blog.title))
results = []
for user in query:
results.append((
user.username,
user.blog.title))
self.assertEqual(results, [
('u1', 'b1'),
('u1', 'b2'),
('u2', None),
('u3', 'b3'),
('u3', 'b4'),
('u4', 'b5'),
('u4', 'b6'),
])
def test_prefetch_self_join(self):
self._build_category_tree()
Child = Category.alias()
with self.assertQueryCount(2):
query = prefetch(Category.select().order_by(Category.id), Child)
names_and_children = [
[parent.name, [child.name for child in parent.children_prefetch]]
for parent in query]
self.assertEqual(names_and_children, self.category_tree)
class TestAggregateRows(BaseTestPrefetch):
def test_aggregate_users(self):
with self.assertQueryCount(1):
query = (User
.select(User, Blog, Comment)
.join(Blog, JOIN.LEFT_OUTER)
.join(Comment, JOIN.LEFT_OUTER)
.order_by(User.username, Blog.title, Comment.id)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[(blog.title,
[comment.comment for comment in blog.comments])
for blog in user.blog_set]))
self.assertEqual(results, [
('u1', [
('b1', ['b1-c1', 'b1-c2']),
('b2', ['b2-c1'])]),
('u2', []),
('u3', [
('b3', ['b3-c1', 'b3-c2']),
('b4', [])]),
('u4', [
('b5', ['b5-c1', 'b5-c2']),
('b6', ['b6-c1'])]),
])
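    # What aggregate_rows() buys, sketched with the same models: the LEFT
    # OUTER joins above return one flat row per (user, blog, comment)
    # combination, and aggregate_rows() de-duplicates them client-side into
    # nested objects, so the whole tree costs one SQL query instead of an
    # N+1 lookup per user and blog:
    #
    #     query = (User.select(User, Blog)
    #              .join(Blog, JOIN.LEFT_OUTER)
    #              .order_by(User.username, Blog.title)
    #              .aggregate_rows())  # still exactly one query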
def test_aggregate_blogs(self):
with self.assertQueryCount(1):
query = (Blog
.select(Blog, User, Comment)
.join(User)
.switch(Blog)
.join(Comment, JOIN.LEFT_OUTER)
.order_by(Blog.title, User.username, Comment.id)
.aggregate_rows())
results = []
for blog in query:
results.append((
blog.user.username,
blog.title,
[comment.comment for comment in blog.comments]))
self.assertEqual(results, [
('u1', 'b1', ['b1-c1', 'b1-c2']),
('u1', 'b2', ['b2-c1']),
('u3', 'b3', ['b3-c1', 'b3-c2']),
('u3', 'b4', []),
('u4', 'b5', ['b5-c1', 'b5-c2']),
('u4', 'b6', ['b6-c1']),
])
def test_aggregate_on_expression_join(self):
with self.assertQueryCount(1):
join_expr = (User.id == Blog.user)
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER, on=join_expr)
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u1', ['b1', 'b2']),
('u2', []),
('u3', ['b3', 'b4']),
('u4', ['b5', 'b6']),
])
def test_aggregate_with_join_model_aliases(self):
expected = [
('u1', ['b1', 'b2']),
('u2', []),
('u3', ['b3', 'b4']),
('u4', ['b5', 'b6']),
]
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(
Blog,
JOIN.LEFT_OUTER,
on=(User.id == Blog.user).alias('blogz'))
.order_by(User.id, Blog.title)
.aggregate_rows())
results = [
(user.username, [blog.title for blog in user.blogz])
for user in query]
self.assertEqual(results, expected)
BlogAlias = Blog.alias()
with self.assertQueryCount(1):
query = (User
.select(User, BlogAlias)
.join(
BlogAlias,
JOIN.LEFT_OUTER,
on=(User.id == BlogAlias.user).alias('blogz'))
.order_by(User.id, BlogAlias.title)
.aggregate_rows())
results = [
(user.username, [blog.title for blog in user.blogz])
for user in query]
self.assertEqual(results, expected)
def test_aggregate_unselected_join_backref(self):
cat_1 = Category.create(name='category 1')
cat_2 = Category.create(name='category 2')
with test_db.transaction():
for i, user in enumerate(User.select().order_by(User.username)):
if i % 2 == 0:
category = cat_2
else:
category = cat_1
UserCategory.create(user=user, category=category)
with self.assertQueryCount(1):
# The join on UserCategory is a backref join (since the FK is on
# UserCategory). Additionally, UserCategory/Category are not
# selected and are only used for filtering the result set.
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.switch(User)
.join(UserCategory)
.join(Category)
.where(Category.name == cat_1.name)
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u2', []),
('u4', ['b5', 'b6']),
])
def test_aggregate_manytomany(self):
p1 = Post.create(title='p1')
p2 = Post.create(title='p2')
Post.create(title='p3')
p4 = Post.create(title='p4')
t1 = Tag.create(tag='t1')
t2 = Tag.create(tag='t2')
t3 = Tag.create(tag='t3')
TagPostThroughAlt.create(tag=t1, post=p1)
TagPostThroughAlt.create(tag=t2, post=p1)
TagPostThroughAlt.create(tag=t2, post=p2)
TagPostThroughAlt.create(tag=t3, post=p2)
TagPostThroughAlt.create(tag=t1, post=p4)
TagPostThroughAlt.create(tag=t2, post=p4)
TagPostThroughAlt.create(tag=t3, post=p4)
with self.assertQueryCount(1):
query = (Post
.select(Post, TagPostThroughAlt, Tag)
.join(TagPostThroughAlt, JOIN.LEFT_OUTER)
.join(Tag, JOIN.LEFT_OUTER)
.order_by(Post.id, TagPostThroughAlt.post, Tag.id)
.aggregate_rows())
results = []
for post in query:
post_data = [post.title]
for tpt in post.tags_alt:
post_data.append(tpt.tag.tag)
results.append(post_data)
self.assertEqual(results, [
['p1', 't1', 't2'],
['p2', 't2', 't3'],
['p3'],
['p4', 't1', 't2', 't3'],
])
def test_aggregate_parent_child(self):
with self.assertQueryCount(1):
query = (Parent
.select(Parent, Child, Orphan, ChildPet, OrphanPet)
.join(Child, JOIN.LEFT_OUTER)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Parent)
.join(Orphan, JOIN.LEFT_OUTER)
.join(OrphanPet, JOIN.LEFT_OUTER)
.order_by(
Parent.data,
Child.data,
ChildPet.id,
Orphan.data,
OrphanPet.id)
.aggregate_rows())
results = []
for parent in query:
results.append((
parent.data,
[(child.data, [pet.data for pet in child.childpet_set])
for child in parent.child_set],
[(orphan.data, [pet.data for pet in orphan.orphanpet_set])
for orphan in parent.orphan_set]
))
        # Without the `.aggregate_rows()` call, this would take 289 queries!
self.assertEqual(results, [
('p1',
[('c1', ['c1-p1', 'c1-p2']),
('c2', ['c2-p1']),
('c3', ['c3-p1']),
('c4', [])],
[('o1', ['o1-p1', 'o1-p2']),
('o2', ['o2-p1']),
('o3', ['o3-p1']),
('o4', [])],
),
('p2', [], []),
('p3',
[('c6', []),
('c7', ['c7-p1'])],
[('o6', ['o6-p1', 'o6-p2']),
('o7', ['o7-p1'])],)
])
def test_aggregate_with_unselected_joins(self):
with self.assertQueryCount(1):
query = (Child
.select(Child, ChildPet, Parent)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Child)
.join(Parent)
.join(Orphan)
.join(OrphanPet)
.where(OrphanPet.data == 'o6-p2')
.order_by(Child.data, ChildPet.data)
.aggregate_rows())
results = []
for child in query:
results.append((
child.data,
child.parent.data,
[child_pet.data for child_pet in child.childpet_set]))
self.assertEqual(results, [
('c6', 'p3', []),
('c7', 'p3', ['c7-p1']),
])
with self.assertQueryCount(1):
query = (Parent
.select(Parent, Child, ChildPet)
.join(Child, JOIN.LEFT_OUTER)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Parent)
.join(Orphan)
.join(OrphanPet)
.where(OrphanPet.data == 'o6-p2')
.order_by(Parent.data, Child.data, ChildPet.data)
.aggregate_rows())
results = []
for parent in query:
results.append((
parent.data,
[(child.data, [pet.data for pet in child.childpet_set])
for child in parent.child_set]))
self.assertEqual(results, [('p3', [
('c6', []),
('c7', ['c7-p1']),
])])
def test_aggregate_rows_ordering(self):
# Refs github #519.
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username.desc(), Blog.title.desc())
.aggregate_rows())
accum = []
for user in query:
accum.append((
user.username,
[blog.title for blog in user.blog_set]))
if sys.version_info[:2] > (2, 6):
self.assertEqual(accum, [
('u4', ['b6', 'b5']),
('u3', ['b4', 'b3']),
('u2', []),
('u1', ['b2', 'b1']),
])
def test_aggregate_rows_self_join(self):
self._build_category_tree()
Child = Category.alias()
# Same query, but this time use an `alias` on the join expr.
with self.assertQueryCount(1):
query = (Category
.select(Category, Child)
.join(
Child,
JOIN.LEFT_OUTER,
on=(Category.id == Child.parent).alias('childrenx'))
.order_by(Category.id, Child.id)
.aggregate_rows())
names_and_children = [
[parent.name, [child.name for child in parent.childrenx]]
for parent in query]
self.assertEqual(names_and_children, self.category_tree)
def test_multiple_fks(self):
names = ['charlie', 'huey', 'zaizee']
charlie, huey, zaizee = [
User.create(username=username) for username in names]
Relationship.create(from_user=charlie, to_user=huey)
Relationship.create(from_user=charlie, to_user=zaizee)
Relationship.create(from_user=huey, to_user=charlie)
Relationship.create(from_user=zaizee, to_user=charlie)
UserAlias = User.alias()
with self.assertQueryCount(1):
query = (User
.select(User, Relationship, UserAlias)
.join(
Relationship,
JOIN.LEFT_OUTER,
on=Relationship.from_user)
.join(
UserAlias,
on=(
Relationship.to_user == UserAlias.id
).alias('to_user'))
.order_by(User.username, Relationship.id)
.where(User.username == 'charlie')
.aggregate_rows())
results = [row for row in query]
self.assertEqual(len(results), 1)
user = results[0]
self.assertEqual(user.username, 'charlie')
self.assertEqual(len(user.relationships), 2)
rh, rz = user.relationships
self.assertEqual(rh.to_user.username, 'huey')
self.assertEqual(rz.to_user.username, 'zaizee')
FromUser = User.alias()
ToUser = User.alias()
from_join = (Relationship.from_user == FromUser.id)
to_join = (Relationship.to_user == ToUser.id)
with self.assertQueryCount(1):
query = (Relationship
.select(Relationship, FromUser, ToUser)
.join(FromUser, on=from_join.alias('from_user'))
.switch(Relationship)
.join(ToUser, on=to_join.alias('to_user'))
.order_by(Relationship.id)
.aggregate_rows())
results = [
(relationship.from_user.username,
relationship.to_user.username)
for relationship in query]
self.assertEqual(results, [
('charlie', 'huey'),
('charlie', 'zaizee'),
('huey', 'charlie'),
('zaizee', 'charlie'),
])
def test_multiple_fks_multi_depth(self):
names = ['charlie', 'huey', 'zaizee']
charlie, huey, zaizee = [
User.create(username=username) for username in names]
Relationship.create(from_user=charlie, to_user=huey)
Relationship.create(from_user=charlie, to_user=zaizee)
Relationship.create(from_user=huey, to_user=charlie)
Relationship.create(from_user=zaizee, to_user=charlie)
human = Category.create(name='human')
kitty = Category.create(name='kitty')
UserCategory.create(user=charlie, category=human)
UserCategory.create(user=huey, category=kitty)
UserCategory.create(user=zaizee, category=kitty)
FromUser = User.alias()
ToUser = User.alias()
from_join = (Relationship.from_user == FromUser.id)
to_join = (Relationship.to_user == ToUser.id)
FromUserCategory = UserCategory.alias()
ToUserCategory = UserCategory.alias()
from_uc_join = (FromUser.id == FromUserCategory.user)
to_uc_join = (ToUser.id == ToUserCategory.user)
FromCategory = Category.alias()
ToCategory = Category.alias()
from_c_join = (FromUserCategory.category == FromCategory.id)
to_c_join = (ToUserCategory.category == ToCategory.id)
with self.assertQueryCount(1):
query = (Relationship
.select(
Relationship,
FromUser,
ToUser,
FromUserCategory,
ToUserCategory,
FromCategory,
ToCategory)
.join(FromUser, on=from_join.alias('from_user'))
.join(FromUserCategory, on=from_uc_join.alias('fuc'))
.join(FromCategory, on=from_c_join.alias('category'))
.switch(Relationship)
.join(ToUser, on=to_join.alias('to_user'))
.join(ToUserCategory, on=to_uc_join.alias('tuc'))
.join(ToCategory, on=to_c_join.alias('category'))
.order_by(Relationship.id)
.aggregate_rows())
results = []
for obj in query:
from_user = obj.from_user
to_user = obj.to_user
results.append((
from_user.username,
from_user.fuc[0].category.name,
to_user.username,
to_user.tuc[0].category.name))
self.assertEqual(results, [
('charlie', 'human', 'huey', 'kitty'),
('charlie', 'human', 'zaizee', 'kitty'),
('huey', 'kitty', 'charlie', 'human'),
('zaizee', 'kitty', 'charlie', 'human'),
])
class TestAggregateRowsRegression(ModelTestCase):
requires = [
User,
Blog,
Comment,
Category,
CommentCategory,
BlogData]
def setUp(self):
super(TestAggregateRowsRegression, self).setUp()
u = User.create(username='u1')
b = Blog.create(title='b1', user=u)
BlogData.create(blog=b)
c1 = Comment.create(blog=b, comment='c1')
c2 = Comment.create(blog=b, comment='c2')
cat1 = Category.create(name='cat1')
cat2 = Category.create(name='cat2')
CommentCategory.create(category=cat1, comment=c1, sort_order=1)
CommentCategory.create(category=cat2, comment=c1, sort_order=1)
CommentCategory.create(category=cat1, comment=c2, sort_order=2)
CommentCategory.create(category=cat2, comment=c2, sort_order=2)
def test_aggregate_rows_regression(self):
comments = (Comment
.select(
Comment,
CommentCategory,
Category,
Blog,
BlogData)
.join(CommentCategory, JOIN.LEFT_OUTER)
.join(Category, JOIN.LEFT_OUTER)
.switch(Comment)
.join(Blog)
.join(BlogData, JOIN.LEFT_OUTER)
.where(Category.id == 1)
.order_by(CommentCategory.sort_order))
with self.assertQueryCount(1):
c_list = list(comments.aggregate_rows())
def test_regression_506(self):
user = User.create(username='u2')
for i in range(2):
Blog.create(title='u2-%s' % i, user=user)
users = (User
.select()
.order_by(User.id.desc())
.paginate(1, 5)
.alias('users'))
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog)
.join(users, on=(User.id == users.c.id))
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u1', ['b1']),
('u2', ['u2-0', 'u2-1']),
])
class TestPrefetchNonPKFK(ModelTestCase):
requires = [Package, PackageItem]
data = {
'101': ['a', 'b'],
'102': ['c'],
'103': [],
'104': ['a', 'b', 'c', 'd', 'e'],
}
def setUp(self):
super(TestPrefetchNonPKFK, self).setUp()
for barcode, titles in self.data.items():
Package.create(barcode=barcode)
for title in titles:
PackageItem.create(package=barcode, title=title)
def test_prefetch(self):
packages = Package.select().order_by(Package.barcode)
items = PackageItem.select().order_by(PackageItem.id)
query = prefetch(packages, items)
for package, (barcode, titles) in zip(query, sorted(self.data.items())):
self.assertEqual(package.barcode, barcode)
self.assertEqual(
[item.title for item in package.items_prefetch],
titles)
packages = (Package
.select()
.where(Package.barcode << ['101', '104'])
.order_by(Package.id))
items = items.where(PackageItem.title << ['a', 'c', 'e'])
query = prefetch(packages, items)
accum = {}
for package in query:
accum[package.barcode] = [
item.title for item in package.items_prefetch]
self.assertEqual(accum, {
'101': ['a'],
            '104': ['a', 'c', 'e'],
})
| mit |
srsman/odoo | addons/document/report/document_report.py | 341 | 4224 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
class report_document_user(osv.osv):
_name = "report.document.user"
_description = "Files details by Users"
_auto = False
_columns = {
'name': fields.char('Year', size=64,readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
'user_id': fields.many2one('res.users', 'Owner', readonly=True),
'user': fields.related('user_id', 'name', type='char', size=64, readonly=True),
'directory': fields.char('Directory',size=64,readonly=True),
'datas_fname': fields.char('File Name',size=64,readonly=True),
'create_date': fields.datetime('Date Created', readonly=True),
'change_date': fields.datetime('Modified Date', readonly=True),
'file_size': fields.integer('File Size', readonly=True),
'nbr':fields.integer('# of Files', readonly=True),
'type':fields.char('Directory Type',size=64,readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_document_user')
cr.execute("""
CREATE OR REPLACE VIEW report_document_user as (
SELECT
min(f.id) as id,
to_char(f.create_date, 'YYYY') as name,
to_char(f.create_date, 'MM') as month,
f.user_id as user_id,
count(*) as nbr,
d.name as directory,
f.datas_fname as datas_fname,
f.create_date as create_date,
f.file_size as file_size,
min(d.type) as type,
f.write_date as change_date
FROM ir_attachment f
left join document_directory d on (f.parent_id=d.id and d.name<>'')
group by to_char(f.create_date, 'YYYY'), to_char(f.create_date, 'MM'),d.name,f.parent_id,d.type,f.create_date,f.user_id,f.file_size,d.type,f.write_date,f.datas_fname
)
""")
class report_document_file(osv.osv):
_name = "report.document.file"
_description = "Files details by Directory"
_auto = False
_columns = {
'file_size': fields.integer('File Size', readonly=True),
'nbr':fields.integer('# of Files', readonly=True),
'month': fields.char('Month', size=24, readonly=True),
}
_order = "month"
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_document_file')
cr.execute("""
create or replace view report_document_file as (
select min(f.id) as id,
count(*) as nbr,
min(EXTRACT(MONTH FROM f.create_date)||'-'||to_char(f.create_date,'Month')) as month,
sum(f.file_size) as file_size
from ir_attachment f
group by EXTRACT(MONTH FROM f.create_date)
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
holmes-app/holmes-alf | setup.py | 1 | 1830 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of holmesalf.
# https://github.com/holmes-app/holmes-alf
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2014 Pablo Aguiar scorphus@gmail.com
from setuptools import setup, find_packages
from holmesalf import __version__
tests_require = [
'mock',
'nose',
'coverage',
'yanc',
'preggy',
'tox',
'ipdb',
'coveralls',
'sphinx',
]
setup(
name='holmesalf',
version=__version__,
    description='holmes-alf is a wrapper for OAuth 2 synchronous (based on alf) and asynchronous (based on tornado-alf) clients that can be used in holmes.',
    long_description='''
holmes-alf is a wrapper for OAuth 2 synchronous (based on alf) and asynchronous (based on tornado-alf) clients that can be used in holmes.
''',
keywords='alf client client_credentials holmes oauth requests tornado',
author='Pablo Aguiar',
author_email='scorphus@gmail.com',
url='https://github.com/holmes-app/holmes-alf',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Unix',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
include_package_data=False,
install_requires=[
'alf>=0.4.1',
'tornado-alf>=0.4.1'
],
extras_require={
'tests': tests_require,
},
entry_points={
'console_scripts': [
# add cli scripts here in this form:
# 'holmesalf=holmesalf.cli:main',
],
},
)
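# Typical developer workflow for a layout like this (commands are
# conventional, not documented by the project): install in editable mode with
# the test extras, then run the suite with nose:
#
#     pip install -e ".[tests]"
#     nosetests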
| mit |
Tomtomgo/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/activeworkitems_unittest.py | 143 | 2901 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from datetime import datetime
from google.appengine.ext import testbed
from model.activeworkitems import ActiveWorkItems
class ActiveWorkItemsTest(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
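    # The activate/init-stub/deactivate cycle above is the standard pattern
    # for isolating App Engine service calls in unit tests; other services
    # are stubbed the same way, e.g. self.testbed.init_taskqueue_stub()
    # (stub names follow the SDK's testbed API of that era).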
def test_basic(self):
items = ActiveWorkItems.lookup_by_queue("test-queue")
queued_items = [1, 2]
# db.Model only stores dates to second resolution, so we use an explicit datetime without milliseconds.
time = datetime(2011, 4, 18, 18, 50, 44)
self.assertEqual(items.next_item(queued_items, time), 1)
self.assertEqual(items.next_item([1], time), None)
self.assertEqual(items.next_item([], time), None)
self.assertEqual(items.time_for_item(1), time)
self.assertEqual(items.time_for_item(2), None)
items.expire_item(1)
# expire_item uses a transaction so it doesn't take effect on the current object.
self.assertEqual(items.time_for_item(1), time)
# If we look up the saved object, we see it's been updated.
items = ActiveWorkItems.lookup_by_queue("test-queue")
self.assertEqual(items.time_for_item(1), None)
| bsd-3-clause |
polyaxon/polyaxon | platform/polycommon/polycommon/test_cases/fixtures/services.py | 1 | 2919 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import UUID
from polyaxon.polyflow import V1RunKind
def get_fxt_service():
return {
"version": 1.1,
"kind": "operation",
"name": "foo",
"description": "a description",
"tags": ["tag1", "tag2"],
"trigger": "all_succeeded",
"component": {
"name": "service-template",
"tags": ["backend", "lab"],
"run": {
"kind": V1RunKind.SERVICE,
"container": {"image": "jupyter"},
"init": [{"connection": "foo", "git": {"revision": "dev"}}],
"ports": [5555],
},
},
}
def get_fxt_service_with_inputs():
return {
"version": 1.1,
"kind": "operation",
"name": "foo",
"description": "a description",
"params": {"image": {"value": "foo/bar"}},
"component": {
"name": "service-template",
"inputs": [{"name": "image", "type": "str"}],
"tags": ["backend", "lab"],
"run": {
"kind": V1RunKind.SERVICE,
"container": {"image": "{{ image }}"},
"init": [{"connection": "foo", "git": {"revision": "dev"}}],
"ports": [5555],
},
},
}
def get_fxt_service_with_upstream_runs(run_uuid: UUID):
return {
"version": 1.1,
"kind": "operation",
"name": "foo",
"description": "a description",
"params": {
"image": {
"value": "outputs.image-out",
"ref": "runs.{}".format(run_uuid.hex),
}
},
"component": {
"name": "service-template",
"inputs": [{"name": "image", "type": "str"}],
"tags": ["backend", "lab"],
"run": {
"kind": V1RunKind.SERVICE,
"container": {"image": "{{ image }}"},
"init": [{"connection": "foo", "git": {"revision": "dev"}}],
"ports": [5555],
},
},
}
def get_fxt_job_with_hub_ref():
return {
"version": 1.1,
"kind": "operation",
"name": "foo",
"description": "a description",
"params": {"image": {"value": "foo/bar"}},
"hubRef": "notebook",
}
| apache-2.0 |
pombredanne/Bicho | bicho/post_processing/issues_log_lp.py | 2 | 8400 | # -*- coding: utf-8 -*-
# Copyright (C) 2007-2013 GSyC/LibreSoft, Universidad Rey Juan Carlos
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Luis Cañas Díaz <lcanas@bitergia.com>
#
#
from bicho.post_processing import IssueLogger
from issues_log import *
__sql_drop__ = 'DROP TABLE IF EXISTS issues_log_launchpad;'
__sql_table__ = 'CREATE TABLE IF NOT EXISTS issues_log_launchpad (\
id INTEGER NOT NULL AUTO_INCREMENT, \
tracker_id INTEGER NOT NULL, \
issue_id INTEGER NOT NULL, \
change_id INTEGER NOT NULL, \
issue VARCHAR(255) NOT NULL, \
type VARCHAR(32) NULL, \
summary VARCHAR(255) NOT NULL, \
description TEXT NOT NULL, \
status VARCHAR(32) NOT NULL, \
resolution VARCHAR(32) NULL, \
priority VARCHAR(32) NULL, \
submitted_by INTEGER UNSIGNED NOT NULL, \
date DATETIME NOT NULL, \
assigned_to INTEGER UNSIGNED NOT NULL, \
issue_key VARCHAR(32) default NULL, \
link VARCHAR(100) default NULL, \
title VARCHAR(100) default NULL, \
environment VARCHAR(35) default NULL, \
security VARCHAR(35) default NULL, \
updated DATETIME default NULL, \
version VARCHAR(35) default NULL, \
component VARCHAR(35) default NULL, \
votes INTEGER UNSIGNED, \
project VARCHAR(35) default NULL, \
project_id INTEGER UNSIGNED, \
project_key VARCHAR(35) default NULL, \
affects VARCHAR(100) default NULL, \
PRIMARY KEY(id), \
UNIQUE KEY(id), \
INDEX issues_submitted_idx(submitted_by), \
INDEX issues_assigned_idx(assigned_to), \
INDEX issues_tracker_idx(tracker_id), \
FOREIGN KEY(submitted_by) \
REFERENCES people(id) \
ON DELETE SET NULL \
ON UPDATE CASCADE, \
FOREIGN KEY(assigned_to) \
REFERENCES people(id) \
ON DELETE SET NULL \
ON UPDATE CASCADE, \
FOREIGN KEY(tracker_id) \
REFERENCES trackers(id) \
ON DELETE CASCADE \
ON UPDATE CASCADE \
) ENGINE=MYISAM;'
#
# these dictionaries contain the text that appears in the HTML history
# table for bugzilla and its equivalent in the database
#
"""
The fields with the comment "# project" contain the name of the project
and a colon before the tag.
Example: "oslo: status"
"""
__launchpad_issues_links__ = { # the ones seen in Maria
"affects":"affects",
#"attachment added":"",
#"attachment removed":"",
"assignee": "assigned_to", # project:
#"branch linked":"",
#"branch unlinked":"",
#"bug": "",
#"bug task added":"",
#"bug watch added":"",
"description": "description",
"importance": "priority", # project:
#"marked as duplicate":"",
"milestone": "version", # project:
#"nominated for series":"",
#"removed duplicate marker":"",
#"removed subscriber Cafuego":"",
"security vulnerability": "security",
"status": "status", # project: status
#"statusexplanation":"", #project:
"summary": "summary",
#"tags":"",
#"visibility":""
}
class DBLaunchpadIssuesLog(DBIssuesLog):
"""
"""
__storm_table__ = 'issues_log_launchpad'
affects = Unicode()
version = Unicode()
security = Unicode()
class LaunchpadIssuesLog(IssuesLog):
def __init__(self):
IssuesLog.__init__(self)
self._project_name = None
def _assign_values(self, db_ilog, field, value):
"""
Assign the value to the attribute field of the db_ilog object
"""
# first we need to confirm the change belongs to the current project
# and extract the content
if not self._project_name:
self._project_name = self._get_project_name(db_ilog.tracker_id)
field = self._filter_field(field, self._project_name)
if field in __launchpad_issues_links__:
table_field = __launchpad_issues_links__[field]
# to be done
if table_field == 'summary':
db_ilog.summary = value
elif table_field == 'priority':
db_ilog.priority = value
#elif table_field == 'type':
# db_ilog.type = value
elif table_field == 'assigned_to':
uid = self._get_user_id(value)
#db_ilog.assigned_to = self._get_people_id(uid,
# self._get_tracker_id(db_ilog.issue_id))
db_ilog.assigned_to = self._get_people_id(uid)
elif table_field == 'status':
db_ilog.status = value
elif table_field == 'affects':
db_ilog.affects = value
elif table_field == 'description':
db_ilog.description = value
elif table_field == 'security':
db_ilog.security = value
elif table_field == 'version':
db_ilog.version = value
return db_ilog
def _copy_issue_ext(self, aux, db_ilog):
"""
This method copies extended values of DBLaunchpadIssuesLog object
"""
aux.affects = db_ilog.affects
aux.security = db_ilog.security
aux.version = db_ilog.version
return aux
def _filter_field(self, text, project_name):
"""
Returns the field without the project name ("project: status")
"""
if text.find(':') < 0:
return text
elif text.find(project_name) < 0:
# if the text contains another project we skip it
return None
else:
offset = text.find(": ") + 2
return text[offset:]
def _get_changes(self, issue_id):
#aux = self.store.execute("SELECT id, field, new_value, changed_by, \
#changed_on FROM changes \
#WHERE (changes.issue_id=%s AND field NOT LIKE '%%:%%') \
#OR (changes.issue_id=%s AND field LIKE '%%%s:%%')" %
#(issue_id, issue_id, self._project_name))
aux = self.store.execute("SELECT id, field, new_value, changed_by, \
changed_on FROM changes \
WHERE changes.issue_id=%s" %
(issue_id))
return aux
def _get_dbissues_object(self, issue_name, tracker_id):
return DBLaunchpadIssuesLog(issue_name, tracker_id)
def _get_sql_create(self):
return __sql_table__
def _get_sql_drop(self):
return __sql_drop__
def _get_user_id(self, text):
if text == 'None':
return text
else:
a = text.find('(') + 1
b = text.find(')')
return text[a:b]
def _get_project_name(self, tracker_id):
"""
Returns project name based on tracker url
"""
result = self.store.find(DBTracker.url,
DBTracker.id == tracker_id).one()
offset = result.rfind('/') + 1
project_name = result[offset:]
return project_name
def _print_final_msg(self):
printout("Table issues_log_launchpad populated")
IssueLogger.register_logger("lp", LaunchpadIssuesLog)
| gpl-2.0 |
aurusov/rdo_studio | thirdparty/scintilla/qt/ScintillaEdit/WidgetGen.py | 6 | 8589 | #!/usr/bin/env python
# WidgetGen.py - regenerate the ScintillaWidgetCpp.cpp and ScintillaWidgetCpp.h files
# Check that API includes all gtkscintilla2 functions
import sys
import os
import getopt
scintillaDirectory = "../.."
scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include")
sys.path.append(scintillaIncludeDirectory)
import Face
def Contains(s,sub):
return s.find(sub) != -1
def underscoreName(s):
# Name conversion fixes to match gtkscintilla2
irregular = ['WS', 'EOL', 'AutoC', 'KeyWords', 'BackSpace', 'UnIndents', 'RE', 'RGBA']
for word in irregular:
replacement = word[0] + word[1:].lower()
s = s.replace(word, replacement)
out = ""
for c in s:
if c.isupper():
if out:
out += "_"
out += c.lower()
else:
out += c
return out
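# Rough sketch of the conversion above (example inputs are assumed):
#   underscoreName("GetLineCount") -> "get_line_count"
#   underscoreName("AutoCShow")    -> "autoc_show"  # 'AutoC' fix-up applied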
def normalisedName(s, options, role=None):
if options["qtStyle"]:
if role == "get":
s = s.replace("Get", "")
return s[0].lower() + s[1:]
else:
return underscoreName(s)
typeAliases = {
"position": "int",
"colour": "int",
"keymod": "int",
"string": "const char *",
"stringresult": "const char *",
"cells": "const char *",
}
def cppAlias(s):
if s in typeAliases:
return typeAliases[s]
else:
return s
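# For example (illustrative): cppAlias("position") returns "int", while a
# type that is not listed in typeAliases is returned unchanged.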
understoodTypes = ["", "void", "int", "bool", "position",
"colour", "keymod", "string", "stringresult", "cells"]
def checkTypes(name, v):
understandAllTypes = True
if v["ReturnType"] not in understoodTypes:
#~ print("Do not understand", v["ReturnType"], "for", name)
understandAllTypes = False
if v["Param1Type"] not in understoodTypes:
#~ print("Do not understand", v["Param1Type"], "for", name)
understandAllTypes = False
if v["Param2Type"] not in understoodTypes:
#~ print("Do not understand", v["Param2Type"], "for", name)
understandAllTypes = False
return understandAllTypes
def arguments(v, stringResult, options):
ret = ""
p1Type = cppAlias(v["Param1Type"])
if p1Type:
ret = ret + p1Type + " " + normalisedName(v["Param1Name"], options)
p2Type = cppAlias(v["Param2Type"])
if p2Type and not stringResult:
if p1Type:
ret = ret + ", "
ret = ret + p2Type + " " + normalisedName(v["Param2Name"], options)
return ret
def printPyFile(f,out, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["val"]:
out.write(name + "=" + v["Value"] + "\n")
if feat in ["evt"]:
out.write("SCN_" + name.upper() + "=" + v["Value"] + "\n")
def printHFile(f,out, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
returnType = cppAlias(v["ReturnType"])
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
out.write("\t" + returnType + " " + normalisedName(name, options, feat) + "(")
out.write(arguments(v, stringResult, options))
out.write(")" + constDeclarator + ";\n")
def methodNames(f, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
yield normalisedName(name, options)
def printCPPFile(f,out, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
featureDefineName = "SCI_" + name.upper()
returnType = cppAlias(v["ReturnType"])
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
returnStatement = ""
if returnType != "void":
returnStatement = "return "
out.write(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(")
out.write(arguments(v, stringResult, options))
out.write(")" + constDeclarator + " {\n")
if stringResult:
out.write(" " + returnStatement + "TextReturner(" + featureDefineName + ", ")
if "*" in cppAlias(v["Param1Type"]):
out.write("(uptr_t)")
if v["Param1Name"]:
out.write(normalisedName(v["Param1Name"], options))
else:
out.write("0")
out.write(");\n")
else:
out.write(" " + returnStatement + "send(" + featureDefineName + ", ")
if "*" in cppAlias(v["Param1Type"]):
out.write("(uptr_t)")
if v["Param1Name"]:
out.write(normalisedName(v["Param1Name"], options))
else:
out.write("0")
out.write(", ")
if "*" in cppAlias(v["Param2Type"]):
out.write("(sptr_t)")
if v["Param2Name"]:
out.write(normalisedName(v["Param2Name"], options))
else:
out.write("0")
out.write(");\n")
out.write("}\n")
out.write("\n")
def CopyWithInsertion(input, output, genfn, definition, options):
copying = 1
for line in input.readlines():
if copying:
output.write(line)
if "/* ++Autogenerated" in line or "# ++Autogenerated" in line or "<!-- ++Autogenerated" in line:
copying = 0
genfn(definition, output, options)
# ~~ form needed as XML comments can not contain --
if "/* --Autogenerated" in line or "# --Autogenerated" in line or "<!-- ~~Autogenerated" in line:
copying = 1
output.write(line)
def contents(filename):
with open(filename, "U") as f:
t = f.read()
return t
def Generate(templateFile, destinationFile, genfn, definition, options):
inText = contents(templateFile)
try:
currentText = contents(destinationFile)
except IOError:
currentText = ""
tempname = "WidgetGen.tmp"
with open(tempname, "w") as out:
with open(templateFile, "U") as hfile:
CopyWithInsertion(hfile, out, genfn, definition, options)
outText = contents(tempname)
if currentText == outText:
os.unlink(tempname)
else:
try:
os.unlink(destinationFile)
except OSError:
# Will see failure if file does not yet exist
pass
os.rename(tempname, destinationFile)
def gtkNames():
# The full path on my machine: should be altered for anyone else
p = "C:/Users/Neil/Downloads/wingide-source-4.0.1-1/wingide-source-4.0.1-1/external/gtkscintilla2/gtkscintilla.c"
with open(p) as f:
for l in f.readlines():
if "gtk_scintilla_" in l:
name = l.split()[1][14:]
if '(' in name:
name = name.split('(')[0]
yield name
def usage():
print("WidgetGen.py [-c|--clean][-h|--help][-u|--underscore-names]")
print("")
print("Generate full APIs for ScintillaEdit class and ScintillaConstants.py.")
print("")
print("options:")
print("")
print("-c --clean remove all generated code from files")
print("-h --help display this text")
print("-u --underscore-names use method_names consistent with GTK+ standards")
def readInterface(cleanGenerated):
f = Face.Face()
if not cleanGenerated:
f.ReadFromFile("../../include/Scintilla.iface")
return f
def main(argv):
# Using local path for gtkscintilla2 so don't default to checking
checkGTK = False
cleanGenerated = False
qtStyleInterface = True
# The --gtk-check option checks for full coverage of the gtkscintilla2 API but
# depends on a particular directory so is not mentioned in --help.
opts, args = getopt.getopt(argv, "hcgu", ["help", "clean", "gtk-check", "underscore-names"])
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-c", "--clean"):
cleanGenerated = True
elif opt in ("-g", "--gtk-check"):
checkGTK = True
elif opt in ("-u", "--underscore-names"):
qtStyleInterface = False
options = {"qtStyle": qtStyleInterface}
f = readInterface(cleanGenerated)
try:
Generate("ScintillaEdit.cpp.template", "ScintillaEdit.cpp", printCPPFile, f, options)
Generate("ScintillaEdit.h.template", "ScintillaEdit.h", printHFile, f, options)
Generate("../ScintillaEditPy/ScintillaConstants.py.template",
"../ScintillaEditPy/ScintillaConstants.py",
printPyFile, f, options)
if checkGTK:
names = set(methodNames(f))
#~ print("\n".join(names))
namesGtk = set(gtkNames())
for name in namesGtk:
if name not in names:
print(name, "not found in Qt version")
for name in names:
if name not in namesGtk:
print(name, "not found in GTK+ version")
except:
raise
if cleanGenerated:
for file in ["ScintillaEdit.cpp", "ScintillaEdit.h", "../ScintillaEditPy/ScintillaConstants.py"]:
try:
os.remove(file)
except OSError:
pass
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
Timurdov/bionic | bionic/Lib/site-packages/django/contrib/messages/api.py | 512 | 3202 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app.
"""
if not isinstance(request, HttpRequest):
raise TypeError("add_message() argument must be an HttpRequest object, "
"not '%s'." % request.__class__.__name__)
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags)
if not fail_silently:
raise MessageFailure('You cannot add messages without installing '
'django.contrib.messages.middleware.MessageMiddleware')
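# Hedged usage sketch (message text is assumed): from a view,
#   add_message(request, constants.INFO, 'Profile updated.')
# records an info-level message; the info() shortcut below does the same.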
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
an empty list.
"""
if hasattr(request, '_messages'):
return request._messages
else:
return []
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
if hasattr(request, '_messages'):
storage = request._messages
else:
storage = default_storage(request)
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
| apache-2.0 |
mrshelly/openerp71313 | openerp/addons/mrp_operations/report/mrp_code_barcode.py | 53 | 1538 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import pooler
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
atopuzov/nitro-python | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbmonitor_service_binding.py | 3 | 8118 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbmonitor_service_binding(base_resource) :
""" Binding class showing the service that can be bound to lbmonitor.
"""
def __init__(self) :
self._monitorname = ""
self._servicename = ""
self._dup_state = ""
self._dup_weight = 0
self._servicegroupname = ""
self._state = ""
self._weight = 0
@property
def servicegroupname(self) :
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def dup_state(self) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
return self._dup_state
except Exception as e:
raise e
@dup_state.setter
def dup_state(self, dup_state) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
self._dup_state = dup_state
except Exception as e:
raise e
@property
def servicename(self) :
try :
return self._servicename
except Exception as e:
raise e
@servicename.setter
def servicename(self, servicename) :
try :
self._servicename = servicename
except Exception as e:
raise e
@property
def state(self) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
self._state = state
except Exception as e:
raise e
@property
def dup_weight(self) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
return self._dup_weight
except Exception as e:
raise e
@dup_weight.setter
def dup_weight(self, dup_weight) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
self._dup_weight = dup_weight
except Exception as e:
raise e
@property
def monitorname(self) :
ur"""Name of the monitor.<br/>Minimum length = 1.
"""
try :
return self._monitorname
except Exception as e:
raise e
@monitorname.setter
def monitorname(self, monitorname) :
ur"""Name of the monitor.<br/>Minimum length = 1
"""
try :
self._monitorname = monitorname
except Exception as e:
raise e
@property
def weight(self) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
return self._weight
except Exception as e:
raise e
@weight.setter
def weight(self, weight) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
self._weight = weight
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbmonitor_service_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbmonitor_service_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.monitorname is not None :
return str(self.monitorname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbmonitor_service_binding()
updateresource.monitorname = resource.monitorname
updateresource.servicename = resource.servicename
updateresource.dup_state = resource.dup_state
updateresource.dup_weight = resource.dup_weight
updateresource.servicegroupname = resource.servicegroupname
updateresource.state = resource.state
updateresource.weight = resource.weight
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbmonitor_service_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].monitorname = resource[i].monitorname
updateresources[i].servicename = resource[i].servicename
updateresources[i].dup_state = resource[i].dup_state
updateresources[i].dup_weight = resource[i].dup_weight
updateresources[i].servicegroupname = resource[i].servicegroupname
updateresources[i].state = resource[i].state
updateresources[i].weight = resource[i].weight
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbmonitor_service_binding()
deleteresource.monitorname = resource.monitorname
deleteresource.servicename = resource.servicename
deleteresource.servicegroupname = resource.servicegroupname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbmonitor_service_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].monitorname = resource[i].monitorname
deleteresources[i].servicename = resource[i].servicename
deleteresources[i].servicegroupname = resource[i].servicegroupname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
class Dup_state:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class lbmonitor_service_binding_response(base_response) :
def __init__(self, length=1) :
self.lbmonitor_service_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbmonitor_service_binding = [lbmonitor_service_binding() for _ in range(length)]
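# Hedged usage sketch (monitor and service names are assumed): bind a monitor
# to a service by populating the binding resource and calling add() above:
#   binding = lbmonitor_service_binding()
#   binding.monitorname = "http-mon"
#   binding.servicename = "svc-web-1"
#   lbmonitor_service_binding.add(client, binding)  # client: a nitro_service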
| apache-2.0 |
alan-unravel/bokeh | bokeh/sphinxext/collapsible_code_block.py | 43 | 3128 | """ Display code blocks in collapsible sections when outputting
to HTML.
Usage
-----
This directive takes a heading to use for the collapsible code block::
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
Options
-------
This directive is identical to the standard ``code-block`` directive
that Sphinx supplies, with the addition of one new option:
heading : string
A heading to display for the collapsible block. Clicking the heading
expands or collapses the block.
Examples
--------
The inline example code above produces the following output:
----
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
"""
from __future__ import absolute_import
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from os.path import basename
import jinja2
from sphinx.directives.code import CodeBlock
PROLOGUE_TEMPLATE = jinja2.Template(u"""
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
<div class="panel panel-default">
<div class="panel-heading" role="tab" id="heading-{{ id }}">
<h4 class="panel-title">
<a class="collapsed" data-toggle="collapse" data-parent="#accordion" href="#collapse-{{ id }}" aria-expanded="false" aria-controls="collapse-{{ id }}">
{{ heading }}
</a>
</h4>
</div>
<div id="collapse-{{ id }}" class="panel-collapse collapse" role="tabpanel" aria-labelledby="heading-{{ id }}">
<div class="panel-body">
""")
EPILOGUE_TEMPLATE = jinja2.Template(u"""
</div>
</div>
</div>
</div>
""")
class collapsible_code_block(nodes.General, nodes.Element):
pass
class CollapsibleCodeBlock(CodeBlock):
option_spec = CodeBlock.option_spec
option_spec.update(heading=unchanged)
def run(self):
env = self.state.document.settings.env
rst_source = self.state_machine.node.document['source']
rst_filename = basename(rst_source)
target_id = "%s.ccb-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
target_id = target_id.replace(".", "-")
target_node = nodes.target('', '', ids=[target_id])
node = collapsible_code_block()
node['target_id'] = target_id
node['heading'] = self.options.get('heading', "Code")
cb = CodeBlock.run(self)
node.setup_child(cb[0])
node.children.append(cb[0])
return [target_node, node]
def html_visit_collapsible_code_block(self, node):
self.body.append(
PROLOGUE_TEMPLATE.render(
id=node['target_id'],
heading=node['heading']
)
)
def html_depart_collapsible_code_block(self, node):
self.body.append(EPILOGUE_TEMPLATE.render())
def setup(app):
app.add_node(
collapsible_code_block,
html=(
html_visit_collapsible_code_block,
html_depart_collapsible_code_block
)
)
app.add_directive('collapsible-code-block', CollapsibleCodeBlock)
| bsd-3-clause |
yhj630520/dpark | setup.py | 8 | 1585 | from setuptools import setup, Extension
try:
from Cython.Distutils import build_ext
except:
use_cython = False
else:
use_cython = True
if use_cython:
ext_modules = [Extension('dpark.portable_hash', ['dpark/portable_hash.pyx'])]
cmdclass = {'build_ext': build_ext}
else:
ext_modules = [Extension('dpark.portable_hash', ['dpark/portable_hash.c'])]
cmdclass = {}
version = '0.1'
setup(name='DPark',
version=version,
description="Python clone of Spark, MapReduce like "
+"computing framework supporting iterative algorithms.",
long_description=open("README.md").read(),
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='dpark python mapreduce spark',
author='Davies Liu',
author_email='davies.liu@gmail.com',
license= 'BSD License',
packages=['dpark', 'dpark.moosefs'],
include_package_data=True,
zip_safe=False,
install_requires=[
'mesos.interface',
'pymesos',
'setuptools',
'pyzmq',
'msgpack-python',
'cython',
'lz4',
'psutil',
],
tests_require=[
'nose',
],
test_suite='nose.collector',
cmdclass = cmdclass,
ext_modules = ext_modules,
scripts = [
'tools/drun',
'tools/mrun',
'tools/executor.py',
'tools/scheduler.py',
'tools/dquery',
'examples/dgrep',
]
)
| bsd-3-clause |
tectronics/madcow | contrib/django-memebot/gruntle/memebot/forms.py | 5 | 2447 | """Memebot forms"""
from django import forms
from django.contrib.auth.models import User
from django.db.models import Q
from gruntle.memebot.models import Link
class EditProfileForm(forms.ModelForm):
"""Form for updating your user profile"""
password_opts = {'min_length': 3, 'max_length': 128, 'widget': forms.PasswordInput, 'required': False}
password1 = forms.CharField(label='New Password', **password_opts)
password2 = forms.CharField(label='Confirm Password', **password_opts)
class Meta:
model = User
fields = 'email', 'first_name', 'last_name'
def clean_password2(self):
"""Verify password matches"""
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError("Doesn't match password")
return self.cleaned_data['password2']
def save(self, *args, **kwargs):
"""Save user instance, updated with cleaned_data"""
commit = kwargs.pop('commit', True)
kwargs['commit'] = False
user = super(EditProfileForm, self).save(*args, **kwargs)
if self.cleaned_data['password2']:
user.set_password(self.cleaned_data['password2'])
if commit:
user.save()
return user
class CheckLinkForm(forms.Form):
url = forms.URLField(label='URL', min_length=11, max_length=128, required=True,
widget=forms.TextInput(attrs={'size': 128}))
def clean_url(self):
errors = []
url = self.cleaned_data.get('url', None)
self.cleaned_data['link'] = None
if url is None:
errors.append('You must enter the URL to check')
else:
normalized = Link.objects.normalize_url(url)
links = Link.objects.filter(state='published')
links = links.filter(Q(url=url) | Q(resolved_url=url) | Q(normalized=normalized)).distinct()
links = links.order_by('published')
if links.count():
self.cleaned_data['link'] = links[0]
else:
errors.append('No results found for that URL')
if errors:
raise forms.ValidationError(errors)
return self.cleaned_data['url']
class AMPMTimeForm(forms.Form):
hour = forms.IntegerField(min_value=1, max_value=12)
minute = forms.IntegerField(min_value=0, max_value=59)
phase = forms.ChoiceField(choices=[('am', 'AM'), ('pm', 'PM')])
| gpl-3.0 |
entoo/portage-src | pym/portage/tests/__init__.py | 2 | 9588 | # tests/__init__.py -- Portage Unit Test functionality
# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import sys
import time
import unittest
try:
from unittest.runner import _TextTestResult # new in python-2.7
except ImportError:
from unittest import _TextTestResult
try:
# They added the skip framework to python-2.7.
# Drop this once we drop python-2.6 support.
unittest_skip_shims = False
import unittest.SkipTest as SkipTest # new in python-2.7
except ImportError:
unittest_skip_shims = True
import portage
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage.util._argparse import ArgumentParser
def main():
suite = unittest.TestSuite()
basedir = os.path.dirname(os.path.realpath(__file__))
usage = "usage: %s [options] [tests to run]" % os.path.basename(sys.argv[0])
parser = ArgumentParser(usage=usage)
parser.add_argument("-l", "--list", help="list all tests",
action="store_true", dest="list_tests")
options, args = parser.parse_known_args(args=sys.argv)
if (os.environ.get('NOCOLOR') in ('yes', 'true') or
os.environ.get('TERM') == 'dumb' or
not sys.stdout.isatty()):
portage.output.nocolor()
if options.list_tests:
testdir = os.path.dirname(sys.argv[0])
for mydir in getTestDirs(basedir):
testsubdir = os.path.basename(mydir)
for name in getTestNames(mydir):
print("%s/%s/%s.py" % (testdir, testsubdir, name))
return os.EX_OK
if len(args) > 1:
suite.addTests(getTestFromCommandLine(args[1:], basedir))
else:
for mydir in getTestDirs(basedir):
suite.addTests(getTests(os.path.join(basedir, mydir), basedir))
result = TextTestRunner(verbosity=2).run(suite)
if not result.wasSuccessful():
return 1
return os.EX_OK
def my_import(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def getTestFromCommandLine(args, base_path):
result = []
for arg in args:
realpath = os.path.realpath(arg)
path = os.path.dirname(realpath)
f = realpath[len(path)+1:]
if not f.startswith("test") or not f.endswith(".py"):
raise Exception("Invalid argument: '%s'" % arg)
mymodule = f[:-3]
result.extend(getTestsFromFiles(path, base_path, [mymodule]))
return result
def getTestDirs(base_path):
TEST_FILE = b'__test__'
testDirs = []
# the os.walk help mentions relative paths as being quirky
# I was tired of adding dirs to the list, so now we add __test__
# to each dir we want tested.
for root, dirs, files in os.walk(base_path):
try:
root = _unicode_decode(root,
encoding=_encodings['fs'], errors='strict')
except UnicodeDecodeError:
continue
if TEST_FILE in files:
testDirs.append(root)
testDirs.sort()
return testDirs
def getTestNames(path):
files = os.listdir(path)
files = [f[:-3] for f in files if f.startswith("test") and f.endswith(".py")]
files.sort()
return files
def getTestsFromFiles(path, base_path, files):
parent_path = path[len(base_path)+1:]
parent_module = ".".join(("portage", "tests", parent_path))
parent_module = parent_module.replace('/', '.')
result = []
for mymodule in files:
# Make the trailing / a . for module importing
modname = ".".join((parent_module, mymodule))
mod = my_import(modname)
result.append(unittest.TestLoader().loadTestsFromModule(mod))
return result
def getTests(path, base_path):
"""
path is the path to a given subdir ( 'portage/' for example)
This does a simple filter on files in that dir to give us modules
to import
"""
return getTestsFromFiles(path, base_path, getTestNames(path))
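# Illustrative layout assumed by the helpers above (paths are hypothetical):
#   tests/util/__test__                 <- marks the directory as a test dir
#   tests/util/test_normalizePath.py    <- picked up by getTestNames()
# getTestNames("tests/util") would then yield ["test_normalizePath"].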
class TextTestResult(_TextTestResult):
"""
We need a subclass of unittest._TextTestResult to handle tests with TODO
This just adds an addTodo method that can be used to add tests
that are marked TODO; these can be displayed later
by the test runner.
"""
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__(stream, descriptions, verbosity)
self.todoed = []
self.portage_skipped = []
def addTodo(self, test, info):
self.todoed.append((test, info))
if self.showAll:
self.stream.writeln("TODO")
elif self.dots:
self.stream.write(".")
def addPortageSkip(self, test, info):
self.portage_skipped.append((test, info))
if self.showAll:
self.stream.writeln("SKIP")
elif self.dots:
self.stream.write(".")
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
self.printErrorList('TODO', self.todoed)
self.printErrorList('SKIP', self.portage_skipped)
class TestCase(unittest.TestCase):
"""
We need a way to mark a unit test as "ok to fail"
This way someone can add a broken test and mark it as failed
and then fix the code later. This may not be a great approach
(broken code!!??!11oneone) but it does happen at times.
"""
def __init__(self, *pargs, **kwargs):
unittest.TestCase.__init__(self, *pargs, **kwargs)
self.todo = False
self.portage_skip = None
def defaultTestResult(self):
return TextTestResult()
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except SystemExit:
raise
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
return
ok = False
try:
testMethod()
ok = True
except SkipTest as e:
result.addPortageSkip(self, "%s: SKIP: %s" %
(testMethod, str(e)))
except self.failureException:
if self.portage_skip is not None:
if self.portage_skip is True:
result.addPortageSkip(self, "%s: SKIP" % testMethod)
else:
result.addPortageSkip(self, "%s: SKIP: %s" %
(testMethod, self.portage_skip))
elif self.todo:
result.addTodo(self, "%s: TODO" % testMethod)
else:
result.addFailure(self, sys.exc_info())
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(self, sys.exc_info())
try:
self.tearDown()
except SystemExit:
raise
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
ok = False
if ok:
result.addSuccess(self)
finally:
result.stopTest(self)
def assertRaisesMsg(self, msg, excClass, callableObj, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
"""
try:
callableObj(*args, **kwargs)
except excClass:
return
else:
if hasattr(excClass, '__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise self.failureException("%s not raised: %s" % (excName, msg))
def assertExists(self, path):
"""Make sure |path| exists"""
if not os.path.exists(path):
msg = ['path is missing: %s' % (path,)]
while path != '/':
path = os.path.dirname(path)
if not path:
# If we're given something like "foo", abort once we get to "".
break
result = os.path.exists(path)
msg.append('\tos.path.exists(%s): %s' % (path, result))
if result:
msg.append('\tcontents: %r' % os.listdir(path))
break
raise self.failureException('\n'.join(msg))
def assertNotExists(self, path):
"""Make sure |path| does not exist"""
if os.path.exists(path):
raise self.failureException('path exists when it should not: %s' % path)
if unittest_skip_shims:
# Shim code for <python-2.7.
class SkipTest(Exception):
"""unittest.SkipTest shim for <python-2.7"""
def skipTest(self, reason):
raise SkipTest(reason)
setattr(TestCase, 'skipTest', skipTest)
def assertIn(self, member, container, msg=None):
self.assertTrue(member in container, msg=msg)
setattr(TestCase, 'assertIn', assertIn)
def assertNotIn(self, member, container, msg=None):
self.assertFalse(member in container, msg=msg)
setattr(TestCase, 'assertNotIn', assertNotIn)
class TextTestRunner(unittest.TextTestRunner):
"""
We subclass unittest.TextTestRunner to output SKIP for tests that fail but are skippable
"""
def _makeResult(self):
return TextTestResult(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"""
Run the given test case or test suite.
"""
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed = len(result.failures)
errored = len(result.errors)
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
test_cps = ['sys-apps/portage', 'virtual/portage']
test_versions = ['1.0', '1.0-r1', '2.3_p4', '1.0_alpha57']
test_slots = [None, '1', 'gentoo-sources-2.6.17', 'spankywashere']
test_usedeps = ['foo', '-bar', ('foo', 'bar'),
('foo', '-bar'), ('foo?', '!bar?')]
| gpl-2.0 |
elit3ge/SickRage | sickrage/helper/encoding.py | 14 | 3360 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.tv
# Git: https://github.com/SiCKRAGETV/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from chardet import detect
from os import name
def ek(function, *args, **kwargs):
"""
Encoding Kludge: Call function with arguments and unicode-encode output
:param function: Function to call
:param args: Arguments for function
:param kwargs: Arguments for function
:return: Unicode-converted function output (string, list or tuple, depending on input)
"""
if name == 'nt':
result = function(*args, **kwargs)
else:
result = function(*[ss(x) if isinstance(x, (str, unicode)) else x for x in args], **kwargs)
if isinstance(result, (list, tuple)):
return _fix_list_encoding(result)
if isinstance(result, str):
return _to_unicode(result)
return result
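# Hedged usage sketch (the path is an assumed example): on POSIX systems,
#   ek(os.listdir, u'/media/tv')
# byte-encodes the unicode path via ss() before calling os.listdir; in either
# case each returned filename is decoded back to unicode.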
def ss(var):
"""
Converts string to Unicode, fallback encoding is forced UTF-8
:param var: String to convert
:return: Converted string
"""
var = _to_unicode(var)
try:
var = var.encode(sickbeard.SYS_ENCODING)
except Exception:
try:
var = var.encode('utf-8')
except Exception:
try:
var = var.encode(sickbeard.SYS_ENCODING, 'replace')
except Exception:
var = var.encode('utf-8', 'ignore')
return var
def _fix_list_encoding(var):
"""
Converts each item in a list to Unicode
:param var: List or tuple to convert to Unicode
:return: Unicode converted input
"""
if isinstance(var, (list, tuple)):
return filter(lambda x: x is not None, map(_to_unicode, var))
return var
def _to_unicode(var):
"""
Converts string to Unicode, using in order: UTF-8, Latin-1, System encoding or finally what chardet wants
:param var: String to convert
:return: Converted string as unicode, fallback is System encoding
"""
if isinstance(var, str):
try:
var = unicode(var)
except Exception:
try:
var = unicode(var, 'utf-8')
except Exception:
try:
var = unicode(var, 'latin-1')
except Exception:
try:
var = unicode(var, sickbeard.SYS_ENCODING)
except Exception:
try:
# Chardet can be wrong, so try it last
var = unicode(var, detect(var).get('encoding'))
except Exception:
var = unicode(var, sickbeard.SYS_ENCODING, 'replace')
return var
| gpl-3.0 |
grahamhayes/designate | designate/storage/impl_sqlalchemy/migrate_repo/versions/057_placeholder.py | 39 | 1036 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass
| apache-2.0 |
kmacinnis/sympy | sympy/combinatorics/tests/test_graycode.py | 115 | 2173 | from sympy.combinatorics.graycode import (GrayCode, bin_to_gray,
random_bitstring, get_subset_from_bitstring, graycode_subsets)
def test_graycode():
g = GrayCode(2)
got = []
for i in g.generate_gray():
if i.startswith('0'):
g.skip()
got.append(i)
assert got == '00 11 10'.split()
a = GrayCode(6)
assert a.current == '0'*6
assert a.rank == 0
assert len(list(a.generate_gray())) == 64
codes = ['011001', '011011', '011010',
'011110', '011111', '011101', '011100', '010100', '010101', '010111',
'010110', '010010', '010011', '010001', '010000', '110000', '110001',
'110011', '110010', '110110', '110111', '110101', '110100', '111100',
'111101', '111111', '111110', '111010', '111011', '111001', '111000',
'101000', '101001', '101011', '101010', '101110', '101111', '101101',
'101100', '100100', '100101', '100111', '100110', '100010', '100011',
'100001', '100000']
assert list(a.generate_gray(start='011001')) == codes
assert list(
a.generate_gray(rank=GrayCode(6, start='011001').rank)) == codes
assert a.next().current == '000001'
assert a.next(2).current == '000011'
assert a.next(-1).current == '100000'
a = GrayCode(5, start='10010')
assert a.rank == 28
a = GrayCode(6, start='101000')
assert a.rank == 48
assert GrayCode(6, rank=4).current == '000110'
assert GrayCode(6, rank=4).rank == 4
assert [GrayCode(4, start=s).rank for s in
GrayCode(4).generate_gray()] == [0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15]
a = GrayCode(15, rank=15)
assert a.current == '000000000001000'
assert bin_to_gray('111') == '100'
a = random_bitstring(5)
assert type(a) is str
assert len(a) == 5
assert all(i in ['0', '1'] for i in a)
assert get_subset_from_bitstring(
['a', 'b', 'c', 'd'], '0011') == ['c', 'd']
assert get_subset_from_bitstring('abcd', '1001') == ['a', 'd']
assert list(graycode_subsets(['a', 'b', 'c'])) == \
[[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'],
['a', 'c'], ['a']]
| bsd-3-clause |
laslabs/odoo | addons/event_sale/models/event.py | 9 | 8716 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
from openerp.osv import fields as old_fields
class event_event(models.Model):
_inherit = 'event.event'
event_ticket_ids = fields.One2many(
'event.event.ticket', 'event_id', string='Event Ticket',
default=lambda rec: rec._default_tickets(), copy=True)
@api.model
def _default_tickets(self):
try:
product = self.env.ref('event_sale.product_product_event')
return [{
'name': _('Subscription'),
'product_id': product.id,
'price': 0,
}]
except ValueError:
return self.env['event.event.ticket']
class event_ticket(models.Model):
_name = 'event.event.ticket'
_description = 'Event Ticket'
name = fields.Char('Name', required=True, translate=True)
event_id = fields.Many2one('event.event', "Event", required=True, ondelete='cascade')
product_id = fields.Many2one(
'product.product', 'Product',
required=True, domain=["|", ("event_type_id", "!=", False), ("event_ok", "=", True)],
default=lambda self: self._default_product_id())
registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')
price = fields.Float('Price', digits=dp.get_precision('Product Price'))
deadline = fields.Date("Sales End")
is_expired = fields.Boolean('Is Expired', compute='_is_expired')
@api.model
def _default_product_id(self):
try:
product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')
return product.id
except ValueError:
return False
@api.one
@api.depends('deadline')
def _is_expired(self):
if self.deadline:
current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))
self.is_expired = self.deadline < current_date
else:
self.is_expired = False
# FIXME: non-stored fields won't end up in _columns (and thus _all_columns), which forbids them
# from being used in qweb views. While waiting for a fix, we create an old-style function field directly.
"""
price_reduce = fields.Float("Price Reduce", compute="_get_price_reduce", store=False,
digits=dp.get_precision('Product Price'))
@api.one
@api.depends('price', 'product_id.lst_price', 'product_id.price')
def _get_price_reduce(self):
product = self.product_id
discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
self.price_reduce = (1.0 - discount) * self.price
"""
def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
for ticket in self.browse(cr, uid, ids, context=context):
product = ticket.product_id
discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
res[ticket.id] = (1.0 - discount) * ticket.price
return res
_columns = {
'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',
digits_compute=dp.get_precision('Product Price')),
}
# seats fields
seats_availability = fields.Selection(
[('limited', 'Limited'), ('unlimited', 'Unlimited')],
'Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
seats_max = fields.Integer('Maximum Available Seats',
help="Define the number of available tickets. If you have too much registrations you will "
"not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
seats_used = fields.Integer(compute='_compute_seats', store=True)
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
# initialize fields to 0 + compute seats availability
for ticket in self:
ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
# aggregate registrations by ticket and by state
if self.ids:
state_field = {
'draft': 'seats_unconfirmed',
'open': 'seats_reserved',
'done': 'seats_used',
}
query = """ SELECT event_ticket_id, state, count(event_id)
FROM event_registration
WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_ticket_id, state
"""
self._cr.execute(query, (tuple(self.ids),))
for event_ticket_id, state, num in self._cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
# compute seats_available
for ticket in self:
if ticket.seats_max > 0:
ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)
@api.one
@api.constrains('registration_ids', 'seats_max')
def _check_seats_limit(self):
if self.seats_max and self.seats_available < 0:
raise UserError(_('No more available seats for the ticket'))
@api.onchange('product_id')
def onchange_product_id(self):
price = self.product_id.list_price if self.product_id else 0
return {'value': {'price': price}}
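# Worked example of _compute_seats above (figures are illustrative): a ticket
# with seats_max=100 and 10 draft, 60 open and 5 done registrations ends up
# with seats_unconfirmed=10, seats_reserved=60, seats_used=5 and
# seats_available = 100 - (60 + 5) = 35.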
class event_registration(models.Model):
_inherit = 'event.registration'
event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
# in addition to origin generic fields, add real relational fields to correctly
# handle attendees linked to sale orders and their lines
# TDE FIXME: maybe add an onchange on sale_order_id + origin
sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')
sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')
@api.one
@api.constrains('event_ticket_id', 'state')
def _check_ticket_seats_limit(self):
if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:
raise UserError(_('No more available seats for this ticket'))
@api.multi
def _check_auto_confirmation(self):
res = super(event_registration, self)._check_auto_confirmation()
if res:
orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
if orders:
res = False
return res
@api.model
def create(self, vals):
res = super(event_registration, self).create(vals)
if res.origin or res.sale_order_id:
message = _("The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s") % ({
'event_name': '<i>%s</i>' % res.event_id.name,
'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',
'order': res.origin or res.sale_order_id.name})
res.message_post(body=message)
return res
@api.model
def _prepare_attendee_values(self, registration):
""" Override to add sale related stuff """
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(event_registration, self)._prepare_attendee_values(registration)
if line_id:
att_data.update({
'event_id': line_id.event_id.id,
'event_ticket_id': line_id.event_ticket_id.id,
'origin': line_id.order_id.name,
'sale_order_id': line_id.order_id.id,
'sale_order_line_id': line_id.id,
})
return att_data
| agpl-3.0 |
jcode89/WebDev-Blog | BlogProject/app/views.py | 1 | 1099 | from flask import render_template, flash, redirect
from app import app
from .forms import LoginForm
@app.route('/')
@app.route('/index')
def index():
user = {'nickname': 'Johnny'} # fake user
posts = [ # Fake array of posts
{
'author': {'nickname': 'Michael'},
'body': 'Beautiful day in Portland'
},
{
'author': {'nickname': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html',
title='Home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
flash('Login requested for OpenID="%s", remember_me=%s' %
(form.openid.data, str(form.remember_me.data)))
return redirect('/index')
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS']) | gpl-2.0 |
alazaro/lymph | lymph/tests/integration/test_web_interface.py | 1 | 2101 | import mock
import unittest
from werkzeug.routing import Map, Rule
from werkzeug.test import Client
from werkzeug.wrappers import Response, BaseResponse
from lymph.testing import WebServiceTestCase
from lymph.web.interfaces import WebServiceInterface
from lymph.web.handlers import RequestHandler
from lymph.web.routing import HandledRule
class RuleHandler(RequestHandler):
def get(self):
return Response("Rule Handler")
class HandledRuleHandler(RequestHandler):
def get(self):
return Response("Handled Rule Handler")
class Web(WebServiceInterface):
url_map = Map([
Rule("/test/", endpoint="test"),
Rule("/foo/", endpoint=RuleHandler),
HandledRule("/bar/", endpoint="bar", handler=HandledRuleHandler),
Rule("/fail/", endpoint="fail"),
Rule("/fail-wrong-endpoint/", endpoint=42),
])
def test(self, request):
return Response("method test")
class WebIntegrationTest(WebServiceTestCase):
service_class = Web
def test_dispatch_rule_with_string_endpoint(self):
response = self.client.get("/test/")
self.assertEqual(response.data.decode("utf8"), "method test")
self.assertEqual(response.status_code, 200)
def test_dispatch_rule_with_callable_endpoint(self):
response = self.client.get("/foo/")
self.assertEqual(response.data.decode("utf8"), "Rule Handler")
self.assertEqual(response.status_code, 200)
def test_dispatch_handled_rule(self):
response = self.client.get("/bar/")
self.assertEqual(response.data.decode("utf8"), "Handled Rule Handler")
self.assertEqual(response.status_code, 200)
def test_dispatch_failing_rule_to_500(self):
response = self.client.get("/fail/")
self.assertEqual(response.data.decode("utf8"), "")
self.assertEqual(response.status_code, 500)
def test_dispatch_failing_endpoint_to_500(self):
response = self.client.get("/fail-wrong-endpoint/")
self.assertEqual(response.data.decode("utf8"), "")
self.assertEqual(response.status_code, 500)
| apache-2.0 |
adrianschlatter/python-ivi | ivi/agilent/agilentMSO7054B.py | 7 | 1689 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent7000B import *
class agilentMSO7054B(agilent7000B):
"Agilent InfiniiVision MSO7054B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO7054B')
super(agilentMSO7054B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 500e6
self._init_channels()
| mit |
Hasimir/pyjs | examples/kitchensink/sink/Lists.py | 6 | 2410 | from pyjamas.ui.Sink import Sink, SinkInfo
from pyjamas.ui.ListBox import ListBox
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Label import Label
from pyjamas.ui.Widget import Widget
class Lists(Sink):
def __init__(self):
Sink.__init__(self)
self.sStrings=[["foo0", "bar0", "baz0", "toto0", "tintin0"],
["foo1", "bar1", "baz1", "toto1", "tintin1"],
["foo2", "bar2", "baz2", "toto2", "tintin2"],
["foo3", "bar3", "baz3", "toto3", "tintin3"],
["foo4", "bar4", "baz4", "toto4", "tintin4"]]
self.combo=ListBox(VisibleItemCount=1)
self.list=ListBox(MultipleSelect=True, VisibleItemCount=10)
self.echo=Label()
self.combo.addChangeListener(self)
for i in range(len(self.sStrings)):
txt = "List %d" % i
self.combo.addItem(txt)
# test setItemText
self.combo.setItemText(i, txt + " using set text")
self.combo.setSelectedIndex(0)
self.fillList(0)
self.list.setItemSelected(0, False)
self.list.setItemSelected(1, True)
self.list.addChangeListener(self)
horz = HorizontalPanel(VerticalAlignment=HasAlignment.ALIGN_TOP,
Spacing=8)
horz.add(self.combo)
horz.add(self.list)
panel = VerticalPanel(HorizontalAlignment=HasAlignment.ALIGN_LEFT)
panel.add(horz)
panel.add(self.echo)
self.initWidget(panel)
self.echoSelection()
def onChange(self, sender):
if sender == self.combo:
self.fillList(self.combo.getSelectedIndex())
elif sender == self.list:
self.echoSelection()
def onShow(self):
pass
def fillList(self, idx):
self.list.clear()
strings = self.sStrings[idx]
for i in range(len(strings)):
self.list.addItem(strings[i])
self.echoSelection()
def echoSelection(self):
msg = "Selected items: "
for i in range(self.list.getItemCount()):
if self.list.isItemSelected(i):
msg += self.list.getItemText(i) + " "
self.echo.setText(msg)
def init():
text="Here is the ListBox widget in its two major forms."
return SinkInfo("Lists", text, Lists)
| apache-2.0 |
malecka1/BI-SKJ-project | malecka1.py | 1 | 40099 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Program implements BI-SKJ LS2014/2015 semestral project. For more info read "README.txt" file. '''
__author__ = "Kamil Maleček"
__license__ = "MIT"
__version__ = "1.0"
import argparse, os, re, itertools, sys, signal, shutil, tempfile, urllib.request, subprocess, math
g_dirName = None # output directory, global because of cleanup
g_FinalInputFiles = None # prepared files for Gnuplot, global because of cleanup
class CGraph:
''' CGraph class represents data for graph. '''
def __init__(self, TimeFormat, Xmax, Xmin, Ymax, Ymin, Speed, Time, FPS, CriticalValue, Legend, GnuplotParams, EffectParams, Name, InputFiles):
''' Constructor of the CGraph class. '''
self.m_TimeFormat = TimeFormat
self.m_Xmax = Xmax
self.m_Xmin = Xmin
self.m_Ymax = Ymax
self.m_Ymin = Ymin
self.m_Speed = Speed
self.m_Time = Time
self.m_FPS = FPS
self.m_CriticalValue = CriticalValue
self.m_Legend = Legend
self.m_GnuplotParams = GnuplotParams
self.m_EffectParams = EffectParams
self.m_Name = Name
self.m_InputFiles = InputFiles
def printInstance(self):
''' Prints CGraph instance for debug purpose. '''
print("----------- After input -----------") # stderr: file=sys.stderr
print("TF: {0}\nXmax:Xmin: {1}:{2}\nYmax:Ymin {3}:{4}\nSPEED: {5}\nTime: {6} \nFPS: {7}\nCriticalValue: {8}\nLegend: {9}\nGnuplotP: {10}\nEffectP: {11}\nName: {12}\nInput Files: {13}\n-----------------------------------".format(self.m_TimeFormat, self.m_Xmax, self.m_Xmin, self.m_Ymax, self.m_Ymin, self.m_Speed, self.m_Time, self.m_FPS, self.m_CriticalValue, self.m_Legend, self.m_GnuplotParams, self.m_EffectParams, self.m_Name, self.m_InputFiles))
class UniqueStore(argparse.Action):
''' Class for multiple argument occurences error for argparse, source: 'http://stackoverflow.com/a/23032953'. '''
def __call__(self, parser, namespace, values, option_string):
if getattr(namespace, self.dest, self.default) is not None:
parser.error(option_string + " appears several times")
setattr(namespace, self.dest, values)
def cleanup():
''' Function deletes output folder with all files. '''
shutil.rmtree(g_dirName, ignore_errors=True)
if g_FinalInputFiles:
for f in g_FinalInputFiles:
os.remove(f)
def signalHandler(signum, frame):
''' Function catches signals and delete all files. Exit code '3' means signal received.'''
cleanup()
print("Recieved signal: {0}, cleaned up.".format(signum), file=sys.stderr)
sys.exit(3) # return code 3 = signal
def isValid3c(parser, x):
''' Function checks timestamp format (implemented %[YymdHMS]). '''
impl = "YymdHMS" # implemented symbols
occ = [None]*7 # indicates whether a symbol already occurred
pos = x.find('%', 0) # finds first %
if pos == -1: # has to be at least one %x
parser.error("argument -t: invalid value: \"{0}\"".format(x))
while pos != -1:
if pos+1 == len(x): # last char is %
parser.error("argument -t: invalid value: \"{0}\"".format(x[pos:]))
sym = x[pos+1]
idx = impl.find(sym)
if idx == -1:
parser.error("argument -t: unsupported value: \"{0}\"".format('%'+sym))
if occ[idx] != None:
parser.error("argument -t: multiple \'%{0}\' occurrence".format(sym))
if (sym == 'Y' and occ[1] != None) or (sym == 'y' and occ[0] != None):
parser.error("argument -t: used \'%Y\' and \'%y\', use just one year specification")
occ[idx] = 1
pos = x.find('%', pos+2)
return x
def isValidMinMax(parser, x, arg):
''' Function validates X and Y min/max values. '''
if x == "auto":
return x
elif (arg == '-X' or arg == '-Y') and x == "max": # args with max
return x
elif (arg == '-x' or arg == '-y') and x == "min":
return x
elif arg == '-Y' or arg == '-y': # y values are always float
try:
return float(x) # check float
except ValueError: # err mesg
if arg == '-X' or arg == '-Y':
parser.error("argument {0}: invalid choice: {1} (choose from \"auto\", \"max\", int/float)".format(arg,x))
else:
parser.error("argument {0}: invalid choice: {1} (choose from \"auto\", \"min\", int/float)".format(arg,x))
else: # x values are not checked in the moment
return x
def isValidSTP(parser, x, arg):
''' Function validates Speed, Time and FPS values. '''
try:
if float(x) <= 0:
parser.error("argument {0}: negative or zero value: \'{1}\'".format(arg, x))
except ValueError:
parser.error("argument {0}: invalid value: \'{1}\'".format(arg, x))
return float(x)
def isValidCrit(parser, x):
''' Function checks and parses critical values into list. '''
i = 0
y = [] # output list
pos = x.find('y',0)
pos2 = x.find('x', 0)
if (pos2 < pos and pos2 != -1) or pos == -1: # what is first
pos = pos2
while pos != -1:
j = pos+1 # j is start of the next item
pos = x.find('y',j)
pos2 = x.find('x',j)
if (pos2 < pos and pos2 != -1) or pos == -1:
pos = pos2
if pos == -1: # last value
break
if re.search(r'^y=(-?0|-?[1-9][0-9]*)($|\.\d+$)', x[i:pos-1]) or re.search(r'^x=.*\d+.*$', x[i:pos-1]): # using regex
y.append(x[i:pos-1])
else:
parser.error("argument -c: invalid value: \'{0}\'".format(x[i:pos+2]))
i = pos
if re.search(r'^y=(-?0|-?[1-9][0-9]*)($|\.\d+$)', x[i:]) or re.search(r'^x=.*\d+.*$', x[i:]):
y.append(x[i:])
else:
parser.error("argument -c: invalid value: \'{0}\'".format(x[i:]))
return y
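# Example (hypothetical -c value; the x parts are validated later against -t):
# isValidCrit(parser, "y=3.5:x=2015-01-02:y=-1")
# -> ['y=3.5', 'x=2015-01-02', 'y=-1']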
def isValidEffect(parser, x):
''' Function validates effect parameter (integer). '''
try:
if int(x) < 0:
parser.error("argument -e: negative value: \'{0}\'".format(x))
except ValueError:
parser.error("argument -e: invalid value: \'{0}\', must be an integer".format(x))
return x
def isValidFile(parser, x):
''' Function checks path and readability of the configuration file. '''
if not os.path.isfile(x):
parser.error("File \"{0}\" does not exists.".format(x))
elif not os.access(x, os.R_OK):
parser.error("File \"{0}\" is not readable.".format(x))
return x
def isValidNameDir(x):
''' Function finds correct name of dir for output. '''
if os.path.exists(x):
a = [f for f in os.listdir() if re.search(r'^'+x+r'_[1-9][0-9]*$', f)] # get list of current valid names
if a:
m = 0
for i in a:
if int(i[len(x)+1:]) > m: # get int
m = int(i[len(x)+1:])
x = x + '_' + str(m+1)
else:
x = x + "_1"
return x
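# Example (hypothetical listing): with directories "out", "out_1" and "out_3"
# already present, isValidNameDir("out") returns "out_4".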
def parseFile(parser, args):
''' Function gets arguments from the configuration file (lower priority than cmd line args). '''
val = [None]*11 # indicate arg in cmd line
with open(args.f, mode='r', encoding='utf-8') as f: # properly closes file everytime
l = 0 # nr of line
for line in f: # iterate through lines
l+=1
found = line.find("#") # find first # from the end
if found != -1: # cut off comment #.....
line = line[:found]
if not line.strip(): # ignore whitespace lines
continue
line = line.strip() # cut off whitespace inlcuding \n
found = re.search(r'( |\t)', line) # find first ' ' or \t from left = delimiter
if not found: # no delimiter between directive and value
parser.error("config file error: no delimiter between directive and value on line {0}".format(l))
substr = line[:found.start()] # directive
substr = substr.lower() # case insensitive
line = line[found.end():] # rest of the line = directive's value
line = line.lstrip() # if there is more ' '\t as delimiter
#print(substr, "|", line, "|") # debug output
if substr == "timeformat": # validate config file values
val[0] = isValid3c(parser, line)
elif substr == "xmax":
val[1] = isValidMinMax(parser, line, '-X')
elif substr == "xmin":
val[2] = isValidMinMax(parser, line, '-x')
elif substr == "ymax":
val[3] = isValidMinMax(parser, line, '-Y')
elif substr == "ymin":
val[4] = isValidMinMax(parser, line, '-y')
elif substr == "speed":
val[5] = isValidSTP(parser, line, '-S')
elif substr == "time":
val[6] = isValidSTP(parser, line, '-T')
elif substr == "fps":
val[7] = isValidSTP(parser, line, '-F')
elif substr == "criticalvalue":
if args.c == None:
args.c = []
args.c.append(isValidCrit(parser, line))
elif substr == "legend":
val[8] = line
elif substr == "gnuplotparams":
if args.g == None:
args.g = []
args.g.append(line)
elif substr == "effectparams":
val[9] = isValidEffect(parser, line)
elif substr == "name":
val[10] = isValidNameDir(line)
else:
parser.error("config file error: invalid directive on line {0}".format(l))
if args.t == None: # use values from the file (or None) now if the argument wasn't specified
args.t = val[0]
if args.X == None:
args.X = val[1]
if args.x == None:
args.x = val[2]
if args.Y == None:
args.Y = val[3]
if args.y == None:
args.y = val[4]
if args.S == None:
args.S = val[5]
if args.T == None:
args.T = val[6]
if args.F == None:
args.F = val[7]
if args.l == None:
args.l = val[8]
if args.e == None:
args.e = val[9]
if args.n == None:
args.n = val[10]
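# Example config file accepted by parseFile (directives are case-insensitive,
# '#' starts a comment, the first space/tab separates directive from value):
# TimeFormat %Y-%m-%d # timestamp format
# Speed 2
# Legend My graph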
def checkX(t, x):
''' Function checks if x value has format according to argument t. Returns -1 for error. '''
d1 = (31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) # days
prestupny = None
den = None # indicates what has been already checked
mesic = None
rok = None
a = 0 # position in t
b = 0 # position in x
#print ("|", t, "|", x, "|") # debug output with format and value to check
while a < len(t): # go through -t image
if b >= len(x): # x is shorter than has to be according -t spec
return -1
if t[a] == '%':
try: # check valid value \d
if t[a+1] != 'Y': # xx (two digits)
if b+2 > len(x): # x is shorter than has to be for -ymdHMS
return -1
if t[a+1] == 'y':
rok = int(x[b:b+2])
elif t[a+1] == 'H':
hod = int(x[b:b+2])
if hod < 0 or hod > 24: # according timefmt specs in Gnuplot
return -1
elif t[a+1] == 'M':
minu = int(x[b:b+2])
if minu < 0 or minu > 60:
return -1
elif t[a+1] == 'S':
sec = int(x[b:b+2])
if sec < 0 or sec > 60:
return -1
elif t[a+1] == 'm':
mon = int(x[b:b+2])
if mon < 1 or mon > 12:
return -1
mesic = mon
else: # days according to month and year later
day = int(x[b:b+2])
if day < 1 or day > 31:
return -1
den = day
b+=2
else: # -Y -> xxxx (4 digits)
if b+4 > len(x): # x is shorter than has to be for -Y
return -1
rok = int(x[b:b+4])
b+=4
a+=2 # jump over %x in time format (always 2 chars)
continue
except ValueError: # not integer
return -1
if t[a] != x[b]: # some other symbol in -t spec does not match
return -1
a+=1 # next iteration
b+=1
if b != len(x): # x is longer than has to be according t spec
return -1
if mesic is not None and den is not None: # it's time to check valid day
if rok is None and den > d1[mesic-1]: # easiest - without year
return -1
else: # with year
if rok % 4 == 0: # leap year?
if rok % 100 == 0 and rok % 400 != 0:
prestupny = False
else:
prestupny = True
else:
prestupny = False
if not prestupny:
if mesic == 2 and den > 28: # February in a non-leap year
return -1
elif den > d1[mesic-1]: # other months
return -1
elif den > d1[mesic-1]: # leap year
return -1
#print(prestupny, den, mesic, rok) # debug output
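# Examples: checkX("%Y-%m-%d", "2015-02-29") -> -1 (2015 is not a leap year),
# checkX("%Y-%m-%d", "2016-02-29") -> None (a valid date passes silently).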
def myCompare(x1, x2, t):
''' Compare function for Insertion sort, values stored at [1] index. Returns -1 if x1 is lower than x2, 0 for equal items and otherwise returns 1. '''
#print(x1, x2, t) # debug output
if x1 == x2: # same x values
return 0
# t is time format, priority: %Y %y %m %d %H %M %S
posOfY = t.find('%Y') # remember this position, because %Y occupies 4 chars in the value and shifts later fields by 2
for spec in ('%Y', '%y', '%m', '%d', '%H', '%M', '%S'):
pos = t.find(spec)
if pos == -1:
continue
width = 4 if spec == '%Y' else 2
shift = 2 if posOfY != -1 and pos > posOfY else 0 # the original 'if posOfY' test silently skipped the shift when %Y sat at position 0
sub1 = x1[1][pos+shift:pos+shift+width]
sub2 = x2[1][pos+shift:pos+shift+width]
if sub1 < sub2:
return -1
elif sub1 > sub2:
return 1
return 0
def InsSort(lines, t):
''' Function implements Insertion sort for list of (originalIndex, timeValue) and returns new index order. '''
for i in range(1,len(lines)): # take item from the left to the right and find his correct position and swap
key = lines[i]
j = i
tmp = myCompare(lines[j-1], key, t)
if tmp == 0: # same values
return None
while j > 0 and tmp == 1:
lines[j] = lines[j-1]
j-=1
lines[j] = key
tmp = myCompare(lines[j-1], key, t)
if tmp == 0:
return None
return lines
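# Example: InsSort([(0, "2015-03-02"), (1, "2015-01-15")], "%Y-%m-%d")
# -> [(1, "2015-01-15"), (0, "2015-03-02")]; duplicate timestamps yield None.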
def formatCrit(parser, args):
''' Function checks -c values (especially axe x's) and prepares them. '''
x = [] # list for x values
y = [] # list for y values
for i in itertools.chain.from_iterable(args.c):
if i[0] == 'y':
y.append(float(i[2:]))
else: # x values
if checkX(args.t, i[2:]) == -1:
parser.error("argument -c: invalid x format: \"{0}\"".format(i[2:]))
x.append(i[2:])
x = list(set(x)) # remove duplicates, random order
tmpList = []
for i in range(0, len(x)): # list for x-sort (index, x value)
tmpList.append((i, x[i]))
tmpList = InsSort(tmpList, args.t) # insertion sort
i = 0
x = []
while i < len(tmpList): # [i][1] values are sorted original x values
x.append(tmpList[i][1])
i+=1
#print("-c x sorted:", x) # debug output, shows sorted x values
args.c = (x, sorted(list(set(y)))) # tuple due to guaranteed order (x, y)
def defaultsAndCheck(parser, args):
''' Function makes another arguments check and sets default values (after all possible inputs). '''
if args.t is None:
args.t = "[%Y-%m-%d %H:%M:%S]"
if args.X is None:
args.X = "max"
if args.x is None:
args.x = "min"
if args.X != "max" and args.X != "auto":
if checkX(args.t, args.X) == -1:
parser.error("argument -X: invalid Xmax format: \"{0}\"".format(args.X))
if args.x != "min" and args.x != "auto":
if checkX(args.t, args.x) == -1:
parser.error("argument -x: invalid Xmin format: \"{0}\"".format(args.x))
if args.X != "max" and args.X != "auto":
a = myCompare((0, args.X), (0, args.x), args.t)
if a == -1 or a == 0:
parser.error("value of Xmax is not higher than xmin")
if args.Y is None:
args.Y = "auto"
if args.y is None:
args.y = "auto"
if type(args.Y) is float and type(args.y) is float and args.Y <= args.y:
parser.error("value of Ymax is not higher than ymin")
if args.c is not None: # check format and organize -c values
formatCrit(parser, args)
if args.e is None:
args.e = 0
if args.n is None: # set default name
args.n = isValidNameDir(parser.prog)
def getArgsAndRetInstance():
''' Function parses arguments and returns new CGraph instance. Exit code '2' means invalid command line arguments or directives in the configuration file. '''
parser = argparse.ArgumentParser(description='This script creates animation in Gnuplot according to command-line arguments or configuration file.')
# command-line params have priority over the configuration file
parser.add_argument('-t', type=lambda x: isValid3c(parser, x), metavar="%{YymdHMS}", action=UniqueStore, help="timestamp format")
parser.add_argument('-X', type=lambda x: isValidMinMax(parser, x, '-X'), metavar="{\"auto\",\"max\",int/float}", action=UniqueStore, help="x-max")
parser.add_argument('-x', type=lambda x: isValidMinMax(parser, x, '-x'), metavar="{\"auto\",\"min\",int/float}", action=UniqueStore, help="x-min")
parser.add_argument('-Y', type=lambda x: isValidMinMax(parser, x, '-Y'), metavar="{\"auto\",\"max\",int/float}", action=UniqueStore, help="y-max")
parser.add_argument('-y', type=lambda x: isValidMinMax(parser, x, '-y'), metavar="{\"auto\",\"min\",int/float}", action=UniqueStore, help="y-min")
parser.add_argument('-S', type=lambda x: isValidSTP(parser, x, '-S'), metavar="int/float", action=UniqueStore, help="speed")
parser.add_argument('-T', type=lambda x: isValidSTP(parser, x, '-T'), metavar="int/float", action=UniqueStore, help="time (duration)")
parser.add_argument('-F', type=lambda x: isValidSTP(parser, x, '-F'), metavar="int/float", action=UniqueStore, help="fps")
parser.add_argument('-c', type=lambda x: isValidCrit(parser, x), metavar="x/y=int/float", action="append", help="critical values, separate by \':\'")
parser.add_argument('-l', type=str, metavar="text", action=UniqueStore, help="legend")
parser.add_argument('-g', metavar="parameter", action="append", help="Gnuplot parameter")
parser.add_argument('-e', type=lambda x: isValidEffect(parser, x), metavar="int", action=UniqueStore, help="count of animated points")
parser.add_argument('-f', type=lambda x: isValidFile(parser, x), metavar="pathname", action=UniqueStore, help="config file")
parser.add_argument('-n', type=lambda x: isValidNameDir(x), metavar="text", action=UniqueStore, help="name")
parser.add_argument("file", metavar="FILE", nargs='+', help="input file or url")
args = parser.parse_args() # get values of args
if args.f is not None: # file - lower priority
parseFile(parser, args)
defaultsAndCheck(parser, args) # another input control (except input files) and setting defaults
y = [] # input file list
[y.append(x) for x in args.file if not x in y] # remove duplicities from input files list and keep the order!
args.file = y
return CGraph(args.t, args.X, args.x, args.Y, args.y, args.S, args.T, args.F, args.c, args.l, args.g, args.e, args.n, args.file)
def prepareInputFiles(self):
''' Function downloads (http(s)), checks, organizes input files. Error code '1' means input file error. '''
global g_FinalInputFiles # global -> local
g_FinalInputFiles = [] # new list for prepared, sorted input files (= references to temp files)
xmax = None # get values from files
xmin = None
ymax = None
ymin = None
for f in self.m_InputFiles:
if re.search(r'^http(s)?://.*', f) != None: # download file from url
fpT = tempfile.NamedTemporaryFile() # temporary file with name for download
try:
with urllib.request.urlopen(f) as stream: # download file to tmp
fpT.write(stream.read())
except urllib.error.HTTPError as e: # not responding
print("Input file error: \"{0}\", error code:".format(f), e.code, file=sys.stderr)
cleanup()
sys.exit(1) # return code 1 = error with input files
except urllib.error.URLError as e: # server not available
print("Input file error: \"{0}\", reason:".format(f), e.reason, file=sys.stderr)
cleanup()
sys.exit(1)
f2 = fpT.name # get name of the local tmp file '/tmp/xxxx'
else: # local file
if not os.path.isfile(f):
print("Input file error: file \"{0}\" does not exist".format(f), file=sys.stderr)
cleanup()
sys.exit(1)
elif not os.access(f, os.R_OK): # readability
print("Input file error: file \'{0}\" is not readable".format(f), file=sys.stderr)
cleanup()
sys.exit(1)
elif os.stat(f).st_size == 0: # empty file
print("Input file error: file \"{0}\" is empty".format(f), file=sys.stderr)
cleanup()
sys.exit(1)
f2 = f
try:
with open(f2, encoding='utf-8') as soubor:
i = 0 # nr of line
forSort = []
for line in soubor: # check lines' format (1)
i+=1
line = line.strip() # cut off whitespace including \n
found = re.search(r'( |\t)', line[::-1]) # find delimiter in inversed line = last ' '\t is delimiter
if not found: # no delimiter = just one column
print("Input file error: file \"{0}\", line {1} does not have two columns".format(f, i), file=sys.stderr)
cleanup()
sys.exit(1)
substr = line[:-found.end()] # x value
substr = substr.rstrip() # more ' '\t as delimiter
line = line[-found.end()+1:] # y value
try: # y value has to be int/float
if ymin:
if float(line) < ymin: # get y max/min
ymin = float(line)
else:
ymin = float(line)
if ymax:
if float(line) > ymax:
ymax = float(line)
else:
ymax = float(line)
except ValueError:
print("Input file error: file \"{0}\", line {1}: y value is not int/float".format(f, i), file=sys.stderr)
cleanup()
sys.exit(1)
#print(i, ":", substr, ":", line, ":") # debug output...parsed line of file
if checkX(self.m_TimeFormat, substr) == -1: # x value timeformat check
print("Input file error: file \"{0}\", line {1}: incorrect format".format(f, i), file=sys.stderr)
cleanup()
sys.exit(1)
forSort.append((i-1, substr)) # (line's index, x value)
linesPos = InsSort(forSort, self.m_TimeFormat) # insertion sort for lines, return new index order at [i][0] (2)
if not linesPos: # more values with the same time doesn't make sense -> error
print("Input file error: file \"{0}\": same x values in one file does not make sense".format(f), file=sys.stderr)
cleanup()
sys.exit(1)
soubor.seek(0) # reset read pointer's position
lines = soubor.readlines() # read all lines into list
fp = tempfile.NamedTemporaryFile(delete=False) # final tmp file, default r+w, delete prevention!!
i = 0
while i < len(linesPos): # write lines in accordance with the new order (index [linesPos[i][0])
line = lines[linesPos[i][0]]
line = line.lstrip() # get rid of init spaces (because of Gnuplot)
fp.write(line.encode('utf-8')) # important encode!!
i+=1
fp.close() # important
g_FinalInputFiles.append(fp.name) # remember final input file
#print("prepared file:", f, "->", fp.name) # debug output ordered files
except UnicodeDecodeError:
print("Input file error: cannot decode file (from) \"{0}\"".format(f), file=sys.stderr) # file from url
cleanup()
sys.exit(1)
i = 0 # merge input files in case of continuation (3)
while i < len(g_FinalInputFiles)-1: # index of file
with open(g_FinalInputFiles[i], mode='r+', encoding='utf-8') as f: # first file (also final file)
aline = f.readlines()
lline = aline[-1].strip() # get last line timestamp
found = re.search(r'( |\t)', lline[::-1]) # get last x
lline = lline[:-found.end()]
with open(g_FinalInputFiles[i+1], encoding='utf-8') as f2:
fline = f2.readline().strip()
found = re.search(r'( |\t)', fline[::-1]) # get first x
fline = fline[:-found.end()]
if myCompare((0, lline), (0, fline), self.m_TimeFormat) != -1: # no data to move
i+=1 # next two files
# get x max/min value from aline[0/-1] = final input file
fline = aline[0].strip() # first line
found = re.search(r'( |\t)', fline[::-1])
fline = fline[:-found.end()]
lline = aline[-1].strip() # last line
found = re.search(r'( |\t)', lline[::-1])
lline = lline[:-found.end()]
if xmax:
if myCompare((0, xmax), (0, lline), self.m_TimeFormat) == -1:
xmax = lline
else:
xmax = lline
if xmin:
if myCompare((0, xmin), (0, fline), self.m_TimeFormat) == 1:
xmin = fline
else:
xmin = fline
continue # next two files
else: # move data (merge) -> jump to the 2nd file's begin
f2.seek(0)
f.write(f2.read()) # write all data from f2's pointer
#print("file merge:", g_FinalInputFiles[i+1], "->", g_FinalInputFiles[i]) # debug output
os.remove(g_FinalInputFiles[i+1]) # remove old file f2
g_FinalInputFiles.pop(i+1) # remove item from file list
# x max/min values from the last file
with open(g_FinalInputFiles[-1], mode='r', encoding='utf-8') as f:
aline = f.readlines()
fline = aline[0].strip() # first line
found = re.search(r'( |\t)', fline[::-1])
fline = fline[:-found.end()]
lline = aline[-1].strip()
found = re.search(r'( |\t)', lline[::-1])
lline = lline[:-found.end()]
if xmax:
if myCompare((0, xmax), (0, lline), self.m_TimeFormat) == -1:
xmax = lline
else:
xmax = lline
if xmin:
if myCompare((0, xmin), (0, fline), self.m_TimeFormat) == 1:
xmin = fline
else:
xmin = fline
if self.m_Xmax == "max": # save float values
self.m_Xmax = xmax
if self.m_Xmin == "min":
self.m_Xmin = xmin
if self.m_Ymax == "max":
self.m_Ymax = ymax
if self.m_Ymin == "min":
self.m_Ymin = ymin
#print("xmax:", self.m_Xmax, "xmin:", self.m_Xmin, "ymax:", self.m_Ymax, "ymin:", self.m_Ymin) # debug output
def createAnim(self):
''' Function creates final output. Error code '4' means Gnuplot error. '''
# X/Y max/min, length of animation, legend are already known
lines = 0 # sum of all lines
linesN = [] # list of sums per files
for f in g_FinalInputFiles:
with open(f, mode='r', encoding='utf-8') as file:
linesN.append(sum(1 for line in file))
lines+=linesN[-1]
nrY = self.m_TimeFormat.count(' ') + 2 # nr of y column is at least '2'
# Time, Speed, FPS -> delay (1/100 s), speed (ceil), number of gif frames
framesNr = 0
delay = 0
if self.m_Speed and self.m_Speed > lines:
framesNr = 1
delay = 0
elif self.m_Time: # comb of all params goes here, Time
if self.m_FPS: # ceil + high FPS + not many lines -> no simple solution, Barinka said that ceil is enough
framesNr = math.ceil(self.m_Time * self.m_FPS)
if self.m_Speed and math.ceil(lines/framesNr) != int(self.m_Speed):
print("Gnuplot error: invalid combination of all Speed, FPS and Time arguments", file=sys.stderr)
cleanup()
sys.exit(4)
self.m_Speed = math.ceil(lines / framesNr)
delay = math.ceil(self.m_Time * 100 / (framesNr-1))
else: # Time + speed
if not self.m_Speed:
self.m_Speed = 1
self.m_Speed = math.ceil(self.m_Speed)
framesNr = math.ceil(lines / self.m_Speed)
delay = math.ceil(self.m_Time * 100 / (framesNr-1))
elif self.m_Speed: # Speed
self.m_Speed = math.ceil(self.m_Speed)
if not self.m_FPS:
self.m_FPS = 25
framesNr = math.ceil(lines / self.m_Speed)
delay = math.ceil(framesNr * 100 / self.m_FPS / (framesNr-1))
elif self.m_FPS: # FPS
self.m_Speed = 1
framesNr = lines
delay = math.ceil(framesNr * 100 / self.m_FPS / (framesNr-1))
else: # 3x None -> defaults: Speed + FPS
self.m_Speed = 1
framesNr = lines
delay = math.ceil(framesNr * 100 / 25 / (framesNr-1))
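# Worked example of the defaults branch: 100 input lines -> framesNr = 100,
# delay = ceil(100*100/25/99) = ceil(4.04) = 5 hundredths of a second per frame.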
gfile = tempfile.NamedTemporaryFile() # file for Gnuplot, temp -> removes itself
#print("Gnuplot file:", gfile.name, "\nframes:", framesNr, "delay:", delay, "speed:", self.m_Speed) # debug output
with open(gfile.name, mode='w', encoding='utf-8') as f:
f.write("reset\nset timefmt \""+str(self.m_TimeFormat)+"\"\nset xdata time\nframes="+str(framesNr)+"\nj="+str(self.m_Speed)+" # = speed\ni=1\n") # common settings
if self.m_GnuplotParams: # Gnuplot params
for param in self.m_GnuplotParams:
f.write("set " + str(param) + '\n')
if self.m_Legend: # legend = graph title
f.write("set title \"" + str(self.m_Legend) + "\"\n")
if self.m_Xmax == "auto": # x autoscale
f.write("set xrange [:*]\n") # autoscale xmax
else: # custom xmax xrange
f.write("set xrange [:\"" + str(self.m_Xmax) + "\"]\n")
if self.m_Xmin == "auto":
f.write("set xrange [*:]\n")
else:
f.write("set xrange [\"" + str(self.m_Xmin) + "\":]\n")
if self.m_Ymax == "auto": # y autoscale
f.write("set yrange [:*]\n")
else: # custom ymax yrange
f.write("set yrange [:\"" + str(self.m_Ymax) + "\"]\n")
if self.m_Ymin == "auto":
f.write("set yrange [*:]\n")
else:
f.write("set yrange [\"" + str(self.m_Ymin) + "\":]\n")
f.write("set terminal unknown # disable showing upcomming test-plot\nplot")
s = 0
for i in g_FinalInputFiles: # test plot to unknown output, then set correct output term & dir
f.write(" '"+str(i)+"' using 1:($0+"+str(s)+" < j ? $"+str(nrY)+" : 1/0) notitle,")
s+=linesN[g_FinalInputFiles.index(i)]
f.write("\ntmpY=GPVAL_Y_MIN # plot first line and get Ymin for first frame\ntmpS=GPVAL_DATA_X_MIN # start for -c lines at axe x\nset term gif animate delay "+str(delay)+"\nset output \""+str(self.m_Name)+"/animation.gif\"\nwhile (i < frames) {\n")
if self.m_CriticalValue: # -c as arrows before plot command
l=1
for xc in self.m_CriticalValue[0]: # x critical values
f.write("set arrow "+str(l)+" from \""+str(xc)+"\",GPVAL_Y_MIN to \""+str(xc)+"\",GPVAL_Y_MAX nohead lw 1 lc rgb 'gold'\n")
l+=1
for yc in self.m_CriticalValue[1]: # y critical values
f.write("set arrow "+str(l)+" from tmpS,"+str(yc)+" to GPVAL_X_MAX,"+str(yc)+" nohead lw 1 lc rgb 'skyblue'\n")
l+=1
f.write("plot") # just one plot command
s = 0 # sum of lines of all previous files
k = 1 # linestyle number for -g params like "style line 1 lt 2 lc rgb \"red\" lw 3"
for i in g_FinalInputFiles: # two graphs per file, second is animation, using GPVAL_Y_MIN variable to get current ymin for correct animation steps
f.write(" '"+str(i)+"' using 1:($0+"+str(s)+" < j ? $"+str(nrY)+" : 1/0) notitle linestyle "+str(k)+", '"+str(i)+"' using 1:($0+"+str(s)+" >= j && $0+"+str(s)+" < j+"+str(self.m_EffectParams)+" ? (($"+str(nrY)+"-tmpY)*(1-1.0/"+str(self.m_EffectParams)+"*($0+"+str(s)+"+1-j))+tmpY) : 1/0) notitle linestyle "+str(k)+",")
s+=linesN[g_FinalInputFiles.index(i)]
k+=1
f.write("\nif (GPVAL_Y_MIN < tmpY) { # for valid animation lowest value\ntmpY=GPVAL_Y_MIN\n}\ni=i+1\nj=j+"+str(self.m_Speed)+"\n}\n") # for next iteration
if self.m_CriticalValue: # -c before final frame
l=1
for xc in self.m_CriticalValue[0]: # x critical values
f.write("set arrow "+str(l)+" from \""+str(xc)+"\",GPVAL_Y_MIN to \""+str(xc)+"\",GPVAL_Y_MAX nohead lw 1 lc rgb 'gold'\n")
l+=1
for yc in self.m_CriticalValue[1]: # y critical values
f.write("set arrow "+str(l)+" from tmpS,"+str(yc)+" to GPVAL_X_MAX,"+str(yc)+" nohead lw 1 lc rgb 'skyblue'\n")
l+=1
f.write("plot")
k = 1 # linestyle number for -g params like "style line 1 lt 2 lc rgb \"red\" lw 3"
for i in g_FinalInputFiles: # final frame
f.write(" '"+str(i)+"' using 1:"+str(nrY)+" notitle linestyle "+str(k)+", sqrt(-1) notitle,") # pair with empty graph to get correct color
k+=1
f.write("\nset output\n") # closes gnuplot file
#while input("------------- Quit ----------------\nGnuplot and input files accessible, press \'q\' to quit: ") != 'q': # debug output - all input files
# pass
try: # execute gnuplot and create animation
p = subprocess.Popen(["gnuplot", gfile.name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except FileNotFoundError: # can't find Gnuplot
print("Gnuplot error: cannot execute Gnuplot", file=sys.stderr)
cleanup()
sys.exit(4) # err code 4 = Gnuplot error
output, error = p.communicate() # at the end calls wait(), DO NOT DELETE
#print("----------- Gnuplot ---------------\nreturn code:", p.returncode, "\nstdout:", output, "\nstderr:", error) # debug output of Gnuplot
if p.returncode != 0: # gnuplot wasn't successful
print("Gnuplot error:", error.decode('utf-8'), end='', file=sys.stderr)
cleanup()
sys.exit(4)
def main():
''' Main function of the script. '''
signal.signal(signal.SIGINT, signalHandler) # ctrl+c
signal.signal(signal.SIGTERM, signalHandler) # kill PID
g = getArgsAndRetInstance() # get valid instance
#g.printInstance() # debug output after input
os.makedirs(g.m_Name) # create output dir, from now use cleanup() in case of error
global g_dirName # use global variable as local
g_dirName = g.m_Name
CGraph.prepareInputFiles = prepareInputFiles
g.prepareInputFiles() # check and prepare input files
CGraph.createAnim = createAnim
g.createAnim() # finally create animation
for f in g_FinalInputFiles: # delete tmp input files!
os.remove(f)
if __name__ == "__main__": # call main
main()
| mit |
clumsy/intellij-community | python/helpers/profiler/thriftpy3/server/TNonblockingServer.py | 44 | 11883 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is to receive and send requests
only from the main thread.
The thread pool should be sized for concurrent tasks, not
maximum connections
"""
import threading
import socket
import select
import struct
import logging
logger = logging.getLogger(__name__)
from six.moves import queue
from thriftpy3.transport import TTransport
from thriftpy3.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logger.exception("Exception while processing request")
callback(False, b'')
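# A queued item whose processor is None acts as a poison pill: close() puts
# one per thread on the queue so every Worker leaves its loop cleanly.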
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"""Decorator which locks self.lock."""
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"""Decorator close object on socket.error."""
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self.close()
return read
class Connection:
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
WAIT_PROCESS --- connection has just read whole request and
waits for call ready routine.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.message = b''
self.lock = threading.Lock()
self.wake_up = wake_up
def _read_len(self):
"""Reads length of request.
It's a safer alternative to self.socket.recv(4)
"""
read = self.socket.recv(4 - len(self.message))
if len(read) == 0:
# if we read 0 bytes and self.message is empty, then
# the client closed the connection
if len(self.message) != 0:
logger.error("can't read frame size from socket")
self.close()
return
self.message += read
if len(self.message) == 4:
self.len, = struct.unpack('!i', self.message)
if self.len < 0:
logger.error("negative frame size, it seems client "
"doesn't use FramedTransport")
self.close()
elif self.len == 0:
logger.error("empty frame, it's really strange")
self.close()
else:
self.message = b''
self.status = WAIT_MESSAGE
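# Frame header example: struct.pack('!i', 11) == b'\x00\x00\x00\x0b', i.e. a
# big-endian 4-byte length prefix as produced by Thrift's framed transport.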
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
if self.status == WAIT_LEN:
self._read_len()
# go back to the main loop here for simplicity instead of
# falling through, even though there is a good chance that
# the message is already available
elif self.status == WAIT_MESSAGE:
read = self.socket.recv(self.len - len(self.message))
if len(read) == 0:
logger.error("can't read frame from socket (get %d of "
"%d bytes)" % (len(self.message), self.len))
self.close()
return
self.message += read
if len(self.message) == self.len:
self.status = WAIT_PROCESS
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self.message)
if sent == len(self.message):
self.status = WAIT_LEN
self.message = b''
self.len = 0
else:
self.message = self.message[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
This function is the only function which can be called asynchronously.
The ready can switch Connection to three states:
WAIT_LEN if request was oneway.
SEND_ANSWER if request was processed in normal way.
CLOSED if request throws unexpected exception.
It also wakes up the main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
self.len = 0
if len(message) == 0:
# it was a oneway request, do not write answer
self.message = b''
self.status = WAIT_LEN
else:
self.message = struct.pack('!i', len(message)) + message
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"""Return True if connection should be added to write list of select"""
return self.status == SEND_ANSWER
# it's not necessary, but...
@locked
def is_readable(self):
"""Return True if connection should be added to read list of select"""
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"""Returns True if connection is closed."""
return self.status == CLOSED
def fileno(self):
"""Returns the file descriptor of the associated socket."""
return self.socket.fileno()
def close(self):
"""Closes connection"""
self.status = CLOSED
self.socket.close()
class TNonblockingServer:
"""Non-blocking server."""
def __init__(self,
processor,
lsocket,
inputProtocolFactory=None,
outputProtocolFactory=None,
threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
self._stop = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
assert not self.prepared, "Can't change number of threads after start"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
if self.prepared:
return
self.socket.listen()
for _ in range(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
The server usually waits in a select call, which we need to interrupt.
The simplest way is using socketpair.
Select always wait to read from the first socket of socketpair.
In this case, we can just write anything to the second socket from
socketpair.
"""
self._write.send(b'1')
def stop(self):
"""Stop the server.
This method causes the serve() method to return. stop() may be invoked
from within your handler, or from another thread.
After stop() is called, serve() will return but the server will still
be listening on the socket. serve() may then be called again to resume
processing requests. Alternatively, close() may be called after
serve() returns to close the server socket and shutdown all worker
threads.
"""
self._stop = True
self.wake_up()
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
for i, connection in list(self.clients.items()):
if connection.is_readable():
readable.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
return select.select(readable, writable, readable)
def handle(self):
"""Handle requests.
WARNING! You must call prepare() BEFORE calling handle()
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset = self._select()
for readable in rset:
if readable == self._read.fileno():
# content doesn't matter; just clear the readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
client = self.socket.accept().handle
self.clients[client.fileno()] = Connection(client,
self.wake_up)
else:
connection = self.clients[readable]
connection.read()
if connection.status == WAIT_PROCESS:
itransport = TTransport.TMemoryBuffer(connection.message)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in range(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve requests.
Serve requests forever, or until stop() is called.
"""
self._stop = False
self.prepare()
while not self._stop:
self.handle()
| apache-2.0 |
GaretJax/i18n-utils | i18n_utils/styles.py | 1 | 3277 | from openpyxl.styles import Font, Style, Alignment, Border, Side, PatternFill
from openpyxl.styles import borders, fills
from openpyxl.styles import Protection
from .utils import memoize
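# Note: 'memoize' comes from .utils (implementation not shown here); it
# presumably caches one Style per distinct argument tuple, so e.g.
# row_header(True, False) is built once and reused for every odd, non-final row.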
cell_fills = [
PatternFill(
fill_type=fills.FILL_SOLID,
start_color='E6EFD7',
),
PatternFill(
fill_type=fills.FILL_SOLID,
start_color='FFFFFF',
),
]
font = Font(name='Tahoma')
alignment = Alignment(
vertical='center',
shrink_to_fit=True,
wrap_text=True,
indent=1,
)
border = Side(border_style=borders.BORDER_THIN, color='B8D08A')
header_border = line_height = 13
title_style = Style(
font=Font(name='Tahoma', size=20, bold=True)
)
@memoize
def column_header():
return Style(
font=Font(
name='Thaoma',
bold=True,
color='FFFFFF',
size=14,
),
border=Border(
left=Side(border_style=borders.BORDER_THIN, color='8BB048'),
right=Side(border_style=borders.BORDER_THIN, color='8BB048'),
top=Side(border_style=borders.BORDER_THIN, color='8BB048'),
bottom=Side(border_style=borders.BORDER_THIN, color='8BB048'),
),
alignment=Alignment(
vertical='center',
horizontal='center',
),
fill=PatternFill(
fill_type=fills.FILL_SOLID,
start_color='8BB048',
),
)
@memoize
def row_header(is_odd, is_last):
return Style(
font=font,
alignment=Alignment(
vertical='center',
horizontal='center' if is_last else 'left',
shrink_to_fit=True,
wrap_text=True,
indent=int(not is_last),
),
border=Border(
left=border,
right=(Side(border_style=borders.BORDER_DOUBLE, color='8BB048')
if is_last else border),
top=border,
bottom=border,
),
fill=cell_fills[int(bool(is_odd))],
protection=Protection(),
)
@memoize
def translation_cell(is_odd):
return Style(
font=font,
alignment=Alignment(
vertical='center',
shrink_to_fit=True,
wrap_text=True,
),
border=Border(
right=border,
top=border,
bottom=border,
),
fill=cell_fills[int(bool(is_odd))],
protection=Protection(locked=False),
)
@memoize
def translation_cb_cell(is_odd):
return Style(
font=font,
alignment=Alignment(
vertical='center',
horizontal='center',
),
border=Border(
left=border,
top=border,
bottom=border,
),
fill=cell_fills[int(bool(is_odd))],
protection=Protection(locked=False),
)
comment_cell = translation_cell
@memoize
def occurrences_cell(is_odd):
return Style(
font=Font(name='Tahoma', size=8),
alignment=Alignment(
vertical='top',
wrap_text=True,
),
border=Border(
right=border,
top=border,
bottom=border,
),
fill=cell_fills[int(bool(is_odd))],
protection=Protection(locked=False),
)
| mit |
google/retiming | models/lnr_model.py | 1 | 10862 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from third_party.models.base_model import BaseModel
from . import networks
import numpy as np
import torch.nn.functional as F
class LnrModel(BaseModel):
"""This class implements the layered neural rendering model for decomposing a video into layers."""
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.set_defaults(dataset_mode='layered_video')
parser.add_argument('--texture_res', type=int, default=16, help='texture resolution')
parser.add_argument('--texture_channels', type=int, default=16, help='# channels for neural texture')
parser.add_argument('--n_textures', type=int, default=25, help='# individual texture maps, 24 per person (1 per body part) + 1 for background')
if is_train:
parser.add_argument('--lambda_alpha_l1', type=float, default=0.01, help='alpha L1 sparsity loss weight')
parser.add_argument('--lambda_alpha_l0', type=float, default=0.005, help='alpha L0 sparsity loss weight')
parser.add_argument('--alpha_l1_rolloff_epoch', type=int, default=200, help='turn off L1 alpha sparsity loss weight after this epoch')
parser.add_argument('--lambda_mask', type=float, default=50, help='layer matting loss weight')
parser.add_argument('--mask_thresh', type=float, default=0.02, help='turn off masking loss when error falls below this value')
parser.add_argument('--mask_loss_rolloff_epoch', type=int, default=-1, help='decrease masking loss after this epoch; if <0, use mask_thresh instead')
parser.add_argument('--n_epochs_upsample', type=int, default=500,
help='number of epochs to train the upsampling module')
parser.add_argument('--batch_size_upsample', type=int, default=16, help='batch size for upsampling')
parser.add_argument('--jitter_rgb', type=float, default=0.2, help='amount of jitter to add to RGB')
parser.add_argument('--jitter_epochs', type=int, default=400, help='number of epochs to jitter RGB')
parser.add_argument('--do_upsampling', action='store_true', help='whether to use upsampling module')
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
"""
BaseModel.__init__(self, opt)
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['target_image', 'reconstruction', 'rgba_vis', 'alpha_vis', 'input_vis']
self.model_names = ['LNR']
self.netLNR = networks.define_LNR(opt.num_filters, opt.texture_channels, opt.texture_res, opt.n_textures, gpu_ids=self.gpu_ids)
self.do_upsampling = opt.do_upsampling
if self.isTrain:
self.setup_train(opt)
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def setup_train(self, opt):
"""Setup the model for training mode."""
print('setting up model')
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
self.loss_names = ['total', 'recon', 'alpha_reg', 'mask']
self.visual_names = ['target_image', 'reconstruction', 'rgba_vis', 'alpha_vis', 'input_vis']
self.do_upsampling = opt.do_upsampling
if not self.do_upsampling:
self.visual_names += ['mask_vis']
self.criterionLoss = torch.nn.L1Loss()
self.criterionLossMask = networks.MaskLoss().to(self.device)
self.lambda_mask = opt.lambda_mask
self.lambda_alpha_l0 = opt.lambda_alpha_l0
self.lambda_alpha_l1 = opt.lambda_alpha_l1
self.mask_loss_rolloff_epoch = opt.mask_loss_rolloff_epoch
self.jitter_rgb = opt.jitter_rgb
self.optimizer = torch.optim.Adam(self.netLNR.parameters(), lr=opt.lr)
self.optimizers = [self.optimizer]
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
self.target_image = input['image'].to(self.device)
if self.isTrain and self.jitter_rgb > 0:
# add brightness jitter to rgb
self.target_image += self.jitter_rgb * torch.randn(self.target_image.shape[0], 1, 1, 1).to(self.device)
self.target_image = torch.clamp(self.target_image, -1, 1)
self.input_uv = input['uv_map'].to(self.device)
self.input_id = input['pids'].to(self.device)
self.mask = input['mask'].to(self.device)
self.image_paths = input['image_path']
def gen_crop_params(self, orig_h, orig_w, crop_size=256):
"""Generate random square cropping parameters."""
starty = np.random.randint(orig_h - crop_size + 1)
startx = np.random.randint(orig_w - crop_size + 1)
endy = starty + crop_size
endx = startx + crop_size
return starty, endy, startx, endx
def forward(self):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
if self.do_upsampling:
input_uv_up = F.interpolate(self.input_uv, scale_factor=2, mode='bilinear')
crop_params = None
if self.isTrain:
# Take random crop to decrease memory requirement.
crop_params = self.gen_crop_params(*input_uv_up.shape[-2:])
starty, endy, startx, endx = crop_params
self.target_image = self.target_image[:, :, starty:endy, startx:endx]
outputs = self.netLNR.forward(self.input_uv, self.input_id, uv_map_upsampled=input_uv_up, crop_params=crop_params)
else:
outputs = self.netLNR(self.input_uv, self.input_id)
self.reconstruction = outputs['reconstruction'][:, :3]
self.alpha_composite = outputs['reconstruction'][:, 3]
self.output_rgba = outputs['layers']
n_layers = outputs['layers'].shape[1]
layers = outputs['layers'].clone()
layers[:, 0, -1] = 1 # Background layer's alpha is always 1
layers = torch.cat([layers[:, l] for l in range(n_layers)], -2)
self.alpha_vis = layers[:, 3:4]
self.rgba_vis = layers
self.mask_vis = torch.cat([self.mask[:, l:l+1] for l in range(n_layers)], -2)
self.input_vis = torch.cat([self.input_uv[:, 2*l:2*l+2] for l in range(n_layers)], -2)
self.input_vis = torch.cat([torch.zeros_like(self.input_vis[:, :1]), self.input_vis], 1)
def backward(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
self.loss_recon = self.criterionLoss(self.reconstruction[:, :3], self.target_image)
self.loss_total = self.loss_recon
if not self.do_upsampling:
self.loss_alpha_reg = networks.cal_alpha_reg(self.alpha_composite * .5 + .5, self.lambda_alpha_l1, self.lambda_alpha_l0)
alpha_layers = self.output_rgba[:, :, 3]
self.loss_mask = self.lambda_mask * self.criterionLossMask(alpha_layers, self.mask)
self.loss_total += self.loss_alpha_reg + self.loss_mask
else:
self.loss_mask = 0.
self.loss_alpha_reg = 0.  # name must match 'alpha_reg' in self.loss_names
self.loss_total.backward()
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward()
self.optimizer.zero_grad()
self.backward()
self.optimizer.step()
def update_lambdas(self, epoch):
"""Update loss weights based on current epochs and losses."""
if epoch == self.opt.alpha_l1_rolloff_epoch:
self.lambda_alpha_l1 = 0
if self.mask_loss_rolloff_epoch >= 0:
if epoch == 2*self.mask_loss_rolloff_epoch:
self.lambda_mask = 0
elif epoch > self.opt.epoch_count:
if self.loss_mask < self.opt.mask_thresh * self.opt.lambda_mask:
self.mask_loss_rolloff_epoch = epoch
self.lambda_mask *= .1
if epoch == self.opt.jitter_epochs:
self.jitter_rgb = 0
def transfer_detail(self):
"""Transfer detail to layers."""
residual = self.target_image - self.reconstruction
transmission_comp = torch.zeros_like(self.target_image[:, 0:1])
rgba_detail = self.output_rgba
n_layers = self.output_rgba.shape[1]
for i in range(n_layers - 1, 0, -1): # Don't do detail transfer for background layer, due to ghosting effects.
transmission_i = 1. - transmission_comp
rgba_detail[:, i, :3] += transmission_i * residual
alpha_i = self.output_rgba[:, i, 3:4] * .5 + .5
transmission_comp = alpha_i + (1. - alpha_i) * transmission_comp
self.rgba = torch.clamp(rgba_detail, -1, 1)
def get_results(self):
"""Return results. This is different from get_current_visuals, which gets visuals for monitoring training.
Returns a dictionary:
original - - original frame
recon - - reconstruction
rgba_l* - - RGBA for each layer
mask_l* - - mask for each layer
"""
self.transfer_detail()
# Split layers
results = {'reconstruction': self.reconstruction, 'original': self.target_image}
n_layers = self.rgba.shape[1]
for i in range(n_layers):
results[f'mask_l{i}'] = self.mask[:, i:i+1]
results[f'rgba_l{i}'] = self.rgba[:, i]
if i == 0:
results[f'rgba_l{i}'][:, -1:] = 1.
return results
def freeze_basenet(self):
"""Freeze all parameters except for the upsampling module."""
net = self.netLNR
if isinstance(net, torch.nn.DataParallel):
net = net.module
self.set_requires_grad([net.encoder, net.decoder, net.final_rgba], False)
net.texture.requires_grad = False | apache-2.0 |
FreescaleSemiconductor/quantum | quantum/tests/unit/test_auth.py | 8 | 3998 | import unittest
import webob
from quantum import auth
class QuantumKeystoneContextTestCase(unittest.TestCase):
def setUp(self):
super(QuantumKeystoneContextTestCase, self).setUp()
@webob.dec.wsgify
def fake_app(req):
self.context = req.environ['quantum.context']
return webob.Response()
self.context = None
self.middleware = auth.QuantumKeystoneContext(fake_app)
self.request = webob.Request.blank('/')
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
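# Each test below sets headers on this blank request, pushes it through
# the middleware, and inspects the 'quantum.context' that fake_app
# captured.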
def test_no_user_no_user_id(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '401 Unauthorized')
def test_with_user_id(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
def test_with_user(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_USER'] = 'testuser'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuser')
def test_user_id_trumps_user(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_USER'] = 'testuser'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
def test_with_tenant_id(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'test_user_id'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.tenant_id, 'testtenantid')
def test_with_tenant(self):
self.request.headers['X_TENANT'] = 'testtenant'
self.request.headers['X_USER_ID'] = 'test_user_id'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.tenant_id, 'testtenant')
def test_tenant_id_trumps_tenant(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_TENANT'] = 'testtenant'
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.tenant_id, 'testtenantid')
def test_roles_no_admin(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_ROLE'] = 'role1, role2 , role3,role4,role5'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.roles, ['role1', 'role2', 'role3',
'role4', 'role5'])
self.assertEqual(self.context.is_admin, False)
def test_roles_with_admin(self):
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_ROLE'] = ('role1, role2 , role3,role4,role5,'
'AdMiN')
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.roles, ['role1', 'role2', 'role3',
'role4', 'role5', 'AdMiN'])
self.assertEqual(self.context.is_admin, True)
| apache-2.0 |
soonhokong/dReal-osx | benchmarks/network/battery/battery-double-i.py | 10 | 5340 |
from gen import *
##########
# shared #
##########
flow_var[0] = """
(declare-fun tau () Real)
(declare-fun d1 () Real)
(declare-fun g1 () Real)
(declare-fun d2 () Real)
(declare-fun g2 () Real)
"""
flow_dec[0] = """
(define-ode flow_1 ((= d/dt[d1] (- (/ 0.5 0.166) (* 0.122 d1))) (= d/dt[g1] -0.5) (= d/dt[d2] (- (/ 0.5 0.166) (* 0.122 d2))) (= d/dt[g2] -0.5) (= d/dt[tau] 1)))
(define-ode flow_2 ((= d/dt[d1] (- (/ 1 0.166) (* 0.122 d1))) (= d/dt[g1] -1) (= d/dt[d2] (- 0 (* 0.122 d2))) (= d/dt[g2] 0) (= d/dt[tau] 1)))
(define-ode flow_3 ((= d/dt[d1] (- 0 (* 0.122 d1))) (= d/dt[g1] 0) (= d/dt[d2] (- (/ 1 0.166) (* 0.122 d2))) (= d/dt[g2] -1) (= d/dt[tau] 1)))
(define-ode flow_4 ((= d/dt[d1] 0) (= d/dt[g1] 0) (= d/dt[d2] (- (/ 1 0.166) (* 0.122 d2))) (= d/dt[g2] -1) (= d/dt[tau] 1)))
(define-ode flow_5 ((= d/dt[d1] (- (/ 1 0.166) (* 0.122 d1))) (= d/dt[g1] -1) (= d/dt[d2] 0) (= d/dt[g2] 0) (= d/dt[tau] 1)))
(define-ode flow_6 ((= d/dt[d1] 0) (= d/dt[g1] 0) (= d/dt[d2] 0) (= d/dt[g2] 0) (= d/dt[tau] 1)))
"""
state_dec[0] = """
(declare-fun time_{0} () Real)
(declare-fun tau_{0}_0 () Real)
(declare-fun tau_{0}_t () Real)
(declare-fun mode_1_{0} () Int)
(declare-fun d1_{0}_0 () Real)
(declare-fun d1_{0}_t () Real)
(declare-fun g1_{0}_0 () Real)
(declare-fun g1_{0}_t () Real)
(declare-fun mode_2_{0} () Int)
(declare-fun d2_{0}_0 () Real)
(declare-fun d2_{0}_t () Real)
(declare-fun g2_{0}_0 () Real)
(declare-fun g2_{0}_t () Real)
"""
state_val[0] = """
(assert (<= 0 time_{0})) (assert (<= time_{0} 20))
(assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 50))
(assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 50))
(assert (<= -10 d1_{0}_0)) (assert (<= d1_{0}_0 10))
(assert (<= -10 d1_{0}_t)) (assert (<= d1_{0}_t 10))
(assert (<= -10 g1_{0}_0)) (assert (<= g1_{0}_0 10))
(assert (<= -10 g1_{0}_t)) (assert (<= g1_{0}_t 10))
(assert (<= -10 d2_{0}_0)) (assert (<= d2_{0}_0 10))
(assert (<= -10 d2_{0}_t)) (assert (<= d2_{0}_t 10))
(assert (<= -10 g2_{0}_0)) (assert (<= g2_{0}_0 10))
(assert (<= -10 g2_{0}_t)) (assert (<= g2_{0}_t 10))
(assert (and (not (and (= mode_1_{0} 1) (= mode_1_{0} 2))) (not (and (= mode_1_{0} 1) (= mode_1_{0} 3))) (not (and (= mode_1_{0} 1) (= mode_1_{0} 4)))
(not (and (= mode_1_{0} 2) (= mode_1_{0} 3))) (not (and (= mode_1_{0} 2) (= mode_1_{0} 4)))
(not (and (= mode_1_{0} 3) (= mode_1_{0} 4)))))
(assert (and (not (and (= mode_2_{0} 1) (= mode_2_{0} 2))) (not (and (= mode_2_{0} 1) (= mode_2_{0} 3))) (not (and (= mode_2_{0} 1) (= mode_2_{0} 4)))
(not (and (= mode_2_{0} 2) (= mode_2_{0} 3))) (not (and (= mode_2_{0} 2) (= mode_2_{0} 4)))
(not (and (= mode_2_{0} 3) (= mode_2_{0} 4)))))
(assert (or
(and (= mode_1_{0} 4) (= mode_2_{0} 4))
(and (= mode_1_{0} 3) (= mode_2_{0} 2))
(and (= mode_1_{0} 3) (= mode_2_{0} 1))
(and (= mode_1_{0} 2) (= mode_2_{0} 3))
(and (= mode_1_{0} 1) (= mode_2_{0} 3))
(and (= mode_1_{0} 1) (= mode_2_{0} 1))))
"""
cont_cond[0] = ["""
(assert (or
(and (= mode_1_{0} 4) (= mode_2_{0} 4)
(= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(integral 0. time_{0} [d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0] flow_1)))
(and (= mode_1_{0} 3) (= mode_2_{0} 2)
(= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(integral 0. time_{0} [d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0] flow_2)))
(and (= mode_1_{0} 2) (= mode_2_{0} 3)
(= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(integral 0. time_{0} [d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0] flow_3)))
(and (= mode_1_{0} 1) (= mode_2_{0} 3)
(= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(integral 0. time_{0} [d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0] flow_4)))
(and (= mode_1_{0} 3) (= mode_2_{0} 1)
(= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(integral 0. time_{0} [d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0] flow_5)))
(and (= mode_1_{0} 1) (= mode_2_{0} 1)
(= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(integral 0. time_{0} [d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0] flow_6)))))
"""]
jump_cond[0] = ["""
(assert (= tau_{1}_0 tau_{0}_t))
(assert (and (= d1_{1}_0 d1_{0}_t) (= g1_{1}_0 g1_{0}_t)))
(assert (or (and (<= g1_0_t (* (- 1 0.166) d1_0_t))
(= mode_1_{1} 1))
(and (> g1_0_t (* (- 1 0.166) d1_0_t))
(not (and (= mode_1_{0} 1)))
(not (and (= mode_1_{1} 1))))))
(assert (and (= d2_{1}_0 d2_{0}_t) (= g2_{1}_0 g2_{0}_t)))
(assert (or (and (<= g2_0_t (* (- 1 0.166) d2_0_t))
(= mode_2_{1} 1))
(and (> g2_0_t (* (- 1 0.166) d2_0_t))
(not (and (= mode_2_{0} 1)))
(not (and (= mode_2_{1} 1))))))"""]
#############
# Init/Goal #
#############
init_cond = """
(assert (= tau_{0}_0 0))
(assert (and (= mode_1_{0} 4)))
(assert (and (= g1_{0}_0 8.5) (= d1_{0}_0 0)))
(assert (and (= mode_2_{0} 4)))
(assert (and (= g2_{0}_0 7.5) (= d2_{0}_0 0)))
"""
goal_cond = """
(assert (and (>= tau_{0}_t 10)
(not (and (= mode_1_{0} 1)
(= mode_2_{0} 1)))))
"""
import sys
try:
bound = int(sys.argv[1])
except:
print("Usage:", sys.argv[0], "<Bound>")
else:
generate(bound, 1, [0], 3, init_cond, goal_cond)
| gpl-2.0 |
aifil/odoo | addons/account/tests/account_test_users.py | 46 | 1657 | from openerp.addons.account.tests.account_test_classes import AccountingTestCase
class AccountTestUsers(AccountingTestCase):
"""Tests for diffrent type of user 'Accountant/Adviser' and added groups"""
def setUp(self):
super(AccountTestUsers, self).setUp()
self.res_user_model = self.env['res.users']
self.main_company = self.env.ref('base.main_company')
self.main_partner = self.env.ref('base.main_partner')
self.main_bank = self.env.ref('base.res_bank_1')
res_users_account_user = self.env.ref('account.group_account_user')
res_users_account_manager = self.env.ref('account.group_account_manager')
partner_manager = self.env.ref('base.group_partner_manager')
self.tax_model = self.env['account.tax']
self.account_model = self.env['account.account']
self.account_type_model = self.env['account.account.type']
self.currency_euro = self.env.ref('base.EUR')
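# Two users with distinct accounting rights are created below: an
# "Accountant" in the account-user group and an "Adviser" in the
# account-manager group, both with partner-manager access.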
self.account_user = self.res_user_model.with_context({'no_reset_password': True}).create(dict(
name="Accountant",
company_id=self.main_company.id,
login="acc",
email="accountuser@yourcompany.com",
groups_id=[(6, 0, [res_users_account_user.id, partner_manager.id])]
))
self.account_manager = self.res_user_model.with_context({'no_reset_password': True}).create(dict(
name="Adviser",
company_id=self.main_company.id,
login="fm",
email="accountmanager@yourcompany.com",
groups_id=[(6, 0, [res_users_account_manager.id, partner_manager.id])]
))
| gpl-3.0 |
birryree/servo | tests/wpt/css-tests/tools/html5lib/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
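# datrie needs the full alphabet at construction time, so every
# character used by the keys is collected before building the trie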
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
| mpl-2.0 |
crodriguezgarci/ptavi-p3 | smallsmilhandler.py | 1 | 1257 | #!/usr/bin/python
#-*- coding: UTF-8 -*-
# Carlos Rodriguez Garcia - PTAVI Practice 3: Karaoke.
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import sys
class SmallSMILHandler(ContentHandler):
def __init__(self):
self.elementos = []
self.tags = ['root-layout', 'region', 'img', 'audio', 'textstream']
self.atributos = {
'root-layout': ['width', 'height', 'background-color'],
'region': ['id', 'top', 'bottom', 'left', 'right'],
'img': ['src', 'region', 'begin', 'dur'],
'audio': ['src', 'begin', 'dur'],
'textstream': ['src', 'region']
}
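# startElement is invoked by the SAX parser for each opening tag; only
# the SMIL tags listed above are recorded, each with its expected
# attributes (missing attributes default to "")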
def startElement(self, name, attrs):
diccionario = {}
if name in self.tags:
diccionario['etiqueta'] = name
for atributo in self.atributos[name]:
diccionario[atributo] = attrs.get(atributo, "")
self.elementos.append(diccionario)
def get_tags(self):
return self.elementos
if __name__ == "__main__":
parser = make_parser()
SmallSMILHandler = SmallSMILHandler()
parser.setContentHandler(SmallSMILHandler)
parser.parse(open('karaoke.smil'))
tags = SmallSMILHandler.get_tags()
print tags
| gpl-2.0 |
bastiendonjon/sd-agent-plugins | MySQL/MySQL.py | 4 | 20555 | """
Server Density plugin
MySQL
https://www.serverdensity.com/plugins/mysql/
https://github.com/serverdensity/sd-agent-plugins/
version: 0.1
"""
import traceback
import re
try:
import MySQLdb
except ImportError:
pass
# com commands.
COMMANDS = [
'Com_select',
'Com_delete',
'Com_update',
'Com_commit',
'Questions',
'Com_rollback',
'Handler_commit',
'Handler_delete',
'Handler_update',
'Handler_write',
'Handler_rollback',
'Handler_read_first',
'Handler_read_rnd',
]
class MySQL(object):
def __init__(self, agent_config, checks_logger, raw_config):
self.agent_config = agent_config
self.checks_logger = checks_logger
self.raw_config = raw_config
self.connection = None
self.datastore = {}
def version_is_above_5(self, status):
# crude version gate: major >= 5 and patch component >= 2
# (the minor version is not consulted)
return (int(status['version'][0]) >= 5 and
        int(status['version'][2]) >= 2)
def get_db_results(self, db, query):
cursor = db.cursor()
try:
cursor.execute(query)
results = float(cursor.fetchone()[1])
except ValueError:
cursor.execute(query)
results = cursor.fetchone()[1]
return results
def run_query(self, db, query):
"""Run a query and returns a dictionary with results"""
try:
cursor = db.cursor()
cursor.execute(query)
metric = {}
for entry in cursor:
try:
metric[entry[0]] = float(entry[1])
except ValueError as e:
metric[entry[0]] = entry[1]
return metric
except MySQLdb.OperationalError as message:
self.checks_logger.debug(
'mysql: MySQL query error when getting metrics = {0}'.format(
message)
)
def calculate_per_s(self, command, result):
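# Turns a monotonically increasing MySQL counter into a per-second rate
# over the agent's 60-second check interval. The previous sample is kept
# in self.datastore; the first call (and any counter reset, which would
# produce a negative delta) reports 0.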
if (not self.datastore.get(command) and
self.datastore.get(command) != 0):
self.checks_logger.debug(
'mysql: Datastore unset for '
'{0}, storing for first time'.format(command))
self.datastore[command] = result
com_per_s = 0
else:
com_per_s = (result - self.datastore[command]) / 60
if com_per_s < 0:
com_per_s = 0
self.datastore[command] = result
return com_per_s
def preliminaries(self):
# bail out early unless the MySQLServer section and its credentials are present
if ('MySQLServer' not in self.raw_config or
        not self.raw_config['MySQLServer'].get('mysql_server') or
        not self.raw_config['MySQLServer'].get('mysql_user') or
        not self.raw_config['MySQLServer'].get('mysql_pass')):
self.checks_logger.debug('mysql: config not set')
return False
if not self.raw_config['MySQLServer'].get('mysql_port'):
self.raw_config['MySQLServer']['mysql_port'] = "3306"
self.checks_logger.debug('mysql: config set')
try:
import MySQLdb
except ImportError:
self.checks_logger.error('mysql: unable to import MySQLdb')
return False
# Note, code here doesn't really make sense. See what I copied.
if self.raw_config['MySQLServer'].get('mysql_server'):
# Connect
try:
MySQLdb.connect(
host=self.raw_config['MySQLServer']['mysql_server'],
user=self.raw_config['MySQLServer']['mysql_user'],
passwd=self.raw_config['MySQLServer']['mysql_pass'],
port=int(self.raw_config['MySQLServer']['mysql_port'])
)
except MySQLdb.OperationalError as message:
self.checks_logger.error(
"mysql: MySQL connection error: {0}".format(message))
return False
elif (self.raw_config['MySQLServer'].get('mysql_ssl_cert') and
self.raw_config['MySQLServer'].get('mysql_ssl_key')):
ssl = {
'cert': self.raw_config['MySQLServer']['mysql_ssl_cert'],
'key': self.raw_config['MySQLServer']['mysql_ssl_key']
}
try:
    MySQLdb.connect(
        host=self.raw_config['MySQLServer']['mysql_server'],
        user=self.raw_config['MySQLServer']['mysql_user'],
        passwd=self.raw_config['MySQLServer']['mysql_pass'],
        port=int(self.raw_config['MySQLServer']['mysql_port']),
        ssl=ssl
    )
except MySQLdb.OperationalError as message:
    self.checks_logger.error(
        'mysql: MySQL connection error: {0}'.format(message))
    return False
else:
# Connect
try:
MySQLdb.connect(
host='localhost',
user=self.raw_config['MySQLServer']['mysql_user'],
passwd=self.raw_config['MySQLServer']['mysql_pass'],
port=int(self.raw_config['MySQLServer']['mysql_port']))
except MySQLdb.OperationalError as message:
self.checks_logger.error(
'mysql: MySQL connection error: {0}'.format(message)
)
return False
return True
def get_connection(self):
try:
# connection
if (self.raw_config['MySQLServer'].get('mysql_ssl_cert') and
self.raw_config['MySQLServer'].get('mysql_ssl_key')):
self.checks_logger.debug('mysql: Trying to connect via SSL')
ssl = {
'cert': self.raw_config['MySQLServer']['mysql_ssl_cert'],
'key': self.raw_config['MySQLServer']['mysql_ssl_key']
}
db = MySQLdb.connect(
host=self.raw_config['MySQLServer']['mysql_server'],
user=self.raw_config['MySQLServer']['mysql_user'],
passwd=self.raw_config['MySQLServer']['mysql_pass'],
port=int(self.raw_config['MySQLServer']['mysql_port']),
ssl=ssl
)
self.connection = db
self.checks_logger.debug('mysql: Connected to DB via SSL')
else:
self.checks_logger.debug(
'mysql: Trying to connect via password')
db = MySQLdb.connect(
host=self.raw_config['MySQLServer']['mysql_server'],
user=self.raw_config['MySQLServer']['mysql_user'],
passwd=self.raw_config['MySQLServer']['mysql_pass'],
port=int(self.raw_config['MySQLServer']['mysql_port'])
)
self.connection = db
self.checks_logger.debug(
'mysql: Connected to DB with password')
# note, how do I take into account the socket?
except Exception:
self.checks_logger.error(
'Unable to connect to MySQL server {0}'
' - Exception: {1}'.format(
self.raw_config['MySQLServer']['mysql_server'],
traceback.format_exc())
)
return False
return True
def run(self):
self.checks_logger.debug('mysql: started gathering data')
if not self.preliminaries():
return False
if not self.get_connection():
return False
try:
db = self.connection
# setup
status = {}
# Get MySQL version
try:
self.checks_logger.debug('mysql: getting mysqlversion')
cursor = db.cursor()
cursor.execute('SELECT VERSION()')
result = cursor.fetchone()
version = result[0].split('-')
# Case 31237. Might include a description e.g. 4.1.26-log.
# See http://dev.mysql.com/doc/refman/4.1/en/
# information-functions.html#function_version
version = version[0].split('.')
status['version'] = []
for version_item in version:
number = re.match('([0-9]+)', version_item)
number = number.group(0)
status['version'].append(number)
except MySQLdb.OperationalError as message:
self.checks_logger.error(
'mysql: MySQL query error when getting version: {0}'.format(
    message)
)
return False
# get show status metrics
status_metrics = self.run_query(db, 'SHOW GLOBAL STATUS')
status_variables = self.run_query(db, 'SHOW VARIABLES')
# get Uptime
status['Uptime'] = status_metrics['Uptime']
self.checks_logger.debug('mysql: getting Uptime - done')
# Slow queries
# Determine query depending on version. For 5.02 and above we
# need the GLOBAL keyword (case 31015)
# note, update with slow queries store. making it per second?
# ask jordi about that.
status['Slow queries'] = status_metrics['Slow_queries']
self.checks_logger.debug('mysql: getting Slow_queries - done')
# Note, check for which version of mysql?
# try:
# if self.version_is_above_5(status):
# query = 'SHOW GLOBAL STATUS LIKE "Slow_queries"'
# else:
# query = 'SHOW STATUS LIKE "Slow_queries'
# QPS - Queries per second.
status['Queries per second'] = self.calculate_per_s(
'qps', status_metrics['Queries']
)
# Note check for which version of mysql
self.checks_logger.debug('mysql: getting QPS - done')
# Connection pool
status['threads connected'] = status_metrics['Threads_connected']
status['threads running'] = status_metrics['Threads_running']
status['max connections'] = status_variables['max_connections']
status['max used connections'] = status_metrics[
'Max_used_connections']
status['Connection usage %'] = (
(status['threads running'] /
status['max connections'])*100
)
self.checks_logger.debug('mysql: getting connections - done')
# Buffer pool
status['buffer pool pages total'] = status_metrics[
'Innodb_buffer_pool_pages_total']
status['buffer pool pages free'] = status_metrics[
'Innodb_buffer_pool_pages_free']
status['buffer pool pages dirty'] = status_metrics[
'Innodb_buffer_pool_pages_dirty']
status['buffer pool pages data'] = status_metrics[
'Innodb_buffer_pool_pages_data']
self.checks_logger.debug('mysql: getting buffer pool - done')
# Query cache items
status['qcache hits'] = status_metrics['Qcache_hits']
status['qcache hits/s'] = self.calculate_per_s(
'qcache_ps', status['qcache hits'])
status['qcache free memory'] = status_metrics['Qcache_free_memory']
status['qcache not cached'] = status_metrics['Qcache_not_cached']
status['qcache in cache'] = status_metrics[
'Qcache_queries_in_cache']
self.checks_logger.debug('mysql: getting Qcache data - done')
# writes, reads, transactions
writes = (status_metrics['Com_insert'] +
status_metrics['Com_replace'] +
status_metrics['Com_update'] +
status_metrics['Com_delete'])
status['Writes/s'] = self.calculate_per_s('writes', writes)
# reads
reads = status_metrics['Com_select'] + status['qcache hits']
status['Reads/s'] = self.calculate_per_s('reads', reads)
try:
status['RW ratio'] = reads/writes
except ZeroDivisionError:
status['RW ratio'] = 0
# transactions
transactions = (status_metrics['Com_commit'] +
status_metrics['Com_rollback'])
status['Transactions/s'] = self.calculate_per_s(
'transactions', transactions)
self.checks_logger.debug(
'mysql: getting transactions, reads and writes - done')
# Aborted connections and clients
status['aborted clients'] = status_metrics['Aborted_clients']
status['aborted connects'] = status_metrics['Aborted_connects']
self.checks_logger.debug(
'mysql: getting aborted connections - done')
# Replication - Seconds Behind Master
secondsBehindMaster = None
try:
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SHOW SLAVE STATUS')
result = cursor.fetchone()
except MySQLdb.OperationalError as message:
self.checks_logger.error(
'getMySQLStatus: MySQL query error when '
'getting SHOW SLAVE STATUS = %s', message)
result = None
if result is not None:
try:
# Handle the case when Seconds_Behind_Master is NULL
if result['Seconds_Behind_Master'] is None:
secondsBehindMaster = -1
else:
secondsBehindMaster = result['Seconds_Behind_Master']
self.checks_logger.debug(
'getMySQLStatus: '
'secondsBehindMaster = %s', secondsBehindMaster
)
except IndexError as e:
self.checks_logger.debug(
'getMySQLStatus: secondsBehindMaster empty. %s', e
)
else:
self.checks_logger.debug(
'getMySQLStatus: secondsBehindMaster empty. Result = None.'
)
# report replication lag when it could be read
if secondsBehindMaster is not None:
    status['seconds behind master'] = secondsBehindMaster
# Created temporary tables in memory and on disk
status['created tmp tables'] = status_metrics['Created_tmp_tables']
status['created tmp tables on disk'] = status_metrics[
'Created_tmp_disk_tables']
# Note check mysql version?
self.checks_logger.debug(
'mysql: getting temporary tables data - done')
# select_full_join
status['select full join'] = status_metrics['Select_full_join']
# note check for mysql version?
self.checks_logger.debug('mysql: getting select_full_join - done')
# slave_running
result = status_metrics['Slave_running']
if result == 'OFF':
result = 0
else:
result = 1
status['slave running'] = result
self.checks_logger.debug(
'mysql: getting slave_running - done')
# open files
status['open files'] = status_metrics['Open_files']
status['open files limit'] = status_variables['open_files_limit']
self.checks_logger.debug('mysql: getting open_files - done')
# table_locks_waited
status['table locks waited'] = status_metrics['Table_locks_waited']
self.checks_logger.debug(
'mysql: getting table_locks_waited - done')
# checkpoint age
# note this needs to be changed.
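# Checkpoint age here means the amount of redo log written since the
# last checkpoint: 'Log sequence number' minus 'Last checkpoint at',
# both parsed out of SHOW ENGINE INNODB STATUS.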
try:
cursor = db.cursor()
cursor.execute('SHOW ENGINE INNODB STATUS')
results = cursor.fetchone()[2]
log_loci = results.find('Log sequence number')
checkpoint_loci = results.find('Last checkpoint at')
log_nr = int(re.search(r'\d+', results[log_loci:]).group(0))
cp_nr = int(re.search(
r'\d+', results[checkpoint_loci:]).group(0))
cp_age = log_nr - cp_nr  # redo log bytes written since the last checkpoint
status['Checkpoint age'] = cp_age
except MySQLdb.OperationalError as message:
self.checks_logger.error(
'mysql: MySQL query error when '
'getting checkpoint age = {0}'.format(
message)
)
return False
self.checks_logger.debug(
'mysql: getting checkpoint age - done')
# note remove this.
try:
# Key cache hit ratio
# http://www.percona.com/blog/2010/02/28/why-you-should-ignore-mysqls-key-cache-hit-ratio/
key_read = self.get_db_results(
db, 'SHOW STATUS LIKE "Key_reads"')
key_requests = self.get_db_results(
db, 'SHOW STATUS LIKE "Key_read_requests"')
# status['Key cache hit ratio'] = (
# 100 - ((key_read * 100) / key_requests))
status['Key reads/s'] = self.calculate_per_s(
"Key_reads", key_read)
except MySQLdb.OperationalError as message:
self.checks_logger.error(
'mysql: MySQL query error when '
'getting key cache = {0}'.format(
message)
)
return False
self.checks_logger.debug(
'mysql: getting key cache hit ratio - done')
# com commands per second
com = self.raw_config['MySQLServer'].get('mysql_include_per_s')
if com:
user_com_ps = com
user_com_ps = user_com_ps.split(',')
user_com_ps = [command.strip() for command in user_com_ps]
user_com_ps = user_com_ps + COMMANDS
else:
user_com_ps = COMMANDS
for command in user_com_ps:
try:
com_per_s = self.calculate_per_s(
command, status_metrics[command])
status[command.replace('_', ' ')+'/s'] = com_per_s
except KeyError, e:
self.checks_logger.exception(e)
if self.raw_config['MySQLServer'].get('mysql_include'):
user_com = self.raw_config['MySQLServer']['mysql_include']
user_com = user_com.split(',')
user_com = [command.strip() for command in user_com]
user_com = user_com + COMMANDS
else:
user_com = COMMANDS
for command in user_com:
status[command.replace('_', ' ')] = status_metrics[
command]
self.checks_logger.debug(
'mysql: getting com_commands - done')
except Exception:
    self.checks_logger.error(
        'mysql: unable to get data from MySQL - '
        'Exception: {0}'.format(traceback.format_exc())
    )
    return False
self.checks_logger.debug('mysql: completed, returning')
return status
if __name__ == "__main__":
"""Standalone test"""
import logging
import sys
import json
import time
host = 'localhost'
port = '3306'
raw_agent_config = {
'MySQLServer': {
'mysql_server': host,
'mysql_port': port,
'mysql_user': 'jonathan',
'mysql_pass': 'password',
'mysql_include_per_s': 'Com_check, Com_checksum, Com_begin',
# 'mysql_ssl_cert': '/etc/mysql-ssl/client-cert.pem',
# 'mysql_ssl_key': '/etc/mysql-ssl/client-key.pem'
}
}
main_checks_logger = logging.getLogger('MySQLplugin')
main_checks_logger.setLevel(logging.DEBUG)
main_checks_logger.addHandler(logging.StreamHandler(sys.stdout))
mysql_check = MySQL({}, main_checks_logger, raw_agent_config)
while True:
try:
result = mysql_check.run()
print(json.dumps(result, indent=4, sort_keys=True))
except:
main_checks_logger.exception("Unhandled Exception")
finally:
time.sleep(60)
| bsd-3-clause |
dati91/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/logging/test_fixture.py | 30 | 3581 | # -*- coding: utf-8 -*-
import logging
import pytest
logger = logging.getLogger(__name__)
sublogger = logging.getLogger(__name__ + ".baz")
def test_fixture_help(testdir):
result = testdir.runpytest("--fixtures")
result.stdout.fnmatch_lines(["*caplog*"])
def test_change_level(caplog):
caplog.set_level(logging.INFO)
logger.debug("handler DEBUG level")
logger.info("handler INFO level")
caplog.set_level(logging.CRITICAL, logger=sublogger.name)
sublogger.warning("logger WARNING level")
sublogger.critical("logger CRITICAL level")
assert "DEBUG" not in caplog.text
assert "INFO" in caplog.text
assert "WARNING" not in caplog.text
assert "CRITICAL" in caplog.text
def test_change_level_undo(testdir):
"""Ensure that 'set_level' is undone after the end of the test"""
testdir.makepyfile(
"""
import logging
def test1(caplog):
caplog.set_level(logging.INFO)
# using + operator here so fnmatch_lines doesn't match the code in the traceback
logging.info('log from ' + 'test1')
assert 0
def test2(caplog):
# using + operator here so fnmatch_lines doesn't match the code in the traceback
logging.info('log from ' + 'test2')
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"])
assert "log from test2" not in result.stdout.str()
def test_with_statement(caplog):
with caplog.at_level(logging.INFO):
logger.debug("handler DEBUG level")
logger.info("handler INFO level")
with caplog.at_level(logging.CRITICAL, logger=sublogger.name):
sublogger.warning("logger WARNING level")
sublogger.critical("logger CRITICAL level")
assert "DEBUG" not in caplog.text
assert "INFO" in caplog.text
assert "WARNING" not in caplog.text
assert "CRITICAL" in caplog.text
def test_log_access(caplog):
caplog.set_level(logging.INFO)
logger.info("boo %s", "arg")
assert caplog.records[0].levelname == "INFO"
assert caplog.records[0].msg == "boo %s"
assert "boo arg" in caplog.text
def test_record_tuples(caplog):
caplog.set_level(logging.INFO)
logger.info("boo %s", "arg")
assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")]
def test_unicode(caplog):
caplog.set_level(logging.INFO)
logger.info(u"bū")
assert caplog.records[0].levelname == "INFO"
assert caplog.records[0].msg == u"bū"
assert u"bū" in caplog.text
def test_clear(caplog):
caplog.set_level(logging.INFO)
logger.info(u"bū")
assert len(caplog.records)
assert caplog.text
caplog.clear()
assert not len(caplog.records)
assert not caplog.text
@pytest.fixture
def logging_during_setup_and_teardown(caplog):
caplog.set_level("INFO")
logger.info("a_setup_log")
yield
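# everything after the yield runs at fixture teardown; pytest attributes
# those records to the "teardown" phase, which the assertion below reads
# back via caplog.get_records("teardown")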
logger.info("a_teardown_log")
assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"]
def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):
assert not caplog.records
assert not caplog.get_records("call")
logger.info("a_call_log")
assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
# This reaches into private API, don't use this type of thing in real tests!
assert set(caplog._item.catch_log_handlers.keys()) == {"setup", "call"}
| mpl-2.0 |
wavenator/tasker | tasker/tests/test_devices.py | 1 | 6306 | import unittest
import time
import os
import sys
import multiprocessing
import signal
from .. import devices
class DevicesTestCase(
unittest.TestCase,
):
@classmethod
def setUpClass(
cls,
):
pass
def setUp(
self,
):
self.sigint_fired = False
self.sigabrt_fired = False
signal.signal(signal.SIGABRT, self.sigabrt_handler)
signal.signal(signal.SIGINT, self.sigint_handler)
def sigabrt_handler(
self,
signal_num,
frame,
):
self.sigabrt_fired = True
def sigint_handler(
self,
signal_num,
frame,
):
self.sigint_fired = True
def test_timeouts_killer(
self,
):
killer = devices.killer.KillerClient.create_a_killer(
pid_to_kill=os.getpid(),
sleep_interval=0.05,
soft_timeout=1.0,
hard_timeout=3.0,
critical_timeout=5.0,
)
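# Expected escalation, inferred from the assertions in this file:
# SIGINT at the soft timeout, SIGABRT at the hard timeout, and a
# terminating signal (exit code -15, i.e. SIGTERM) at the critical
# timeout.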
killer.start()
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
time.sleep(1.2)
self.assertFalse(self.sigabrt_fired)
self.assertTrue(self.sigint_fired)
time.sleep(1.2)
self.assertFalse(self.sigabrt_fired)
self.assertTrue(self.sigint_fired)
time.sleep(1.2)
self.assertTrue(self.sigabrt_fired)
self.assertTrue(self.sigint_fired)
killer.stop()
self.sigint_fired = False
self.sigabrt_fired = False
time.sleep(1.2)
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
time.sleep(1.2)
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
killer.reset()
killer.start()
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
time.sleep(0.5)
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
killer.reset()
time.sleep(0.5)
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
killer.reset()
time.sleep(0.5)
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
killer.reset()
time.sleep(0.5)
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
killer.reset()
killer.stop()
self.assertFalse(self.sigabrt_fired)
self.assertFalse(self.sigint_fired)
def test_sleep_case_killer(
self,
):
test_process_obj = TestProcess()
testing_process = multiprocessing.Process(
target=test_process_obj.sleep,
kwargs={
'interval': 30,
},
)
testing_process.daemon = True
testing_process.start()
self.assertTrue(testing_process.is_alive())
killer = devices.killer.KillerClient.create_a_killer(
pid_to_kill=testing_process.pid,
sleep_interval=0.05,
soft_timeout=1.0,
hard_timeout=2.0,
critical_timeout=5.0,
)
killer.start()
self.assertTrue(testing_process.is_alive())
time.sleep(1.2)
self.assertFalse(testing_process.is_alive())
self.assertEqual(testing_process.exitcode, 20)
killer.stop()
def test_no_int_case_killer(
self,
):
test_process_obj = TestProcess()
testing_process = multiprocessing.Process(
target=test_process_obj.no_int_sleep,
kwargs={
'interval': 30,
},
)
testing_process.daemon = True
testing_process.start()
self.assertTrue(testing_process.is_alive())
killer = devices.killer.KillerClient.create_a_killer(
pid_to_kill=testing_process.pid,
sleep_interval=0.05,
soft_timeout=1.0,
hard_timeout=2.0,
critical_timeout=5.0,
)
killer.start()
self.assertTrue(testing_process.is_alive())
time.sleep(1.2)
self.assertTrue(testing_process.is_alive())
time.sleep(1.2)
self.assertFalse(testing_process.is_alive())
self.assertEqual(testing_process.exitcode, 10)
killer.stop()
def test_lost_case_killer(
self,
):
test_process_obj = TestProcess()
testing_process = multiprocessing.Process(
target=test_process_obj.lost,
kwargs={
'interval': 30,
},
)
testing_process.daemon = True
testing_process.start()
self.assertTrue(testing_process.is_alive())
killer = devices.killer.KillerClient.create_a_killer(
pid_to_kill=testing_process.pid,
sleep_interval=0.05,
soft_timeout=1.0,
hard_timeout=2.0,
critical_timeout=3.0,
)
killer.start()
self.assertTrue(testing_process.is_alive())
time.sleep(1.2)
self.assertTrue(testing_process.is_alive())
time.sleep(1.2)
self.assertTrue(testing_process.is_alive())
time.sleep(1.2)
self.assertFalse(testing_process.is_alive())
self.assertEqual(testing_process.exitcode, -15)
killer.stop()
class TestProcess:
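# init (not __init__) installs the signal handlers; it is called
# explicitly at the start of every target method and from __setstate__,
# so the handlers get set up inside the spawned child process.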
def init(
self,
):
signal.signal(signal.SIGABRT, self.sigabrt_handler)
signal.signal(signal.SIGINT, self.sigint_handler)
def sleep(
self,
interval,
):
self.init()
time.sleep(interval)
def no_int_sleep(
self,
interval,
):
self.init()
signal.signal(signal.SIGINT, lambda a, b: True)
time.sleep(interval)
def lost(
self,
interval,
):
self.init()
signal.signal(signal.SIGINT, lambda a, b: True)
signal.signal(signal.SIGABRT, lambda a, b: True)
time.sleep(interval)
def sigabrt_handler(
self,
signal_num,
frame,
):
sys.exit(10)
def sigint_handler(
self,
signal_num,
frame,
):
sys.exit(20)
def __setstate__(
self,
state,
):
self.init()
| apache-2.0 |
matthiascy/panda3d | direct/src/distributed/ConnectionRepository.py | 6 | 27983 | from pandac.PandaModules import *
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DoInterestManager import DoInterestManager
from direct.distributed.DoCollectionManager import DoCollectionManager
from direct.showbase import GarbageReport
from PyDatagram import PyDatagram
from PyDatagramIterator import PyDatagramIterator
import types
import imp
import gc
class ConnectionRepository(
DoInterestManager, DoCollectionManager, CConnectionRepository):
"""
This is a base class for things that know how to establish a
connection (and exchange datagrams) with a gameserver. This
includes ClientRepository and AIRepository.
"""
notify = DirectNotifyGlobal.directNotify.newCategory("ConnectionRepository")
taskPriority = -30
taskChain = None
CM_HTTP=0
CM_NET=1
CM_NATIVE=2
gcNotify = directNotify.newCategory("GarbageCollect")
GarbageCollectTaskName = "allowGarbageCollect"
GarbageThresholdTaskName = "adjustGarbageCollectThreshold"
def __init__(self, connectMethod, config, hasOwnerView = False,
threadedNet = None):
assert self.notify.debugCall()
if threadedNet is None:
# Default value.
threadedNet = config.GetBool('threaded-net', False)
# let the C connection repository know whether we're supporting
# 'owner' views of distributed objects (i.e. 'receives ownrecv',
# 'I own this object and have a separate view of it regardless of
# where it currently is located')
CConnectionRepository.__init__(self, hasOwnerView, threadedNet)
self.setWantMessageBundling(config.GetBool('want-message-bundling', 1))
# DoInterestManager.__init__ relies on CConnectionRepository being
# initialized
DoInterestManager.__init__(self)
DoCollectionManager.__init__(self)
self.setPythonRepository(self)
# Create a unique ID number for each ConnectionRepository in
# the world, helpful for sending messages specific to each one.
self.uniqueId = hash(self)
# Accept this hook so that we can respond to lost-connection
# events in the main thread, instead of within the network
# thread (if there is one).
self.accept(self._getLostConnectionEvent(), self.lostConnection)
self.config = config
if self.config.GetBool('verbose-repository'):
self.setVerbose(1)
# Set this to 'http' to establish a connection to the server
# using the HTTPClient interface, which ultimately uses the
# OpenSSL socket library (even though SSL is not involved).
# This is not as robust a socket library as NET's, but the
# HTTPClient interface does a good job of negotiating the
# connection over an HTTP proxy if one is in use.
#
# Set it to 'net' to use Panda's net interface
# (e.g. QueuedConnectionManager, etc.) to establish the
# connection. This is a higher-level layer build on top of
# the low-level "native net" library. There is no support for
# proxies. This is a good, general choice.
#
# Set it to 'native' to use Panda's low-level native net
# interface directly. This is much faster than either http or
# net for high-bandwidth (e.g. server) applications, but it
# doesn't support the simulated delay via the start_delay()
# call.
#
# Set it to 'default' to use an appropriate interface
# according to the type of ConnectionRepository we are
# creating.
userConnectMethod = self.config.GetString('connect-method', 'default')
if userConnectMethod == 'http':
connectMethod = self.CM_HTTP
elif userConnectMethod == 'net':
connectMethod = self.CM_NET
elif userConnectMethod == 'native':
connectMethod = self.CM_NATIVE
self.connectMethod = connectMethod
if self.connectMethod == self.CM_HTTP:
self.notify.info("Using connect method 'http'")
elif self.connectMethod == self.CM_NET:
self.notify.info("Using connect method 'net'")
elif self.connectMethod == self.CM_NATIVE:
self.notify.info("Using connect method 'native'")
self.connectHttp = None
self.http = None
# This DatagramIterator is constructed once, and then re-used
# each time we read a datagram.
self.private__di = PyDatagramIterator()
self.recorder = None
self.readerPollTaskObj = None
# This is the string that is appended to symbols read from the
# DC file. The AIRepository will redefine this to 'AI'.
self.dcSuffix = ''
self._serverAddress = ''
if self.config.GetBool('gc-save-all', 1):
# set gc to preserve every object involved in a cycle, even ones that
# would normally be freed automatically during garbage collect
# allows us to find and fix these cycles, reducing or eliminating the
# need to run garbage collects
# garbage collection CPU usage is O(n), n = number of Python objects
gc.set_debug(gc.DEBUG_SAVEALL)
if self.config.GetBool('want-garbage-collect-task', 1):
# manual garbage-collect task
taskMgr.add(self._garbageCollect, self.GarbageCollectTaskName, 200)
# periodically increase gc threshold if there is no garbage
taskMgr.doMethodLater(self.config.GetFloat('garbage-threshold-adjust-delay', 5 * 60.),
self._adjustGcThreshold, self.GarbageThresholdTaskName)
self._gcDefaultThreshold = gc.get_threshold()
def _getLostConnectionEvent(self):
return self.uniqueName('lostConnection')
def _garbageCollect(self, task=None):
# allow a collect
# enable automatic garbage collection
gc.enable()
# creating an object with gc enabled causes garbage collection to trigger if appropriate
gct = GCTrigger()
# disable the automatic garbage collect during the rest of the frame
gc.disable()
return Task.cont
def _adjustGcThreshold(self, task):
# do an unconditional collect to make sure gc.garbage has a chance to be
# populated before we start increasing the auto-collect threshold
# don't distribute the leak check from the client to the AI, they both
# do these garbage checks independently over time
numGarbage = GarbageReport.checkForGarbageLeaks()
if numGarbage == 0:
self.gcNotify.debug('no garbage found, doubling gc threshold')
a, b, c = gc.get_threshold()
gc.set_threshold(min(a * 2, 1 << 30), b, c)
task.delayTime = task.delayTime * 2
retVal = Task.again
else:
self.gcNotify.warning('garbage found, reverting gc threshold')
# the process is producing garbage, stick to the default collection threshold
gc.set_threshold(*self._gcDefaultThreshold)
retVal = Task.done
return retVal
def generateGlobalObject(self, doId, dcname, values=None):
def applyFieldValues(distObj, dclass, values):
for i in range(dclass.getNumInheritedFields()):
field = dclass.getInheritedField(i)
if field.asMolecularField() == None:
value = values.get(field.getName(), None)
if value is None and field.isRequired():
# Gee, this could be better. What would really be
# nicer is to get value from field.getDefaultValue
# or similar, but that returns a binary string, not
# a python tuple, like the following does. If you
# want to change something better, please go ahead.
packer = DCPacker()
packer.beginPack(field)
packer.packDefaultValue()
packer.endPack()
unpacker = DCPacker()
unpacker.setUnpackData(packer.getString())
unpacker.beginUnpack(field)
value = unpacker.unpackObject()
unpacker.endUnpack()
if value is not None:
function = getattr(distObj, field.getName())
if function is not None:
function(*value)
else:
self.notify.error("\n\n\nNot able to find %s.%s"%(
distObj.__class__.__name__, field.getName()))
# Look up the dclass
dclass = self.dclassesByName.get(dcname+self.dcSuffix)
if dclass is None:
#print "\n\n\nNeed to define", dcname+self.dcSuffix
self.notify.warning("Need to define %s" % (dcname+self.dcSuffix))
dclass = self.dclassesByName.get(dcname+'AI')
if dclass is None:
dclass = self.dclassesByName.get(dcname)
# Create a new distributed object, and put it in the dictionary
#distObj = self.generateWithRequiredFields(dclass, doId, di)
# Construct a new one
classDef = dclass.getClassDef()
if classDef == None:
self.notify.error("Could not create an undefined %s object."%(
dclass.getName()))
distObj = classDef(self)
distObj.dclass = dclass
# Assign it an Id
distObj.doId = doId
# Put the new do in the dictionary
self.doId2do[doId] = distObj
# Update the required fields
distObj.generateInit() # Only called when constructed
distObj.generate()
if values is not None:
applyFieldValues(distObj, dclass, values)
distObj.announceGenerate()
distObj.parentId = 0
distObj.zoneId = 0
# updateRequiredFields calls announceGenerate
return distObj
def readDCFile(self, dcFileNames = None):
"""
Reads in the dc files listed in dcFileNames, or if
dcFileNames is None, reads in all of the dc files listed in
the Config.prc file.
"""
dcFile = self.getDcFile()
dcFile.clear()
self.dclassesByName = {}
self.dclassesByNumber = {}
self.hashVal = 0
if isinstance(dcFileNames, types.StringTypes):
# If we were given a single string, make it a list.
dcFileNames = [dcFileNames]
dcImports = {}
if dcFileNames == None:
readResult = dcFile.readAll()
if not readResult:
self.notify.error("Could not read dc file.")
else:
searchPath = getModelPath().getValue()
for dcFileName in dcFileNames:
pathname = Filename(dcFileName)
vfs.resolveFilename(pathname, searchPath)
readResult = dcFile.read(pathname)
if not readResult:
self.notify.error("Could not read dc file: %s" % (pathname))
#if not dcFile.allObjectsValid():
# names = []
# for i in range(dcFile.getNumTypedefs()):
# td = dcFile.getTypedef(i)
# if td.isBogusTypedef():
# names.append(td.getName())
# nameList = ', '.join(names)
# self.notify.error("Undefined types in DC file: " + nameList)
self.hashVal = dcFile.getHash()
# Now import all of the modules required by the DC file.
for n in range(dcFile.getNumImportModules()):
moduleName = dcFile.getImportModule(n)[:]
# Maybe the module name is represented as "moduleName/AI".
suffix = moduleName.split('/')
moduleName = suffix[0]
suffix=suffix[1:]
if self.dcSuffix in suffix:
moduleName += self.dcSuffix
elif self.dcSuffix == 'UD' and 'AI' in suffix: #HACK:
moduleName += 'AI'
importSymbols = []
for i in range(dcFile.getNumImportSymbols(n)):
symbolName = dcFile.getImportSymbol(n, i)
# Maybe the symbol name is represented as "symbolName/AI".
suffix = symbolName.split('/')
symbolName = suffix[0]
suffix=suffix[1:]
if self.dcSuffix in suffix:
symbolName += self.dcSuffix
elif self.dcSuffix == 'UD' and 'AI' in suffix: #HACK:
symbolName += 'AI'
importSymbols.append(symbolName)
self.importModule(dcImports, moduleName, importSymbols)
# Now get the class definition for the classes named in the DC
# file.
for i in range(dcFile.getNumClasses()):
dclass = dcFile.getClass(i)
number = dclass.getNumber()
className = dclass.getName() + self.dcSuffix
# Does the class have a definition defined in the newly
# imported namespace?
classDef = dcImports.get(className)
if classDef is None and self.dcSuffix == 'UD': #HACK:
className = dclass.getName() + 'AI'
classDef = dcImports.get(className)
# Also try it without the dcSuffix.
if classDef == None:
className = dclass.getName()
classDef = dcImports.get(className)
if classDef is None:
self.notify.debug("No class definition for %s." % (className))
else:
if type(classDef) == types.ModuleType:
if not hasattr(classDef, className):
self.notify.warning("Module %s does not define class %s." % (className, className))
continue
classDef = getattr(classDef, className)
if type(classDef) != types.ClassType and type(classDef) != types.TypeType:
self.notify.error("Symbol %s is not a class name." % (className))
else:
dclass.setClassDef(classDef)
self.dclassesByName[className] = dclass
if number >= 0:
self.dclassesByNumber[number] = dclass
# Owner Views
if self.hasOwnerView():
ownerDcSuffix = self.dcSuffix + 'OV'
# dict of class names (without 'OV') that have owner views
ownerImportSymbols = {}
# Now import all of the modules required by the DC file.
for n in range(dcFile.getNumImportModules()):
moduleName = dcFile.getImportModule(n)
# Maybe the module name is represented as "moduleName/AI".
suffix = moduleName.split('/')
moduleName = suffix[0]
suffix=suffix[1:]
if ownerDcSuffix in suffix:
moduleName = moduleName + ownerDcSuffix
importSymbols = []
for i in range(dcFile.getNumImportSymbols(n)):
symbolName = dcFile.getImportSymbol(n, i)
# Check for the OV suffix
suffix = symbolName.split('/')
symbolName = suffix[0]
suffix=suffix[1:]
if ownerDcSuffix in suffix:
symbolName += ownerDcSuffix
importSymbols.append(symbolName)
ownerImportSymbols[symbolName] = None
self.importModule(dcImports, moduleName, importSymbols)
# Now get the class definition for the owner classes named
# in the DC file.
for i in range(dcFile.getNumClasses()):
dclass = dcFile.getClass(i)
if ((dclass.getName()+ownerDcSuffix) in ownerImportSymbols):
number = dclass.getNumber()
className = dclass.getName() + ownerDcSuffix
# Does the class have a definition defined in the newly
# imported namespace?
classDef = dcImports.get(className)
if classDef is None:
self.notify.error("No class definition for %s." % className)
else:
if type(classDef) == types.ModuleType:
if not hasattr(classDef, className):
self.notify.error("Module %s does not define class %s." % (className, className))
classDef = getattr(classDef, className)
dclass.setOwnerClassDef(classDef)
self.dclassesByName[className] = dclass
def importModule(self, dcImports, moduleName, importSymbols):
"""
Imports the indicated moduleName and all of its symbols
into the current namespace. This more-or-less reimplements
the Python import command.
"""
module = __import__(moduleName, globals(), locals(), importSymbols)
if importSymbols:
# "from moduleName import symbolName, symbolName, ..."
# Copy just the named symbols into the dictionary.
if importSymbols == ['*']:
# "from moduleName import *"
if hasattr(module, "__all__"):
importSymbols = module.__all__
else:
importSymbols = module.__dict__.keys()
for symbolName in importSymbols:
if hasattr(module, symbolName):
dcImports[symbolName] = getattr(module, symbolName)
else:
raise StandardError, 'Symbol %s not defined in module %s.' % (symbolName, moduleName)
else:
# "import moduleName"
# Copy the root module name into the dictionary.
# Follow the dotted chain down to the actual module.
components = moduleName.split('.')
dcImports[components[0]] = module
def getServerAddress(self):
return self._serverAddress
def connect(self, serverList,
successCallback = None, successArgs = [],
failureCallback = None, failureArgs = []):
"""
Attempts to establish a connection to the server. May return
before the connection is established. The two callbacks
represent the two functions to call (and their arguments) on
success or failure, respectively. The failure callback also
gets one additional parameter, which will be passed in first:
the return status code giving reason for failure, if it is
known.
"""
## if self.recorder and self.recorder.isPlaying():
## # If we have a recorder and it's already in playback mode,
## # don't actually attempt to connect to a gameserver since
## # we don't need to. Just let it play back the data.
## self.notify.info("Not connecting to gameserver; using playback data instead.")
## self.connectHttp = 1
## self.tcpConn = SocketStreamRecorder()
## self.recorder.addRecorder('gameserver', self.tcpConn)
## self.startReaderPollTask()
## if successCallback:
## successCallback(*successArgs)
## return
hasProxy = 0
if self.checkHttp():
proxies = self.http.getProxiesForUrl(serverList[0])
hasProxy = (proxies != 'DIRECT')
if hasProxy:
self.notify.info("Connecting to gameserver via proxy list: %s" % (proxies))
else:
self.notify.info("Connecting to gameserver directly (no proxy).")
#Redefine the connection to http or net in the default case
self.bootedIndex = None
self.bootedText = None
if self.connectMethod == self.CM_HTTP:
# In the HTTP case, we can't just iterate through the list
# of servers, because each server attempt requires
# spawning a request and then coming back later to check
# the success or failure. Instead, we start the ball
# rolling by calling the connect callback, which will call
# itself repeatedly until we establish a connection (or
# run out of servers).
ch = self.http.makeChannel(0)
self.httpConnectCallback(
ch, serverList, 0,
successCallback, successArgs,
failureCallback, failureArgs)
elif self.connectMethod == self.CM_NET or (not hasattr(self,"connectNative")):
# Try each of the servers in turn.
for url in serverList:
self.notify.info("Connecting to %s via NET interface." % (url))
if self.tryConnectNet(url):
self.startReaderPollTask()
if successCallback:
successCallback(*successArgs)
return
# Failed to connect.
if failureCallback:
failureCallback(0, '', *failureArgs)
elif self.connectMethod == self.CM_NATIVE:
for url in serverList:
self.notify.info("Connecting to %s via Native interface." % (url))
if self.connectNative(url):
self.startReaderPollTask()
if successCallback:
successCallback(*successArgs)
return
# Failed to connect.
if failureCallback:
failureCallback(0, '', *failureArgs)
else:
print "uh oh, we aren't using one of the tri-state CM variables"
failureCallback(0, '', *failureArgs)
def disconnect(self):
"""
Closes the previously-established connection.
"""
self.notify.info("Closing connection to server.")
self._serverAddress = ''
CConnectionRepository.disconnect(self)
self.stopReaderPollTask()
def shutdown(self):
self.ignoreAll()
CConnectionRepository.shutdown(self)
def httpConnectCallback(self, ch, serverList, serverIndex,
successCallback, successArgs,
failureCallback, failureArgs):
if ch.isConnectionReady():
self.setConnectionHttp(ch)
self._serverAddress = serverList[serverIndex-1]
self.notify.info("Successfully connected to %s." % (self._serverAddress))
## if self.recorder:
## # If we have a recorder, we wrap the connect inside a
## # SocketStreamRecorder, which will trap incoming data
## # when the recorder is set to record mode. (It will
## # also play back data when the recorder is in playback
## # mode, but in that case we never get this far in the
## # code, since we just create an empty
## # SocketStreamRecorder without actually connecting to
## # the gameserver.)
## stream = SocketStreamRecorder(self.tcpConn, 1)
## self.recorder.addRecorder('gameserver', stream)
## # In this case, we pass ownership of the original
## # connection to the SocketStreamRecorder object.
## self.tcpConn.userManagesMemory = 0
## self.tcpConn = stream
self.startReaderPollTask()
if successCallback:
successCallback(*successArgs)
elif serverIndex < len(serverList):
# No connection yet, but keep trying.
url = serverList[serverIndex]
self.notify.info("Connecting to %s via HTTP interface." % (url))
ch.preserveStatus()
ch.beginConnectTo(DocumentSpec(url))
ch.spawnTask(name = 'connect-to-server',
callback = self.httpConnectCallback,
extraArgs = [ch, serverList, serverIndex + 1,
successCallback, successArgs,
failureCallback, failureArgs])
else:
# No more servers to try; we have to give up now.
if failureCallback:
failureCallback(ch.getStatusCode(), ch.getStatusString(),
*failureArgs)
def checkHttp(self):
# Creates an HTTPClient, if possible, if we don't have one
# already. This might fail if the OpenSSL library isn't
# available. Returns the HTTPClient (also self.http), or None
# if not set.
        if self.http is None:
try:
self.http = HTTPClient()
            except Exception:
                # OpenSSL support may be missing; leave self.http as None.
                pass
return self.http
def startReaderPollTask(self):
# Stop any tasks we are running now
self.stopReaderPollTask()
self.accept(CConnectionRepository.getOverflowEventName(),
self.handleReaderOverflow)
self.readerPollTaskObj = taskMgr.add(
self.readerPollUntilEmpty, self.uniqueName("readerPollTask"),
priority = self.taskPriority, taskChain = self.taskChain)
def stopReaderPollTask(self):
if self.readerPollTaskObj:
taskMgr.remove(self.readerPollTaskObj)
self.readerPollTaskObj = None
self.ignore(CConnectionRepository.getOverflowEventName())
def readerPollUntilEmpty(self, task):
while self.readerPollOnce():
pass
return Task.cont
def readerPollOnce(self):
if self.checkDatagram():
self.getDatagramIterator(self.private__di)
self.handleDatagram(self.private__di)
return 1
# Unable to receive a datagram: did we lose the connection?
if not self.isConnected():
self.stopReaderPollTask()
messenger.send(self.uniqueName('lostConnection'), taskChain = 'default')
return 0
def handleReaderOverflow(self):
# this is called if the incoming-datagram queue overflowed and
# we lost some data. Override and handle if desired.
pass
def lostConnection(self):
        # This should be overridden by a derived class to handle an
        # unexpectedly lost connection to the gameserver.
self.notify.warning("Lost connection to gameserver.")
def handleDatagram(self, di):
        # This method is meant to be pure virtual; classes that inherit from
        # this one must override handleDatagram with their own implementation.
pass
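    # Hedged sketch (hypothetical subclass name, not part of this module): a
    # concrete repository would typically override this along these lines:
    #   class GameClientRepository(...):
    #       def handleDatagram(self, di):
    #           msgType = di.getUint16()
    #           # ...dispatch on msgType to a handler table...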
def send(self, datagram):
# Zero-length datagrams might freak out the server. No point
# in sending them, anyway.
if datagram.getLength() > 0:
## if self.notify.getDebug():
## print "ConnectionRepository sending datagram:"
## datagram.dumpHex(ostream)
self.sendDatagram(datagram)
# debugging funcs for simulating a network-plug-pull
def pullNetworkPlug(self):
self.notify.warning('*** SIMULATING A NETWORK-PLUG-PULL ***')
self.setSimulatedDisconnect(1)
def networkPlugPulled(self):
return self.getSimulatedDisconnect()
def restoreNetworkPlug(self):
if self.networkPlugPulled():
self.notify.info('*** RESTORING SIMULATED PULLED-NETWORK-PLUG ***')
self.setSimulatedDisconnect(0)
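    # Hedged usage sketch for the plug-pull helpers above (`repo` is assumed
    # to be a connected instance of this class):
    #   repo.pullNetworkPlug()           # datagrams stop flowing
    #   assert repo.networkPlugPulled()
    #   repo.restoreNetworkPlug()        # traffic resumes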
def uniqueName(self, idString):
return ("%s-%s" % (idString, self.uniqueId))
class GCTrigger:
# used to trigger garbage collection
pass
| bsd-3-clause |
knutole/python-oauth2 | example/server.py | 375 | 7669 | """
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib
import oauth.oauth as oauth
# fake urls for the test server
REQUEST_TOKEN_URL = 'https://photos.example.net/request_token'
ACCESS_TOKEN_URL = 'https://photos.example.net/access_token'
AUTHORIZATION_URL = 'https://photos.example.net/authorize'
CALLBACK_URL = 'http://printer.example.com/request_token_ready'
RESOURCE_URL = 'http://photos.example.net/photos'
REALM = 'http://photos.example.net/'
VERIFIER = 'verifier'
# example store for one of each thing
class MockOAuthDataStore(oauth.OAuthDataStore):
def __init__(self):
self.consumer = oauth.OAuthConsumer('key', 'secret')
self.request_token = oauth.OAuthToken('requestkey', 'requestsecret')
self.access_token = oauth.OAuthToken('accesskey', 'accesssecret')
self.nonce = 'nonce'
self.verifier = VERIFIER
def lookup_consumer(self, key):
if key == self.consumer.key:
return self.consumer
return None
def lookup_token(self, token_type, token):
token_attrib = getattr(self, '%s_token' % token_type)
if token == token_attrib.key:
## HACK
token_attrib.set_callback(CALLBACK_URL)
return token_attrib
return None
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
if oauth_token and oauth_consumer.key == self.consumer.key and (oauth_token.key == self.request_token.key or oauth_token.key == self.access_token.key) and nonce == self.nonce:
return self.nonce
return None
def fetch_request_token(self, oauth_consumer, oauth_callback):
if oauth_consumer.key == self.consumer.key:
if oauth_callback:
# want to check here if callback is sensible
# for mock store, we assume it is
self.request_token.set_callback(oauth_callback)
return self.request_token
return None
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
if oauth_consumer.key == self.consumer.key and oauth_token.key == self.request_token.key and oauth_verifier == self.verifier:
# want to check here if token is authorized
# for mock store, we assume it is
return self.access_token
return None
def authorize_request_token(self, oauth_token, user):
if oauth_token.key == self.request_token.key:
# authorize the request token in the store
# for mock store, do nothing
return self.request_token
return None
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.oauth_server = oauth.OAuthServer(MockOAuthDataStore())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
# example way to send an oauth error
def send_oauth_error(self, err=None):
# send a 401 error
self.send_error(401, str(err.message))
# return the authenticate header
header = oauth.build_authenticate_header(realm=REALM)
for k, v in header.iteritems():
self.send_header(k, v)
def do_GET(self):
# debug info
#print self.command, self.path, self.headers
# get the post data (if any)
postdata = None
if self.command == 'POST':
try:
length = int(self.headers.getheader('content-length'))
postdata = self.rfile.read(length)
except:
pass
# construct the oauth request from the request parameters
oauth_request = oauth.OAuthRequest.from_request(self.command, self.path, headers=self.headers, query_string=postdata)
# request token
if self.path.startswith(REQUEST_TOKEN_URL):
try:
# create a request token
token = self.oauth_server.fetch_request_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# user authorization
if self.path.startswith(AUTHORIZATION_URL):
try:
# get the request token
token = self.oauth_server.fetch_request_token(oauth_request)
# authorize the token (kind of does nothing for now)
token = self.oauth_server.authorize_token(token, None)
token.set_verifier(VERIFIER)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the callback url (to show server has it)
self.wfile.write(token.get_callback_url())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# access token
if self.path.startswith(ACCESS_TOKEN_URL):
try:
# create an access token
token = self.oauth_server.fetch_access_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# protected resources
if self.path.startswith(RESOURCE_URL):
try:
# verify the request has been oauth authorized
consumer, token, params = self.oauth_server.verify_request(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the extra parameters - just for something to return
self.wfile.write(str(params))
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
def do_POST(self):
return self.do_GET()
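# Hedged client-side sketch (hypothetical helper, not part of the original
# example): builds and signs a request-token call against this test server,
# reusing the oauth.oauth module imported above. localhost:8080 matches the
# HTTPServer address in main() below.
def example_fetch_request_token():
    import httplib
    consumer = oauth.OAuthConsumer('key', 'secret')
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_url=REQUEST_TOKEN_URL,
        parameters={'oauth_callback': CALLBACK_URL})
    oauth_request.sign_request(
        oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
    connection = httplib.HTTPConnection('localhost:8080')
    # the mock server routes on the path prefix, so send the full signed URL
    connection.request('GET', oauth_request.to_url())
    return connection.getresponse().read()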
def main():
try:
server = HTTPServer(('', 8080), RequestHandler)
print 'Test server running...'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == '__main__':
main() | mit |
Ialong/shogun | tests/integration/python_modular/test_one.py | 21 | 2671 | #!/usr/bin/env python
"""
Test one data file
"""
from numpy import *
import sys
import kernel
import distance
import classifier
import clustering
import distribution
import regression
import preprocessor
from modshogun import Math_init_random
SUPPORTED=['kernel', 'distance', 'classifier', 'clustering', 'distribution',
'regression', 'preprocessor']
def _get_name_fun(fnam):
module=None
for supported in SUPPORTED:
if fnam.find(supported)>-1:
module=supported
break
if module is None:
print('Module required for %s not supported yet!' % fnam)
return None
return module+'.test'
def _test_mfile(fnam):
try:
mfile=open(fnam, mode='r')
except IOError as e:
print(e)
return False
indata={}
name_fun=_get_name_fun(fnam)
if name_fun is None:
return False
for line in mfile:
line=line.strip(" \t\n;")
param = line.split('=')[0].strip()
if param=='name':
name=line.split('=')[1].strip().split("'")[1]
indata[param]=name
elif param=='kernel_symdata' or param=='kernel_data':
indata[param]=_read_matrix(line)
elif param.startswith('kernel_matrix') or \
param.startswith('distance_matrix'):
indata[param]=_read_matrix(line)
elif param.find('data_train')>-1 or param.find('data_test')>-1:
# data_{train,test} might be prepended by 'subkernelX_'
indata[param]=_read_matrix(line)
elif param=='classifier_alphas' or param=='classifier_support_vectors':
try:
indata[param]=eval(line.split('=')[1])
except SyntaxError: # might be MultiClass SVM and hence matrix
indata[param]=_read_matrix(line)
elif param=='clustering_centers' or param=='clustering_pairs':
indata[param]=_read_matrix(line)
else:
if (line.find("'")==-1):
indata[param]=eval(line.split('=')[1])
else:
indata[param]=line.split('=')[1].strip().split("'")[1]
mfile.close()
fun=eval(name_fun)
# seed random to constant value used at data file's creation
Math_init_random(indata['init_random'])
random.seed(indata['init_random'])
return fun(indata)
def _read_matrix(line):
try:
str_line=(line.split('[')[1]).split(']')[0]
except IndexError:
str_line=(line.split('{')[1]).split('}')[0]
lines=str_line.split(';')
lis2d=list()
for x in lines:
lis=list()
for y in x.split(','):
y=y.replace("'","").strip()
if(y.isalpha()):
lis.append(y)
else:
if y.find('.')!=-1:
lis.append(float(y))
else:
try:
lis.append(int(y))
except ValueError: # not int, RAWDNA?
lis.append(y)
lis2d.append(lis)
return array(lis2d)
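# Hedged illustration (format inferred from the parser above): a line such as
#   kernel_matrix_train=[1.1,2.2;3.3,4.4]
# yields array([[1.1, 2.2], [3.3, 4.4]]); '{...}' braces are accepted as an
# alternative delimiter, and non-numeric entries (e.g. RAWDNA symbols) are
# kept as strings.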
for filename in sys.argv:
if (filename.endswith('.m')):
res=_test_mfile(filename)
if res:
sys.exit(0)
else:
sys.exit(1)
| gpl-3.0 |
x2nie/odoo | openerp/tools/sql.py | 455 | 1173 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def drop_view_if_exists(cr, viewname):
cr.execute("DROP view IF EXISTS %s CASCADE" % (viewname,))
cr.commit()
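# Hedged usage sketch (hypothetical model and view names): typically called
# from a model's init() hook before recreating a SQL-backed view. The view
# name is interpolated directly into the statement, so it must never come
# from untrusted input.
#   def init(self, cr):
#       drop_view_if_exists(cr, 'sale_report')
#       cr.execute("""CREATE VIEW sale_report AS (SELECT ...)""")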
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/jinja2-2.6/jinja2/testsuite/tests.py | 497 | 2865 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.tests
~~~~~~~~~~~~~~~~~~~~~~
Who tests the tests?
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
env = Environment()
class TestsTestCase(JinjaTestCase):
def test_defined(self):
tmpl = env.from_string('{{ missing is defined }}|{{ true is defined }}')
assert tmpl.render() == 'False|True'
def test_even(self):
tmpl = env.from_string('''{{ 1 is even }}|{{ 2 is even }}''')
assert tmpl.render() == 'False|True'
def test_odd(self):
tmpl = env.from_string('''{{ 1 is odd }}|{{ 2 is odd }}''')
assert tmpl.render() == 'True|False'
def test_lower(self):
tmpl = env.from_string('''{{ "foo" is lower }}|{{ "FOO" is lower }}''')
assert tmpl.render() == 'True|False'
def test_typechecks(self):
tmpl = env.from_string('''
{{ 42 is undefined }}
{{ 42 is defined }}
{{ 42 is none }}
{{ none is none }}
{{ 42 is number }}
{{ 42 is string }}
{{ "foo" is string }}
{{ "foo" is sequence }}
{{ [1] is sequence }}
{{ range is callable }}
{{ 42 is callable }}
{{ range(5) is iterable }}
{{ {} is mapping }}
{{ mydict is mapping }}
{{ [] is mapping }}
''')
class MyDict(dict):
pass
assert tmpl.render(mydict=MyDict()).split() == [
'False', 'True', 'False', 'True', 'True', 'False',
'True', 'True', 'True', 'True', 'False', 'True',
'True', 'True', 'False'
]
def test_sequence(self):
tmpl = env.from_string(
'{{ [1, 2, 3] is sequence }}|'
'{{ "foo" is sequence }}|'
'{{ 42 is sequence }}'
)
assert tmpl.render() == 'True|True|False'
def test_upper(self):
tmpl = env.from_string('{{ "FOO" is upper }}|{{ "foo" is upper }}')
assert tmpl.render() == 'True|False'
def test_sameas(self):
tmpl = env.from_string('{{ foo is sameas false }}|'
'{{ 0 is sameas false }}')
assert tmpl.render(foo=False) == 'True|False'
def test_no_paren_for_arg1(self):
tmpl = env.from_string('{{ foo is sameas none }}')
assert tmpl.render(foo=None) == 'True'
def test_escaped(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ x is escaped }}|{{ y is escaped }}')
assert tmpl.render(x='foo', y=Markup('foo')) == 'False|True'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestsTestCase))
return suite
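# Hedged convenience sketch (not in the original module): allows running this
# test module directly via its suite() factory.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')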
| mit |
firebase/grpc-SwiftPM | src/python/grpcio_tests/tests_aio/unit/channel_test.py | 1 | 9110 | # Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests behavior of the grpc.aio.Channel class."""
import logging
import os
import unittest
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc
from tests.unit.framework.common import test_constants
from tests_aio.unit._constants import (UNARY_CALL_WITH_SLEEP_VALUE,
UNREACHABLE_TARGET)
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
_UNARY_CALL_METHOD = '/grpc.testing.TestService/UnaryCall'
_UNARY_CALL_METHOD_WITH_SLEEP = '/grpc.testing.TestService/UnaryCallWithSleep'
_STREAMING_OUTPUT_CALL_METHOD = '/grpc.testing.TestService/StreamingOutputCall'
_INVOCATION_METADATA = (
('x-grpc-test-echo-initial', 'initial-md-value'),
('x-grpc-test-echo-trailing-bin', b'\x00\x02'),
)
_NUM_STREAM_RESPONSES = 5
_REQUEST_PAYLOAD_SIZE = 7
_RESPONSE_PAYLOAD_SIZE = 42
class TestChannel(AioTestBase):
async def setUp(self):
self._server_target, self._server = await start_test_server()
async def tearDown(self):
await self._server.stop(None)
async def test_async_context(self):
async with aio.insecure_channel(self._server_target) as channel:
hi = channel.unary_unary(
_UNARY_CALL_METHOD,
request_serializer=messages_pb2.SimpleRequest.SerializeToString,
response_deserializer=messages_pb2.SimpleResponse.FromString)
await hi(messages_pb2.SimpleRequest())
async def test_unary_unary(self):
async with aio.insecure_channel(self._server_target) as channel:
hi = channel.unary_unary(
_UNARY_CALL_METHOD,
request_serializer=messages_pb2.SimpleRequest.SerializeToString,
response_deserializer=messages_pb2.SimpleResponse.FromString)
response = await hi(messages_pb2.SimpleRequest())
self.assertIsInstance(response, messages_pb2.SimpleResponse)
async def test_unary_call_times_out(self):
async with aio.insecure_channel(self._server_target) as channel:
hi = channel.unary_unary(
_UNARY_CALL_METHOD_WITH_SLEEP,
request_serializer=messages_pb2.SimpleRequest.SerializeToString,
response_deserializer=messages_pb2.SimpleResponse.FromString,
)
with self.assertRaises(grpc.RpcError) as exception_context:
await hi(messages_pb2.SimpleRequest(),
timeout=UNARY_CALL_WITH_SLEEP_VALUE / 2)
_, details = grpc.StatusCode.DEADLINE_EXCEEDED.value # pylint: disable=unused-variable
self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertEqual(details.title(),
exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIsNotNone(
exception_context.exception.trailing_metadata())
@unittest.skipIf(os.name == 'nt',
'TODO: https://github.com/grpc/grpc/issues/21658')
async def test_unary_call_does_not_times_out(self):
async with aio.insecure_channel(self._server_target) as channel:
hi = channel.unary_unary(
_UNARY_CALL_METHOD_WITH_SLEEP,
request_serializer=messages_pb2.SimpleRequest.SerializeToString,
response_deserializer=messages_pb2.SimpleResponse.FromString,
)
call = hi(messages_pb2.SimpleRequest(),
timeout=UNARY_CALL_WITH_SLEEP_VALUE * 5)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_unary_stream(self):
channel = aio.insecure_channel(self._server_target)
stub = test_pb2_grpc.TestServiceStub(channel)
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
# Invokes the actual RPC
call = stub.StreamingOutputCall(request)
# Validates the responses
response_cnt = 0
async for response in call:
response_cnt += 1
self.assertIs(type(response),
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(_NUM_STREAM_RESPONSES, response_cnt)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
await channel.close()
async def test_stream_unary_using_write(self):
channel = aio.insecure_channel(self._server_target)
stub = test_pb2_grpc.TestServiceStub(channel)
# Invokes the actual RPC
call = stub.StreamingInputCall()
# Prepares the request
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
# Sends out requests
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
await call.done_writing()
# Validates the responses
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
await channel.close()
async def test_stream_unary_using_async_gen(self):
channel = aio.insecure_channel(self._server_target)
stub = test_pb2_grpc.TestServiceStub(channel)
# Prepares the request
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
async def gen():
for _ in range(_NUM_STREAM_RESPONSES):
yield request
# Invokes the actual RPC
call = stub.StreamingInputCall(gen())
# Validates the responses
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
await channel.close()
async def test_stream_stream_using_read_write(self):
channel = aio.insecure_channel(self._server_target)
stub = test_pb2_grpc.TestServiceStub(channel)
# Invokes the actual RPC
call = stub.FullDuplexCall()
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
response = await call.read()
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
await call.done_writing()
self.assertEqual(grpc.StatusCode.OK, await call.code())
await channel.close()
async def test_stream_stream_using_async_gen(self):
channel = aio.insecure_channel(self._server_target)
stub = test_pb2_grpc.TestServiceStub(channel)
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
async def gen():
for _ in range(_NUM_STREAM_RESPONSES):
yield request
# Invokes the actual RPC
call = stub.FullDuplexCall(gen())
async for response in call:
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(grpc.StatusCode.OK, await call.code())
await channel.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
unittest.main(verbosity=2)
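# Hedged standalone sketch (mirrors the calls exercised above; `target` is
# assumed to be an address returned by start_test_server()):
#   async def ping(target):
#       async with aio.insecure_channel(target) as channel:
#           stub = test_pb2_grpc.TestServiceStub(channel)
#           return await stub.UnaryCall(messages_pb2.SimpleRequest())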
| apache-2.0 |
erjohnso/ansible | lib/ansible/modules/windows/win_wakeonlan.py | 47 | 2191 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_wakeonlan
version_added: '2.4'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(win_wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
required: true
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
default: 7
author:
- Dag Wieers (@dagwieers)
todo:
- Does not have SecureOn password support
notes:
- This module sends a magic packet without knowing whether it actually worked, so it always reports a change.
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
'''
EXAMPLES = r'''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
win_wakeonlan:
mac: 00:00:5E:00:53:66
broadcast: 192.0.2.23
- name: Send a magic Wake-On-LAN packet on port 9 to 00-00-5E-00-53-66
win_wakeonlan:
mac: 00-00-5E-00-53-66
port: 9
delegate_to: remote_system
'''
RETURN = r'''
# Default return values
'''
| gpl-3.0 |
jteehan/cfme_tests | utils/appliance/db.py | 1 | 15224 | import attr
from cached_property import cached_property
import fauxfactory
from textwrap import dedent
from utils import db, conf, clear_property_cache, datafile
from utils.path import scripts_path
from utils.version import LATEST
from utils.wait import wait_for
from .plugin import AppliancePlugin, AppliancePluginException
class ApplianceDBException(AppliancePluginException):
"""Basic Exception for Appliance DB object"""
pass
@attr.s
class ApplianceDB(AppliancePlugin):
"""Holder for appliance DB related methods and functions"""
_ssh_client = attr.ib(default=None)
# Until this needs a version pick, make it an attr
postgres_version = 'rh-postgresql95'
@cached_property
def client(self):
# slightly crappy: anything that changes self.address should also del(self.client)
return db.Db(self.address)
@cached_property
def address(self):
# pulls the db address from the appliance by default, falling back to the appliance
# ip address (and issuing a warning) if that fails. methods that set up the internal
# db should set db_address to something else when they do that
if self.appliance.db_host:
return self.appliance.db_host
try:
db_addr = self.appliance.wait_for_host_address()
if db_addr is None:
return self.appliance.address
db_addr = db_addr.strip()
ip_addr = self.appliance.ssh_client.run_command('ip address show')
if db_addr in ip_addr.output or db_addr.startswith('127') or 'localhost' in db_addr:
# address is local, use the appliance address
return self.appliance.address
else:
return db_addr
except (IOError, KeyError) as exc:
self.logger.error('Unable to pull database address from appliance')
self.logger.error(exc)
return self.appliance.address
@property
def is_partition_extended(self):
return self.appliance.ssh_client.run_command(
"ls /var/www/miq/vmdb/.db_partition_extended") == 0
def extend_partition(self):
"""Extends the /var partition with DB while shrinking the unused /repo partition"""
if self.is_partition_extended:
return
with self.appliance.ssh_client as ssh:
rc, out = ssh.run_command("df -h")
self.logger.info("File systems before extending the DB partition:\n{}".format(out))
ssh.run_command("umount /repo")
ssh.run_command("lvreduce --force --size -9GB /dev/mapper/VG--CFME-lv_repo")
ssh.run_command("mkfs.xfs -f /dev/mapper/VG--CFME-lv_repo")
ssh.run_command("lvextend --resizefs --size +9GB /dev/mapper/VG--CFME-lv_var")
ssh.run_command("mount -a")
rc, out = ssh.run_command("df -h")
self.logger.info("File systems after extending the DB partition:\n{}".format(out))
ssh.run_command("touch /var/www/miq/vmdb/.db_partition_extended")
def drop(self):
""" Drops the vmdb_production database
Note: EVM service has to be stopped for this to work.
"""
def _db_dropped():
rc, out = self.appliance.ssh_client.run_command(
'systemctl restart {}-postgresql'.format(self.postgres_version), timeout=60)
assert rc == 0, "Failed to restart postgres service: {}".format(out)
self.appliance.ssh_client.run_command('dropdb vmdb_production', timeout=15)
rc, out = self.appliance.ssh_client.run_command(
"psql -l | grep vmdb_production | wc -l", timeout=15)
return rc == 0
wait_for(_db_dropped, delay=5, timeout=60, message="drop the vmdb_production DB")
@property
    def ssh_client(self):
# Not lazycached to allow for the db address changing
if self.is_internal:
return self.appliance.ssh_client
else:
if self._ssh_client is None:
self._ssh_client = self.appliance.ssh_client(hostname=self.address)
return self._ssh_client
def backup(self, database_path="/tmp/evm_db.backup"):
"""Backup VMDB database
"""
from . import ApplianceException
self.logger.info('Backing up database')
status, output = self.appliance.ssh_client.run_rake_command(
'evm:db:backup:local --trace -- --local-file "{}" --dbname vmdb_production'.format(
database_path))
if status != 0:
msg = 'Failed to backup database'
self.logger.error(msg)
raise ApplianceException(msg)
def restore(self, database_path="/tmp/evm_db.backup"):
"""Restore VMDB database
"""
from . import ApplianceException
self.logger.info('Restoring database')
status, output = self.appliance.ssh_client.run_rake_command(
'evm:db:restore:local --trace -- --local-file "{}"'.format(database_path))
if status != 0:
msg = 'Failed to restore database on appl {}, output is {}'.format(self.address,
output)
self.logger.error(msg)
raise ApplianceException(msg)
def setup(self, **kwargs):
"""Configure database
On downstream appliances, invokes the internal database setup.
On all appliances waits for database to be ready.
"""
self.logger.info('Starting DB setup')
if self.appliance.version != LATEST:
# We only execute this on downstream appliances.
# TODO: Handle external DB setup. Probably pop the db_address and decide on that one.
self.enable_internal(**kwargs)
else:
# Ensure the evmserverd is on on the upstream appliance
if not self.appliance.evmserverd.running:
self.appliance.evmserverd.start()
self.appliance.evmserverd.enable() # just to be sure here.
self.appliance.wait_for_web_ui()
# Make sure the database is ready
wait_for(func=lambda: self.is_ready,
message='appliance db ready', delay=20, num_sec=1200)
self.logger.info('DB setup complete')
def loosen_pgssl(self, with_ssl=False):
"""Loosens postgres connections"""
self.logger.info('Loosening postgres permissions')
# Init SSH client
client = self.appliance.ssh_client
# set root password
cmd = "psql -d vmdb_production -c \"alter user {} with password '{}'\"".format(
conf.credentials['database']['username'], conf.credentials['database']['password']
)
client.run_command(cmd)
# back up pg_hba.conf
scl = self.postgres_version
client.run_command('mv /opt/rh/{scl}/root/var/lib/pgsql/data/pg_hba.conf '
'/opt/rh/{scl}/root/var/lib/pgsql/data/pg_hba.conf.sav'.format(scl=scl))
if with_ssl:
ssl = 'hostssl all all all cert map=sslmap'
else:
ssl = ''
# rewrite pg_hba.conf
write_pg_hba = dedent("""\
cat > /opt/rh/{scl}/root/var/lib/pgsql/data/pg_hba.conf <<EOF
local all postgres,root trust
host all all 0.0.0.0/0 md5
{ssl}
EOF
""".format(ssl=ssl, scl=scl))
client.run_command(write_pg_hba)
client.run_command("chown postgres:postgres "
"/opt/rh/{scl}/root/var/lib/pgsql/data/pg_hba.conf".format(scl=scl))
# restart postgres
status, out = client.run_command("systemctl restart {scl}-postgresql".format(scl=scl))
return status
def enable_internal(self, region=0, key_address=None, db_password=None, ssh_password=None):
"""Enables internal database
Args:
region: Region number of the CFME appliance.
key_address: Address of CFME appliance where key can be fetched.
Note:
If key_address is None, a new encryption key is generated for the appliance.
"""
self.logger.info('Enabling internal DB (region {}) on {}.'.format(region, self.address))
self.address = self.appliance.address
clear_property_cache(self, 'client')
client = self.ssh_client
# Defaults
db_password = db_password or conf.credentials['database']['password']
ssh_password = ssh_password or conf.credentials['ssh']['password']
if self.appliance.has_cli:
# use the cli
if key_address:
status, out = client.run_command(
'appliance_console_cli --region {0} --internal --fetch-key {1} -p {2} -a {3}'
.format(region, key_address, db_password, ssh_password)
)
else:
status, out = client.run_command(
'appliance_console_cli --region {} --internal --force-key -p {}'
.format(region, db_password)
)
else:
# no cli, use the enable internal db script
rbt_repl = {
'miq_lib': '/var/www/miq/lib',
'region': region,
'postgres_version': self.postgres_version
}
# Find and load our rb template with replacements
rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
rb = datafile.load_data_file(rbt, rbt_repl)
# sent rb file over to /tmp
remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
client.put_file(rb.name, remote_file)
# Run the rb script, clean it up when done
status, out = client.run_command('ruby {}'.format(remote_file))
client.run_command('rm {}'.format(remote_file))
return status, out
def enable_external(self, db_address, region=0, db_name=None, db_username=None,
db_password=None):
"""Enables external database
Args:
db_address: Address of the external database
region: Number of region to join
db_name: Name of the external DB
db_username: Username to access the external DB
db_password: Password to access the external DB
Returns a tuple of (exitstatus, script_output) for reporting, if desired
"""
self.logger.info('Enabling external DB (db_address {}, region {}) on {}.'
.format(db_address, region, self.address))
# reset the db address and clear the cached db object if we have one
self.address = db_address
clear_property_cache(self, 'client')
# default
db_name = db_name or 'vmdb_production'
db_username = db_username or conf.credentials['database']['username']
db_password = db_password or conf.credentials['database']['password']
client = self.ssh_client
if self.appliance.has_cli:
# copy v2 key
master_client = client(hostname=self.address)
rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
master_client.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
# enable external DB with cli
status, out = client.run_command(
'appliance_console_cli '
'--hostname {0} --region {1} --dbname {2} --username {3} --password {4}'.format(
self.address, region, db_name, db_username, db_password
)
)
else:
# no cli, use the enable external db script
rbt_repl = {
'miq_lib': '/var/www/miq/lib',
'host': self.address,
'region': region,
'database': db_name,
'username': db_username,
'password': db_password
}
# Find and load our rb template with replacements
            rbt = datafile.data_path_for_filename('enable-external-db.rbt', scripts_path.strpath)
rb = datafile.load_data_file(rbt, rbt_repl)
# Init SSH client and sent rb file over to /tmp
remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
client.put_file(rb.name, remote_file)
# Run the rb script, clean it up when done
status, out = client.run_command('ruby {}'.format(remote_file))
client.run_command('rm {}'.format(remote_file))
if status != 0:
self.logger.error('error enabling external db')
self.logger.error(out)
msg = ('Appliance {} failed to enable external DB running on {}'
.format(self.appliance.address, db_address))
self.logger.error(msg)
from . import ApplianceException
raise ApplianceException(msg)
return status, out
@property
def is_dedicated_active(self):
return_code, output = self.appliance.ssh_client.run_command(
"systemctl status {}-postgresql.service | grep running".format(
self.postgres_version))
return return_code == 0
def wait_for(self, timeout=600):
"""Waits for appliance database to be ready
Args:
            timeout: Number of seconds to wait until timeout (default ``600``)
"""
wait_for(func=lambda: self.is_ready,
message='appliance.db.is_ready',
delay=20,
num_sec=timeout)
@property
def is_enabled(self):
"""Is database enabled"""
        return self.address is not None
@property
def is_internal(self):
"""Is database internal"""
        return self.address == self.appliance.address
@property
def is_ready(self):
"""Is database ready"""
# Using 'and' chain instead of all(...) to
# prevent calling more things after a step fails
return self.is_online and self.has_database and self.has_tables
@property
def is_online(self):
"""Is database online"""
db_check_command = ('psql -U postgres -t -c "select now()" postgres')
result = self.ssh_client.run_command(db_check_command)
return result.rc == 0
@property
def has_database(self):
"""Does database have a database defined"""
db_check_command = ('psql -U postgres -t -c "SELECT datname FROM pg_database '
'WHERE datname LIKE \'vmdb_%\';" postgres | grep -q vmdb_production')
result = self.ssh_client.run_command(db_check_command)
return result.rc == 0
@property
def has_tables(self):
"""Does database have tables defined"""
db_check_command = ('psql -U postgres -t -c "SELECT * FROM information_schema.tables '
'WHERE table_schema = \'public\';" vmdb_production | grep -q vmdb_production')
result = self.ssh_client.run_command(db_check_command)
return result.rc == 0
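# Hedged usage sketch (assumes an appliance object exposing this plugin as
# `appliance.db`, per the AppliancePlugin machinery imported above):
#   appliance.db.setup()    # configure the internal DB and wait until ready
#   appliance.db.backup()   # dump vmdb_production to /tmp/evm_db.backup
#   appliance.db.restore()  # restore from the same default path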
| gpl-2.0 |
photoninger/ansible | test/sanity/code-smell/no-tests-as-filters.py | 14 | 2571 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2017, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import re
import sys
from collections import defaultdict
from ansible.plugins.test import core, files, mathstuff
TESTS = list(core.TestModule().tests().keys()) + list(files.TestModule().tests().keys()) + list(mathstuff.TestModule().tests().keys())
TEST_MAP = {
'version_compare': 'version',
'is_dir': 'directory',
'is_file': 'file',
'is_link': 'link',
'is_abs': 'abs',
'is_same_file': 'same_file',
'is_mount': 'mount',
'issubset': 'subset',
'issuperset': 'superset',
'isnan': 'nan',
'succeeded': 'successful',
'success': 'successful',
'change': 'changed',
'skip': 'skipped',
}
FILTER_RE = re.compile(r'((.+?)\s*([\w \.\'"]+)(\s*)\|(\s*)(\w+))')
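# Hedged illustration (hypothetical playbook line, not from this repo): given
#   when: result|failed
# FILTER_RE captures 'failed' as match[5]; the lookup below falls back to the
# filter name itself when it is missing from TEST_MAP, and since 'failed' is
# a known test the line would trigger the deprecation warning.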
def main():
all_matches = defaultdict(list)
for root, dirs, filenames in os.walk('.'):
for name in filenames:
if os.path.splitext(name)[1] not in ('.yml', '.yaml'):
continue
path = os.path.join(root, name)
with open(path) as f:
text = f.read()
for match in FILTER_RE.findall(text):
filter_name = match[5]
try:
test_name = TEST_MAP[filter_name]
except KeyError:
test_name = filter_name
if test_name not in TESTS:
continue
all_matches[path].append(match[0])
if all_matches:
print('Use of Ansible provided Jinja2 tests as filters is deprecated.')
print('Please update to use `is` syntax such as `result is failed`.')
for path, matches in all_matches.items():
for match in matches:
print('%s: %s' % (path, match,))
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 |
dsajkl/123 | common/djangoapps/dark_lang/tests.py | 34 | 6671 | """
Tests of DarkLangMiddleware
"""
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
from mock import Mock
from dark_lang.middleware import DarkLangMiddleware
from dark_lang.models import DarkLangConfig
UNSET = object()
def set_if_set(dct, key, value):
"""
Sets ``key`` in ``dct`` to ``value``
unless ``value`` is ``UNSET``
"""
if value is not UNSET:
dct[key] = value
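# Hedged illustration: set_if_set({'a': 1}, 'b', UNSET) leaves the dict
# untouched, while set_if_set({'a': 1}, 'b', 2) stores 2 under 'b'. This lets
# process_request() below distinguish "not supplied" from falsy values.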
class DarkLangMiddlewareTests(TestCase):
"""
Tests of DarkLangMiddleware
"""
def setUp(self):
self.user = User()
self.user.save()
DarkLangConfig(
released_languages='rel',
changed_by=self.user,
enabled=True
).save()
def process_request(self, django_language=UNSET, accept=UNSET, preview_lang=UNSET, clear_lang=UNSET):
"""
Build a request and then process it using the ``DarkLangMiddleware``.
Args:
django_language (str): The language code to set in request.session['django_language']
accept (str): The accept header to set in request.META['HTTP_ACCEPT_LANGUAGE']
            preview_lang (str): The value to set in request.GET['preview-lang']
            clear_lang (str): The value to set in request.GET['clear-lang']
"""
session = {}
set_if_set(session, 'django_language', django_language)
meta = {}
set_if_set(meta, 'HTTP_ACCEPT_LANGUAGE', accept)
get = {}
set_if_set(get, 'preview-lang', preview_lang)
set_if_set(get, 'clear-lang', clear_lang)
request = Mock(
spec=HttpRequest,
session=session,
META=meta,
GET=get
)
self.assertIsNone(DarkLangMiddleware().process_request(request))
return request
def assertAcceptEquals(self, value, request):
"""
Assert that the HTML_ACCEPT_LANGUAGE header in request
is equal to value
"""
self.assertEquals(
value,
request.META.get('HTTP_ACCEPT_LANGUAGE', UNSET)
)
def test_empty_accept(self):
self.assertAcceptEquals(UNSET, self.process_request())
def test_wildcard_accept(self):
self.assertAcceptEquals('*', self.process_request(accept='*'))
def test_released_accept(self):
self.assertAcceptEquals(
'rel;q=1.0',
self.process_request(accept='rel;q=1.0')
)
def test_unreleased_accept(self):
self.assertAcceptEquals(
'rel;q=1.0',
self.process_request(accept='rel;q=1.0, unrel;q=0.5')
)
def test_accept_with_syslang(self):
self.assertAcceptEquals(
'en;q=1.0, rel;q=0.8',
self.process_request(accept='en;q=1.0, rel;q=0.8, unrel;q=0.5')
)
def test_accept_multiple_released_langs(self):
DarkLangConfig(
released_languages=('rel, unrel'),
changed_by=self.user,
enabled=True
).save()
self.assertAcceptEquals(
'rel;q=1.0, unrel;q=0.5',
self.process_request(accept='rel;q=1.0, unrel;q=0.5')
)
self.assertAcceptEquals(
'rel;q=1.0, unrel;q=0.5',
self.process_request(accept='rel;q=1.0, notrel;q=0.3, unrel;q=0.5')
)
self.assertAcceptEquals(
'rel;q=1.0, unrel;q=0.5',
self.process_request(accept='notrel;q=0.3, rel;q=1.0, unrel;q=0.5')
)
def test_accept_released_territory(self):
self.assertAcceptEquals(
'rel-ter;q=1.0, rel;q=0.5',
self.process_request(accept='rel-ter;q=1.0, rel;q=0.5')
)
def test_accept_mixed_case(self):
self.assertAcceptEquals(
'rel-TER;q=1.0, REL;q=0.5',
self.process_request(accept='rel-TER;q=1.0, REL;q=0.5')
)
DarkLangConfig(
released_languages=('REL-TER'),
changed_by=self.user,
enabled=True
).save()
self.assertAcceptEquals(
'rel-ter;q=1.0',
self.process_request(accept='rel-ter;q=1.0, rel;q=0.5')
)
def assertSessionLangEquals(self, value, request):
"""
Assert that the 'django_language' set in request.session is equal to value
"""
self.assertEquals(
value,
request.session.get('django_language', UNSET)
)
def test_preview_lang_with_released_language(self):
self.assertSessionLangEquals(
UNSET,
self.process_request(preview_lang='rel')
)
self.assertSessionLangEquals(
'notrel',
self.process_request(preview_lang='rel', django_language='notrel')
)
def test_preview_lang_with_dark_language(self):
self.assertSessionLangEquals(
'unrel',
self.process_request(preview_lang='unrel')
)
self.assertSessionLangEquals(
'unrel',
self.process_request(preview_lang='unrel', django_language='notrel')
)
def test_clear_lang(self):
self.assertSessionLangEquals(
UNSET,
self.process_request(clear_lang=True)
)
self.assertSessionLangEquals(
UNSET,
self.process_request(clear_lang=True, django_language='rel')
)
self.assertSessionLangEquals(
UNSET,
self.process_request(clear_lang=True, django_language='unrel')
)
def test_disabled(self):
DarkLangConfig(enabled=False, changed_by=self.user).save()
self.assertAcceptEquals(
'notrel;q=0.3, rel;q=1.0, unrel;q=0.5',
self.process_request(accept='notrel;q=0.3, rel;q=1.0, unrel;q=0.5')
)
self.assertSessionLangEquals(
'rel',
self.process_request(clear_lang=True, django_language='rel')
)
self.assertSessionLangEquals(
'unrel',
self.process_request(clear_lang=True, django_language='unrel')
)
self.assertSessionLangEquals(
'rel',
self.process_request(preview_lang='unrel', django_language='rel')
)
def test_accept_chinese_language_codes(self):
DarkLangConfig(
released_languages=('zh-cn, zh-hk, zh-tw'),
changed_by=self.user,
enabled=True
).save()
self.assertAcceptEquals(
'zh-CN;q=1.0, zh-TW;q=0.5, zh-HK;q=0.3',
self.process_request(accept='zh-Hans;q=1.0, zh-Hant-TW;q=0.5, zh-HK;q=0.3')
)
| agpl-3.0 |
fausecteam/ctf-gameserver | src/ctf_gameserver/web/dev_settings.py | 1 | 1175 | """
Django and project specific settings for usage during development.
Everything should be ready-to-go for a common development environment, but you may of course tweak some
options.
"""
# pylint: disable=wildcard-import, unused-wildcard-import
from .base_settings import *
CSP_POLICIES = {
# The debug error page uses inline JavaScript and CSS
'script-src': ["'self'", "'unsafe-inline'"],
'style-src': ["'self'", "'unsafe-inline'"],
'object-src': ["'self'"],
'connect-src': ["'self'"]
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'dev-db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache'
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'ctf-gameserver.web@localhost'
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
SECRET_KEY = 'OnlySuitableForDevelopment' # nosec
TIME_ZONE = 'UTC'
FIRST_DAY_OF_WEEK = 1
DEBUG = True
INTERNAL_IPS = ('127.0.0.1',)  # one-element tuple; the trailing comma matters
GRAYLOG_SEARCH_URL = 'http://localhost:9000/search'
| isc |
crcresearch/osf.io | api/base/settings/defaults.py | 1 | 8176 | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'osf.db.backends.postgresql', # django.db.backends.postgresql
'NAME': os.environ.get('OSF_DB_NAME', 'osf'),
'USER': os.environ.get('OSF_DB_USER', 'postgres'),
'PASSWORD': os.environ.get('OSF_DB_PASSWORD', ''),
'HOST': os.environ.get('OSF_DB_HOST', '127.0.0.1'),
'PORT': os.environ.get('OSF_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
}
}
DATABASE_ROUTERS = ['osf.db.router.PostgreSQLFailoverRouter', ]
CELERY_IMPORTS = [
'osf.management.commands.migratedata',
'osf.management.commands.migraterelations',
'osf.management.commands.verify',
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_USER_MODEL = 'osf.OSFUser'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
AUTHENTICATION_BACKENDS = (
'api.base.authentication.backends.ODMBackend',
'guardian.backends.ObjectPermissionBackend',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEV_MODE = osf_settings.DEV_MODE
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'api'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'api-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
CSRF_COOKIE_HTTPONLY = osf_settings.SECURE_MODE
ALLOWED_HOSTS = [
'.osf.io'
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.admin',
# 3rd party
'django_celery_beat',
'rest_framework',
'corsheaders',
'raven.contrib.django.raven_compat',
'django_extensions',
'guardian',
# OSF
'osf',
'reviews',
# Addons
'addons.osfstorage',
'addons.bitbucket',
'addons.box',
'addons.dataverse',
'addons.dropbox',
'addons.figshare',
'addons.forward',
'addons.github',
'addons.gitlab',
'addons.googledrive',
'addons.mendeley',
'addons.onedrive',
'addons.owncloud',
'addons.s3',
'addons.twofactor',
'addons.wiki',
'addons.zotero',
)
# local development using https
if osf_settings.SECURE_MODE and DEBUG:
INSTALLED_APPS += ('sslserver',)
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'api'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
BULK_SETTINGS = {
'DEFAULT_BULK_LIMIT': 100
}
MAX_PAGE_SIZE = 100
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_RENDERER_CLASSES': (
'api.base.renderers.JSONAPIRenderer',
'api.base.renderers.JSONRendererWithESISupport',
'api.base.renderers.BrowsableAPIRendererNoForms',
),
'DEFAULT_PARSER_CLASSES': (
'api.base.parsers.JSONAPIParser',
'api.base.parsers.JSONAPIParserForRegularJSON',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'EXCEPTION_HANDLER': 'api.base.exceptions.json_api_exception_handler',
'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'api.base.content_negotiation.JSONAPIContentNegotiation',
'DEFAULT_VERSIONING_CLASS': 'api.base.versioning.BaseVersioning',
'DEFAULT_VERSION': '2.0',
'ALLOWED_VERSIONS': (
'2.0',
'2.1',
'2.2',
'2.3',
'2.4',
'2.5',
'2.6',
),
'DEFAULT_FILTER_BACKENDS': ('api.base.filters.OSFOrderingFilter',),
'DEFAULT_PAGINATION_CLASS': 'api.base.pagination.JSONAPIPagination',
'ORDERING_PARAM': 'sort',
'DEFAULT_AUTHENTICATION_CLASSES': (
# Custom auth classes
'api.base.authentication.drf.OSFBasicAuthentication',
'api.base.authentication.drf.OSFSessionAuthentication',
'api.base.authentication.drf.OSFCASAuthentication'
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'api.base.throttling.NonCookieAuthThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'user': '10000/day',
'non-cookie-auth': '100/hour',
'add-contributor': '10/second',
'create-guid': '1000/hour',
'root-anon-throttle': '1000/hour',
'test-user': '2/hour',
'test-anon': '1/hour',
}
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
# This needs to remain True to allow cross origin requests that are in CORS_ORIGIN_WHITELIST to
# use cookies.
CORS_ALLOW_CREDENTIALS = True
# Set dynamically on app init
ORIGINS_WHITELIST = ()
MIDDLEWARE_CLASSES = (
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
# A profiling middleware. ONLY FOR DEV USE
# Uncomment and add "prof" to url params to recieve a profile for that url
# 'api.base.middleware.ProfileMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'api.base.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True
}]
ROOT_URLCONF = 'api.base.urls'
WSGI_APPLICATION = 'api.base.wsgi.application'
LANGUAGE_CODE = 'en-us'
# Disabled to make a test work (TestNodeLog.test_formatted_date)
# TODO Try to understand what's happening to cause the test to break when that line is active.
# TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static/vendor')
API_BASE = 'v2/'
STATIC_URL = '/static/'
NODE_CATEGORY_MAP = osf_settings.NODE_CATEGORY_MAP
DEBUG_TRANSACTIONS = DEBUG
JWT_SECRET = 'osf_api_cas_login_jwt_secret_32b'
JWE_SECRET = 'osf_api_cas_login_jwe_secret_32b'
ENABLE_VARNISH = osf_settings.ENABLE_VARNISH
ENABLE_ESI = osf_settings.ENABLE_ESI
VARNISH_SERVERS = osf_settings.VARNISH_SERVERS
ESI_MEDIA_TYPES = osf_settings.ESI_MEDIA_TYPES
ADDONS_FOLDER_CONFIGURABLE = ['box', 'dropbox', 's3', 'googledrive', 'figshare', 'owncloud', 'onedrive']
ADDONS_OAUTH = ADDONS_FOLDER_CONFIGURABLE + ['dataverse', 'github', 'bitbucket', 'gitlab', 'mendeley', 'zotero', 'forward']
BYPASS_THROTTLE_TOKEN = 'test-token'
OSF_SHELL_USER_IMPORTS = None
# Settings for use in the admin
OSF_URL = 'https://osf.io'
SELECT_FOR_UPDATE_ENABLED = True
# Disable anonymous user permissions in django-guardian
ANONYMOUS_USER_NAME = None
CELERY_BEAT_SCHEDULER = 'django_celery_beat.schedulers:DatabaseScheduler'
| apache-2.0 |
kobe25/gasistafelice | gasistafelice/gf/base/accounting.py | 3 | 9228 | from django.utils.translation import ugettext, ugettext_lazy as _
from simple_accounting.exceptions import MalformedTransaction
from simple_accounting.models import AccountingProxy, Transaction, LedgerEntry, account_type
from simple_accounting.utils import register_transaction
from consts import (
INCOME, EXPENSE, ASSET, LIABILITY, EQUITY,
GASMEMBER_GAS, RECYCLE, ADJUST
)
from datetime import datetime
class PersonAccountingProxy(AccountingProxy):
"""
This class is meant to be the place where implementing the accounting API
for ``Person``-like economic subjects.
Since it's a subclass of ``AccountingProxy``, it inherits from its parent
all the methods and attributes comprising the *generic* accounting API;
here, you can add whatever logic is needed to augment that generic API,
tailoring it to the specific needs of the ``Person``' model.
"""
def last_entry(self, base_path):
"""last entry for one subject"""
try:
latest = self.system[base_path].ledger_entries.latest('transaction__date')
except LedgerEntry.DoesNotExist:
latest = None
return latest
    #FIXME: create last_entry or one method for each base_path? Encapsulation and refactoring
    # (example base_path: '/expenses/gas/gas-1/recharges')
def do_recharge(self, gas, amount, note="", date=None):
"""
Do a recharge of amount ``amount`` to the corresponding member account
in the GAS ``gas``.
        If this person is not a member of GAS ``gas``, or if ``amount`` is a negative number,
a ``MalformedTransaction`` exception is raised.
"""
person = self.subject.instance
if amount < 0:
raise MalformedTransaction(ugettext("Amount of a recharge must be non-negative"))
elif not person.has_been_member(gas):
raise MalformedTransaction(ugettext("A person can't make an account recharge for a GAS that (s)he is not member of"))
else:
source_account = self.system['/wallet']
exit_point = self.system['/expenses/gas/' + gas.uid + '/recharges']
entry_point = gas.accounting.system['/incomes/recharges']
target_account = gas.accounting.system['/members/' + person.uid]
description = unicode(person.report_name)
issuer = self.subject
if not date:
date = datetime.now() #_date.today
transaction = register_transaction(source_account, exit_point,
entry_point, target_account, amount, description, issuer,
date, 'RECHARGE'
)
transaction.add_references([person, gas])
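    # Hedged usage sketch (hypothetical values; assumes the proxy is reachable
    # as person.accounting): a member topping up 25 units of GAS credit:
    #   person.accounting.do_recharge(gas, 25, note="monthly top-up")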
#Transaction
# date = models.DateTimeField(default=datetime.now)
# description = models.CharField(max_length=512, help_text=ugettext("Reason of the transaction"))
# issuer = models.ForeignKey(Subject, related_name='issued_transactions_set')
# source = models.ForeignKey(CashFlow)
# split_set = models.ManyToManyField(Split)
# kind = models.CharField(max_length=128, choices=settings.TRANSACTION_TYPES)
# is_confirmed = models.BooleanField(default=False)
# def splits(self):
# def is_split(self):
# def is_internal(self):
# def is_simple(self):
#LedgerEntry
# account = models.ForeignKey(Account, related_name='entry_set')
# transaction = models.ForeignKey(Transaction, related_name='entry_set')
# entry_id = models.PositiveIntegerField(null=True, blank=True, editable=False)
# amount = CurrencyField()
# def date(self):
# def description(self):
# def issuer(self):
def entries_gasmember(self, gasmember):
"""
List all LedgerEntries (account, transaction, amount)
Show transactions for gasmembers link to GAS kind='GAS_WITHDRAWAL' + another kind?
"""
member_account = gasmember.person.uid
gas_account = gasmember.gas.uid
#accounts = self.system.accounts.filter(name="wallet") | \
accounts = \
self.system.accounts.filter(parent__name="members", name__in=member_account) | \
self.system.accounts.filter(parent__name="expenses/gas/" + gas_account + "/fees", name__in=member_account) | \
self.system.accounts.filter(parent__name="expenses/gas/" + gas_account + "/recharges", name__in=member_account) | \
gasmember.gas.accounting.system.accounts.filter(parent__name="members", name=member_account)
#gasmember.gas.accounting.system.accounts.filter(name="members/%s" % member_account) ko?
return LedgerEntry.objects.filter(account__in=accounts).order_by('-id', '-transaction__date')
def extra_operation(self, gas, amount, target, causal, date):
"""
        Record another account operation for this subject.
        For a GASMEMBER the target can be an income or an expense operation.
        The operation can imply a GAS economic change.
"""
if amount < 0:
raise MalformedTransaction(ugettext("Payment amounts must be non-negative"))
person = self.subject.instance
if not person.has_been_member(gas):
raise MalformedTransaction(ugettext("A person can't pay membership fees to a GAS that (s)he is not member of"))
gas_acc = gas.accounting
gas_system = gas.accounting.system
kind = GASMEMBER_GAS
        #UGLY: remove me once a command that regenerates all missing accounts has been written and executed
self.missing_accounts(gas)
if target == INCOME: #Correction for gasmember: +gasmember -GAS
source_account = gas_system['/cash']
exit_point = gas_system['/expenses/member']
entry_point = gas_system['/incomes/recharges']
target_account = gas_system['/members/' + person.uid]
elif target == EXPENSE: #Correction for GAS: +GAS -gasmember
source_account = gas_system['/members/' + person.uid]
exit_point = gas_system['/expenses/gas']
entry_point = gas_system['/incomes/member']
target_account = gas_system['/cash']
elif target == ASSET: #Detraction for Gasmember: -gasmember
source_account = gas_system['/members/' + person.uid]
exit_point = gas_system['/expenses/member']
entry_point = self.system['/incomes/other']
target_account = self.system['/wallet']
kind = ADJUST
elif target == LIABILITY: #Addition for Gasmember: +gasmember
source_account = self.system['/wallet']
exit_point = self.system['/expenses/other']
entry_point = gas_system['/incomes/recharges']
target_account = gas_system['/members/' + person.uid]
kind = ADJUST
elif target == EQUITY: #Restitution for gasmember: empty container +gasmember -GAS
source_account = gas_system['/cash']
exit_point = gas_system['/expenses/member']
entry_point = gas_system['/incomes/recharges']
target_account = gas_system['/members/' + person.uid]
kind = RECYCLE
else:
raise MalformedTransaction(ugettext("Payment target %s not identified") % target)
description = "%(gas)s %(target)s %(causal)s" % {
'gas': gas.id_in_des,
'target': target,
'causal': causal
}
issuer = self.subject
if not date:
date = datetime.now() #_date.today
transaction = register_transaction(source_account, exit_point, entry_point, target_account, amount, description, issuer, date, kind)
# . gasmember ROOT (/)
# |----------- wallet [A]
# +----------- incomes [P,I] +
# | +--- TODO: Other (Private order, correction, Deposit)
# +----------- expenses [P,E] + UNUSED because we use the gas_system[/incomes/recharges]
# +--- TODO: Other (Correction, Donation, )
# . GAS ROOT (/)
# |----------- cash [A]
# +----------- members [P,A]+
# | +--- <UID member #1> [A]
# | | ..
# | +--- <UID member #n> [A]
# +----------- expenses [P,E]+
# | +--- TODO: member (correction or other)
# | +--- TODO: gas (correction or other)
# +----------- incomes [P,I]+
# | +--- recharges [I]
# | +--- TODO: member (correction or other)
    #UGLY: remove me once a command that regenerates all missing accounts has been written and executed
def missing_accounts(self, gas):
gas_acc = gas.accounting
gas_system = gas.accounting.system
xsys = gas_acc.get_account(gas_system, '/expenses', 'member', account_type.expense)
xsys = gas_acc.get_account(gas_system, '/expenses', 'gas', account_type.expense)
xsys = gas_acc.get_account(gas_system, '/incomes', 'member', account_type.income)
xsys = gas_acc.get_account(self.system, '/expenses', 'other', account_type.expense)
xsys = gas_acc.get_account(self.system, '/incomes', 'other', account_type.income)
| agpl-3.0 |
antgonza/qiita | qiita_pet/handlers/rest/study_samples.py | 1 | 4233 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.escape import json_encode, json_decode
import pandas as pd
from qiita_db.handlers.oauth2 import authenticate_oauth
from .rest_handler import RESTHandler
class StudySamplesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
samples = []
else:
samples = list(study.sample_template.keys())
self.write(json_encode(samples))
self.finish()
@authenticate_oauth
def patch(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
self.fail('No sample information found', 404)
return
else:
sample_info = study.sample_template.to_dataframe()
data = pd.DataFrame.from_dict(json_decode(self.request.body),
orient='index')
if len(data.index) == 0:
self.fail('No samples provided', 400)
return
categories = set(study.sample_template.categories)
if set(data.columns) != categories:
if set(data.columns).issubset(categories):
self.fail('Not all sample information categories provided',
400)
else:
unknown = set(data.columns) - categories
self.fail("Some categories do not exist in the sample "
"information", 400,
categories_not_found=sorted(unknown))
return
existing_samples = set(sample_info.index)
overlapping_ids = set(data.index).intersection(existing_samples)
new_ids = set(data.index) - existing_samples
status = 500
# warnings generated are not currently caught
# see https://github.com/biocore/qiita/issues/2096
if overlapping_ids:
to_update = data.loc[overlapping_ids]
study.sample_template.update(to_update)
status = 200
if new_ids:
to_extend = data.loc[new_ids]
study.sample_template.extend(to_extend)
status = 201
self.set_status(status)
self.finish()
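# Editorial sketch of a PATCH body this handler accepts (sample IDs and
# values below are hypothetical). Outer keys are sample IDs; each inner dict
# must cover exactly the existing categories, matching
# pd.DataFrame.from_dict(..., orient='index'):
#
#   {"sample-1": {"season": "winter", "depth": "0.15"},
#    "sample-2": {"season": "summer", "depth": "0.30"}}
#
# Existing IDs go through sample_template.update() (HTTP 200); new IDs go
# through sample_template.extend() (HTTP 201).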
class StudySamplesCategoriesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id, categories):
if not categories:
self.fail('No categories specified', 405)
return
study = self.safe_get_study(study_id)
if study is None:
return
categories = categories.split(',')
if study.sample_template is None:
self.fail('Study does not have sample information', 404)
return
available_categories = set(study.sample_template.categories)
not_found = set(categories) - available_categories
if not_found:
self.fail('Category not found', 404,
categories_not_found=sorted(not_found))
return
blob = {'header': categories,
'samples': {}}
df = study.sample_template.to_dataframe()
for idx, row in df[categories].iterrows():
blob['samples'][idx] = list(row)
self.write(json_encode(blob))
self.finish()
class StudySamplesInfoHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
st = study.sample_template
if st is None:
info = {'number-of-samples': 0,
'categories': []}
else:
info = {'number-of-samples': len(st),
'categories': st.categories}
self.write(json_encode(info))
self.finish()
| bsd-3-clause |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_internal/req/req_tracker.py | 14 | 3129 | from __future__ import absolute_import
import contextlib
import errno
import hashlib
import logging
import os
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from types import TracebackType
from typing import Iterator, Optional, Set, Type
from pip._internal.req.req_install import InstallRequirement
from pip._internal.models.link import Link
logger = logging.getLogger(__name__)
class RequirementTracker(object):
def __init__(self):
# type: () -> None
self._root = os.environ.get('PIP_REQ_TRACKER')
if self._root is None:
self._temp_dir = TempDirectory(delete=False, kind='req-tracker')
self._temp_dir.create()
self._root = os.environ['PIP_REQ_TRACKER'] = self._temp_dir.path
logger.debug('Created requirements tracker %r', self._root)
else:
self._temp_dir = None
logger.debug('Re-using requirements tracker %r', self._root)
self._entries = set() # type: Set[InstallRequirement]
def __enter__(self):
# type: () -> RequirementTracker
return self
def __exit__(
self,
exc_type, # type: Optional[Type[BaseException]]
exc_val, # type: Optional[BaseException]
exc_tb # type: Optional[TracebackType]
):
# type: (...) -> None
self.cleanup()
def _entry_path(self, link):
# type: (Link) -> str
hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
return os.path.join(self._root, hashed)
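    # Editorial note (the URL below is illustrative): the tracker keys each
    # requirement by the sha224 of its link minus the fragment, e.g.
    #
    #   hashlib.sha224(b'https://example.com/pkg-1.0.tar.gz').hexdigest()
    #
    # so two requirements for the same artifact map to the same entry file
    # under self._root.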
def add(self, req):
# type: (InstallRequirement) -> None
link = req.link
info = str(req)
entry_path = self._entry_path(link)
try:
with open(entry_path) as fp:
                # Error, there's already a build in progress.
raise LookupError('%s is already being built: %s'
% (link, fp.read()))
except IOError as e:
if e.errno != errno.ENOENT:
raise
assert req not in self._entries
with open(entry_path, 'w') as fp:
fp.write(info)
self._entries.add(req)
logger.debug('Added %s to build tracker %r', req, self._root)
def remove(self, req):
# type: (InstallRequirement) -> None
link = req.link
self._entries.remove(req)
os.unlink(self._entry_path(link))
logger.debug('Removed %s from build tracker %r', req, self._root)
def cleanup(self):
# type: () -> None
for req in set(self._entries):
self.remove(req)
remove = self._temp_dir is not None
if remove:
self._temp_dir.cleanup()
logger.debug('%s build tracker %r',
'Removed' if remove else 'Cleaned',
self._root)
@contextlib.contextmanager
def track(self, req):
# type: (InstallRequirement) -> Iterator[None]
self.add(req)
yield
self.remove(req)
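    # Editorial usage sketch (``req`` stands for some InstallRequirement):
    #
    #   with RequirementTracker() as tracker:
    #       with tracker.track(req):
    #           pass  # build the requirement here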
| apache-2.0 |
karibou/sosreport | sos/plugins/kdump.py | 12 | 1514 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class KDump(Plugin):
"""Kdump crash dumps
"""
plugin_name = "kdump"
profiles = ('system', 'debug')
def setup(self):
self.add_copy_spec([
"/proc/cmdline"
])
class RedHatKDump(KDump, RedHatPlugin):
files = ('/etc/kdump.conf',)
packages = ('kexec-tools',)
def setup(self):
self.add_copy_spec([
"/etc/kdump.conf",
"/etc/udev/rules.d/*kexec.rules",
"/var/crash/*/vmcore-dmesg.txt"
])
class DebianKDump(KDump, DebianPlugin, UbuntuPlugin):
files = ('/etc/default/kdump-tools',)
packages = ('kdump-tools',)
def setup(self):
self.add_copy_spec([
"/etc/default/kdump-tools"
])
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
okfde/odm-datenerfassung | analysis/rtmethodeffectiveness.py | 1 | 5843 | import urllib
import unicodecsv as csv
import sys
def printlist(array):
for item in array:
print item
return
def iscorrectdata(cell):
scell = cell.strip()
if (scell != 'Daten') and (scell != 'Geodaten'):
return False
else:
return True
#Unfortunately, some special handling is needed per city
#But not this much. Will optimize once a new city comes.
cityname = sys.argv[1]
#Key for the sheet with 'final' results, although they are still allowed to include overlaps...
vkey = '1dRTL0fuXYxHDx6R7uW6l6ys1_Hw3s4yYFZD_3TjP2Fk'
#Key for the sheet with lists of files from data catalogs
kkey = '1tkKGeQqlx9YTLlwGiGhkYEG-kswTkQlSp7Kcj6J0Eh4'
removeparameters = False
nocatalog = False
if cityname == 'rostock':
ggid = '336231741'
kgid = '1862363954'
elif cityname == 'ulm':
ggid = '1992341045'
kgid = '1085553358'
elif cityname == 'koeln':
ggid = '2110295467'
kgid = '1846551467'
removeparameters = True
elif cityname == 'bonn':
ggid = '1093780796'
kgid = '596265031'
elif cityname == 'moers':
ggid = '988368152'
kgid = '1743221268'
elif cityname == 'berlin':
ggid = '588106105'
kgid = '1978531407'
elif cityname == 'stuttgart':
ggid = '51929456'
nocatalog = True
elif cityname == 'muenchen':
ggid = '1289667874'
nocatalog = True
googlebingcrawlcsvurl = urllib.urlopen('https://docs.google.com/spreadsheets/d/' + vkey + '/export?single=true&gid=' + ggid + '&format=csv')
if not nocatalog:
catalogcsvurl = urllib.urlopen('https://docs.google.com/spreadsheets/d/' + kkey + '/export?single=true&gid=' + kgid + '&format=csv')
print 'Downloading and reading Google and Bing and Crawl results'
csvreader = csv.reader(googlebingcrawlcsvurl)
firstRow = True
googleresults = dict()
bingresults = dict()
crawlresults = dict()
for row in csvreader:
#print row
if (firstRow):
firstRow = False
print 'Skipping header...'
continue
filename = urllib.unquote(row[4].split('/')[-1])
if removeparameters:
filename = filename.split('?')[0]
filename = filename.strip()
source = row[0].strip()
if source == 'g':
if filename not in googleresults:
print 'Adding ' + filename + ' to Google results.'
googleresults[filename] = row
else:
print 'Warning: ' + filename + ' already in Google results. Not adding.'
elif source == 'b':
if filename not in bingresults:
print 'Adding ' + filename + ' to Bing results.'
bingresults[filename] = row
else:
print 'Warning: ' + filename + ' already in Bing results. Not adding.'
elif source == 'c':
if filename not in crawlresults:
print 'Adding ' + filename + ' to Crawl results.'
crawlresults[filename] = row
else:
print 'Warning: ' + filename + ' already in Crawl results. Not adding.'
else:
print 'Not a valid entry type: ' + row[0]
print 'Downloading and reading Catalog results'
catalogresults = dict()
if not nocatalog:
csvreader = csv.reader(catalogcsvurl)
for row in csvreader:
#Handle wms results specially; they don't appear in the other sources
if 'wms' in row[0]:
filename = urllib.unquote(row[0].split('/')[-2])
else:
filename = urllib.unquote(row[0].split('/')[-1])
if filename not in catalogresults:
print 'Adding ' + filename + ' to Catalog results.'
catalogresults[filename] = row
else:
print 'Warning: ' + filename + ' already in Catalog results. Not adding.'
alldata = []
alldata.append(googleresults)
alldata.append(bingresults)
alldata.append(crawlresults)
alldata.append(catalogresults)
uniquelist = []
for dataset in alldata:
for filename in dataset.keys():
if filename not in uniquelist:
uniquelist.append(filename)
total = len(uniquelist)
gfound = len(googleresults)
bfound = len(bingresults)
cfound = len(crawlresults)
kfound = len(catalogresults)
print 'There are ' + str(total) + ' unique entries based on filename'
print 'Google found ' + str(gfound) + ' of those'
print 'Bing found ' + str(bfound) + ' of those'
print 'Crawler found ' + str(cfound) + ' of those'
print 'Catalog \'found\' ' + str(kfound) + ' of those'
print 'For pasting: '
print str(total) + '\t' + str(gfound) + '\t' + str(bfound) + '\t' + str(cfound) + '\t' + str(kfound)
gset = set(googleresults.keys())
bset = set(bingresults.keys())
cset = set(crawlresults.keys())
kset = set(catalogresults.keys())
allset = set(uniquelist)
intersection = gset.intersection(bset)
print 'Intersection of Google and Bing: ' + str(len(intersection))
printlist(intersection)
intersection = gset.intersection(cset)
print 'Intersection of Google and Crawler: ' + str(len(intersection))
printlist(intersection)
intersection = gset.intersection(kset)
print 'Intersection of Google and Catalog: ' + str(len(intersection))
printlist(intersection)
intersection = bset.intersection(cset)
print 'Intersection of Bing and Crawler: ' + str(len(intersection))
printlist(intersection)
intersection = bset.intersection(kset)
print 'Intersection of Bing and Catalog: ' + str(len(intersection))
printlist(intersection)
intersection = cset.intersection(kset)
print 'Intersection of Crawler and Catalog: ' + str(len(intersection))
printlist(intersection)
difference = allset.difference(kset)
print 'What the catalog doesn\'t contain (' + str(len(difference)) + '):'
for key in difference:
if key in googleresults: print googleresults[key][4]
elif key in bingresults: print bingresults[key][4]
elif key in crawlresults: print crawlresults[key][4]
else: print 'Serious error: item not in set not found in any other source!'
| mit |
YangSongzhou/django | django/contrib/sites/management.py | 467 | 1564 | """
Creates the default Site object.
"""
from django.apps import apps
from django.conf import settings
from django.core.management.color import no_style
from django.db import DEFAULT_DB_ALIAS, connections, router
def create_default_site(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
try:
Site = apps.get_model('sites', 'Site')
except LookupError:
return
if not router.allow_migrate_model(using, Site):
return
if not Site.objects.using(using).exists():
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=getattr(settings, 'SITE_ID', 1), domain="example.com", name="example.com").save(using=using)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[using].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
with connections[using].cursor() as cursor:
for command in sequence_sql:
cursor.execute(command)
| bsd-3-clause |
leppa/home-assistant | homeassistant/components/xfinity/device_tracker.py | 1 | 1754 | """Support for device tracking via Xfinity Gateways."""
import logging
from requests.exceptions import RequestException
import voluptuous as vol
from xfinity_gateway import XfinityGateway
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "10.0.0.1"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string}
)
def get_scanner(hass, config):
"""Validate the configuration and return an Xfinity Gateway scanner."""
gateway = XfinityGateway(config[DOMAIN][CONF_HOST])
scanner = None
try:
gateway.scan_devices()
scanner = XfinityDeviceScanner(gateway)
except (RequestException, ValueError):
_LOGGER.error(
"Error communicating with Xfinity Gateway. " "Check host: %s", gateway.host
)
return scanner
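# Editorial sketch of the configuration this platform schema accepts (the
# host value is illustrative and optional, defaulting to 10.0.0.1):
#
#   device_tracker:
#     - platform: xfinity
#       host: 10.0.0.1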
class XfinityDeviceScanner(DeviceScanner):
"""This class queries an Xfinity Gateway."""
def __init__(self, gateway):
"""Initialize the scanner."""
self.gateway = gateway
def scan_devices(self):
"""Scan for new devices and return a list of found MACs."""
connected_devices = []
try:
connected_devices = self.gateway.scan_devices()
except (RequestException, ValueError):
_LOGGER.error("Unable to scan devices. " "Check connection to gateway")
return connected_devices
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
return self.gateway.get_device_name(device)
| apache-2.0 |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py | 94 | 3691 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Masks one `Series` based on the content of another `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
"""Boolean mask for `SparseTensor`s.
Args:
sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th
      dimension of `sparse_tensor`.
name: optional name for this operation.
Returns:
A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
`True`.
"""
# TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
with ops.name_scope(name, values=[sparse_tensor, mask]):
mask = ops.convert_to_tensor(mask)
mask_rows = array_ops.where(mask)
first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
[0, 0], [-1, 1]))
# Identify indices corresponding to the rows identified by mask_rows.
sparse_entry_matches = functional_ops.map_fn(
lambda x: math_ops.equal(first_indices, x),
mask_rows,
dtype=dtypes.bool)
# Combine the rows of index_matches to form a mask for the sparse indices
# and values.
to_retain = array_ops.reshape(
functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])
return sparse_ops.sparse_retain(sparse_tensor, to_retain)
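# Editorial usage sketch (values are illustrative); sparse_retain keeps the
# entries of the retained rows without renumbering their row indices:
#
#   st = sparse_tensor_py.SparseTensor(indices=[[0, 0], [1, 1], [2, 0]],
#                                      values=[1, 2, 3], dense_shape=[3, 2])
#   masked = sparse_boolean_mask(st, [True, False, True])
#   # -> indices [[0, 0], [2, 0]], values [1, 3]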
@series.Series.register_binary_op("select_rows")
class BooleanMask(transform.TensorFlowTransform):
"""Apply a boolean mask to a `Series`."""
@property
def name(self):
return "BooleanMask"
@property
def input_valency(self):
return 2
@property
def _output_names(self):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
"""Applies the transformation to the `transform_input`.
Args:
input_tensors: a list of Tensors representing the input to
the Transform.
**kwargs: Additional keyword arguments, unused here.
Returns:
A namedtuple of Tensors representing the transformed output.
"""
input_tensor = input_tensors[0]
mask = input_tensors[1]
if mask.get_shape().ndims > 1:
mask = array_ops.squeeze(mask)
if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
mask_fn = sparse_boolean_mask
else:
mask_fn = array_ops.boolean_mask
# pylint: disable=not-callable
return self.return_type(mask_fn(input_tensor, mask))
| apache-2.0 |
Eigenlabs/EigenD | pisession/registry.py | 1 | 3404 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import glob,zipfile,os
import sys
def iscompatible(mod_version, state_version):
mod_version = mod_version.split('.')
state_version = state_version.split('.')
if len(state_version)==2:
state_version.insert(0,'0')
if len(mod_version)==2:
mod_version.insert(0,'0')
if mod_version[0] != state_version[0]:
return False
if mod_version[1] < state_version[1]:
return False
if mod_version[1] > state_version[1]:
return True
if mod_version[2] < state_version[2]:
return False
return True
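# Editorial examples of the rule above (components are compared as strings,
# so e.g. a minor version of '10' sorts before '9'):
#
#   iscompatible('1.2.3', '1.2.1')  # -> True  (same major/minor, newer patch)
#   iscompatible('1.1.9', '1.2.0')  # -> False (module minor is older)
#   iscompatible('2.0.0', '1.9.9')  # -> False (major versions differ)
#
# Two-component versions get a leading '0', so '2.3' is treated as '0.2.3'.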
class Registry:
def __init__(self):
self.__registry={}
def dump(self,dumper):
for (mname,vlist) in self.__registry.iteritems():
for (version,(cversion,module)) in vlist.iteritems():
print '%s:%s:%s %s' % (mname,version,cversion,dumper(module))
def modules(self):
return self.__registry.keys()
def get_module(self,name):
versions = self.__registry.get(name)
if not versions:
return None
vkeys = versions.keys()
vkeys.sort(reverse=True)
(cversion,module) = versions[vkeys[0]]
return module
def get_compatible_module(self,name,state_cversion):
actual_module = None
actual_cversion = None
for (mod_version,mod_cversion,mod) in self.iter_versions(name):
if iscompatible(mod_cversion,state_cversion):
if actual_module is None or mod_cversion>actual_cversion:
actual_module = mod
actual_cversion = mod_cversion
return actual_module
def iter_versions(self,name):
mlist = self.__registry.get(name)
if mlist:
for (version,(cversion,module)) in mlist.iteritems():
yield (version,cversion,module)
def add_module(self,name,version,cversion,module):
r = self.__registry
if name not in r:
r[name] = {}
if version in r[name]:
raise RuntimeError('module %s:%s already defined' % (name,version))
r[name][version] = (cversion,module)
def scan_path(self,directory,klass):
for p in glob.glob(os.path.join(directory,'*')):
try:
m = open(p,'r').read()
except:
continue
for a in m.splitlines():
a = a.split(':')
(name,module,cversion,version) = a[0:4]
self.add_module(name,version,cversion,klass(name,version,cversion,p,module))
for e in a[4:]:
self.add_module(e,version,cversion,klass(name,version,cversion,p,module))
| gpl-3.0 |
ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/prerrequisitos/plc-2.0/build/pyfits-3.2.2/lib/pyfits/hdu/groups.py | 3 | 20247 | import sys
import numpy as np
from pyfits.column import Column, ColDefs, FITS2NUMPY
from pyfits.fitsrec import FITS_rec, FITS_record
from pyfits.hdu.image import _ImageBaseHDU, PrimaryHDU
from pyfits.hdu.table import _TableLikeHDU
from pyfits.util import (lazyproperty, _is_int, _is_pseudo_unsigned,
_unsigned_zero)
class Group(FITS_record):
"""
One group of the random group data.
"""
def __init__(self, input, row=0, start=None, end=None, step=None,
base=None):
super(Group, self).__init__(input, row, start, end, step, base)
@property
def parnames(self):
return self.array.parnames
@property
def data(self):
# The last column in the coldefs is the data portion of the group
return self.field(self.array._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter value.
"""
if _is_int(parname):
result = self.array[self.row][parname]
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.array[self.row][indx[0]]
# if more than one group parameter have the same name
else:
result = self.array[self.row][indx[0]].astype('f8')
for i in indx[1:]:
result += self.array[self.row][i]
return result
def setpar(self, parname, value):
"""
Set the group parameter value.
"""
# TODO: It would be nice if, instead of requiring a multi-part value to
# be an array, there were an *option* to automatically split the value
# into multiple columns if it doesn't already fit in the array data
# type.
if _is_int(parname):
self.array[self.row][parname] = value
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
self.array[self.row][indx[0]] = value
# if more than one group parameter have the same name, the
# value must be a list (or tuple) containing arrays
else:
if isinstance(value, (list, tuple)) and \
len(indx) == len(value):
for i in range(len(indx)):
self.array[self.row][indx[i]] = value[i]
else:
raise ValueError('Parameter value must be a sequence '
'with %d arrays/numbers.' % len(indx))
class GroupData(FITS_rec):
"""
Random groups data object.
Allows structured access to FITS Group data in a manner analogous
to tables.
"""
_record_type = Group
def __new__(cls, input=None, bitpix=None, pardata=None, parnames=[],
bscale=None, bzero=None, parbscales=None, parbzeros=None):
"""
Parameters
----------
input : array or FITS_rec instance
input data, either the group data itself (a
`numpy.ndarray`) or a record array (`FITS_rec`) which will
contain both group parameter info and the data. The rest
of the arguments are used only for the first case.
bitpix : int
data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
64, -32, or -64)
pardata : sequence of arrays
parameter data, as a list of (numeric) arrays.
parnames : sequence of str
list of parameter names.
bscale : int
``BSCALE`` of the data
bzero : int
``BZERO`` of the data
parbscales : sequence of int
list of bscales for the parameters
parbzeros : sequence of int
list of bzeros for the parameters
"""
if not isinstance(input, FITS_rec):
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None] * npars
if parbzeros is None:
parbzeros = [None] * npars
if parnames is None:
parnames = ['PAR%d' % (idx + 1) for idx in range(npars)]
if len(parnames) != npars:
                raise ValueError('The number of parameter data arrays does '
                                 'not match the number of parameters.')
unique_parnames = _unique_parnames(parnames + ['DATA'])
if bitpix is None:
bitpix = _ImageBaseHDU.ImgCode[input.dtype.name]
fits_fmt = GroupsHDU._width2format[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = '%s%s' % (str(input.shape[1:]), format)
formats = ','.join(([format] * npars) + [data_fmt])
gcount = input.shape[0]
cols = [Column(name=unique_parnames[idx], format=fits_fmt,
bscale=parbscales[idx], bzero=parbzeros[idx])
for idx in range(npars)]
cols.append(Column(name=unique_parnames[-1], format=fits_fmt,
bscale=bscale, bzero=bzero))
coldefs = ColDefs(cols)
self = FITS_rec.__new__(cls,
np.rec.array(None,
formats=formats,
names=coldefs.names,
shape=gcount))
self._coldefs = coldefs
self.parnames = parnames
for idx in range(npars):
scale, zero = self._get_scale_factors(idx)[3:5]
if scale or zero:
self._convert[idx] = pardata[idx]
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
scale, zero = self._get_scale_factors(npars)[3:5]
if scale or zero:
self._convert[npars] = input
else:
np.rec.recarray.field(self, npars)[:] = input
else:
self = FITS_rec.__new__(cls, input)
self.parnames = None
return self
def __array_finalize__(self, obj):
super(GroupData, self).__array_finalize__(obj)
if isinstance(obj, GroupData):
self.parnames = obj.parnames
elif isinstance(obj, FITS_rec):
self.parnames = obj._coldefs.names
def __getitem__(self, key):
out = super(GroupData, self).__getitem__(key)
if isinstance(out, GroupData):
out.parnames = self.parnames
return out
@property
def data(self):
# The last column in the coldefs is the data portion of the group
return self.field(self._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter values.
"""
if _is_int(parname):
result = self.field(parname)
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.field(indx[0])
# if more than one group parameter have the same name
else:
result = self.field(indx[0]).astype('f8')
for i in indx[1:]:
result += self.field(i)
return result
class GroupsHDU(PrimaryHDU, _TableLikeHDU):
"""
FITS Random Groups HDU class.
See the :ref:`random-groups` section in the PyFITS documentation for more
details on working with this type of HDU.
"""
_width2format = {8: 'B', 16: 'I', 32: 'J', 64: 'K', -32: 'E', -64: 'D'}
_data_type = GroupData
def __init__(self, data=None, header=None):
super(GroupsHDU, self).__init__(data=data, header=header)
# The name of the table record array field that will contain the group
        # data for each group; 'data' by default, but may be preceded by any
# number of underscores if 'data' is already a parameter name
self._data_field = 'DATA'
# Update the axes; GROUPS HDUs should always have at least one axis
if len(self._axes) <= 0:
self._axes = [0]
self._header['NAXIS'] = 1
self._header.set('NAXIS1', 0, after='NAXIS')
@classmethod
def match_header(cls, header):
keyword = header.cards[0].keyword
return (keyword == 'SIMPLE' and 'GROUPS' in header and
header['GROUPS'] == True)
@lazyproperty
def data(self):
"""
The data of a random group FITS file will be like a binary table's
data.
"""
data = self._get_tbdata()
data._coldefs = self.columns
data.formats = self.columns.formats
data.parnames = self.parnames
del self.columns
return data
@lazyproperty
def parnames(self):
"""The names of the group parameters as described by the header."""
pcount = self._header['PCOUNT']
# The FITS standard doesn't really say what to do if a parname is
# missing, so for now just assume that won't happen
return [self._header['PTYPE' + str(idx + 1)] for idx in range(pcount)]
@lazyproperty
def columns(self):
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
format = self._width2format[self._header['BITPIX']]
pcount = self._header['PCOUNT']
parnames = []
bscales = []
bzeros = []
for idx in range(pcount):
bscales.append(self._header.get('PSCAL' + str(idx + 1), 1))
bzeros.append(self._header.get('PZERO' + str(idx + 1), 0))
parnames.append(self._header['PTYPE' + str(idx + 1)])
# Now create columns from collected parameters, but first add the DATA
# column too, to contain the group data.
formats = [format] * len(parnames)
parnames.append('DATA')
bscales.append(self._header.get('BSCALE', 1))
        bzeros.append(self._header.get('BZERO', 0))
data_shape = self.shape[:-1]
formats.append(str(int(np.array(data_shape).sum())) + format)
parnames = _unique_parnames(parnames)
self._data_field = parnames[-1]
cols = [Column(name=name, format=fmt, bscale=bscale, bzero=bzero)
for name, fmt, bscale, bzero in
zip(parnames, formats, bscales, bzeros)]
coldefs = ColDefs(cols)
# TODO: Something has to be done about this spaghetti code of arbitrary
# attributes getting tacked on to the coldefs here.
coldefs._shape = self._header['GCOUNT']
coldefs._dat_format = FITS2NUMPY[format]
return coldefs
@lazyproperty
def _theap(self):
# Only really a lazyproperty for symmetry with _TableBaseHDU
return 0
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
# for random group image, NAXIS1 should be 0, so we skip NAXIS1.
if naxis > 1:
size = 1
for idx in range(1, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def update_header(self):
old_naxis = self._header.get('NAXIS', 0)
if self._data_loaded:
if isinstance(self.data, GroupData):
self._axes = list(self.data.data.shape)[1:]
self._axes.reverse()
self._axes = [0] + self._axes
field0 = self.data.dtype.names[0]
field0_code = self.data.dtype.fields[field0][0].name
elif self.data is None:
self._axes = [0]
field0_code = 'uint8' # For lack of a better default
else:
raise ValueError('incorrect array type')
self._header['BITPIX'] = _ImageBaseHDU.ImgCode[field0_code]
self._header['NAXIS'] = len(self._axes)
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
if (idx == 0):
after = 'NAXIS'
else:
after = 'NAXIS' + str(idx)
self._header.set('NAXIS' + str(idx + 1), axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header['NAXIS' + str(idx)]
except KeyError:
pass
if self._has_data and isinstance(self.data, GroupData):
self._header.set('GROUPS', True,
after='NAXIS' + str(len(self._axes)))
self._header.set('PCOUNT', len(self.data.parnames), after='GROUPS')
self._header.set('GCOUNT', len(self.data), after='PCOUNT')
npars = len(self.data.parnames)
scale, zero = self.data._get_scale_factors(npars)[3:5]
if scale:
self._header.set('BSCALE', self.data._coldefs.bscales[npars])
if zero:
self._header.set('BZERO', self.data._coldefs.bzeros[npars])
for idx in range(npars):
self._header.set('PTYPE' + str(idx + 1),
self.data.parnames[idx])
scale, zero = self.data._get_scale_factors(idx)[3:5]
if scale:
self._header.set('PSCAL' + str(idx + 1),
self.data._coldefs.bscales[idx])
if zero:
self._header.set('PZERO' + str(idx + 1),
self.data._coldefs.bzeros[idx])
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
if len(self._axes):
after = 'NAXIS' + str(len(self._axes))
else:
after = 'NAXIS'
self._header.set('EXTEND', after=after)
def _get_tbdata(self):
# get the right shape for the data part of the random group,
# since binary table does not support ND yet
self.columns._recformats[-1] = (repr(self.shape[:-1]) +
self.columns._dat_format)
return super(GroupsHDU, self)._get_tbdata()
def _writedata_internal(self, fileobj):
"""
Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
we have to get the data's byte order a different way...
TODO: Might be nice to store some indication of the data's byte order
as an attribute or function so that we don't have to do this.
"""
size = 0
if self.data is not None:
self.data._scale_back()
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_unsigned(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _unsigned_zero(self.data.dtype),
dtype='>i%d' % self.data.dtype.itemsize)
should_swap = False
else:
output = self.data
fname = self.data.dtype.names[0]
byteorder = self.data.dtype.fields[fname][0].str[0]
should_swap = (byteorder in swap_types)
if not fileobj.simulateonly:
if should_swap:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _verify(self, option='warn'):
errs = super(GroupsHDU, self)._verify(option=option)
# Verify locations and values of mandatory keywords.
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and v >= 1 and v <= 999), 1,
option, errs)
self.req_cards('NAXIS1', 3, lambda v: (_is_int(v) and v == 0), 0,
option, errs)
after = self._header['NAXIS'] + 3
pos = lambda x: x >= after
self.req_cards('GCOUNT', pos, _is_int, 1, option, errs)
self.req_cards('PCOUNT', pos, _is_int, 0, option, errs)
self.req_cards('GROUPS', pos, lambda v: (v == True), True, option,
errs)
return errs
def _calculate_datasum(self, blocking):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
byteorder = \
self.data.dtype.fields[self.data.dtype.names[0]][0].str[0]
if byteorder != '>':
byteswapped = True
d = self.data.byteswap(True)
d.dtype = d.dtype.newbyteorder('>')
else:
byteswapped = False
d = self.data
cs = self._compute_checksum(np.fromstring(d, dtype='ubyte'),
blocking=blocking)
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped:
d.byteswap(True)
d.dtype = d.dtype.newbyteorder('<')
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
            # all. This can also be handled in a generic manner.
return super(GroupsHDU, self)._calculate_datasum(blocking=blocking)
def _summary(self):
summary = super(GroupsHDU, self)._summary()
name, classname, length, shape, format, gcount = summary
# Drop the first axis from the shape
if shape:
shape = shape[1:]
if shape and all(shape):
# Update the format
format = self.columns[0].dtype.name
# Update the GCOUNT report
gcount = '%d Groups %d Parameters' % (self._gcount, self._pcount)
return (name, classname, length, shape, format, gcount)
def _par_indices(names):
"""
Given a list of objects, returns a mapping of objects in that list to the
index or indices at which that object was found in the list.
"""
unique = {}
for idx, name in enumerate(names):
# Case insensitive
name = name.upper()
if name in unique:
unique[name].append(idx)
else:
unique[name] = [idx]
return unique
def _unique_parnames(names):
"""
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive.
"""
upper_names = set()
unique_names = []
for name in names:
name_upper = name.upper()
while name_upper in upper_names:
name = '_' + name
name_upper = '_' + name_upper
unique_names.append(name)
upper_names.add(name_upper)
return unique_names
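# Editorial examples of the two helpers above (inputs are illustrative):
#
#   _par_indices(['DATE', 'date', 'FLUX'])  # -> {'DATE': [0, 1], 'FLUX': [2]}
#   _unique_parnames(['DATA', 'data'])      # -> ['DATA', '_data']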
| gpl-3.0 |
yongshengwang/hue | desktop/core/ext-py/guppy-0.1.10/build/lib.linux-x86_64-2.7/guppy/etc/RE.py | 37 | 18932 | #._cv_part guppy.etc.RE
from guppy.etc.RE_Rect import chooserects
from guppy.etc.IterPermute import iterpermute
class InfiniteError(Exception):
pass
class WordsMemo:
def __init__(self, re, ch):
self.re = re
self.ch = ch
self.xs = {}
self.N = 0
def get_words_of_length(self, N):
        # Return the list of words of length exactly N
if N not in self.xs:
self.xs[N] = self.re.get_words_of_length_memoized(N, self)
return self.xs[N]
def get_words_of_length_upto(self, N):
# Return all words of length up to N, in the form
# [(0, <list of words of length 0>),
# (1, <list of words of length 0>),
# ...]
xsu = []
for i in range(N+1):
xs = self.get_words_of_length(i)
if xs:
xsu.append((i, xs))
return xsu
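    # Editorial sketch: for KleeneClosure(Single('a')) the memo enumerates
    # exactly one word per length, so
    #   KleeneClosure(Single('a')).get_words_memo().get_words_of_length_upto(2)
    # yields [(0, [Epsilon]), (1, [Seq('a')]), (2, [Seq('a', 'a')])].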
REBASE = tuple
class RE(REBASE):
# Regular expression nodes
# The operators are choosen to be compatible with Pythonic standards:
# o sets : using | for union
# o strings, sequences : using + for concatenation.
#
# This differs from mathematical presentations of regular
# expressions where + is the union, but it seemed more important
# to not confuse the Python usage.
# There are also operators for closure x*, x+ that can not be
# represented directly in Python expressions and these were choosen
# to use a function call syntax.
# The following table summarizes the operators.
# RE node expr re lib mathematical name
# x + y x y x y Concatenation
# x | y x | y x + y Union
# x('*') x* x* Kleene closure
# x('+') x+ x+ Positive closure
# x('?') x?
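    # Editorial illustration of the table above (a and b are hypothetical
    # Single nodes):
    #   a, b = Single('a'), Single('b')
    #   a + b         # Concatenation, regexp form "a b"
    #   a | b         # Union,         regexp form "a | b"
    #   a('*')        # KleeneClosure, regexp form "a*"
    #   (a + b)('?')  # EpsilonOrOne,  regexp form "(a b)?"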
_re_special = r'.^$*+?{}\[]|()'
def __add__(a, b):
if isinstance(b, RE):
return concat(a, b)
else:
return Concatenation(a, Single(b))
def __call__(a, *args, **kwds):
if not kwds:
if args == ('*',):
return KleeneClosure(a)
elif args == ('+',):
return PositiveClosure(a)
elif args == ('?',):
return EpsilonOrOne(a)
raise ValueError, "Argument to regular expression must be '*' or '+' or '?'"
def __eq__(a, b):
return (a._name == b._name and
tuple(a) == tuple(b))
def __lt__(a, b):
if a._name == b._name:
return tuple(a) < tuple(b)
else:
return a._name < b._name
def __or__(a, b):
return Union(a, b)
def get_num_closures(self):
ns = 0
for ch in self:
ns += ch.get_num_closures()
return ns
def get_num_syms(self):
ns = 0
for ch in self:
ns += ch.get_num_syms()
return ns
def get_sum_sym_lengths(self):
ns = 0
for ch in self:
ns += ch.get_sum_sym_lengths()
return ns
def get_words_memo(self):
ch = [x.get_words_memo() for x in self]
return WordsMemo(self, ch)
def get_words_of_length(self, N):
xs = self.get_words_memo()
return xs.get_words_of_length(N)
def mapchildren(self, f):
return self.__class__(*[f(x) for x in self])
def regexpform(self):
return self.mappedrepr(regexpname)
def reversed(self):
return self.mapchildren(lambda x:x.reversed())
def rempretup(self):
def f(x):
if isinstance(x, Seq):
if x is not Epsilon and isinstance(x[0], tuple):
ws = x[1:]
return Seq(*ws)
else:
return x
return x.mapchildren(f)
return f(self)
def seqatoms(self):
sa = []
self.apseqatoms(sa.append)
return sa
def sequni(self):
d = {}
us = []
def ap(x):
if x not in d:
d[x] = 1
us.append(x)
self.apseq(ap)
return Union(*us)
def shform(self, conc = ' '):
r = self.mappedrepr(regexpname)
if conc != ' ':
r = conc.join(r.split(' '))
return r
def simplified(self, *a, **k):
return self
def simulform(self):
def f(x):
if x == '':
return '()'
return str(x)
return self.mappedrepr(f)
def regexpname(s):
if s == '':
return '()'
special = RE._re_special
ren = []
for c in str(s):
if c in special+"', ":
#c = r'\%s'%c
c = ''
ren.append(c)
return ''.join(ren)
def re_compare(a, b):
return a.__cmp__(b)
class Seq(RE):
_priority = 0
_name = 'Seq'
def __new__(clas, *symbols):
if not symbols:
return Epsilon
return REBASE.__new__(clas, symbols)
def __repr__(self):
return '%s(%s)'%(self.__class__.__name__, ', '.join(['%r'%(x,) for x in self]))
def __hash__(self):
return hash(repr(self))
def apseq(self, ap):
ap(self)
def apseqatoms(self, ap):
for x in self:
ap(Single(x))
def get_num_closures(self):
return 0
def get_num_syms(self):
return len(self)
def get_sum_sym_lengths(self):
s = 0
for x in self:
s += len(str(x))
return s
def get_words_memo(self):
return WordsMemo(self, ())
def get_words_of_length_memoized(self, N, memo):
if N == len(self):
return [self]
else:
return []
def limited(self, N):
return self
def mappedrepr(self, f):
if not self:
return f('')
return ' '.join(['%s'%(f(x),) for x in self])
def reversed(self):
r = list(self)
r.reverse()
return self.__class__(*r)
def unionsplitted(self):
return [self]
def Single(symbol):
return REBASE.__new__(Seq, (symbol,))
Epsilon = REBASE.__new__(Seq, ())
def concat(*args):
args = [x for x in args if x is not Epsilon]
if len(args) < 2:
if not args:
return Epsilon
return args[0]
return REBASE.__new__(Concatenation, args)
class Concatenation(RE):
_priority = 2
_name = 'Concat'
def __new__(clas, *args):
#assert Epsilon not in args
if len(args) < 2:
if not args:
return Epsilon
return args[0]
return REBASE.__new__(clas, args)
def __repr__(self):
rs = []
for ch in self:
r = '%r'%(ch,)
if ch._priority > self._priority:
r = '(%s)'%(r,)
rs.append(r)
return ' + '.join(rs)
def apseq(self, ap):
uns = [x.sequni() for x in self]
ixs = [0]*len(uns)
while 1:
xs = []
for (i, us) in enumerate(uns):
for x in us[ixs[i]]:
if x is not Epsilon:
xs.append(x)
ap(Seq(*xs))
j = 0
for j, ix in enumerate(ixs):
ix += 1
if ix >= len(uns[j]):
ix = 0
ixs[j] = ix
if ix != 0:
break
else:
break
def apseqatoms(self, ap):
for x in self:
x.apseqatoms(ap)
def get_words_of_length_memoized(self, N, memo):
chxs = []
for ch in memo.ch:
chxs.append(ch.get_words_of_length_upto(N))
xs = []
seen = {}
def ads(xx, i, n):
if i == len(chxs):
if n == N:
for toconc in iterpermute(*xx):
conc = simple_Concatenation(toconc)
if conc not in seen:
xs.append(conc)
seen[conc] = 1
else:
for m, x in chxs[i]:
if n + m <= N:
ads(xx + [x], i + 1, n + m)
ads([], 0, 0)
return xs
def limited(self, N):
return Concatenation(*[x.limited(N) for x in self])
def mappedrepr(self, f):
rs = []
for ch in self:
r = ch.mappedrepr(f)
if ch._priority > self._priority:
r = '(%s)'%(r,)
rs.append(r)
return ' '.join(rs)
def reversed(self):
r = [x.reversed() for x in self]
r.reverse()
return self.__class__(*r)
def simplified(self, *a, **k):
conc = [x.simplified(*a, **k) for x in self]
sa = []
for c in conc:
for a in c.seqatoms():
sa.append(a)
return simple_Concatenation(sa)
def unionsplitted(self):
runs = []
uns = []
for (i, x) in enumerate(self):
us = x.unionsplitted()
if len(us) > 1:
uns.append((i, us))
if not uns:
return [self]
ixs = [0]*len(uns)
ch = list(self)
while 1:
xs = []
i0 = 0
for j, (i, us) in enumerate(uns):
xs.extend(ch[i0:i])
ix = ixs[j]
xs.append(us[ix])
i0 = i + 1
xs.extend(ch[i0:])
runs.append( concat(*xs) )
j = 0
for j, ix in enumerate(ixs):
ix += 1
if ix >= len(uns[j][1]):
ix = 0
ixs[j] = ix
if ix != 0:
break
else:
return runs
class SimplifiedConcatenation(Concatenation):
def simplified(self, *a, **k):
# pdb.set_trace()
return self
def conclosure(conc):
# Simplification noted Mar 5 2005
# Simplify ... b b* ... or ... b* b ... to ... b+ ...
# conc is a sequence of regular expressions
seen = {}
nconc = []
w0 = None
for w in conc:
if w0 is not None:
if (w._name == '*' and # Not isinstance(KleeneClosure), would catch PositiveClosure
w[0] == w0):
w = PositiveClosure(w0)
elif (w0._name == '*' and
w0[0] == w):
w = PositiveClosure(w)
else:
if w0 is not None:
nconc.append(w0)
w0 = w
if w0 is not None:
nconc.append(w0)
return nconc
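# Editorial example of the rewrite above (node values are illustrative):
#
#   conclosure([Single('b'), KleeneClosure(Single('b'))])
#   # -> [PositiveClosure(Single('b'))]      i.e.  b b*  becomes  b+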
def simple_Concatenation(conc):
if len(conc) > 1:
conc0 = conc
conc = conclosure(conc)
nconc = []
i = 0
j = 0
while i < len(conc):
e = conc[i]
if not isinstance(e, Seq):
i += 1
nconc.append(e)
continue
j = i
while j < len(conc):
if not isinstance(conc[j], Seq):
break
j += 1
if j == i + 1:
nconc.append(e)
else:
syms = []
for k in range(i, j):
e = conc[k]
syms.extend(list(e))
nconc.append(Seq(*syms))
i = j
if len(nconc) > 1:
return Concatenation(*nconc)
elif nconc:
return nconc[0]
else:
return Epsilon
gauges = [
lambda x:x.get_num_syms(),
lambda x:x.get_num_closures(),
lambda x:x.get_sum_sym_lengths()
]
def simpleunion(lines, trace=''):
choosen = chooserects(lines, gauges, trace)
have_epsilon = 0
while 1:
if len(choosen) == 1 and (choosen[0].width == 0 or len(choosen[0].lines) == 1):
us = []
for line in choosen[0].lines:
if line:
us.append(line)
else:
have_epsilon = 1
break
us = []
for r in choosen:
conc = r.get_common_part()
olines = r.get_uncommons()
u = simpleunion(olines)
if u is not Epsilon:
if r.dir == -1:
conc = [u]+conc
else:
conc = conc + [u]
if conc:
us.append(conc)
else:
have_epsilon = 1
assert not isinstance(us[-1], str)
choosen = chooserects(us, gauges, trace)
if len(us) > 1:
nus = [simple_Concatenation(line) for line in us]
u = SimplifiedUnion(*nus)
elif us:
u = simple_Concatenation(us[0])
else:
u = None
if have_epsilon:
if u is not None:
u = simple_EpsilonOrOne(u)
else:
u = Epsilon
return u
class Union(RE):
_priority = 3
_name = 'Union'
def __new__(clas, *args):
return REBASE.__new__(clas, args)
def __repr__(self):
rs = []
for ch in self:
r = '%r'%(ch,)
if ch._priority > self._priority:
r = '(%s)'%r
rs.append(r)
return ' | '.join(rs)
def apseq(self, ap):
for c in self:
c.apseq(ap)
def apseqatoms(self, ap):
for x in self:
x.apseqatoms(ap)
def get_words_of_length_memoized(self, N, memo):
xs = []
seen = {}
for ch in memo.ch:
for x in ch.get_words_of_length(N):
if x not in seen:
seen[x] = 1
xs.append(x)
return xs
def limited(self, N):
uni = [x.limited(N) for x in self]
for i, x in enumerate(uni):
if x is not self[i]:
return self.__class__(*uni)
return self
def mappedrepr(self, f):
rs = []
for ch in self:
r = '%s'%(ch.mappedrepr(f),)
if ch._priority > self._priority:
r = '(%s)'%r
rs.append(r)
return ' | '.join(rs)
def simplified(self, args=None, trace='', *a, **k):
if args is None:
args = [x.simplified() for x in self.unionsplitted()]
#args = [x for x in self.unionsplitted()]
# Create a simplfied union
# Assuming args are simplified, non-unions
ch = [a.seqatoms() for a in args]
return simpleunion(ch, trace)
def unionsplitted(self):
us = []
for x in self:
us.extend(list(x.unionsplitted()))
return us
class SimplifiedUnion(Union):
def simplified(self, *a, **k):
return self
class Called(RE):
_priority = 1
def __new__(clas, arg):
return REBASE.__new__(clas, (arg,))
def __repr__(self):
ch = self[0]
r = '%r'%(ch,)
if ch._priority > self._priority:
r = '(%s)'%r
return "%s(%r)"%(r, self._name)
def apseqatoms(self, ap):
ap(self)
def get_num_closures(self):
return 1 + self[0].get_num_closures()
def mappedrepr(self, f):
ch = self[0]
r = ch.mappedrepr(f)
if (ch._priority > self._priority
or isinstance(ch, Seq) and len(ch) > 1):
r = '(%s)'%r
return "%s%s"%(r, self._name)
def simplified(self, *a, **k):
return self.__class__(self[0].simplified(*a, **k))
class Closure(Called):
def get_words_of_length_memoized(self, N, memo):
if N == 0:
return [Epsilon]
if N == 1:
return memo.ch[0].get_words_of_length(1)
xs = []
seen = {}
for i in range(1, N):
a = memo.get_words_of_length(i)
b = memo.get_words_of_length(N-i)
for ai in a:
for bi in b:
aibi = simple_Concatenation((ai, bi))
if aibi not in seen:
xs.append(aibi)
seen[aibi] = 1
for x in memo.ch[0].get_words_of_length(N):
if x not in seen:
xs.append(x)
seen[x] = 1
return xs
def unionsplitted(self):
return [self]
class KleeneClosure(Closure):
_name = '*'
def apseq(self, ap):
raise InfiniteError, 'apseq: Regular expression is infinite: contains a Kleene Closure'
def limited(self, N):
if N == 0:
return Epsilon
cl = self[0].limited(N)
uni = []
for i in range(N+1):
toconc = [cl]*i
uni.append(Concatenation(*toconc))
return Union(*uni)
def simplified(self, *a, **k):
return simple_KleeneClosure(self[0].simplified(*a, **k))
def simple_KleeneClosure(x):
# (b+)* -> b*
if x._name == '+':
return simple_KleeneClosure(x[0])
return KleeneClosure(x)
class PositiveClosure(Closure):
_name = '+'
def apseq(self, ap):
raise InfiniteError, 'apseq: Regular expression is infinite: contains a Positive Closure'
def apseqatoms(self, ap):
self[0].apseqatoms(ap)
simple_KleeneClosure(self[0]).apseqatoms(ap)
def get_words_of_length_memoized(self, N, memo):
if N <= 1:
return memo.ch[0].get_words_of_length(N)
return Closure.get_words_of_length_memoized(self, N, memo)
def limited(self, N):
a = self[0].limited(N)
b = KleeneClosure(self[0]).limited(N)
return Concatenation(a, b)
class EpsilonOrOne(Called):
_name = '?'
def apseq(self, ap):
ap(Epsilon)
self[0].apseq(ap)
def get_words_of_length_memoized(self, N, memo):
if N == 0:
return [Epsilon]
return memo.ch[0].get_words_of_length(N)
def limited(self, N):
x = self[0].limited(N)
if x is not self[0]:
self = self.__class__(x)
return self
def simplified(self, *a, **k):
return simple_EpsilonOrOne(self[0].simplified(*a, **k))
def unionsplitted(self):
return [Epsilon] + list(self[0].unionsplitted())
def simple_EpsilonOrOne(x):
# (a+)? -> a*
if x._name == '+':
return simple_KleeneClosure(x)
# (a*)? -> a*
if x._name == '*':
return x
return EpsilonOrOne(x)
class RegularSystem:
def __init__(self, table, Start, final_states):
self.table = table
self.Start = Start
self.Final = '358f0eca5c34bacdfbf6a8ac0ccf84bc'
self.final_states = final_states
def pp(self):
def statename(state):
try:
name = self.names[state]
except KeyError:
name = str(state)
return name
def transname(trans):
name = trans.simulform()
if trans._priority > 1:
name = '(%s)'%(name,)
return name
self.setup_names()
X = self.X
xs = [self.Start]+self.order
xs.append(self.Final)
for Xk in xs:
if Xk not in X:
continue
print '%3s = '%(statename(Xk),),
Tk = X[Xk]
es = []
for Xj in xs:
if Xj in Tk:
es.append('%s %s'%(transname(Tk[Xj]), statename(Xj)))
if es:
print ' | '.join(es)
else:
print
def setup_equations(self):
table = self.table
final_states = self.final_states
Final = self.Final
self.X = X = {Final:{}}
for Xi, transitions in table.items():
X[Xi] = Ti = {}
for (symbol, Xj) in transitions.items():
Ti.setdefault(Xj, []).append(Single(symbol))
for Xj, Aij in Ti.items():
if len(Aij) > 1:
Aij.sort()
Aij = Union(*Aij)
else:
Aij = Aij[0]
Ti[Xj] = Aij
if Xi in final_states:
Ti[Final] = Epsilon
def setup_order(self):
def dists(X, start):
i = 0
S = {start:i}
news = [start]
while news:
oldnews = news
news = []
i += 1
for s in oldnews:
if s not in X:
continue
for t in X[s]:
if t not in S:
news.append(t)
S[t] = i
return S
def start_distance(x):
return start_dists[x]
def sumt(f):
memo = {}
def g(x):
if x in memo:
return memo[x]
s = 0.0
for y in X[x]:
s += f(y)
memo[x] = s
return s
return g
def cmp3(x, y):
# Comparison for the sorting of equation solving order
# First in list = solved last
if x is y:
return 0
c = cmp(len(X[y]), len(X[x])) # Equations with more terms are resolved later
if c:
return c
# The equations with terms more distant from start node will be resolved earlier
i = 0
while i < 10: # 4 was enough with tests so far at Feb 24 2005
try:
f = sumdists[i]
except:
f = sumt(sumdists[i-1])
sumdists.append(f)
c = cmp(f(x), f(y))
if c:
return c
i += 1
#pdb.set_trace()
return cmp(x, y)
sumdists = [start_distance]
X = self.X
Start = self.Start
Final = self.Final
start_dists = dists(X, Start)
order = [x for x in start_dists if x is not Start and x is not Final]
order.sort(cmp3)
self.order = order
def setup_names(self):
try:
self.order
except AttributeError:
self.setup_order()
self.names = {}
self.names[self.Start] = 'X0'
for i, s in enumerate(self.order):
self.names[s] = 'X%d'%(i+1)
self.names[self.Final] = 'Final'
def solve(self):
# Set up equation system
self.setup_equations()
self.setup_order()
X = self.X
Start = self.Start
Final = self.Final
todo = list(self.order)
# Solve equation system
while todo:
Xk = todo.pop()
Tk = X[Xk]
if Xk in Tk:
# Recursive equation
                # Eliminate Akk Xk, using Arden's rule
# Given:
                # Xk = Ak0 X0 | ... Akk Xk | ... | Akn Xn
# we get:
# Xk = Akk* (Ak0 X0 | ... <no Xk> ... | Akn Xn)
# which we evaluate to:
# Xk = Bk0 X0 | ... Bkn Xn
# where coefficients get the new values
# Bki := Akk* Aki
Akk = Tk[Xk]
del Tk[Xk]
AkkStar = Akk('*')
for Xi, Aki in Tk.items():
Bki = AkkStar + Aki
Tk[Xi] = Bki
# Substitute Xk in each other equation in X
# containing Xk, except equation Xk itself, which will not be used any more.
del X[Xk]
for Xj, Tj in X.items():
Bjk = Tj.get(Xk)
if Bjk is None:
continue
del Tj[Xk]
for Xji, Tk_Xji in Tk.items():
Cji = (Bjk + Tk_Xji)
Bji = Tj.get(Xji)
if Bji is not None:
Cji = Bji | Cji
Tj[Xji] = Cji
# The equation system is now solved
# The result is in Final term of Start equation
return X[Start][Final]
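# Illustrative worked example of the elimination step (the helper below
# is hypothetical and independent of the classes above): for X = A X | B
# the unique solution is X = A* B, shown here with plain-string
# coefficients instead of regex objects.
def _arden_demo(coeffs, self_key):
    a = coeffs.pop(self_key)  # A, the recursive coefficient
    return dict((k, '(%s)*%s' % (a, v)) for k, v in coeffs.items())
# _arden_demo({'X': 'a', 'Final': 'b'}, 'X') == {'Final': '(a)*b'}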
Nothing = Union()
def SolveFSA(fsa):
RS = RegularSystem(fsa.table, fsa.start_state, fsa.final_states)
return RS.solve()
| apache-2.0 |
hustodemon/spacewalk | backend/server/rhnSQL/__init__.py | 1 | 9985 | #
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# entry points for the rhnSQL module
#
import sys
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnException import rhnException
from spacewalk.common.rhnTB import add_to_seclist
# SQL objects
import sql_table
import sql_row
import sql_sequence
import dbi
import sql_types
types = sql_types
from const import ORACLE, POSTGRESQL, SUPPORTED_BACKENDS
# expose exceptions
from sql_base import SQLError, SQLSchemaError, SQLConnectError, \
SQLStatementPrepareError, Statement, ModifiedRowError
# This module works with a private global __DB object that is
# instantiated by the initDB call. This object/instance should NEVER,
# EVER be exposed to the calling applications.
def __init__DB(backend, host, port, username, password, database, sslmode, sslrootcert):
"""
Establish and check the connection so we can wrap it and handle
exceptions.
"""
# __DB global object created here and pushed into the global namespace.
global __DB
try:
my_db = __DB
except NameError: # __DB has not been set up
db_class = dbi.get_database_class(backend=backend)
__DB = db_class(host, port, username, password, database, sslmode, sslrootcert)
__DB.connect()
return
else:
del my_db
if __DB.is_connected_to(backend, host, port, username, password,
database, sslmode, sslrootcert):
__DB.check_connection()
return
__DB.commit()
__DB.close()
# now we have to get a different connection
__DB = dbi.get_database_class(backend=backend)(
host, port, username, password, database, sslmode, sslrootcert)
__DB.connect()
return 0
def __init__DB2(backend, host, port, username, password, database, sslmode, sslrootcert):
"""
Establish and check the connection so we can wrap it and handle
exceptions.
"""
# __DB2 global object created here and pushed into the global namespace.
global __DB2
try:
my_db = __DB2
except NameError: # __DB2 has not been set up
db_class = dbi.get_database_class(backend=backend)
__DB2 = db_class(host, port, username, password, database, sslmode, sslrootcert)
__DB2.connect()
return
else:
del my_db
if __DB2.is_connected_to(backend, host, port, username, password,
database, sslmode, sslrootcert):
__DB2.check_connection()
return
__DB2.commit()
__DB2.close()
# now we have to get a different connection
__DB2 = dbi.get_database_class(backend=backend)(
host, port, username, password, database, sslmode, sslrootcert)
__DB2.connect()
return 0
def initDB(backend=None, host=None, port=None, username=None,
password=None, database=None, sslmode=None, sslrootcert=None, initsecond=False):
"""
Initialize the database.
Either we get backend and all parameter which means the caller
knows what they are doing, or we populate everything from the
config files.
initsecond: If set to True it initializes a second DB connection.
By default only one DB connection is needed.
"""
if backend is None:
if CFG is None or not CFG.is_initialized():
initCFG('server')
backend = CFG.DB_BACKEND
host = CFG.DB_HOST
port = CFG.DB_PORT
database = CFG.DB_NAME
username = CFG.DB_USER
password = CFG.DB_PASSWORD
sslmode = None
sslrootcert = None
if CFG.DB_SSL_ENABLED:
sslmode = 'verify-full'
sslrootcert = CFG.DB_SSLROOTCERT
if backend not in SUPPORTED_BACKENDS:
raise rhnException("Unsupported database backend", backend)
if port:
port = int(port)
# Hide the password
add_to_seclist(password)
try:
if not initsecond:
__init__DB(backend, host, port, username, password, database, sslmode, sslrootcert)
else:
__init__DB2(backend, host, port, username, password, database, sslmode, sslrootcert)
# except (rhnException, SQLError):
# raise # pass on, we know those ones
# except (KeyboardInterrupt, SystemExit):
# raise
except SQLConnectError, e:
try:
closeDB()
except NameError:
pass
raise e
except:
raise
#e_type, e_value = sys.exc_info()[:2]
# raise rhnException("Could not initialize Oracle database connection",
# str(e_type), str(e_value))
return 0
def __closeDB2():
global __DB2
try:
my_db = __DB2
except NameError:
return
else:
del my_db
# can be None
if not __DB2:
del __DB2
return
__DB2.commit()
__DB2.close()
del __DB2
return
# close the database
def closeDB():
global __DB
try:
my_db = __DB
except NameError:
__closeDB2()
return
else:
del my_db
__DB.commit()
__DB.close()
del __DB
__closeDB2()
return
# common function for testing the connection state (i.e., that __DB is defined)
def __test_DB():
global __DB
try:
return __DB
except NameError:
raise SystemError("Not connected to any database!"), None, sys.exc_info()[2]
def __test_DB2():
global __DB2
try:
return __DB2
except NameError:
# try to open the connection
try:
initDB(initsecond=True)
return __DB2
except NameError:
raise SystemError("Not connected to secondary database!"), None, sys.exc_info()[2]
# wrapper for a Procedure callable class
def Procedure(name):
db = __test_DB()
return db.procedure(name)
# wrapper for a Procedure callable class
def Function(name, ret_type):
db = __test_DB()
return db.function(name, ret_type)
# Wrapper for the Sequence class
def Sequence(seq):
db = __test_DB()
return sql_sequence.Sequence(db, seq)
# Wrapper for the Row class
def Row(table, hash_name, hash_value=None):
db = __test_DB()
return sql_row.Row(db, table, hash_name, hash_value)
# Wrapper for the Table class
def Table(table, hash_name, local_cache=0):
db = __test_DB()
return sql_table.Table(db, table, hash_name, local_cache)
###########################
# Functions points of entry
###########################
def cursor():
db = __test_DB()
return db.cursor()
def prepare(sql, blob_map=None):
db = __test_DB()
if isinstance(sql, Statement):
sql = sql.statement
return db.prepare(sql, blob_map=blob_map)
def prepare_secondary(sql, blob_map=None):
db = __test_DB2()
if isinstance(sql, Statement):
sql = sql.statement
return db.prepare(sql, blob_map=blob_map)
def execute(sql, *args, **kwargs):
db = __test_DB()
return db.execute(sql, *args, **kwargs)
def execute_secondary(sql, *args, **kwargs):
db = __test_DB2()
return db.execute(sql, *args, **kwargs)
def fetchall_dict(sql, *args, **kwargs):
h = prepare(sql)
h.execute(sql, *args, **kwargs)
return h.fetchall_dict()
def fetchone_dict(sql, *args, **kwargs):
h = prepare(sql)
h.execute(sql, *args, **kwargs)
return h.fetchone_dict()
def commit():
db = __test_DB()
return db.commit()
def commit_secondary():
db = __test_DB2()
return db.commit()
def rollback(name=None):
db = __test_DB()
return db.rollback(name)
def transaction(name):
db = __test_DB()
return db.transaction(name)
def TimestampFromTicks(*args, **kwargs):
db = __test_DB()
return db.TimestampFromTicks(*args, **kwargs)
def DateFromTicks(*args, **kwargs):
db = __test_DB()
return db.DateFromTicks(*args, **kwargs)
def Date(*args, **kwargs):
db = __test_DB()
return db.Date(*args, **kwargs)
def clear_log_id():
clear_log_id = Procedure("logging.clear_log_id")
clear_log_id()
def set_log_auth(user_id):
set_log_auth = Procedure("logging.set_log_auth")
set_log_auth(user_id)
def set_log_auth_login(login):
h = prepare("select id from web_contact_all where login = :login")
h.execute(login=login)
row = h.fetchone_dict()
if row:
user_id = row['id']
set_log_auth(user_id)
else:
raise rhnException("No such log user", login)
def read_lob(lob):
if not lob:
return None
db = __test_DB()
return db._read_lob(lob)
class _Callable(object):
def __init__(self, name):
self._name = name
self._implementor = None
def __getattr__(self, name):
return self.__class__("%s.%s" % (self._name, name))
def __call__(self, *args):
proc = self._implementor.__call__(self._name)
return proc(*args)
class _Procedure(_Callable):
def __init__(self, name):
_Callable.__init__(self, name)
self._implementor = Procedure
class _Function(_Callable):
def __init__(self, name):
_Callable.__init__(self, name)
self._implementor = Function
class _CallableWrapper(object):
def __init__(self, wrapped):
self._wrapped = wrapped
def __getattr__(self, x):
return self._wrapped(x)
procedure = _CallableWrapper(_Procedure)
function = _CallableWrapper(_Function)
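# Illustrative usage sketch (hypothetical helper; it needs a reachable
# database, so it is defined but never called at import time). It
# mirrors the prepare/execute pattern used by set_log_auth_login above.
def _example_usage(login='admin'):
    initDB()
    h = prepare("select id from web_contact_all where login = :login")
    h.execute(login=login)
    row = h.fetchone_dict()
    commit()
    closeDB()
    return row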
| gpl-2.0 |
piantado/LOTlib | LOTlib/Examples/Prolog/Model.py | 2 | 6530 | """
Simple prolog example.
I'm not sure why, but the use of a temporary file seems very finicky -- perhaps there is a problem
with rapidly flushing the output, etc?
# """
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Define a simple grammar
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from LOTlib.Grammar import Grammar
grammar = Grammar(start='PROGRAM')
grammar.add_rule('PROGRAM', '%s\n%s', ['LINE', 'PROGRAM'], 1.0)
grammar.add_rule('PROGRAM', '%s', ['LINE'], 1.50)
grammar.add_rule('LINE', '', ['VARRULE'], 2.0)
grammar.add_rule('VARRULE','', ['RULE'], 1.0, bv_type='ATOM', bv_prefix='X', bv_p=1.0) # a term with a variable
grammar.add_rule('RULE', '', ['VARRULE'], 0.50)
grammar.add_rule('RULE', '%s :- %s.', ['HEAD', 'BODY'], 1.0)
grammar.add_rule('HEAD', '', ['TERM'], 1.0)
grammar.add_rule('BODY', '', ['TERM'], 1.0)
grammar.add_rule('BODY', '%s, %s', ['TERM', 'BODY'], 1.0)
grammar.add_rule('BODY', '%s; %s', ['TERM', 'BODY'], 1.0)
grammar.add_rule('TERM', '', ['F/1'], 1.0)
grammar.add_rule('F/1', 'male', ['ATOM'], 1.0)
grammar.add_rule('F/1', 'female', ['ATOM'], 1.0)
grammar.add_rule('TERM', '', ['F/2'], 1.0)
grammar.add_rule('F/2', 'grandparent', ['ATOM', 'ATOM'], 1.0)
grammar.add_rule('F/2', 'parent', ['ATOM', 'ATOM'], 1.0)
# grammar.add_rule('F/2', 'sibling', ['ATOM', 'ATOM'], 1.0)
# grammar.add_rule('F/2', 'cousin', ['ATOM', 'ATOM'], 1.0)
PEOPLE = ['barak', 'michelle', 'sasha', 'malia', 'baraksr', 'ann', 'hussein', 'akumu']
for x in PEOPLE:
grammar.add_rule('ATOM', x, None, 1.0/len(PEOPLE))
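# To eyeball what this grammar produces, one can sample from it
# (commented out, and assuming LOTlib's Grammar.generate() API):
# for _ in xrange(3):
#     print grammar.generate()
# yielding Prolog-like programs such as (hypothetical draw):
#     grandparent(X0, malia) :- parent(X0, barak), parent(barak, malia).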
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Define PrologHypothesis
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
from math import log
import pyswip
class PrologHypothesis(LOTHypothesis):
def __init__(self, value=None, base_facts="", **kwargs):
self.base_facts = base_facts # must be set before initializer
LOTHypothesis.__init__(self, grammar, value=value, args=None, **kwargs)
def __call__(self, expr):
# Wrap expr in some limited inference and parse the output
# TODO: Currently only handles a single QUERY as query variable
# wrap to make it bounded: http://www.swi-prolog.org/pldoc/man?predicate=call_with_depth_limit/3
thequery = "call_with_depth_limit(call_with_inference_limit(%s, 1000, _),1000,_)" % expr
try:
matches = list(self.fvalue.query(thequery))
except pyswip.prolog.PrologError:
return []
# print matches
# For multiple solutions to "uncle(QUERY,john)", this will give me back something like
# [{'QUERY': 'bob'}, {'QUERY': 'john'}, {'QUERY': 'mark'}]
# so reformat
return { a.get('QUERY') for a in matches if 'QUERY' in a }
# def compile_function(self):
# ## Store the prolog interpreter as self.fvalue
# # tmpfile = "/tmp/lotlib-prolog-tmp33.pl" # "tmp"+re.sub(r"[\-0-9]", "", str(uuid.uuid1()))+".pl"
# tmpfile = "/tmp/lotlib-prolog-"+str(uuid.uuid1())+".pl"
# # This is probably slow, but we write it to a file
# with open(tmpfile, 'w') as f:
# f.write(self.base_facts)
# f.write("\n\n")
# f.write(str(self))
# f.write("\n\n")
# f.flush()
#
# prolog = pyswip.Prolog()
# prolog.consult(tmpfile)
#
# os.remove(tmpfile)
#
# return prolog
def compile_function(self):
## Store the prolog interpreter as self.fvalue
# This is probably slow, but we write it to a file
with open('tmp.pl', 'w') as f: ## TODO : replace with tempfile
print >>f, self.base_facts
print >>f, str(self), "\n"
prolog = pyswip.Prolog()
prolog.consult('tmp.pl')
return prolog
def compute_single_likelihood(self, datum):
assert len(datum.input) == 1
matches = self(*datum.input)
p = (1.0-datum.alpha)*(1.0/len(PEOPLE)) # base rate
if datum.output in matches: # or choose from the matches
p += datum.alpha/len(matches)
return log(p)
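# Hedged sketch of the tempfile-based variant that the TODO in
# compile_function alludes to (left commented out and untested, given
# the module docstring's warning that temporary files were finicky):
# import os, tempfile
# def compile_function(self):
#     fd, path = tempfile.mkstemp(suffix='.pl')
#     with os.fdopen(fd, 'w') as f:
#         f.write(self.base_facts + "\n" + str(self) + "\n")
#     prolog = pyswip.Prolog()
#     prolog.consult(path)
#     os.remove(path)
#     return prolog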
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set up default base facts and data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from LOTlib.DataAndObjects import FunctionData
from LOTlib.Inference.Samplers.MetropolisHastings import MHSampler
from LOTlib import break_ctrlc
BASE_FACTS = """
:- discontiguous(female/1).
:- discontiguous(male/1).
:- discontiguous(parent/2).
:- discontiguous(grandparent/2).
:- style_check(-singleton).
spouse(barak, michelle).
male(barak).
female(michelle).
parent(michelle, sasha).
parent(michelle, malia).
parent(barak, sasha).
parent(barak, malia).
female(sasha).
female(malia).
parent(baraksr, barak).
parent(ann, barak).
parent(hussein, baraksr).
parent(akumu, baraksr).
"""
data = [FunctionData(input=["grandparent(baraksr, QUERY)"], output="sasha", alpha=0.99),
FunctionData(input=["grandparent(baraksr, QUERY)"], output="malia", alpha=0.99),
FunctionData(input=["grandparent(ann, QUERY)"], output="sahsa", alpha=0.99),
FunctionData(input=["grandparent(ann, QUERY)"], output="malia", alpha=0.99),
FunctionData(input=["grandparent(hussein, QUERY)"], output="barak", alpha=0.99),
FunctionData(input=["grandparent(akumu, QUERY)"], output="barak", alpha=0.99)
]
def make_hypothesis(**kwargs):
return PrologHypothesis(base_facts=BASE_FACTS, **kwargs)
def make_data(n=1):
return data*n
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == "__main__":
h0 = make_hypothesis(likelihood_temperature=1.0)
for h in break_ctrlc(MHSampler(h0, data)):
print h
print h.posterior_score, h.prior, h.likelihood, "\n"
# h.value.fullprint()
| gpl-3.0 |
Jgarcia-IAS/SITE | addons/l10n_hu/__openerp__.py | 320 | 1815 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 InnOpen Group Kft (<http://www.innopen.eu>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hungarian - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
Base module for Hungarian localization
==========================================
This module consists of:
- Generic Hungarian chart of accounts
- Hungarian taxes
- Hungarian Bank information
""",
'author': 'InnOpen Group Kft',
'website': 'http://www.innopen.eu',
'license': 'AGPL-3',
'depends': ['account','account_chart'],
'data': [
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account.chart.template.csv',
'data/account.tax.template.csv',
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
'data/res.bank.csv',
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
groschovskiy/lerigos_music | Server/API/lib/Crypto/SelfTest/Hash/test_RIPEMD.py | 116 | 2685 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_RIPEMD.py: Self-test for the RIPEMD-160 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#"""Self-test suite for Crypto.Hash.RIPEMD"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
test_data = [
# Test vectors downloaded 2008-09-12 from
# http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
('9c1185a5c5e9fc54612808977ee8f548b2258d31', '', "'' (empty string)"),
('0bdc9d2d256b3ee9daae347be6f4dc835a467ffe', 'a'),
('8eb208f7e05d987a9b044a8e98c6b087f15a0bfc', 'abc'),
('5d0689ef49d2fae572b881b123a85ffa21595f36', 'message digest'),
('f71c27109c692c1b56bbdceb5b9d2865b3708dbc',
'abcdefghijklmnopqrstuvwxyz',
'a-z'),
('12a053384a9c0c88e405a06c27dcf49ada62eb2b',
'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq',
'abcdbcd...pnopq'),
('b0e20b6e3116640286ed3a87a5713079b21f5189',
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'A-Z, a-z, 0-9'),
('9b752e45573d4b39f4dbd3323cab82bf63326bfb',
'1234567890' * 8,
"'1234567890' * 8"),
('52783243c1697bdbe16d37f97f68f08325dc1528',
'a' * 10**6,
'"a" * 10**6'),
]
def get_tests(config={}):
from Crypto.Hash import RIPEMD
from common import make_hash_tests
return make_hash_tests(RIPEMD, "RIPEMD", test_data,
digest_size=20,
oid="\x06\x05\x2b\x24\x03\02\x01")
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
gnu-sandhi/gnuradio | gr-wxgui/src/python/histo_window.py | 8 | 4971 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
DEFAULT_WIN_SIZE = (600, 300)
##################################################
# histo window control panel
##################################################
class control_panel(wx.Panel):
"""
A control panel with wx widgets to control the plotter and histo sink.
"""
def __init__(self, parent):
"""
Create a new control panel.
@param parent the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = wx.BoxSizer(wx.VERTICAL)
SIZE = (100, -1)
control_box = forms.static_box_sizer(
parent=self, label='Options',
bold=True, orient=wx.VERTICAL,
)
#num bins
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Num Bins',
converter=forms.int_converter(),
ps=parent, key=NUM_BINS_KEY,
)
#frame size
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Frame Size',
converter=forms.int_converter(),
ps=parent, key=FRAME_SIZE_KEY,
)
#run/stop
control_box.AddStretchSpacer()
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# histo window with plotter and control panel
##################################################
class histo_window(wx.Panel, pubsub.pubsub):
def __init__(
self,
parent,
controller,
size,
title,
maximum_key,
minimum_key,
num_bins_key,
frame_size_key,
msg_key,
):
pubsub.pubsub.__init__(self)
#setup
self.samples = list()
#proxy the keys
self.proxy(MAXIMUM_KEY, controller, maximum_key)
self.proxy(MINIMUM_KEY, controller, minimum_key)
self.proxy(NUM_BINS_KEY, controller, num_bins_key)
self.proxy(FRAME_SIZE_KEY, controller, frame_size_key)
self.proxy(MSG_KEY, controller, msg_key)
#initialize values
self[RUNNING_KEY] = True
self[X_DIVS_KEY] = 8
self[Y_DIVS_KEY] = 4
#init panel and plot
wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
self.plotter = plotter.bar_plotter(self)
self.plotter.SetSize(wx.Size(*size))
self.plotter.SetSizeHints(*size)
self.plotter.set_title(title)
self.plotter.enable_point_label(True)
self.plotter.enable_grid_lines(False)
#setup the box with plot and controls
self.control_panel = control_panel(self)
main_box = wx.BoxSizer(wx.HORIZONTAL)
main_box.Add(self.plotter, 1, wx.EXPAND)
main_box.Add(self.control_panel, 0, wx.EXPAND)
self.SetSizerAndFit(main_box)
#register events
self.subscribe(MSG_KEY, self.handle_msg)
self.subscribe(X_DIVS_KEY, self.update_grid)
self.subscribe(Y_DIVS_KEY, self.update_grid)
def handle_msg(self, msg):
"""
Handle the message from the histo sink message queue.
@param msg the frame as a character array
"""
if not self[RUNNING_KEY]: return
#convert to floating point numbers
self.samples = 100*numpy.fromstring(msg, numpy.float32)[:self[NUM_BINS_KEY]] #only take first frame
self.plotter.set_bars(
bars=self.samples,
bar_width=0.6,
color_spec=(0, 0, 1),
)
self.update_grid()
def update_grid(self):
if not len(self.samples): return
#calculate the maximum y value
y_off = math.ceil(numpy.max(self.samples))
y_off = min(max(y_off, 1.0), 100.0) #between 1% and 100%
#update the x grid
self.plotter.set_x_grid(
self[MINIMUM_KEY], self[MAXIMUM_KEY],
common.get_clean_num((self[MAXIMUM_KEY] - self[MINIMUM_KEY])/self[X_DIVS_KEY]),
)
self.plotter.set_x_label('Counts')
#update the y grid
self.plotter.set_y_grid(0, y_off, y_off/self[Y_DIVS_KEY])
self.plotter.set_y_label('Frequency', '%')
self.plotter.update()
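# Standalone illustration of the frame decoding done in handle_msg
# (input values hypothetical; tostring/fromstring were the current
# numpy API when this module was written):
def _decode_demo():
    frame = numpy.array([0.01, 0.5, 0.2], numpy.float32).tostring()
    return 100 * numpy.fromstring(frame, numpy.float32) # ~[ 1. 50. 20.]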
| gpl-3.0 |
auduny/home-assistant | tests/helpers/test_state.py | 4 | 8089 | """Test state helpers."""
import asyncio
from datetime import timedelta
import unittest
from unittest.mock import patch
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from homeassistant.const import (SERVICE_TURN_ON, SERVICE_TURN_OFF)
from homeassistant.util.async_ import run_coroutine_threadsafe
from homeassistant.util import dt as dt_util
from homeassistant.helpers import state
from homeassistant.const import (
STATE_OPEN, STATE_CLOSED,
STATE_LOCKED, STATE_UNLOCKED,
STATE_ON, STATE_OFF,
STATE_HOME, STATE_NOT_HOME)
from homeassistant.components.sun import (STATE_ABOVE_HORIZON,
STATE_BELOW_HORIZON)
from tests.common import get_test_home_assistant, mock_service
import pytest
@asyncio.coroutine
def test_async_track_states(hass):
"""Test AsyncTrackStates context manager."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=5)
point3 = point2 + timedelta(seconds=5)
with patch('homeassistant.core.dt_util.utcnow') as mock_utcnow:
mock_utcnow.return_value = point2
with state.AsyncTrackStates(hass) as states:
mock_utcnow.return_value = point1
hass.states.async_set('light.test', 'on')
mock_utcnow.return_value = point2
hass.states.async_set('light.test2', 'on')
state2 = hass.states.get('light.test2')
mock_utcnow.return_value = point3
hass.states.async_set('light.test3', 'on')
state3 = hass.states.get('light.test3')
assert [state2, state3] == \
sorted(states, key=lambda state: state.entity_id)
@asyncio.coroutine
def test_call_to_component(hass):
"""Test calls to components state reproduction functions."""
with patch(('homeassistant.components.media_player.'
'async_reproduce_states')) as media_player_fun:
media_player_fun.return_value = asyncio.Future()
media_player_fun.return_value.set_result(None)
with patch(('homeassistant.components.climate.'
'async_reproduce_states')) as climate_fun:
climate_fun.return_value = asyncio.Future()
climate_fun.return_value.set_result(None)
state_media_player = ha.State('media_player.test', 'bad')
state_climate = ha.State('climate.test', 'bad')
context = "dummy_context"
yield from state.async_reproduce_state(
hass,
[state_media_player, state_climate],
blocking=True,
context=context)
media_player_fun.assert_called_once_with(
hass,
[state_media_player],
context=context)
climate_fun.assert_called_once_with(
hass,
[state_climate],
context=context)
class TestStateHelpers(unittest.TestCase):
"""Test the Home Assistant event helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Run when tests are started."""
self.hass = get_test_home_assistant()
run_coroutine_threadsafe(async_setup_component(
self.hass, 'homeassistant', {}), self.hass.loop).result()
def tearDown(self): # pylint: disable=invalid-name
"""Stop when tests are finished."""
self.hass.stop()
def test_get_changed_since(self):
"""Test get_changed_since."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=5)
point3 = point2 + timedelta(seconds=5)
with patch('homeassistant.core.dt_util.utcnow', return_value=point1):
self.hass.states.set('light.test', 'on')
state1 = self.hass.states.get('light.test')
with patch('homeassistant.core.dt_util.utcnow', return_value=point2):
self.hass.states.set('light.test2', 'on')
state2 = self.hass.states.get('light.test2')
with patch('homeassistant.core.dt_util.utcnow', return_value=point3):
self.hass.states.set('light.test3', 'on')
state3 = self.hass.states.get('light.test3')
assert [state2, state3] == \
state.get_changed_since([state1, state2, state3], point2)
def test_reproduce_with_no_entity(self):
"""Test reproduce_state with no entity."""
calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)
state.reproduce_state(self.hass, ha.State('light.test', 'on'))
self.hass.block_till_done()
assert len(calls) == 0
assert self.hass.states.get('light.test') is None
def test_reproduce_turn_on(self):
"""Test reproduce_state with SERVICE_TURN_ON."""
calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)
self.hass.states.set('light.test', 'off')
state.reproduce_state(self.hass, ha.State('light.test', 'on'))
self.hass.block_till_done()
assert len(calls) > 0
last_call = calls[-1]
assert 'light' == last_call.domain
assert SERVICE_TURN_ON == last_call.service
assert ['light.test'] == last_call.data.get('entity_id')
def test_reproduce_turn_off(self):
"""Test reproduce_state with SERVICE_TURN_OFF."""
calls = mock_service(self.hass, 'light', SERVICE_TURN_OFF)
self.hass.states.set('light.test', 'on')
state.reproduce_state(self.hass, ha.State('light.test', 'off'))
self.hass.block_till_done()
assert len(calls) > 0
last_call = calls[-1]
assert 'light' == last_call.domain
assert SERVICE_TURN_OFF == last_call.service
assert ['light.test'] == last_call.data.get('entity_id')
def test_reproduce_complex_data(self):
"""Test reproduce_state with complex service data."""
calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)
self.hass.states.set('light.test', 'off')
complex_data = ['hello', {'11': '22'}]
state.reproduce_state(self.hass, ha.State('light.test', 'on', {
'complex': complex_data
}))
self.hass.block_till_done()
assert len(calls) > 0
last_call = calls[-1]
assert 'light' == last_call.domain
assert SERVICE_TURN_ON == last_call.service
assert complex_data == last_call.data.get('complex')
def test_reproduce_bad_state(self):
"""Test reproduce_state with bad state."""
calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)
self.hass.states.set('light.test', 'off')
state.reproduce_state(self.hass, ha.State('light.test', 'bad'))
self.hass.block_till_done()
assert len(calls) == 0
assert 'off' == self.hass.states.get('light.test').state
def test_as_number_states(self):
"""Test state_as_number with states."""
zero_states = (STATE_OFF, STATE_CLOSED, STATE_UNLOCKED,
STATE_BELOW_HORIZON, STATE_NOT_HOME)
one_states = (STATE_ON, STATE_OPEN, STATE_LOCKED, STATE_ABOVE_HORIZON,
STATE_HOME)
for _state in zero_states:
assert 0 == state.state_as_number(
ha.State('domain.test', _state, {}))
for _state in one_states:
assert 1 == state.state_as_number(
ha.State('domain.test', _state, {}))
def test_as_number_coercion(self):
"""Test state_as_number with number."""
for _state in ('0', '0.0', 0, 0.0):
assert 0.0 == state.state_as_number(
ha.State('domain.test', _state, {}))
for _state in ('1', '1.0', 1, 1.0):
assert 1.0 == state.state_as_number(
ha.State('domain.test', _state, {}))
def test_as_number_invalid_cases(self):
"""Test state_as_number with invalid cases."""
for _state in ('', 'foo', 'foo.bar', None, False, True, object,
object()):
with pytest.raises(ValueError):
state.state_as_number(ha.State('domain.test', _state, {}))
| apache-2.0 |
ananthkamal/itu-idea | node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalizes and sorts an xml. It also fetches all the properties
inside linked vsprops and includes them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
# Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
# Fix all the semicolon-separated attributes to be sorted, and also
# remove the duplicates.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
# The attribute exists in the main node. If it's equal, we leave it
# untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
# The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
# If the attribute was a property sheet attribute, we remove it, since
# it is useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
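# Illustrative merge semantics (attribute values hypothetical): merging
# node1 Defines="A;B" with node2 Defines="B;C" concatenates to
# "A;B;B;C" here; CleanupVcproj later sorts and dedups it to "A;B;C".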
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
# Check that we have at least one parameter (the vcproj path).
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
# First thing we need to do is find the Configuration nodes and merge them
# with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
# Finally, we use the pretty-print function to print the vcproj back to the
# user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
anoobs/xen-api | java/applet_test.py | 17 | 2740 | #!/usr/bin/python
#
# Copyright (c) 2008-2011 Citrix Systems, Inc.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import cgitb; cgitb.enable()
import cgi, os
# Note: XenAPI.py is shipped as part of the SDK
# see http://community.citrix.com/display/xs/Download+SDKs
import XenAPI
def main():
print "Content-type: text/html\n"
form = cgi.FieldStorage()
session = None
if ("hostName" in form):
hostName = form.getfirst("hostName", "")
userName = form.getfirst("userName", "")
password = form.getfirst("password", "")
session = XenAPI.Session("http://%s/"%hostName)
session.xenapi.login_with_password(userName, password)
if ("vmId" in form):
vmId = form.getfirst('vmId')
vmRef = session.xenapi.VM.get_by_uuid(vmId)
con = session.xenapi.VM.get_consoles(vmRef)
url = session.xenapi.console.get_location(con[0])
print '<html><body><center><table><tr><td>'
print '<applet code="com/citrix/xenserver/console/Initialize.class"'
print ' archive="/XenServerConsole.jar"'
print ' width="800" height="600">'
print ' <PARAM NAME=SESSION VALUE="%s">'%session._session
print ' <PARAM NAME=URL VALUE="%s">'%url
print ' <PARAM NAME=USEURL VALUE="true">'
print '</applet>'
print '</td></tr></table></center></body></html>'
session.logout()
return
else:
# List the running VMs
vms = session.xenapi.VM.get_all()
for vm in vms:
if int(session.xenapi.VM.get_domid(vm)) > 0:
print '<a href="%s?hostName=%s&userName=%s&password=%s&vmId=%s"' \
'target="_new">Connect to %s</a><br>'% \
(os.environ['REQUEST_URI'],
hostName, userName, password,
session.xenapi.VM.get_uuid(vm),
session.xenapi.VM.get_name_label(vm))
session.logout()
print '<form method="POST" action="%s">'%os.environ['REQUEST_URI']
print '<p> Server: <input type="text" name="hostName"/>'
print '<p> Username: <input type="text" name="userName"/>'
print '<p> Password: <input type="password" name="password"/>'
print '<input type="submit" name="connect" value="List VMs"/><br>'
main()
| lgpl-2.1 |
s05427226/stm32-study | components/external/freetype/src/tools/chktrcmp.py | 192 | 3823 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009, 2013
#
# This code is explicitly into the public domain.
import sys
import os
import re
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/internal/fttrace.h" ]
# --------------------------------------------------------------
# Parse command line options
#
for i in range( 1, len( sys.argv ) ):
if sys.argv[i].startswith( "--help" ):
print "Usage: %s [option]" % sys.argv[0]
print "Search used-but-defined and defined-but-not-used trace_XXX macros"
print ""
print " --help:"
print " Show this help"
print ""
print " --src-dirs=dir1:dir2:..."
print " Specify the directories of C source files to be checked"
print " Default is %s" % ":".join( SRC_FILE_DIRS )
print ""
print " --def-files=file1:file2:..."
print " Specify the header files including FT_TRACE_DEF()"
print " Default is %s" % ":".join( TRACE_DEF_FILES )
print ""
exit(0)
if sys.argv[i].startswith( "--src-dirs=" ):
SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
elif sys.argv[i].startswith( "--def-files=" ):
TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
for d in SRC_FILE_DIRS:
for ( p, dlst, flst ) in os.walk( d ):
for f in flst:
if c_pathname_pat.match( f ) != None:
src_pathname = os.path.join( p, f )
line_num = 0
for src_line in open( src_pathname, 'r' ):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match( src_line ) != None:
component_name = trace_use_pat.sub( '', src_line )
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
else:
USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open( f, 'r' ):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match( hdr_line ) != None:
component_name = trace_def_pat_opn.sub( '', hdr_line )
component_name = trace_def_pat_cls.sub( '', component_name )
if component_name in KNOWN_COMPONENT:
print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
( component_name, KNOWN_COMPONENT[component_name], line_num )
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
( os.path.basename( f ), line_num )
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in KNOWN_COMPONENT:
print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
| gpl-2.0 |
viral98/AttendanceRecordingAndMonitoringSystem | RPICode.py | 1 | 1691 | import serial
import MySQLdb
import time
import datetime
db = MySQLdb.connect("DESKTOP-7PF3GNC", "viral", "viral", "student")
ldb= MySQLdb.connect("localhost", "classroom", "password", "timetable")
print "Connected"
CDay = datetime.date.today().strftime("%A")
curs=db.cursor()
curs2= ldb.cursor()
curs.execute("Select COUNT(*) from attendance")
srno = curs.fetchone()
sr_no = int(srno[0])
ser = serial.Serial('/dev/ttyUSB0',2400)
ser.baudrate = 9600
while True:
curs.execute("Select sub_code from timetable where Tday = %s and Stime < NOW() AND Etime > NOW()",(CDay))
SubCode= curs.fetchone()
int1 = SubCode[0]
string = ser.readline(12)
print "start"
if len(string) == 0:
print "Insert a tag"
else:
string = string[1:11]
if string == '5003BD0FE5':
curs.execute("Select lecture from subject where sub_code = %s",(int1))
lectno= curs.fetchone()
lect = lectno[0]
lect = lect + 1
curs.execute("""UPDATE student.subject set lecture = %s where sub_code = %s""",(lect,int1))
print "Welcome Teacher XYZ"
db.commit()
curs.execute("""UPDATE student.attendance SET datetime= NOW(), attend_sub_code = %s, status = 0 WHERE status IS null""",(int1))
query = "SELECT stud_id,rfid FROM student"
curs.execute(query)
for stud_id,rfid in curs.fetchall():
curs.execute("INSERT into attendance(sr_num,attend_stud_id,attend_rfid_id) values ({0},{1},'{2}')".format(sr_no, stud_id,rfid))
print(stud_id, rfid)
sr_no = sr_no + 1
db.commit()
curs.execute("""UPDATE student.attendance SET datetime= NOW(), attend_sub_code = %s, status = 1 WHERE attend_rfid_id = %s AND status IS null""",(int1,string))
print string
print SubCode
| apache-2.0 |
ntymtsiv/CloudFerry | make_filters/make_filters.py | 7 | 1972 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import data_storage
import json
import yaml
import os
from cloudferrylib.utils import utils as utl
LOG = utl.get_log(__name__)
MIGRATE_VM_PREFIX = 'migrate_vm_'
def delete_relations():
LOG.info("started deleting old VM to hypervisor relations")
keys = data_storage.keys(MIGRATE_VM_PREFIX + '*')
data_storage.delete_batch(keys)
LOG.info("Relation deleting done. %s records was removed." % len(keys))
def check_filter_folder(filter_folder):
if not os.path.exists(filter_folder):
os.makedirs(filter_folder)
def make(filter_folder, images_date):
delete_relations()
check_filter_folder(filter_folder)
LOG.info("started creating filter files and all needed resources")
cursor = 0
while True:
step = data_storage.get("%s_source" % cursor)
if step is None:
break
cursor += 1
ids = []
for migrate in json.loads(step)['migrate']:
vm_id = migrate[0]
ids.append(vm_id)
data_storage.put(MIGRATE_VM_PREFIX + vm_id, migrate[1])
vm_filter = {'images': {'date': images_date},
'instances': {'id': ids}}
with file("%s/filter_%s.yaml" % (filter_folder, cursor), 'w') as \
filter_file:
filter_file.write(yaml.safe_dump(vm_filter))
LOG.info("Creating filter files done. %s filters was created." % cursor)
| apache-2.0 |
0dayZh/btsbots_exchange | languages/id.py | 148 | 11233 | # coding: utf8
{
'!langcode!': 'id',
'!langname!': 'Indonesian',
'%d days ago': '%d hari yang lalu',
'%d hours ago': '%d jam yang lalu',
'%d minutes ago': '%d menit yang lalu',
'%d months ago': '%d bulan yang lalu',
'%d seconds ago': '%d detik yang lalu',
'%d seconds from now': '%d detik dari sekarang',
'%d weeks ago': '%d minggu yang lalu',
'%d years ago': '%d tahun yang lalu',
'%s %%{row} deleted': '%s %%{row} dihapus',
'%s %%{row} updated': '%s %%{row} diperbarui',
'%s selected': '%s dipilih',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'(requires internet access, experimental)': '(membutuhkan akses internet, eksperimental)',
'(something like "it-it")': '(sesuatu seperti "it-it")',
'1 day ago': '1 hari yang lalu',
'1 hour ago': '1 jam yang lalu',
'1 minute ago': '1 menit yang lalu',
'1 month ago': '1 bulan yang lalu',
'1 second ago': '1 detik yang lalu',
'1 week ago': '1 minggu yang lalu',
'1 year ago': '1 tahun yang lalu',
'< Previous': '< Sebelumnya',
'About': 'Tentang',
'About application': 'Tentang Aplikasi',
'Add': 'Tambah',
'Additional code for your application': 'Tambahan kode untuk aplikasi Anda',
'Address': 'Alamat',
'Admin language': 'Bahasa Admin',
'administrative interface': 'antarmuka administrative',
'Administrator Password:': 'Administrator Kata Sandi:',
'Ajax Recipes': 'Resep Ajax',
'An error occured, please %s the page': 'Terjadi kesalahan, silakan %s halaman',
'And': 'Dan',
'and rename it:': 'dan memberi nama baru itu:',
'Answer': 'Jawaban',
'appadmin is disabled because insecure channel': 'AppAdmin dinonaktifkan karena kanal tidak aman',
'application "%s" uninstalled': 'applikasi "%s" dihapus',
'application compiled': 'aplikasi dikompilasi',
'Application name:': 'Nama Applikasi:',
'are not used yet': 'tidak digunakan lagi',
'Are you sure you want to delete this object?': 'Apakah Anda yakin ingin menghapus ini?',
'Are you sure you want to uninstall application "%s"?': 'Apakah Anda yakin ingin menghapus aplikasi "%s"?',
'Available Databases and Tables': 'Database dan Tabel yang tersedia',
'Back': 'Kembali',
'Buy this book': 'Beli buku ini',
'cache, errors and sessions cleaned': 'cache, kesalahan dan sesi dibersihkan',
'can be a git repo': 'bisa menjadi repo git',
'Cancel': 'Batalkan',
'Cannot be empty': 'Tidak boleh kosong',
'Change admin password': 'Ubah kata sandi admin',
'Change password': 'Ubah kata sandi',
'Check for upgrades': 'Periksa upgrade',
'Check to delete': 'Centang untuk menghapus',
'Checking for upgrades...': 'Memeriksa untuk upgrade...',
'Clean': 'Bersih',
'Clear': 'Hapus',
'Clear CACHE?': 'Hapus CACHE?',
'Clear DISK': 'Hapus DISK',
'Clear RAM': 'Hapus RAM',
'Click row to expand traceback': 'Klik baris untuk memperluas traceback',
'Close': 'Tutup',
'collapse/expand all': 'kempis / memperluas semua',
'Community': 'Komunitas',
'Compile': 'Kompilasi',
'compiled application removed': 'aplikasi yang dikompilasi dihapus',
'Components and Plugins': 'Komponen dan Plugin',
'contains': 'mengandung',
'Controllers': 'Kontrolir',
'controllers': 'kontrolir',
'Copyright': 'Hak Cipta',
'Count': 'Hitung',
'Create': 'Buat',
'create file with filename:': 'buat file dengan nama:',
'created by': 'dibuat oleh',
'CSV (hidden cols)': 'CSV (kolom tersembunyi)',
'currently running': 'sedang berjalan',
'data uploaded': 'data diunggah',
'Database %s select': 'Memilih Database %s',
'database administration': 'administrasi database',
'defines tables': 'mendefinisikan tabel',
'Delete': 'Hapus',
'delete all checked': 'menghapus semua yang di centang',
'Delete this file (you will be asked to confirm deletion)': 'Hapus file ini (Anda akan diminta untuk mengkonfirmasi penghapusan)',
'Delete:': 'Hapus:',
'Description': 'Keterangan',
'design': 'disain',
'direction: ltr': 'petunjuk: ltr',
'Disk Cleared': 'Disk Dihapus',
'Documentation': 'Dokumentasi',
"Don't know what to do?": 'Tidak tahu apa yang harus dilakukan?',
'done!': 'selesai!',
'Download': 'Unduh',
'Download .w2p': 'Unduh .w2p',
'download layouts': 'unduh layouts',
'download plugins': 'unduh plugins',
'Duration': 'Durasi',
'Edit': 'Mengedit',
'Edit application': 'Mengedit Aplikasi',
'Email sent': 'Email dikirim',
'enter a valid email address': 'masukkan alamat email yang benar',
'enter a valid URL': 'masukkan URL yang benar',
'enter a value': 'masukkan data',
'Error': 'Kesalahan',
'Error logs for "%(app)s"': 'Catatan kesalahan untuk "%(app)s"',
'Errors': 'Kesalahan',
'export as csv file': 'ekspor sebagai file csv',
'Export:': 'Ekspor:',
'exposes': 'menghadapkan',
'extends': 'meluaskan',
'filter': 'menyaring',
'First Name': 'Nama Depan',
'Forgot username?': 'Lupa nama pengguna?',
'Free Applications': 'Aplikasi Gratis',
'Gender': 'Jenis Kelamin',
'Group %(group_id)s created': 'Grup %(group_id)s dibuat',
'Group uniquely assigned to user %(id)s': 'Grup unik yang diberikan kepada pengguna %(id)s',
'Groups': 'Grup',
'Guest': 'Tamu',
'Hello World': 'Halo Dunia',
'Help': 'Bantuan',
'Home': 'Halaman Utama',
'How did you get here?': 'Bagaimana kamu bisa di sini?',
'Image': 'Gambar',
'import': 'impor',
'Import/Export': 'Impor/Ekspor',
'includes': 'termasuk',
'Install': 'Memasang',
'Installation': 'Instalasi',
'Installed applications': 'Aplikasi yang diinstal',
'Introduction': 'Pengenalan',
'Invalid email': 'Email tidak benar',
'Language': 'Bahasa',
'languages': 'bahasa',
'Languages': 'Bahasa',
'Last Name': 'Nama Belakang',
'License for': 'Lisensi untuk',
'loading...': 'sedang memuat...',
'Logged in': 'Masuk',
'Logged out': 'Keluar',
'Login': 'Masuk',
'Login to the Administrative Interface': 'Masuk ke antarmuka Administrasi',
'Logout': 'Keluar',
'Lost Password': 'Lupa Kata Sandi',
'Lost password?': 'Lupa kata sandi?',
'Maintenance': 'Pemeliharaan',
'Manage': 'Mengelola',
'Manage Cache': 'Mengelola Cache',
'models': 'model',
'Models': 'Model',
'Modules': 'Modul',
'modules': 'modul',
'My Sites': 'Situs Saya',
'New': 'Baru',
'new application "%s" created': 'aplikasi baru "%s" dibuat',
'New password': 'Kata sandi baru',
'New simple application': 'Aplikasi baru sederhana',
'News': 'Berita',
'next 100 rows': '100 baris berikutnya',
'Next >': 'Berikutnya >',
'Next Page': 'Halaman Berikutnya',
'No databases in this application': 'Tidak ada database dalam aplikasi ini',
'No ticket_storage.txt found under /private folder': 'Tidak ditemukan ticket_storage.txt dalam folder /private',
'not a Zip Code': 'bukan Kode Pos',
'Note': 'Catatan',
'Old password': 'Kata sandi lama',
'Online examples': 'Contoh Online',
'Or': 'Atau',
'or alternatively': 'atau alternatif',
'Or Get from URL:': 'Atau Dapatkan dari URL:',
'or import from csv file': 'atau impor dari file csv',
'Other Plugins': 'Plugin Lainnya',
'Other Recipes': 'Resep Lainnya',
'Overview': 'Ikhtisar',
'Overwrite installed app': 'Ikhtisar app yang terinstall',
'Pack all': 'Pak semua',
'Pack compiled': 'Pak yang telah dikompilasi',
'Pack custom': 'Pak secara kustomisasi',
'Password': 'Kata sandi',
'Password changed': 'Kata sandi berubah',
"Password fields don't match": 'Kata sandi tidak sama',
'please input your password again': 'silahkan masukan kata sandi anda lagi',
'plugins': 'plugin',
'Plugins': 'Plugin',
'Plural-Forms:': 'Bentuk-Jamak:',
'Powered by': 'Didukung oleh',
'Preface': 'Pendahuluan',
'previous 100 rows': '100 baris sebelumnya',
'Previous Page': 'Halaman Sebelumnya',
'private files': 'file pribadi',
'Private files': 'File pribadi',
'Profile': 'Profil',
'Profile updated': 'Profil diperbarui',
'Project Progress': 'Perkembangan Proyek',
'Quick Examples': 'Contoh Cepat',
'Ram Cleared': 'Ram Dihapus',
'Recipes': 'Resep',
'Register': 'Daftar',
'Registration successful': 'Pendaftaran berhasil',
'reload': 'memuat kembali',
'Reload routes': 'Memuat rute kembali',
'Remember me (for 30 days)': 'Ingat saya (selama 30 hari)',
'Remove compiled': 'Hapus Kompilasi',
'Request reset password': 'Meminta reset kata sandi',
'Rows in Table': 'Baris dalam Tabel',
'Rows selected': 'Baris dipilih',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Jalankan tes di file ini (untuk menjalankan semua file, Anda juga dapat menggunakan tombol berlabel 'test')",
'Running on %s': 'Berjalan di %s',
'Save model as...': 'Simpan model sebagai...',
'Save profile': 'Simpan profil',
'Search': 'Cari',
'Select Files to Package': 'Pilih Berkas untuk Paket',
'Send Email': 'Kirim Email',
'Service': 'Layanan',
'Site': 'Situs',
'Size of cache:': 'Ukuran cache:',
'starts with': 'dimulai dengan',
'static': 'statis',
'Static': 'Statis',
'Statistics': 'Statistik',
'Support': 'Mendukung',
'Table': 'Tabel',
'test': 'tes',
'The application logic, each URL path is mapped in one exposed function in the controller': 'Logika aplikasi, setiap jalur URL dipetakan dalam satu fungsi terpapar di kontrolir',
'The data representation, define database tables and sets': 'Representasi data, mendefinisikan tabel database dan set',
'There are no plugins': 'Tidak ada plugin',
'There are no private files': 'Tidak ada file pribadi',
'These files are not served, they are only available from within your app': 'File-file ini tidak dilayani, mereka hanya tersedia dari dalam aplikasi Anda',
'These files are served without processing, your images go here': 'File-file ini disajikan tanpa pengolahan, gambar Anda di sini',
'This App': 'App Ini',
'Time in Cache (h:m:s)': 'Waktu di Cache (h:m:s)',
'To create a plugin, name a file/folder plugin_[name]': 'Untuk membuat sebuah plugin, beri nama file/folder plugin_[nama]',
'too short': 'terlalu pendek',
'Translation strings for the application': 'Terjemahan string untuk aplikasi',
'Try the mobile interface': 'Coba antarmuka ponsel',
'Unable to download because:': 'Tidak dapat mengunduh karena:',
'unable to parse csv file': 'tidak mampu mengurai file csv',
'update all languages': 'memperbarui semua bahasa',
'Update:': 'Perbarui:',
'Upload': 'Unggah',
'Upload a package:': 'Unggah sebuah paket:',
'Upload and install packed application': 'Unggah dan pasang aplikasi yang dikemas',
'upload file:': 'unggah file:',
'upload plugin file:': 'unggah file plugin:',
'User %(id)s Logged-in': 'Pengguna %(id)s Masuk',
'User %(id)s Logged-out': 'Pengguna %(id)s Keluar',
'User %(id)s Password changed': 'Pengguna %(id)s Kata Sandi berubah',
'User %(id)s Password reset': 'Pengguna %(id)s Kata Sandi telah direset',
'User %(id)s Profile updated': 'Pengguna %(id)s Profil diperbarui',
'User %(id)s Registered': 'Pengguna %(id)s Terdaftar',
'value already in database or empty': 'data sudah ada dalam database atau kosong',
'value not allowed': 'data tidak diperbolehkan',
'value not in database': 'data tidak ada dalam database',
'Verify Password': 'Verifikasi Kata Sandi',
'Version': 'Versi',
'View': 'Lihat',
'Views': 'Lihat',
'views': 'lihat',
'Web Framework': 'Kerangka Web',
'web2py is up to date': 'web2py terbaru',
'web2py Recent Tweets': 'Tweet web2py terbaru',
'Website': 'Situs Web',
'Welcome': 'Selamat Datang',
'Welcome to web2py!': 'Selamat Datang di web2py!',
'You are successfully running web2py': 'Anda berhasil menjalankan web2py',
'You can modify this application and adapt it to your needs': 'Anda dapat memodifikasi aplikasi ini dan menyesuaikan dengan kebutuhan Anda',
'You visited the url %s': 'Anda mengunjungi url %s',
}
| gpl-2.0 |
mydongistiny/gcc | contrib/gen_autofdo_event.py | 6 | 4877 | #!/usr/bin/python
# Generate Intel taken branches Linux perf event script for autofdo profiling.
# Copyright (C) 2016 Free Software Foundation, Inc.
#
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>. */
# Run it with perf record -b -e EVENT program ...
# The Linux kernel needs to support the PMU of the current CPU, and
# it will likely not work in VMs.
# Add --all to print for all CPUs, otherwise only for the current CPU.
# Add --script to generate a shell script that runs the correct event.
#
# Requires internet (https) access. This may require setting up a proxy
# with export https_proxy=...
#
import urllib2
import sys
import json
import argparse
import collections
baseurl = "https://download.01.org/perfmon"
target_events = (u'BR_INST_RETIRED.NEAR_TAKEN',
u'BR_INST_EXEC.TAKEN',
u'BR_INST_RETIRED.TAKEN_JCC',
u'BR_INST_TYPE_RETIRED.COND_TAKEN')
ap = argparse.ArgumentParser()
ap.add_argument('--all', '-a', help='Print for all CPUs', action='store_true')
ap.add_argument('--script', help='Generate shell script', action='store_true')
args = ap.parse_args()
eventmap = collections.defaultdict(list)
def get_cpu_str():
with open('/proc/cpuinfo', 'r') as c:
vendor, fam, model = None, None, None
for j in c:
n = j.split()
if n[0] == 'vendor_id':
vendor = n[2]
elif n[0] == 'model' and n[1] == ':':
model = int(n[2])
elif n[0] == 'cpu' and n[1] == 'family':
fam = int(n[3])
if vendor and fam and model:
return "%s-%d-%X" % (vendor, fam, model), model
return None, None
def find_event(eventurl, model):
print >>sys.stderr, "Downloading", eventurl
u = urllib2.urlopen(eventurl)
events = json.loads(u.read())
u.close()
found = 0
for j in events:
if j[u'EventName'] in target_events:
event = "cpu/event=%s,umask=%s/" % (j[u'EventCode'], j[u'UMask'])
if u'PEBS' in j and j[u'PEBS'] > 0:
event += "p"
if args.script:
eventmap[event].append(model)
else:
print j[u'EventName'], "event for model", model, "is", event
found += 1
return found
if not args.all:
cpu, model = get_cpu_str()
if not cpu:
sys.exit("Unknown CPU type")
url = baseurl + "/mapfile.csv"
print >>sys.stderr, "Downloading", url
u = urllib2.urlopen(url)
found = 0
cpufound = 0
for j in u:
n = j.rstrip().split(',')
if len(n) >= 4 and (args.all or n[0] == cpu) and n[3] == "core":
if args.all:
vendor, fam, model = n[0].split("-")
model = int(model, 16)
cpufound += 1
found += find_event(baseurl + n[2], model)
u.close()
if args.script:
print '''#!/bin/sh
# Profile workload for gcc profile feedback (autofdo) using Linux perf.
# Auto generated. To regenerate for new CPUs run
# contrib/gen_autofdo_event.py --script --all in gcc source
# usages:
# gcc-auto-profile program (profile program and children)
# gcc-auto-profile -a sleep X (profile all for X secs, may need root)
# gcc-auto-profile -p PID sleep X (profile PID)
# gcc-auto-profile --kernel -a sleep X (profile kernel)
# gcc-auto-profile --all -a sleep X (profile kernel and user space)
# Identify branches taken event for CPU.
#
FLAGS=u
if [ "$1" = "--kernel" ] ; then
FLAGS=k
shift
fi
if [ "$1" = "--all" ] ; then
FLAGS=uk
shift
fi
if ! grep -q Intel /proc/cpuinfo ; then
echo >&2 "Only Intel CPUs supported"
exit 1
fi
if grep -q hypervisor /proc/cpuinfo ; then
echo >&2 "Warning: branch profiling may not be functional in VMs"
fi
case `egrep -q "^cpu family\s*: 6" /proc/cpuinfo &&
egrep "^model\s*:" /proc/cpuinfo | head -n1` in'''
for event, mod in eventmap.iteritems():
for m in mod[:-1]:
print "model*:\ %s|\\" % m
print 'model*:\ %s) E="%s$FLAGS" ;;' % (mod[-1], event)
print '''*)
echo >&2 "Unknown CPU. Run contrib/gen_autofdo_event.py --all --script to update script."
exit 1 ;;'''
print "esac"
print 'exec perf record -e $E -b "$@"'
if cpufound == 0 and not args.all:
sys.exit('CPU %s not found' % cpu)
if found == 0:
sys.exit('Branch event not found')
| gpl-2.0 |
r-icarus/openstack_microserver | openstack_dashboard/dashboards/project/networks/subnets/tabs.py | 6 | 1583 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = "project/networks/subnets/_detail_overview.html"
def get_context_data(self, request):
subnet_id = self.tab_group.kwargs['subnet_id']
try:
subnet = api.neutron.subnet_get(self.request, subnet_id)
except Exception:
redirect = reverse('horizon:project:networks:index')
msg = _('Unable to retrieve subnet details.')
exceptions.handle(request, msg, redirect=redirect)
return {'subnet': subnet}
class SubnetDetailTabs(tabs.TabGroup):
slug = "subnet_details"
tabs = (OverviewTab,)
| apache-2.0 |
schlueter/ansible | lib/ansible/modules/network/eos/eos_eapi.py | 6 | 15097 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_eapi
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage and configure Arista EOS eAPI.
requirements:
- "EOS v4.12 or greater"
description:
- Use to enable or disable eAPI access, and set the port and state
of http, https, local_http and unix-socket servers.
- When enabling eAPI access the default is to enable HTTP on port
80, enable HTTPS on port 443, disable local HTTP, and disable
Unix socket server. Use the options listed below to override the
default configuration.
- Requires EOS v4.12 or greater.
extends_documentation_fragment: eos
options:
http:
description:
- The C(http) argument controls the operating state of the HTTP
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTP protocol is enabled and
when the value is set to False, the HTTP protocol is disabled.
By default, when eAPI is first configured, the HTTP protocol is
disabled.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_http']
http_port:
description:
- Configures the HTTP port that will listen for connections when
the HTTP transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 80
https:
description:
- The C(https) argument controls the operating state of the HTTPS
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTPS protocol is enabled and
when the value is set to False, the HTTPS protocol is disabled.
By default, when eAPI is first configured, the HTTPS protocol is
enabled.
required: false
default: yes
choices: ['yes', 'no']
aliases: ['enable_https']
https_port:
description:
- Configures the HTTPS port that will listen for connections when
the HTTPS transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 443
local_http:
description:
- The C(local_http) argument controls the operating state of the
local HTTP transport protocol when eAPI is present in the
running-config. When the value is set to True, the HTTP protocol
is enabled and restricted to connections from localhost only. When
the value is set to False, the HTTP local protocol is disabled.
- Note this value is independent of the C(http) argument
required: false
default: false
choices: ['yes', 'no']
aliases: ['enable_local_http']
local_http_port:
description:
- Configures the local HTTP port that will listen for connections when
the local HTTP transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 8080
socket:
description:
- The C(socket) argument controls the operating state of the UNIX
Domain Socket used to receive eAPI requests. When the value
of this argument is set to True, the UDS will listen for eAPI
requests. When the value is set to False, the UDS will not be
available to handle requests. By default when eAPI is first
configured, the UDS is disabled.
required: false
default: false
choices: ['yes', 'no']
aliases: ['enable_socket']
vrf:
description:
- The C(vrf) argument will configure eAPI to listen for connections
in the specified VRF. By default, eAPI transports will listen
for connections in the global table. This value requires the
VRF to already be created otherwise the task will fail.
required: false
default: default
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
version_added: "2.2"
state:
description:
- The C(state) argument controls the operational state of eAPI
on the remote device. When this argument is set to C(started),
eAPI is enabled to receive requests and when this argument is
C(stopped), eAPI is disabled and will not receive requests.
required: false
default: started
choices: ['started', 'stopped']
"""
EXAMPLES = """
- name: Enable eAPI access with default configuration
eos_eapi:
state: started
- name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled
eos_eapi:
state: started
http: false
https_port: 9443
local_http: yes
local_http_port: 80
socket: yes
- name: Shutdown eAPI access
eos_eapi:
state: stopped
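# Illustrative extra example (added for clarity; the VRF name is an
# assumption and the VRF must already exist on the device):
- name: Enable eAPI listening in a pre-created VRF
  eos_eapi:
    state: started
    vrf: management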
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- management api http-commands
- protocol http port 81
- no protocol https
urls:
description: Hash of URL endpoints eAPI is listening on per interface
returned: when eAPI is started
type: dict
sample: {'Management1': ['http://172.26.10.1:80']}
session_name:
description: The EOS config session name used to load the configuration
returned: when changed is True
type: str
sample: ansible_1479315771
"""
import re
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.eos.eos import run_commands, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def check_transport(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
if 'eapi' in (transport, provider_transport):
module.fail_json(msg='eos_eapi module is only supported over cli transport')
def validate_http_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
def validate_https_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='https_port must be between 1 and 65535')
def validate_local_http_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='local_http_port must be between 1 and 65535')
def validate_vrf(value, module):
out = run_commands(module, ['show vrf'])
configured_vrfs = []
lines = out[0].strip().splitlines()[3:]
for l in lines:
if not l:
continue
splitted_line = re.split(r'\s{2,}', l.strip())
if len(splitted_line) > 2:
configured_vrfs.append(splitted_line[0])
configured_vrfs.append('default')
if value not in configured_vrfs:
module.fail_json(msg='vrf `%s` is not configured on the system' % value)
def map_obj_to_commands(updates, module, warnings):
commands = list()
want, have = updates
def needs_update(x):
return want.get(x) is not None and (want.get(x) != have.get(x))
def add(cmd):
if 'management api http-commands' not in commands:
commands.insert(0, 'management api http-commands')
commands.append(cmd)
if any((needs_update('http'), needs_update('http_port'))):
if want['http'] is False:
add('no protocol http')
else:
if have['http'] is False and want['http'] in (False, None):
warnings.append('protocol http is not enabled, not configuring http port value')
else:
port = want['http_port'] or 80
add('protocol http port %s' % port)
if any((needs_update('https'), needs_update('https_port'))):
if want['https'] is False:
add('no protocol https')
else:
if have['https'] is False and want['https'] in (False, None):
warnings.append('protocol https is not enabled, not configuring https port value')
else:
port = want['https_port'] or 443
add('protocol https port %s' % port)
if any((needs_update('local_http'), needs_update('local_http_port'))):
if want['local_http'] is False:
add('no protocol http localhost')
else:
if have['local_http'] is False and want['local_http'] in (False, None):
warnings.append('protocol local_http is not enabled, not configuring local_http port value')
else:
port = want['local_http_port'] or 8080
add('protocol http localhost port %s' % port)
if needs_update('socket'):
if want['socket'] is False:
add('no protocol unix-socket')
else:
add('protocol unix-socket')
if needs_update('state') and not needs_update('vrf'):
if want['state'] == 'stopped':
add('shutdown')
elif want['state'] == 'started':
add('no shutdown')
if needs_update('vrf'):
add('vrf %s' % want['vrf'])
# switching operational vrfs here
# need to add the desired state as well
if want['state'] == 'stopped':
add('shutdown')
elif want['state'] == 'started':
add('no shutdown')
return commands
def parse_state(data):
if data[0]['enabled']:
return 'started'
else:
return 'stopped'
def map_config_to_obj(module):
out = run_commands(module, ['show management api http-commands | json'])
return {
'http': out[0]['httpServer']['configured'],
'http_port': out[0]['httpServer']['port'],
'https': out[0]['httpsServer']['configured'],
'https_port': out[0]['httpsServer']['port'],
'local_http': out[0]['localHttpServer']['configured'],
'local_http_port': out[0]['localHttpServer']['port'],
'socket': out[0]['unixSocketServer']['configured'],
'vrf': out[0]['vrf'],
'state': parse_state(out)
}
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
'http_port': module.params['http_port'],
'https': module.params['https'],
'https_port': module.params['https_port'],
'local_http': module.params['local_http'],
'local_http_port': module.params['local_http_port'],
'socket': module.params['socket'],
'vrf': module.params['vrf'],
'state': module.params['state']
}
for key, value in iteritems(obj):
if value:
validator = globals().get('validate_%s' % key)
if validator:
validator(value, module)
return obj
def verify_state(updates, module):
want, have = updates
invalid_state = [('http', 'httpServer'),
('https', 'httpsServer'),
('local_http', 'localHttpServer'),
('socket', 'unixSocketServer')]
timeout = module.params['timeout'] or 30
state = module.params['state']
while invalid_state:
out = run_commands(module, ['show management api http-commands | json'])
for index, item in enumerate(invalid_state):
want_key, eapi_key = item
if want[want_key] is not None:
if want[want_key] == out[0][eapi_key]['running']:
del invalid_state[index]
elif state == 'stopped':
if not out[0][eapi_key]['running']:
del invalid_state[index]
else:
del invalid_state[index]
time.sleep(1)
timeout -= 1
if timeout == 0:
module.fail_json(msg='timeout expired before eapi running state changed')
def collect_facts(module, result):
out = run_commands(module, ['show management api http-commands | json'])
facts = dict(eos_eapi_urls=dict())
for each in out[0]['urls']:
intf, url = each.split(' : ')
key = str(intf).strip()
if key not in facts['eos_eapi_urls']:
facts['eos_eapi_urls'][key] = list()
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts
def main():
""" main entry point for module execution
"""
argument_spec = dict(
http=dict(aliases=['enable_http'], type='bool'),
http_port=dict(type='int'),
https=dict(aliases=['enable_https'], type='bool'),
https_port=dict(type='int'),
local_http=dict(aliases=['enable_local_http'], type='bool'),
local_http_port=dict(type='int'),
socket=dict(aliases=['enable_socket'], type='bool'),
vrf=dict(default='default'),
config=dict(),
state=dict(default='started', choices=['stopped', 'started']),
)
argument_spec.update(eos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
check_transport(module)
result = {'changed': False}
warnings = list()
if module.params['config']:
warnings.append('config parameter is no longer necessary and will be ignored')
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module, warnings)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
if result['changed']:
verify_state((want, have), module)
collect_facts(module, result)
if warnings:
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
JamesMura/sentry | src/sentry/plugins/interfaces/releasehook.py | 1 | 2926 | """
sentry.plugins.base.structs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ['ReleaseHook']
from django.db import IntegrityError, transaction
from django.utils import timezone
from sentry.models import Activity, Release
class ReleaseHook(object):
def __init__(self, project):
self.project = project
def start_release(self, version, **values):
values.setdefault('date_started', timezone.now())
try:
with transaction.atomic():
release = Release.objects.create(
version=version,
organization_id=self.project.organization_id,
**values
)
except IntegrityError:
release = Release.objects.get(
version=version,
organization_id=self.project.organization_id,
)
release.update(**values)
release.add_project(self.project)
# TODO(dcramer): this is being used by the release details endpoint, but
# it'd be ideal if most if not all of this logic lived there, and this
# hook simply called out to the endpoint
def set_commits(self, version, commit_list):
"""
Commits should be ordered oldest to newest.
Calling this method will remove all existing commit history.
"""
project = self.project
try:
with transaction.atomic():
release = Release.objects.create(
organization_id=project.organization_id,
version=version
)
except IntegrityError:
release = Release.objects.get(
organization_id=project.organization_id,
version=version
)
release.add_project(project)
release.set_commits(commit_list)
def finish_release(self, version, **values):
values.setdefault('date_released', timezone.now())
try:
with transaction.atomic():
release = Release.objects.create(
version=version,
organization_id=self.project.organization_id,
**values
)
except IntegrityError:
release = Release.objects.get(
version=version,
organization_id=self.project.organization_id,
)
release.update(**values)
release.add_project(self.project)
Activity.objects.create(
type=Activity.RELEASE,
project=self.project,
ident=version,
data={'version': version},
datetime=values['date_released'],
)
def handle(self, request):
raise NotImplementedError
| bsd-3-clause |
tigawa/proofreadingchecker | vendor/bundle/ruby/1.9.1/gems/libv8-3.16.14.3/vendor/v8/tools/testrunner/local/commands.py | 22 | 5009 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import signal
import subprocess
import sys
import tempfile
import time
from ..local import utils
from ..objects import output
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = \
ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
except ImportError:
pass
return prev_error_mode
def RunProcess(verbose, timeout, args, **rest):
if verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
popen_args = subprocess.list2cmdline(args)
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell=utils.IsWindows(),
args=popen_args,
**rest
)
if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
try:
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (exit_code, timed_out)
except KeyboardInterrupt:
raise
def PrintError(string):
sys.stderr.write(string)
sys.stderr.write("\n")
def CheckedUnlink(name):
# On Windows, when run with -jN in parallel processes,
# OS often fails to unlink the temp file. Not sure why.
# Need to retry.
# Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
retry_count = 0
while retry_count < 30:
try:
os.unlink(name)
return
except OSError, e:
retry_count += 1
time.sleep(retry_count * 0.1)
PrintError("os.unlink() " + str(e))
def Execute(args, verbose=False, timeout=None):
args = [ c for c in args if c != "" ]
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
try:
(exit_code, timed_out) = RunProcess(
verbose,
timeout,
args=args,
stdout=fd_out,
stderr=fd_err
)
except:
raise
os.close(fd_out)
os.close(fd_err)
out = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return output.Output(exit_code, timed_out, out, errors)
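# Illustrative use of Execute (binary name and timeout value are assumptions,
# not from the original file):
#   result = Execute(["d8", "--version"], verbose=True, timeout=30)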
| apache-2.0 |
hlin117/statsmodels | statsmodels/iolib/summary.py | 22 | 33071 | from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
raise NotImplementedError
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
-----------
yname : string
optional, Default is `Y`
xname : list of strings
optional, Default is `X.#` for # in p the number of regressors
Confidence interval : (0,1) not implemented
title : string
optional, Default is 'Generalized linear model'
returns : string
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
Prints the summarized results
Option :
returns='text'
Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
Not implemented yet
Option :
returns='HTML'
Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).fit()
>>> print ols_results.summary()
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval'] #checke z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
# SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
pass
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
title = results.model.__class__.__name__ + 'Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
gen_left = [('Dep. Variable:', None),
('Model type:', None),
('Date:', None),
('No. Observations:', None),
('Df model:', None),
('Df resid:', None)]
try:
llf = results.llf
gen_left.append(('Log-Likelihood', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
gen_table_right = [] #because .extend_right seems works with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
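# Sketch of a summary_top call (labels are assumptions; None values are
# filled in from the default_items lookup above):
#   top = summary_top(res, title='Model Results',
#                     gleft=[('Dep. Variable:', None), ('Model:', None)])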
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
[forg(conf_int[i,0]) for i in exog_idx],
[forg(conf_int[i,1]) for i in exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
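# Minimal usage sketch (assumes `res` is a fitted results instance exposing
# params/bse/tvalues/pvalues/conf_int; not part of the original module):
#   tbl = summary_params(res, alpha=0.05, use_t=True)
#   print tbl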
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_headers : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
title : None or string
Returns
-------
tables : list of SimpleTable
this contains a list of all separate Subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
significance level for the confidence intervals, default 0.05
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
this contains a list of all separate Subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
If true, then all headers are kept. If false, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
raise ValueError('available output formats are text, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
Contains the list of SimpleTable instances; horizontally concatenated tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
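# Hedged usage sketch (assumes a fitted results instance `res`; the calls
# mirror the add_* methods defined below):
#   smry = Summary()
#   smry.add_table_2cols(res, title='Regression Results')
#   smry.add_table_params(res, alpha=.05, use_t=True)
#   print smry.as_text()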
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
etext : string
string with lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if not self.extra_txt is None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
| bsd-3-clause |
parttimenerd/temci | tests/test_report.py | 1 | 3730 | """
Tests for reporters
"""
import json
from tests.utils import run_temci, run_temci_proc
def test_console_reporter_auto_mode():
d = lambda d: {
"attributes": {"description": "XYZ" + d},
"data": {"p": [1]}
}
out = run_temci("report in.yaml --console_mode auto",
files={
"in.yaml": [d(""), d(""), d(""), d("W"), d("X")]
}).out
assert "Report for XYZ" in out
assert any("XYZ [1]" in l and "XYZ [2]" in l for l in out.split("\n"))
assert "XYZX" in out
def test_support_multiple_inputs():
d = lambda: {
"attributes": {"description": "XYZ"},
"data": {"p": [1]}
}
out = run_temci("report in1.yaml in2.yaml --console_mode auto",
files={
"in1.yaml": [d()],
"in2.yaml": [d(), d()]
}).out
assert any("XYZ [1]" in l and "XYZ [2]" in l for l in out.split("\n"))
def test_html2_with_single():
assert "report.html" in run_temci("report --reporter html2 in.yaml", files={
"in.yaml": [
{
"attributes": {"description": "XYZ"},
"data": {"p": [1]}
}
]
}).file_contents
def test_properties_regexp():
out = run_temci(r"report in.yaml --properties 'p.*'", files={
"in.yaml": [
{
"attributes": {"description": "XYZ"},
"data": {"p456": [1], "z111": [2]}
}
]
}).out
assert "p456" in out and "z111" not in out
def test_console_baseline():
run_temci(r"report in.yaml --console_baseline base", files={
"in.yaml": [
{
"attributes": {"description": "XYZ"},
"data": {"p456": [1], "z111": [2]}
},
{
"attributes": {"description": "base"},
"data": {"p456": [1], "z111": [2]}
}
]
}).out
def test_all_reporters():
from temci.report.report import ReporterRegistry
for name, rep in ReporterRegistry.registry.items():
print(name)
run_temci_proc("report --reporter {} in.yaml".format(name), files={
"in.yaml": [
{
"attributes": {"description": "XYZ"},
"data": {"p": [1, 2]}
}
]
})
def test_codespeed_reporter():
d = lambda: {
"attributes": {"description": "XYZ"},
"data": {"p": [1]}
}
out = run_temci("report in.yaml",
settings={
"report": {
"reporter": "codespeed",
"codespeed_misc": {"project": "test"}
}
},
files={
"in.yaml": [d()],
}).out
j = json.loads(out)
assert len(j) == 1
assert j[0]["benchmark"] == "XYZ: p"
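# Shape of the JSON emitted by the codespeed reporter, inferred from the
# assertions above (fields other than "benchmark" are assumptions about the
# codespeed payload):
#
#   [{"benchmark": "XYZ: p", "project": "test", "result_value": 1.0}]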
def test_codespeed_reporter_failed():
d = lambda: {
"attributes": {"description": "XYZ"},
"data": {"p": [1]}
}
e = lambda: {
"attributes": {"description": "ZYX"},
"data": {},
"error": {"message": "no", "error_output": "", "output": "", "return_code": 1}
}
out = run_temci("report in.yaml",
settings={
"report": {
"reporter": "codespeed",
"codespeed_misc": {"project": "test"}
}
},
files={
"in.yaml": [d(), e()],
}).out
j = json.loads(out)
assert len(j) == 1
| gpl-3.0 |
namecoin-qt/namecoin-qt | client/jsonrpc/_tests/test_modpywrapper.py | 53 | 2646 |
"""
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import unittest
import jsonrpc
from types import *
class Service(object):
@jsonrpc.ServiceMethod
def echo(self, arg):
return arg
class ApacheRequestMockup(object):
def __init__(self, filename, fin, fout):
self.fin=fin
self.fout = fout
self.filename = filename
def write(self,data):
self.fout.write(data)
def flush(self):
pass
def read(self):
return self.fin.read()
class ModPyMockup(object):
def __init__(self):
self.apache=ApacheModuleMockup()
class ApacheModuleMockup(object):
def __getattr__(self, name):
return name
def import_module(self, moduleName, log=1):
return Service()
class TestModPyWrapper(unittest.TestCase):
def setUp(self):
import sys
sys.modules['mod_python'] =ModPyMockup()
def tearDown(self):
pass
def test_runHandler(self):
from StringIO import StringIO
json=u'{"method":"echo","params":["foobar"], "id":""}'
fin=StringIO(json)
fout=StringIO()
req = ApacheRequestMockup(__file__ , fin, fout)
jsonrpc.handler(req)
data = fout.getvalue()
self.assertEquals(jsonrpc.loads(data), {"result":"foobar", "error":None, "id":""})
def test_ServiceImplementationNotFound(self):
from StringIO import StringIO
json=u'{"method":"echo","params":["foobar"], "id":""}'
fin=StringIO(json)
fout=StringIO()
req = ApacheRequestMockup("foobar" , fin, fout)
rslt = jsonrpc.handler(req)
self.assertEquals(rslt, "OK")
data = fout.getvalue()
self.assertEquals(jsonrpc.loads(data), {u'id': '', u'result': None, u'error': {u'message': '', u'name': u'ServiceImplementaionNotFound'}} )
| mit |
google/nomulus | config/show_upgrade_diffs.py | 1 | 6648 | # Copyright 2021 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Show the set of dependency diffs introduced by a branch.
Usage:
show_upgrade_diffs.py [-d <directory>] <user> <branch>
Assumes that there is a <user>/nomulus repository on github with the specified
branch name.
"""
import argparse
import os
import six
import subprocess
import sys
import tempfile
from typing import cast, Dict, Set, Tuple, Union
def run(*args):
if subprocess.call(args):
raise Abort(f'"{" ".join(args)}" failed')
PackageName = Tuple[bytes, bytes]
VersionSet = Set[bytes]
PackageMap = Dict[PackageName, VersionSet]
RED = b'\033[40;31;1m'
GREEN = b'\033[40;32;1m'
class Abort(Exception):
"""Raised to abort processing and record an error."""
def merge(dest: PackageMap, new: PackageMap) -> None:
for key, val in new.items():
dest[key] = dest.setdefault(key, set()) | val
def parse_lockfile(filename: str) -> PackageMap:
result: PackageMap = {}
for line in open(filename, 'rb'):
if line.startswith(b'#'):
continue
line = line.rstrip()
package = cast(Tuple[bytes, bytes, bytes], tuple(line.split(b':')))
result.setdefault(package[:-1], set()).add(package[-1])
return result
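# Illustrative input line ("group:artifact:version" form assumed):
#   b'com.google.guava:guava:28.1-jre' is recorded as
#   {(b'com.google.guava', b'guava'): {b'28.1-jre'}}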
def get_all_package_versions(dir: str) -> PackageMap:
"""Return list of all package versions in the directory."""
packages = {}
for file in os.listdir(dir):
file = os.path.join(dir, file)
if file.endswith('.lockfile'):
merge(packages, parse_lockfile(file))
elif os.path.isdir(file):
merge(packages, get_all_package_versions(file))
return packages
def pr(*args: Union[str, bytes]) -> None:
"""Print replacement that prints bytes without weird conversions."""
for text in args:
sys.stdout.buffer.write(six.ensure_binary(text))
sys.stdout.buffer.flush()
def format_versions(a: VersionSet, b: VersionSet, missing_esc: bytes) -> bytes:
"""Returns a formatted string of the elements of "a".
Returns the elements of "a" as a comma-separated string, colorizing the
elements of "a" that are not also in "b" with "missing_esc".
Args:
a: Elements to print.
b: Other set, if a printed element is not a member of "b" it is
colorized.
missing_esc: ANSI terminal sequence to use to colorize elements that
are missing from "b".
"""
elems = []
for item in a:
if item in b:
elems.append(item)
else:
elems.append(missing_esc + item + b'\033[0m')
return b', '.join(elems)
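# Example call (illustrative; set iteration order may vary):
#   format_versions({b'1.0', b'2.0'}, {b'1.0'}, GREEN)
#   -> b'1.0, ' + GREEN + b'2.0' + b'\033[0m'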
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--directory', '-d', type=str, default='',
dest='directory',
help=('Directory to use for a local git '
'repository. By default, this script clones '
'the nomulus repo into a temporary directory '
'which is deleted after the script is run. '
'This option allows you to specify the '
'directory and causes it to be retained (not '
'deleted) after the script is run, allowing '
'it to be reused for subsequent runs, speeding '
'them up considerably.'))
parser.add_argument('user', type=str,
help=('The name of the user on github. The full '
'github repository name is presumed to be '
'"$user/nomulus".'))
parser.add_argument('branch', type=str,
help='The git branch containing the changes.')
args = parser.parse_args()
user = args.user
branch = args.branch
if not args.directory:
tempdir = tempfile.TemporaryDirectory()
dir = tempdir.name
else:
dir = args.directory
# Either clone or fetch the master branch if it exists.
if args.directory and os.path.exists(dir):
pr(f'Reusing directory {dir}\n')
os.chdir(dir)
run('git', 'fetch', 'git@github.com:google/nomulus', 'master:master')
run('git', 'checkout', 'master')
else:
run('git', 'clone', 'git@github.com:google/nomulus', dir)
os.chdir(dir)
old_packages = get_all_package_versions('.')
run('git', 'fetch', f'https://github.com/{user}/nomulus.git',
f'{branch}:{branch}')
run('git', 'checkout', branch)
new_packages = get_all_package_versions('.')
if new_packages != old_packages:
pr('\n\nPackage version change report:\n')
pr('change package-name: {old versions} -> {new versions}\n')
pr('=====================================================\n\n')
for package, new_versions in new_packages.items():
old_versions = old_packages.get(package)
if not old_versions:
pr('added ', b':'.join(package), ': {',
format_versions(new_versions, set(), GREEN),
'}\n')
elif new_versions != old_versions:
# Print out "package-name: {old versions} -> {new versions} with
# pretty colors.
formatted_old_versions = (
format_versions(old_versions, new_versions, RED))
formatted_new_versions = (
format_versions(new_versions, old_versions, GREEN))
pr('updated ', b':'.join(package), ': {',
formatted_old_versions, '} -> {',
formatted_new_versions, '}\n')
# Print the list of packages that were removed.
for package in old_packages:
if package not in new_packages:
pr('removed ', b':'.join(package), '\n')
else:
pr('Package versions not updated!\n')
if args.directory:
pr(f'\nRetaining git directory {dir}, to delete: rm -rf {dir}\n')
if __name__ == '__main__':
main()
| apache-2.0 |
zenx/suds-naive-timezone | suds/xsd/schema.py | 192 | 14328 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides an intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
transparent referenced type resolution and targeted denormalization.
"""
import suds.metrics
from suds import *
from suds.xsd import *
from suds.xsd.sxbuiltin import *
from suds.xsd.sxbasic import Factory as BasicFactory
from suds.xsd.sxbuiltin import Factory as BuiltinFactory
from suds.xsd.sxbase import SchemaObject
from suds.xsd.deplist import DepList
from suds.sax.element import Element
from suds.sax import splitPrefix, Namespace
from logging import getLogger
log = getLogger(__name__)
class SchemaCollection:
"""
A collection of schema objects. This class is needed because WSDLs
may contain more than one <schema/> node.
@ivar wsdl: A wsdl object.
@type wsdl: L{suds.wsdl.Definitions}
@ivar children: A list contained schemas.
@type children: [L{Schema},...]
@ivar namespaces: A dictionary of contained schemas by namespace.
@type namespaces: {str:L{Schema}}
"""
def __init__(self, wsdl):
"""
@param wsdl: A wsdl object.
@type wsdl: L{suds.wsdl.Definitions}
"""
self.wsdl = wsdl
self.children = []
self.namespaces = {}
def add(self, schema):
"""
Add a schema node to the collection. Schema(s) within the same target
namespace are consolidated.
@param schema: A schema object.
@type schema: (L{Schema})
"""
key = schema.tns[1]
existing = self.namespaces.get(key)
if existing is None:
self.children.append(schema)
self.namespaces[key] = schema
else:
existing.root.children += schema.root.children
existing.root.nsprefixes.update(schema.root.nsprefixes)
def load(self, options):
"""
Load the schema objects for the root nodes.
- de-references schemas
- merge schemas
@param options: An options dictionary.
@type options: L{options.Options}
@return: The merged schema.
@rtype: L{Schema}
"""
if options.autoblend:
self.autoblend()
for child in self.children:
child.build()
for child in self.children:
child.open_imports(options)
for child in self.children:
child.dereference()
log.debug('loaded:\n%s', self)
merged = self.merge()
log.debug('MERGED:\n%s', merged)
return merged
def autoblend(self):
"""
Ensure that all schemas within the collection
import each other which has a blending effect.
@return: self
@rtype: L{SchemaCollection}
"""
namespaces = self.namespaces.keys()
for s in self.children:
for ns in namespaces:
tns = s.root.get('targetNamespace')
if tns == ns:
continue
for imp in s.root.getChildren('import'):
if imp.get('namespace') == ns:
continue
imp = Element('import', ns=Namespace.xsdns)
imp.set('namespace', ns)
s.root.append(imp)
return self
def locate(self, ns):
"""
Find a schema by namespace. Only the URI portion of
the namespace is compared to each schema's I{targetNamespace}
@param ns: A namespace.
@type ns: (prefix,URI)
@return: The schema matching the namespace, else None.
@rtype: L{Schema}
"""
return self.namespaces.get(ns[1])
def merge(self):
"""
Merge the contained schemas into one.
@return: The merged schema.
@rtype: L{Schema}
"""
if len(self):
schema = self.children[0]
for s in self.children[1:]:
schema.merge(s)
return schema
else:
return None
def __len__(self):
return len(self.children)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
result = ['\nschema collection']
for s in self.children:
result.append(s.str(1))
return '\n'.join(result)
class Schema:
"""
The schema is an objectification of a <schema/> (xsd) definition.
It provides inspection, lookup and type resolution.
@ivar root: The root node.
@type root: L{sax.element.Element}
@ivar baseurl: The I{base} URL for this schema.
@type baseurl: str
@ivar container: A schema collection containing this schema.
@type container: L{SchemaCollection}
@ivar children: A list of direct top level children.
@type children: [L{SchemaObject},...]
@ivar all: A list of all (includes imported) top level children.
@type all: [L{SchemaObject},...]
@ivar types: A schema types cache.
@type types: {name:L{SchemaObject}}
@ivar imports: A list of import objects.
@type imports: [L{SchemaObject},...]
@ivar elements: A list of <element/> objects.
@type elements: [L{SchemaObject},...]
@ivar attributes: A list of <attribute/> objects.
@type attributes: [L{SchemaObject},...]
@ivar groups: A list of group objects.
@type groups: [L{SchemaObject},...]
@ivar agrps: A list of attribute group objects.
@type agrps: [L{SchemaObject},...]
@ivar form_qualified: The flag indicating:
(@elementFormDefault).
@type form_qualified: bool
"""
Tag = 'schema'
def __init__(self, root, baseurl, options, container=None):
"""
@param root: The xml root.
@type root: L{sax.element.Element}
@param baseurl: The base url used for importing.
@type baseurl: basestring
@param options: An options dictionary.
@type options: L{options.Options}
@param container: An optional container.
@type container: L{SchemaCollection}
"""
self.root = root
self.id = objid(self)
self.tns = self.mktns()
self.baseurl = baseurl
self.container = container
self.children = []
self.all = []
self.types = {}
self.imports = []
self.elements = {}
self.attributes = {}
self.groups = {}
self.agrps = {}
if options.doctor is not None:
options.doctor.examine(root)
form = self.root.get('elementFormDefault')
if form is None:
self.form_qualified = False
else:
self.form_qualified = ( form == 'qualified' )
if container is None:
self.build()
self.open_imports(options)
log.debug('built:\n%s', self)
self.dereference()
log.debug('dereferenced:\n%s', self)
def mktns(self):
"""
Make the schema's target namespace.
@return: The namespace representation of the schema's
targetNamespace value.
@rtype: (prefix, uri)
"""
tns = [None, self.root.get('targetNamespace')]
if tns[1] is not None:
tns[0] = self.root.findPrefix(tns[1])
return tuple(tns)
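# For example (illustrative): a root element declaring
#   targetNamespace="http://example.com/ns" under prefix "tns"
# yields ('tns', 'http://example.com/ns'); a schema with no
# targetNamespace yields (None, None).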
def build(self):
"""
Build the schema (object graph) using the root node
using the factory.
- Build the graph.
- Collate the children.
"""
self.children = BasicFactory.build(self.root, self)
collated = BasicFactory.collate(self.children)
self.children = collated[0]
self.attributes = collated[2]
self.imports = collated[1]
self.elements = collated[3]
self.types = collated[4]
self.groups = collated[5]
self.agrps = collated[6]
def merge(self, schema):
"""
Merge the contents from the schema. Only objects not already contained
in this schema's collections are merged. This supports bidirectional
imports, which produce cyclic includes.
@return: self
@rtype: L{Schema}
"""
for item in schema.attributes.items():
if item[0] in self.attributes:
continue
self.all.append(item[1])
self.attributes[item[0]] = item[1]
for item in schema.elements.items():
if item[0] in self.elements:
continue
self.all.append(item[1])
self.elements[item[0]] = item[1]
for item in schema.types.items():
if item[0] in self.types:
continue
self.all.append(item[1])
self.types[item[0]] = item[1]
for item in schema.groups.items():
if item[0] in self.groups:
continue
self.all.append(item[1])
self.groups[item[0]] = item[1]
for item in schema.agrps.items():
if item[0] in self.agrps:
continue
self.all.append(item[1])
self.agrps[item[0]] = item[1]
schema.merged = True
return self
def open_imports(self, options):
"""
Instruct all contained L{sxbasic.Import} children to import
the schema's which they reference. The contents of the
imported schema are I{merged} in.
@param options: An options dictionary.
@type options: L{options.Options}
"""
for imp in self.imports:
imported = imp.open(options)
if imported is None:
continue
imported.open_imports(options)
log.debug('imported:\n%s', imported)
self.merge(imported)
def dereference(self):
"""
Instruct all children to perform dereferencing.
"""
all = []
indexes = {}
for child in self.children:
child.content(all)
deplist = DepList()
for x in all:
x.qualify()
midx, deps = x.dependencies()
item = (x, tuple(deps))
deplist.add(item)
indexes[x] = midx
for x, deps in deplist.sort():
midx = indexes.get(x)
if midx is None: continue
d = deps[midx]
log.debug('(%s) merging %s <== %s', self.tns[1], Repr(x), Repr(d))
x.merge(d)
def locate(self, ns):
"""
Find a schema by namespace. Only the URI portion of
the namespace is compared to each schema's I{targetNamespace}.
The request is passed to the container.
@param ns: A namespace.
@type ns: (prefix,URI)
@return: The schema matching the namespace, else None.
@rtype: L{Schema}
"""
if self.container is not None:
return self.container.locate(ns)
else:
return None
def custom(self, ref, context=None):
"""
Get whether the specified reference is B{not} an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if B{not} a builtin, else False.
@rtype: bool
"""
if ref is None:
return True
else:
return ( not self.builtin(ref, context) )
def builtin(self, ref, context=None):
"""
Get whether the specified reference is an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if builtin, else False.
@rtype: bool
"""
w3 = 'http://www.w3.org'
try:
if isqref(ref):
ns = ref[1]
return ( ref[0] in Factory.tags and ns.startswith(w3) )
if context is None:
context = self.root
prefix = splitPrefix(ref)[0]
prefixes = context.findPrefixes(w3, 'startswith')
return ( prefix in prefixes and ref[0] in Factory.tags )
except:
return False
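# e.g. (illustrative): builtin(('string', 'http://www.w3.org/2001/XMLSchema'))
# returns True -- 'string' is in Factory.tags and the URI starts with the w3
# prefix -- while the same tag under a user namespace is custom().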
def instance(self, root, baseurl, options):
"""
Create and return a new schema object using the
specified I{root} and I{url}.
@param root: A schema root node.
@type root: L{sax.element.Element}
@param baseurl: A base URL.
@type baseurl: str
@param options: An options dictionary.
@type options: L{options.Options}
@return: The newly created schema object.
@rtype: L{Schema}
@note: This is only used by Import children.
"""
return Schema(root, baseurl, options)
def str(self, indent=0):
tab = '%*s'%(indent*3, '')
result = []
result.append('%s%s' % (tab, self.id))
result.append('%s(raw)' % tab)
result.append(self.root.str(indent+1))
result.append('%s(model)' % tab)
for c in self.children:
result.append(c.str(indent+1))
result.append('')
return '\n'.join(result)
def __repr__(self):
myrep = '<%s tns="%s"/>' % (self.id, self.tns[1])
return myrep.encode('utf-8')
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.str()
| lgpl-3.0 |
pya/mochi | mochi/core/exceptions.py | 3 | 1751 | from .utils import emit_sexp, issequence_except_str
class UnquoteSplicingError(Exception):
def __init__(self):
self.msg = 'unquote_splicing appeared in invalid context'
def __repr__(self):
return self.msg
def __str__(self):
return self.msg
class MochiSyntaxError(Exception):
def __init__(self, exp, filename):
lineno = 0
if hasattr(exp, 'lineno'):
lineno = exp.lineno
elif issequence_except_str(exp) and hasattr(exp[0], 'lineno'):
lineno = exp[0].lineno
self.msg = 'syntax error on ' + \
'file "' + filename + '", ' + 'line ' + str(lineno) + ': ' + emit_sexp(exp)
def __str__(self):
return self.msg
def __repr__(self):
return self.msg
class DuplicatedDefError(Exception):
def __init__(self, exp, filename):
lineno = 0
if hasattr(exp, 'lineno'):
lineno = exp.lineno
elif issequence_except_str(exp) and hasattr(exp[0], 'lineno'):
lineno = exp[0].lineno
self.msg = 'duplicated_def error: ' + \
'file "' + filename + '", ' + 'line ' + str(lineno) + ': ' + emit_sexp(exp)
def __str__(self):
return self.msg
def __repr__(self):
return self.msg
class ReadError(Exception):
def __init__(self, file, line, nest_level):
if nest_level > 0:
self.msg = 'read error: "' + file + \
'":line ' + str(line) + ': EOF inside a list'
else:
self.msg = 'read error: "' + file + \
'":line ' + str(line) + ': extra close parenthesis'
def __str__(self):
return self.msg
def __repr__(self):
return self.msg
| mit |
tomasreimers/tensorflow-emscripten | tensorflow/python/framework/proto_test.py | 178 | 1704 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ProtoTest(test.TestCase):
# TODO(vrv): re-enable this test once we figure out how this can
# pass the pip install test (where the user is expected to have
# protobuf installed).
def _testLargeProto(self):
# create a constant of size > 64MB.
a = constant_op.constant(np.zeros([1024, 1024, 17]))
# Serialize the resulting graph def.
gdef = a.op.graph.as_graph_def()
serialized = gdef.SerializeToString()
unserialized = ops.Graph().as_graph_def()
# Deserialize back. Protobuf python library should support
# protos larger than 64MB.
unserialized.ParseFromString(serialized)
self.assertProtoEquals(unserialized, gdef)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mengxn/tensorflow | tensorflow/python/kernel_tests/string_to_number_op_test.py | 104 | 4041 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
class StringToNumberOpTest(test.TestCase):
def _test(self, tf_type, good_pairs, bad_pairs):
with self.test_session():
# Build a small testing graph.
input_string = array_ops.placeholder(dtypes.string)
output = parsing_ops.string_to_number(
input_string, out_type=tf_type)
# Check all the good input/output pairs.
for instr, outnum in good_pairs:
result, = output.eval(feed_dict={input_string: [instr]})
self.assertAllClose([outnum], [result])
# Check that the bad inputs produce the right errors.
for instr, outstr in bad_pairs:
with self.assertRaisesOpError(outstr):
output.eval(feed_dict={input_string: [instr]})
def testToFloat(self):
self._test(dtypes.float32,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", float("INF")),
# Less than min value of float.
("-3.40283e+38", float("-INF")),
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToDouble(self):
self._test(dtypes.float64,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", 3.40283e+38),
# Less than min value of float.
("-3.40283e+38", -3.40283e+38),
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToInt32(self):
self._test(dtypes.int32,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647)],
[ # Less than min value of int32.
("-2147483649", _ERROR_MESSAGE + "-2147483649"),
# Greater than max value of int32.
("2147483648", _ERROR_MESSAGE + "2147483648"),
("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToInt64(self):
self._test(dtypes.int64,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647),
("-2147483649", -2147483649), # Less than min value of int32.
("2147483648", 2147483648)], # Greater than max value of int32.
[("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
if __name__ == "__main__":
test.main()
| apache-2.0 |
BehavioralInsightsTeam/edx-platform | common/lib/capa/capa/tests/test_customrender.py | 37 | 2295 | from lxml import etree
import unittest
import xml.sax.saxutils as saxutils
from capa.tests.helpers import test_capa_system
from capa import customrender
# just a handy shortcut
lookup_tag = customrender.registry.get_class_for_tag
def extract_context(xml):
"""
Given an xml element corresponding to the output of test_capa_system.render_template, get back the
original context
"""
return eval(xml.text)
def quote_attr(s):
return saxutils.quoteattr(s)[1:-1] # don't want the outer quotes
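# e.g. (illustrative): quote_attr('a<b') returns 'a&lt;b' -- saxutils.quoteattr
# gives '"a&lt;b"' and the [1:-1] slice strips the surrounding quotes.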
class HelperTest(unittest.TestCase):
'''
Make sure that our helper function works!
'''
def check(self, d):
xml = etree.XML(test_capa_system().render_template('blah', d))
self.assertEqual(d, extract_context(xml))
def test_extract_context(self):
self.check({})
self.check({1: 2})
self.check({'id': 'an id'})
self.check({'with"quote': 'also"quote'})
class SolutionRenderTest(unittest.TestCase):
'''
Make sure solutions render properly.
'''
def test_rendering(self):
solution = 'To compute unicorns, count them.'
xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution)
element = etree.fromstring(xml_str)
renderer = lookup_tag('solution')(test_capa_system(), element)
self.assertEqual(renderer.id, 'solution_12')
# Our test_capa_system "renders" templates to a div with the repr of the context.
xml = renderer.get_html()
context = extract_context(xml)
self.assertEqual(context, {'id': 'solution_12'})
class MathRenderTest(unittest.TestCase):
'''
Make sure math renders properly.
'''
def check_parse(self, latex_in, mathjax_out):
xml_str = """<math>{tex}</math>""".format(tex=latex_in)
element = etree.fromstring(xml_str)
renderer = lookup_tag('math')(test_capa_system(), element)
self.assertEqual(renderer.mathstr, mathjax_out)
def test_parsing(self):
self.check_parse('$abc$', '[mathjaxinline]abc[/mathjaxinline]')
self.check_parse('$abc', '$abc')
self.check_parse(r'$\displaystyle 2+2$', '[mathjax] 2+2[/mathjax]')
# NOTE: not testing get_html yet because I don't understand why it's doing what it's doing.
| agpl-3.0 |
birkholz/homeboard | bills/migrations/0001_initial.py | 1 | 1080 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('home', '0002_auto_20150321_0006'),
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('amount', models.DecimalField(max_digits=8, decimal_places=2)),
('author', models.ForeignKey(related_name='authored_bills', to=settings.AUTH_USER_MODEL)),
('home', models.ForeignKey(related_name='bills', to='home.Home')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| gpl-2.0 |
joeynyan/csss-minion | info.py | 2 | 9392 | #pylint: disable=C
import discord
from discord.ext import commands
import datetime
from mcstatus import MinecraftServer
from pagination import Pages
class Info():
def __init__(self, bot):
self.bot = bot
# Voting done, command disabled
# @commands.command()
# async def vote():
# embed = discord.Embed(colour=discord.Colour(0xdc4643), timestamp=datetime.datetime.utcfromtimestamp(1490339531))
# embed.set_thumbnail(url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
# embed.set_author(name="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
# embed.set_footer(text="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
# embed.add_field(name="CSSS Voting Information", value="The voting period for the Computing Science Student Society General Elections for the 2017-2018 term begins on Monday March 20th, 2017 at 11:59 PM and closes on Monday March 27th, 2017 at 11:59 PM. \n\nVisit https://www.sfu.ca/~pjalali/speeches.html to view candidate speeches, and http://websurvey.sfu.ca/survey/273372327 to vote.")
# await bot.say(embed=embed)
# @commands.command(pass_context = True)
# async def voteresult(self, ctx):
# """Return the voting results from the previous CSSS election."""
# if ctx.invoked_subcommand is None:
# embed = discord.Embed(title="CSSS Exec Positions", colour=discord.Colour(0xdc4643), timestamp=datetime.datetime.utcfromtimestamp(1490339531))
# embed.set_thumbnail(url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
# embed.set_author(name="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
# embed.set_footer(text="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
# embed.add_field(name="President", value="David Miiller")
# embed.add_field(name="Vice President", value="Jon Loewen")
# embed.add_field(name="Treasurer", value="Dustin Cao")
# embed.add_field(name="Director of Resources", value="Kiarash Mirsalehi")
# embed.add_field(name="Director of Events", value="Brendan Chan")
# embed.add_field(name="Director of Communications", value="Henry Zhao")
# embed.add_field(name="Director of Archives", value="Josh Wu")
# embed.add_field(name="Source Code", value="https://github.com/henrymzhao/csss-minion/")
# await self.bot.say(embed=embed)
# the following several functions are inspired by formatterhelper and default_help command
def is_cog(self, ctx, command):
return not self.is_bot(ctx, command) and not isinstance(command, commands.Command)  # Command lives in discord.ext.commands; the bare name was a NameError
def is_bot(self,ctx,command):
return command is ctx.bot
def clean_prefix(self,context):
"""The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``."""
user = context.bot.user
# this breaks if the prefix mention is not the bot itself but I
# consider this to be an *incredibly* strange use case. I'd rather go
# for this common use case rather than waste performance for the
# odd one.
return context.prefix.replace(user.mention, '@' + user.name)
def get_command_signature(self,ctx, command):
"""Retrieves the signature portion of the help page."""
result = []
prefix = self.clean_prefix(ctx)
cmd = command
parent = cmd.full_parent_name
if len(cmd.aliases) > 0:
aliases = '|'.join(cmd.aliases)
fmt = '{0}[{1.name}|{2}]'
if parent:
fmt = '{0}{3} [{1.name}|{2}]'
result.append(fmt.format(prefix, cmd, aliases, parent))
else:
name = prefix + cmd.name if not parent else prefix + parent + ' ' + cmd.name
result.append(name)
params = cmd.clean_params
if len(params) > 0:
for name, param in params.items():
if param.default is not param.empty:
# We don't want None or '' to trigger the [name=value] case and instead it should
# do [name] since [name=None] or [name=] are not exactly useful for the user.
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
result.append('[{}={}]'.format(name, param.default))
else:
result.append('[{}]'.format(name))
elif param.kind == param.VAR_POSITIONAL:
result.append('[{}...]'.format(name))
else:
result.append('<{}>'.format(name))
return ' '.join(result)
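# Illustrative rendering (names assumed): a command "roll" with alias "r",
# a required parameter "dice" and a defaulted parameter "sides=6", under
# prefix "." comes out as:  .[roll|r] <dice> [sides=6]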
@commands.group(pass_context = True)
async def help(self, ctx):
"""Display this help menu"""
if ctx.invoked_subcommand is None:
items = []
print(type(self.bot.commands))
for k in self.bot.commands: # grab all commands registered with the bot
com = self.bot.commands[k]
sig = self.get_command_signature(ctx,com) # grabs command signature
items.append([sig,com.help]) # append command signature and pydoc to list
items.append(["Source Code", "https://github.com/henrymzhao/csss-minion/"]) # keep src as last entry
p = Pages(self.bot, message=ctx.message, entries = items, per_page=4)
p.embed = discord.Embed(title="CSSS-Minion Commands", colour=discord.Colour(0xdc4643),timestamp=datetime.datetime.utcfromtimestamp(1490339531))
p.embed.set_thumbnail(url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
p.embed.set_author(name="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
p.embed.set_footer(text="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
await p.paginate()
# await self.bot.say(embed=embed)
@help.command(pass_context = True)
async def mc(self, ctx):
"""Display the help menu for the minecraft server"""
if ctx.message.channel.name != "minecraft":
await self.bot.say("Please move to #minecraft for this command.")
else:
embed = discord.Embed(title="CSSS-Minion Minecraft Commands", colour=discord.Colour(
0xdc4643), timestamp=datetime.datetime.utcfromtimestamp(1490339531))
embed.set_thumbnail(
url="https://media-elerium.cursecdn.com/avatars/13/940/635581309636616244.png")
embed.set_author(
name="CSSS-Minion", icon_url="https://cdn.discordapp.com/app-icons/293110345076047893/15e2a6722723827ff9bd53ca787df959.jpg")
embed.set_footer(
text="CSSS-Minion", icon_url="https://s-media-cache-ak0.pinimg.com/originals/aa/65/70/aa657074a12fb0d961a1789c671b73e3.jpg")
embed.add_field(name=".help mc", value="Displays this help menu.\n")
embed.add_field(name=".status", value="Displays the current server status.\n")
embed.add_field(name=".info", value="Information about how to connect to server.\n")
await self.bot.say(embed=embed)
@commands.command(pass_context = True)
async def status(self, ctx):
"""Display the number of players on the minecraft server"""
if ctx.message.channel.name != "minecraft":
await self.bot.say("Please move to #minecraft for this command.")
else:
server = MinecraftServer.lookup(self.bot.mcip)
try:
status = server.status()
except IOError:
await self.bot.say("It's dead Jim.")
return  # without a status there is nothing to report
# try:
# query = server.query()
# except Sock as e:
# await bot.say("Server too slow for query!")
em = discord.Embed(title='CSSS FTB Server Status', description=
"""The server has {0} players and replied in {1} ms.\n""".format(status.players.online, status.latency), colour=0x3D85C6 )
# + "\n{} are currently online.".format(", ".join(query.players.names)), colour=0x3D85C6)
await self.bot.send_message(ctx.message.channel, embed=em)
@commands.command(pass_context = True)
async def info(self, ctx):
"""Display the minecraft server information"""
if ctx.message.channel.name != "minecraft":
await self.bot.say("Please move to #minecraft for this command.")
else:
em = discord.Embed(title='CSSS FTB Server Information', description="""IP: 172.93.48.238
Modpack: FTB Infinity 2.7 (Not 3.0 !)
Minecraft: 1.7.10
Cracked: YES
See pinned message to download cracked client.""", colour=0x3D85C6)
await self.bot.send_message(ctx.message.channel, embed=em)
def setup(bot):
bot.add_cog(Info(bot))
| gpl-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/contrib/auth/context_processors.py | 97 | 1955 | # PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand.
class PermLookupDict(object):
def __init__(self, user, module_name):
self.user, self.module_name = user, module_name
def __repr__(self):
return str(self.user.get_all_permissions())
def __getitem__(self, perm_name):
return self.user.has_perm("%s.%s" % (self.module_name, perm_name))
def __iter__(self):
# To fix 'item in perms.someapp' and __getitem__ interaction we need to
# define __iter__. See #18979 for details.
raise TypeError("PermLookupDict is not iterable.")
def __bool__(self):
return self.user.has_module_perms(self.module_name)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
class PermWrapper(object):
def __init__(self, user):
self.user = user
def __getitem__(self, module_name):
return PermLookupDict(self.user, module_name)
def __iter__(self):
# I am large, I contain multitudes.
raise TypeError("PermWrapper is not iterable.")
def __contains__(self, perm_name):
"""
Lookup by "someapp" or "someapp.someperm" in perms.
"""
if '.' not in perm_name:
# The name refers to module.
return bool(self[perm_name])
module_name, perm_name = perm_name.split('.', 1)
return self[module_name][perm_name]
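# Template usage this enables (illustrative):
#   {% if perms.someapp %} ... {% endif %}
#   {% if perms.someapp.someperm %} ... {% endif %}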
def auth(request):
"""
Returns context variables required by apps that use Django's authentication
system.
If there is no 'user' attribute in the request, uses AnonymousUser (from
django.contrib.auth).
"""
if hasattr(request, 'user'):
user = request.user
else:
from django.contrib.auth.models import AnonymousUser
user = AnonymousUser()
return {
'user': user,
'perms': PermWrapper(user),
}
| mit |
grimmjow8/ansible | lib/ansible/modules/files/tempfile.py | 18 | 3459 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016 Krzysztof Magosa <krzysztof@magosa.pl>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: tempfile
version_added: "2.3"
author:
- Krzysztof Magosa
short_description: Creates temporary files and directories.
description:
- The C(tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible you need to use M(file) module.
options:
state:
description:
- Whether to create file or directory.
required: false
choices: [ "file", "directory" ]
default: file
path:
description:
- Location where temporary file or directory should be created. If path is not specified default system temporary directory will be used.
required: false
default: null
prefix:
description:
- Prefix of file/directory name created by module.
required: false
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
required: false
default: ""
'''
EXAMPLES = """
- name: create temporary build directory
tempfile:
state: directory
suffix: build
- name: create temporary file
tempfile:
state: file
suffix: temp
"""
RETURN = '''
path:
description: Path to created file or directory
returned: success
type: string
sample: "/tmp/ansible.bMlvdk"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from tempfile import mkstemp, mkdtemp
from os import close
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='file', choices=['file', 'directory']),
path = dict(default=None),
prefix = dict(default='ansible.'),
suffix = dict(default='')
)
)
try:
if module.params['state'] == 'file':
handle, path = mkstemp(
prefix=module.params['prefix'],
suffix=module.params['suffix'],
dir=module.params['path']
)
close(handle)
elif module.params['state'] == 'directory':
path = mkdtemp(
prefix=module.params['prefix'],
suffix=module.params['suffix'],
dir=module.params['path']
)
module.exit_json(changed=True, path=path)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
blacklin/kbengine | kbe/src/lib/python/Lib/asyncio/streams.py | 63 | 16169 | """Stream-related things."""
__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
'open_connection', 'start_server',
'IncompleteReadError',
]
import socket
if hasattr(socket, 'AF_UNIX'):
__all__.extend(['open_unix_connection', 'start_unix_server'])
from . import coroutines
from . import events
from . import futures
from . import protocols
from .coroutines import coroutine
from .log import logger
_DEFAULT_LIMIT = 2**16
class IncompleteReadError(EOFError):
"""
Incomplete read error. Attributes:
- partial: read bytes string before the end of stream was reached
- expected: total number of expected bytes
"""
def __init__(self, partial, expected):
EOFError.__init__(self, "%s bytes read on a total of %s expected bytes"
% (len(partial), expected))
self.partial = partial
self.expected = expected
@coroutine
def open_connection(host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""A wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = yield from loop.create_connection(
lambda: protocol, host, port, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
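# Minimal client sketch (illustrative; host, port and payload are assumed):
#
#   @coroutine
#   def fetch_status_line(loop):
#       reader, writer = yield from open_connection('example.com', 80, loop=loop)
#       writer.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
#       line = yield from reader.readline()  # first line of the HTTP response
#       writer.close()
#       return line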
@coroutine
def start_server(client_connected_cb, host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Start a socket server, call back for each client connected.
The first parameter, `client_connected_cb`, takes two parameters:
client_reader, client_writer. client_reader is a StreamReader
object, while client_writer is a StreamWriter object. This
parameter can either be a plain callback function or a coroutine;
if it is a coroutine, it will be automatically converted into a
Task.
The rest of the arguments are all the usual arguments to
loop.create_server() except protocol_factory; most common are
positional host and port, with various optional keyword arguments
following. The return value is the same as loop.create_server().
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
The return value is the same as loop.create_server(), i.e. a
Server object which can be used to stop the service.
"""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
return (yield from loop.create_server(factory, host, port, **kwds))
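# Companion sketch for start_server (illustrative callback; address assumed):
#
#   @coroutine
#   def handle_echo(reader, writer):
#       data = yield from reader.read(100)
#       writer.write(data)            # echo the bytes back
#       yield from writer.drain()     # respect flow control
#       writer.close()
#
#   # server = loop.run_until_complete(
#   #     start_server(handle_echo, '127.0.0.1', 8888, loop=loop))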
if hasattr(socket, 'AF_UNIX'):
# UNIX Domain Sockets are supported on this platform
@coroutine
def open_unix_connection(path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = yield from loop.create_unix_connection(
lambda: protocol, path, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
@coroutine
def start_unix_server(client_connected_cb, path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `start_server` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
return (yield from loop.create_unix_server(factory, path, **kwds))
class FlowControlMixin(protocols.Protocol):
"""Reusable flow control logic for StreamWriter.drain().
This implements the protocol methods pause_writing(),
resume_reading() and connection_lost(). If the subclass overrides
these it must call the super methods.
StreamWriter.drain() must wait for _drain_helper() coroutine.
"""
def __init__(self, loop=None):
self._loop = loop # May be None; we may never need it.
self._paused = False
self._drain_waiter = None
self._connection_lost = False
def pause_writing(self):
assert not self._paused
self._paused = True
if self._loop.get_debug():
logger.debug("%r pauses writing", self)
def resume_writing(self):
assert self._paused
self._paused = False
if self._loop.get_debug():
logger.debug("%r resumes writing", self)
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
def connection_lost(self, exc):
self._connection_lost = True
# Wake up the writer if currently paused.
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
return
self._drain_waiter = None
if waiter.done():
return
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
@coroutine
def _drain_helper(self):
if self._connection_lost:
raise ConnectionResetError('Connection lost')
if not self._paused:
return
waiter = self._drain_waiter
assert waiter is None or waiter.cancelled()
waiter = futures.Future(loop=self._loop)
self._drain_waiter = waiter
yield from waiter
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
"""Helper class to adapt between Protocol and StreamReader.
(This is a helper class instead of making StreamReader itself a
Protocol subclass, because the StreamReader has other potential
uses, and to prevent the user of the StreamReader to accidentally
call inappropriate methods of the protocol.)
"""
def __init__(self, stream_reader, client_connected_cb=None, loop=None):
super().__init__(loop=loop)
self._stream_reader = stream_reader
self._stream_writer = None
self._client_connected_cb = client_connected_cb
def connection_made(self, transport):
self._stream_reader.set_transport(transport)
if self._client_connected_cb is not None:
self._stream_writer = StreamWriter(transport, self,
self._stream_reader,
self._loop)
res = self._client_connected_cb(self._stream_reader,
self._stream_writer)
if coroutines.iscoroutine(res):
self._loop.create_task(res)
def connection_lost(self, exc):
if exc is None:
self._stream_reader.feed_eof()
else:
self._stream_reader.set_exception(exc)
super().connection_lost(exc)
def data_received(self, data):
self._stream_reader.feed_data(data)
def eof_received(self):
self._stream_reader.feed_eof()
class StreamWriter:
"""Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
get_extra_info() and close(). It adds drain() which returns an
optional Future on which you can wait for flow control. It also
adds a transport property which references the Transport
directly.
"""
def __init__(self, transport, protocol, reader, loop):
self._transport = transport
self._protocol = protocol
# drain() expects that the reader has an exception() method
assert reader is None or isinstance(reader, StreamReader)
self._reader = reader
self._loop = loop
def __repr__(self):
info = [self.__class__.__name__, 'transport=%r' % self._transport]
if self._reader is not None:
info.append('reader=%r' % self._reader)
return '<%s>' % ' '.join(info)
@property
def transport(self):
return self._transport
def write(self, data):
self._transport.write(data)
def writelines(self, data):
self._transport.writelines(data)
def write_eof(self):
return self._transport.write_eof()
def can_write_eof(self):
return self._transport.can_write_eof()
def close(self):
return self._transport.close()
def get_extra_info(self, name, default=None):
return self._transport.get_extra_info(name, default)
@coroutine
def drain(self):
"""Flush the write buffer.
The intended use is to write
w.write(data)
yield from w.drain()
"""
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
yield from self._protocol._drain_helper()
class StreamReader:
def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
# The line length limit is a security feature;
# it also doubles as half the buffer limit.
self._limit = limit
if loop is None:
loop = events.get_event_loop()
self._loop = loop
self._buffer = bytearray()
self._eof = False # Whether we're done.
self._waiter = None # A future.
self._exception = None
self._transport = None
self._paused = False
def exception(self):
return self._exception
def set_exception(self, exc):
self._exception = exc
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_exception(exc)
def set_transport(self, transport):
assert self._transport is None, 'Transport already set'
self._transport = transport
def _maybe_resume_transport(self):
if self._paused and len(self._buffer) <= self._limit:
self._paused = False
self._transport.resume_reading()
def feed_eof(self):
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(True)
def at_eof(self):
"""Return True if the buffer is empty and 'feed_eof' was called."""
return self._eof and not self._buffer
def feed_data(self, data):
assert not self._eof, 'feed_data after feed_eof'
if not data:
return
self._buffer.extend(data)
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(False)
if (self._transport is not None and
not self._paused and
len(self._buffer) > 2*self._limit):
try:
self._transport.pause_reading()
except NotImplementedError:
# The transport can't be paused.
# We'll just have to buffer all data.
# Forget the transport so we don't keep trying.
self._transport = None
else:
self._paused = True
def _create_waiter(self, func_name):
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
# would have unexpected behaviour: it would not be possible to know
# which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError('%s() called while another coroutine is '
'already waiting for incoming data' % func_name)
return futures.Future(loop=self._loop)
@coroutine
def readline(self):
if self._exception is not None:
raise self._exception
line = bytearray()
not_enough = True
while not_enough:
while self._buffer and not_enough:
ichar = self._buffer.find(b'\n')
if ichar < 0:
line.extend(self._buffer)
self._buffer.clear()
else:
ichar += 1
line.extend(self._buffer[:ichar])
del self._buffer[:ichar]
not_enough = False
if len(line) > self._limit:
self._maybe_resume_transport()
raise ValueError('Line is too long')
if self._eof:
break
if not_enough:
self._waiter = self._create_waiter('readline')
try:
yield from self._waiter
finally:
self._waiter = None
self._maybe_resume_transport()
return bytes(line)
@coroutine
def read(self, n=-1):
if self._exception is not None:
raise self._exception
if not n:
return b''
if n < 0:
# This used to just loop creating a new waiter hoping to
# collect everything in self._buffer, but that would
# deadlock if the subprocess sends more than self.limit
# bytes. So just call self.read(self._limit) until EOF.
blocks = []
while True:
block = yield from self.read(self._limit)
if not block:
break
blocks.append(block)
return b''.join(blocks)
else:
if not self._buffer and not self._eof:
self._waiter = self._create_waiter('read')
try:
yield from self._waiter
finally:
self._waiter = None
if n < 0 or len(self._buffer) <= n:
data = bytes(self._buffer)
self._buffer.clear()
else:
# n > 0 and len(self._buffer) > n
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
return data
@coroutine
def readexactly(self, n):
if self._exception is not None:
raise self._exception
# There used to be "optimized" code here. It created its own
# Future and waited until self._buffer had at least the n
# bytes, then called read(n). Unfortunately, this could pause
# the transport if the argument was larger than the pause
# limit (which is twice self._limit). So now we just read()
# into a local buffer.
blocks = []
while n > 0:
block = yield from self.read(n)
if not block:
partial = b''.join(blocks)
raise IncompleteReadError(partial, len(partial) + n)
blocks.append(block)
n -= len(block)
return b''.join(blocks)
| lgpl-3.0 |
YingHsuan/termite_data_server | web2py/applications-original/admin/controllers/webservices.py | 37 | 3569 | from gluon.admin import *
from gluon.fileutils import abspath, read_file, write_file
from gluon.tools import Service
from glob import glob
import shutil
import platform
import time
import base64
import os
import re    # needed by list_apps() below
import stat  # needed by hash_file() below
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
service = Service(globals())
@service.jsonrpc
def login():
"dummy function to test credentials"
return True
@service.jsonrpc
def list_apps():
"list installed applications"
regex = re.compile('^\w+$')
apps = [f for f in os.listdir(apath(r=request)) if regex.match(f)]
return apps
@service.jsonrpc
def list_files(app, pattern='.*\.py$'):
files = listdir(apath('%s/' % app, r=request), pattern)
return [x.replace('\\', '/') for x in files]
@service.jsonrpc
def read_file(filename, b64=False):
""" Visualize object code """
f = open(apath(filename, r=request), "rb")
try:
data = f.read()
if not b64:
data = data.replace('\r', '')
else:
data = base64.b64encode(data)
finally:
f.close()
return data
@service.jsonrpc
def write_file(filename, data, b64=False):
f = open(apath(filename, r=request), "wb")
try:
if not b64:
data = data.replace('\r\n', '\n').strip() + '\n'
else:
data = base64.b64decode(data)
f.write(data)
finally:
f.close()
@service.jsonrpc
def hash_file(filename):
data = read_file(filename)
file_hash = md5_hash(data)
path = apath(filename, r=request)
saved_on = os.stat(path)[stat.ST_MTIME]
size = os.path.getsize(path)
return dict(saved_on=saved_on, file_hash=file_hash, size=size)
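# Illustrative return value (numbers assumed):
#   {'saved_on': 1424563200, 'file_hash': 'd41d8cd98f00b204e9800998ecf8427e',
#    'size': 1024}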
@service.jsonrpc
def install(app_name, filename, data, overwrite=True):
f = StringIO(base64.b64decode(data))
installed = app_install(app_name, f, request, filename,
overwrite=overwrite)
return installed
@service.jsonrpc
def attach_debugger(host='localhost', port=6000, authkey='secret password'):
import gluon.contrib.qdb as qdb
import gluon.debug
from multiprocessing.connection import Listener
if isinstance(authkey, unicode):
authkey = authkey.encode('utf8')
if not hasattr(gluon.debug, 'qdb_listener'):
# create a remote debugger server and wait for connection
address = (host, port) # family is deduced to be 'AF_INET'
gluon.debug.qdb_listener = Listener(address, authkey=authkey)
gluon.debug.qdb_connection = gluon.debug.qdb_listener.accept()
# create the backend
gluon.debug.qdb_debugger = qdb.Qdb(gluon.debug.qdb_connection)
gluon.debug.dbg = gluon.debug.qdb_debugger
# welcome message (this should be displayed on the frontend)
print 'debugger connected to', gluon.debug.qdb_listener.last_accepted
return True # connection successful!
@service.jsonrpc
def detach_debugger():
import gluon.contrib.qdb as qdb
import gluon.debug
# stop current debugger
if gluon.debug.qdb_debugger:
try:
gluon.debug.qdb_debugger.do_quit()
except:
pass
if hasattr(gluon.debug, 'qdb_listener'):
if gluon.debug.qdb_connection:
gluon.debug.qdb_connection.close()
del gluon.debug.qdb_connection
if gluon.debug.qdb_listener:
gluon.debug.qdb_listener.close()
del gluon.debug.qdb_listener
gluon.debug.qdb_debugger = None
return True
def call():
session.forget()
return service()
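# A minimal client sketch (assumptions: the admin app is reachable at
# 127.0.0.1:8000, web2py exposes the @service.jsonrpc methods above under
# /admin/webservices/call/jsonrpc, and the third-party jsonrpclib package
# provides the client):
#
# import jsonrpclib
# server = jsonrpclib.Server(
#     'http://127.0.0.1:8000/admin/webservices/call/jsonrpc')
# server.login()       # -> True
# server.list_apps()   # -> e.g. ['admin', 'welcome']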
| bsd-3-clause |
hucaloof/shadowsocks | shadowsocks/eventloop.py | 949 | 7288 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from ssloop
# https://github.com/clowwindy/ssloop
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import time
import socket
import select
import errno
import logging
from collections import defaultdict
from shadowsocks import shell
__all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR',
'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES']
POLL_NULL = 0x00
POLL_IN = 0x01
POLL_OUT = 0x04
POLL_ERR = 0x08
POLL_HUP = 0x10
POLL_NVAL = 0x20
EVENT_NAMES = {
POLL_NULL: 'POLL_NULL',
POLL_IN: 'POLL_IN',
POLL_OUT: 'POLL_OUT',
POLL_ERR: 'POLL_ERR',
POLL_HUP: 'POLL_HUP',
POLL_NVAL: 'POLL_NVAL',
}
# we check timeouts every TIMEOUT_PRECISION seconds
TIMEOUT_PRECISION = 10
class KqueueLoop(object):
MAX_EVENTS = 1024
def __init__(self):
self._kqueue = select.kqueue()
self._fds = {}
def _control(self, fd, mode, flags):
events = []
if mode & POLL_IN:
events.append(select.kevent(fd, select.KQ_FILTER_READ, flags))
if mode & POLL_OUT:
events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags))
for e in events:
self._kqueue.control([e], 0)
def poll(self, timeout):
if timeout < 0:
timeout = None # kqueue behaviour
events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout)
results = defaultdict(lambda: POLL_NULL)
for e in events:
fd = e.ident
if e.filter == select.KQ_FILTER_READ:
results[fd] |= POLL_IN
elif e.filter == select.KQ_FILTER_WRITE:
results[fd] |= POLL_OUT
return results.items()
def register(self, fd, mode):
self._fds[fd] = mode
self._control(fd, mode, select.KQ_EV_ADD)
def unregister(self, fd):
self._control(fd, self._fds[fd], select.KQ_EV_DELETE)
del self._fds[fd]
def modify(self, fd, mode):
self.unregister(fd)
self.register(fd, mode)
def close(self):
self._kqueue.close()
class SelectLoop(object):
def __init__(self):
self._r_list = set()
self._w_list = set()
self._x_list = set()
def poll(self, timeout):
r, w, x = select.select(self._r_list, self._w_list, self._x_list,
timeout)
results = defaultdict(lambda: POLL_NULL)
for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]:
for fd in p[0]:
results[fd] |= p[1]
return results.items()
def register(self, fd, mode):
if mode & POLL_IN:
self._r_list.add(fd)
if mode & POLL_OUT:
self._w_list.add(fd)
if mode & POLL_ERR:
self._x_list.add(fd)
def unregister(self, fd):
if fd in self._r_list:
self._r_list.remove(fd)
if fd in self._w_list:
self._w_list.remove(fd)
if fd in self._x_list:
self._x_list.remove(fd)
def modify(self, fd, mode):
self.unregister(fd)
self.register(fd, mode)
def close(self):
pass
class EventLoop(object):
def __init__(self):
if hasattr(select, 'epoll'):
self._impl = select.epoll()
model = 'epoll'
elif hasattr(select, 'kqueue'):
self._impl = KqueueLoop()
model = 'kqueue'
elif hasattr(select, 'select'):
self._impl = SelectLoop()
model = 'select'
else:
raise Exception('can not find any available functions in select '
'package')
self._fdmap = {} # (f, handler)
self._last_time = time.time()
self._periodic_callbacks = []
self._stopping = False
logging.debug('using event model: %s', model)
def poll(self, timeout=None):
events = self._impl.poll(timeout)
return [(self._fdmap[fd][0], fd, event) for fd, event in events]
def add(self, f, mode, handler):
fd = f.fileno()
self._fdmap[fd] = (f, handler)
self._impl.register(fd, mode)
def remove(self, f):
fd = f.fileno()
del self._fdmap[fd]
self._impl.unregister(fd)
def add_periodic(self, callback):
self._periodic_callbacks.append(callback)
def remove_periodic(self, callback):
self._periodic_callbacks.remove(callback)
def modify(self, f, mode):
fd = f.fileno()
self._impl.modify(fd, mode)
def stop(self):
self._stopping = True
def run(self):
events = []
while not self._stopping:
asap = False
try:
events = self.poll(TIMEOUT_PRECISION)
except (OSError, IOError) as e:
if errno_from_exception(e) in (errno.EPIPE, errno.EINTR):
# EPIPE: Happens when the client closes the connection
# EINTR: Happens when a signal is received
# handle them as soon as possible
asap = True
logging.debug('poll:%s', e)
else:
logging.error('poll:%s', e)
import traceback
traceback.print_exc()
continue
for sock, fd, event in events:
handler = self._fdmap.get(fd, None)
if handler is not None:
handler = handler[1]
try:
handler.handle_event(sock, fd, event)
except (OSError, IOError) as e:
shell.print_exception(e)
now = time.time()
if asap or now - self._last_time >= TIMEOUT_PRECISION:
for callback in self._periodic_callbacks:
callback()
self._last_time = now
def __del__(self):
self._impl.close()
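# A minimal usage sketch (EchoHandler and conn are illustrative assumptions,
# not part of shadowsocks; conn is a connected, non-blocking socket):
#
# class EchoHandler(object):
#     def handle_event(self, sock, fd, event):
#         if event & POLL_IN:
#             sock.send(sock.recv(4096))
#
# loop = EventLoop()
# loop.add(conn, POLL_IN, EchoHandler())
# loop.run()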
# from tornado
def errno_from_exception(e):
"""Provides the errno from an Exception object.
There are cases where the errno attribute is not set, so we pull
the errno out of the args; but if someone instantiates an Exception
without any args you will get a tuple error. So this function
abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
# from tornado
def get_sock_error(sock):
error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
return socket.error(error_number, os.strerror(error_number))
| apache-2.0 |
clintongormley/elasticsearch | dev-tools/get-bwc-version.py | 136 | 3149 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
'''
Downloads and extracts elasticsearch for backwards compatibility tests.
'''
import argparse
import os
import platform
import shutil
import subprocess
import urllib.request
import zipfile
def parse_config():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--path', metavar='DIR', default='./backwards',
help='Where to extract elasticsearch')
parser.add_argument('--force', action='store_true', default=False,
help='Delete and redownload if the version already exists')
parser.add_argument('version', metavar='X.Y.Z',
help='Version of elasticsearch to grab')
return parser.parse_args()
def main():
c = parse_config()
if not os.path.exists(c.path):
print('Creating %s' % c.path)
os.mkdir(c.path)
is_windows = platform.system() == 'Windows'
os.chdir(c.path)
version_dir = 'elasticsearch-%s' % c.version
if os.path.exists(version_dir):
if c.force:
print('Removing old download %s' % version_dir)
shutil.rmtree(version_dir)
else:
print('Version %s exists at %s' % (c.version, version_dir))
return
# before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts
if is_windows:
filename = '%s.zip' % version_dir
else:
filename = '%s.tar.gz' % version_dir
if c.version == '1.2.0':
# 1.2.0 was pulled from download.elasticsearch.org because of routing bug:
url = 'http://central.maven.org/maven2/org/elasticsearch/elasticsearch/1.2.0/%s' % filename
elif c.version.startswith('0.') or c.version.startswith('1.'):
url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
else:
url = 'http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/%s/%s' % (c.version, filename)
print('Downloading %s' % url)
urllib.request.urlretrieve(url, filename)
print('Extracting to %s' % version_dir)
if is_windows:
archive = zipfile.ZipFile(filename)
archive.extractall()
else:
# for some reason python's tarfile module has trouble with ES tgz?
subprocess.check_call('tar -xzf %s' % filename, shell=True)
print('Cleaning up %s' % filename)
os.remove(filename)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Ctrl-C caught, exiting')
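# Example invocations (the version numbers are illustrative; main() picks the
# download URL from the version prefix):
#
#   python get-bwc-version.py 1.7.5
#   python get-bwc-version.py --path /tmp/bwc --force 2.0.0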
| apache-2.0 |
logituit/Recbot | PY/imdbpie/objects.py | 2 | 7777 | from __future__ import absolute_import, unicode_literals
class Person(object):
def __init__(self, data):
# primary attributes that should be set in all cases
self.name = self._extract_name(data)
self.imdb_id = self._extract_imdb_id(data)
self.photo_url = self._extract_photo_url(data)
# secondary attributes; these will only get data when the Person is
# built via get_title_by_id. token and label are the person's
# categorisation, e.g. token: writers, label: Series writing credits
self.token = data.get('token')
self.label = data.get('label')
# attr is a note about this person's work
# e.g. (1990 - 1992 20 episodes)
self.attr = data.get('attr')
# other primary information about their part
self.roles = (
data.get('char').split('/') if data.get('char') else []
)
self.job = data.get('job')
@staticmethod
def _extract_name(data):
# A Person object can be given the response of a get_title_by_id
# or get_person_by_id call.
# This function covers the slight data structure differences
# to extract the name.
name = data.get('name')
if isinstance(name, dict):
return name.get('name')
return name
@staticmethod
def _extract_imdb_id(data):
name = data.get('name')
if isinstance(name, dict):
return name.get('nconst')
return data.get('nconst')
@staticmethod
def _extract_photo_url(data):
photo_url = data.get('image', {}).get('url')
return photo_url
def __repr__(self):
return '<Person: {0} ({1})>'.format(repr(self.name),
repr(self.imdb_id))
def __unicode__(self):
return '<Person: {0} ({1})>'.format(self.name.encode('utf-8'),
self.imdb_id)
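# A minimal construction sketch (the dict below is an assumed fragment of an
# IMDb API response, not verbatim output):
#
# data = {'name': {'name': 'Jane Doe', 'nconst': 'nm0000001'},
#         'char': 'Alice/Bob', 'token': 'cast', 'label': 'Cast'}
# person = Person(data)
# person.imdb_id   # -> 'nm0000001'
# person.roles     # -> ['Alice', 'Bob']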
class Title(object):
def __init__(self, data):
self.imdb_id = data.get('tconst')
self.title = data.get('title')
self.type = data.get('type')
self.year = self._extract_year(data)
self.tagline = data.get('tagline')
self.plots = data.get('plots')
self.plot_outline = data.get('plot', {}).get('outline')
self.rating = data.get('rating')
self.genres = data.get('genres')
self.votes = data.get('num_votes')
self.runtime = data.get('runtime', {}).get('time')
self.poster_url = data.get('image', {}).get('url')
self.cover_url = self._extract_cover_url(data)
self.release_date = data.get('release_date', {}).get('normal')
self.certification = data.get('certificate', {}).get(
'certificate')
self.trailer_image_urls = self._extract_trailer_image_urls(data)
self.directors_summary = self._extract_directors_summary(data)
self.creators = self._extract_creators(data)
self.cast_summary = self._extract_cast_summary(data)
self.writers_summary = self._extract_writers_summary(data)
self.credits = self._extract_credits(data)
self.trailers = self._extract_trailers(data)
def _extract_directors_summary(self, data):
return [Person(p) for p in data.get('directors_summary', [])]
def _extract_creators(self, data):
return [Person(p) for p in data.get('creators', [])]
def _extract_trailers(self, data):
def build_dict(val):
return {'url': val['url'], 'format': val['format']}
trailers = data.get('trailer', {}).get('encodings', {}).values()
return [build_dict(trailer) for trailer in trailers]
def _extract_writers_summary(self, data):
return [Person(p) for p in data.get('writers_summary', [])]
def _extract_cast_summary(self, data):
return [Person(p) for p in data.get('cast_summary', [])]
def _extract_credits(self, data):
credits = []
if not data.get('credits'):
return []
for credit_group in data['credits']:
"""
Possible tokens: directors, cast, writers, producers and others
"""
for person in credit_group['list']:
person_extra = {
'token': credit_group.get('token'),
'label': credit_group.get('label'),
'job': person.get('job'),
'attr': person.get('attr')
}
person_data = person.copy()
person_data.update(person_extra)
if 'name' in person_data.keys():
# some 'special' credits such as script rewrites
# have different formatting.
# we skip those here, losing some data due to this check
credits.append(Person(person_data))
return credits
def _extract_year(self, data):
year = data.get('year')
# if there's no year the API returns ????...
if not year or year == '????':
return None
return int(year)
def _extract_cover_url(self, data):
if self.poster_url:
return '{0}_SX214_.jpg'.format(self.poster_url.replace('.jpg', ''))
def _extract_trailer_image_urls(self, data):
slates = data.get('trailer', {}).get('slates', [])
return [s['url'] for s in slates]
def __repr__(self):
return '<Title: {0} - {1}>'.format(repr(self.title),
repr(self.imdb_id))
def __unicode__(self):
return '<Title: {0} - {1}>'.format(self.title, self.imdb_id)
class Image(object):
def __init__(self, data):
self.caption = data.get('caption')
self.url = data.get('image', {}).get('url')
self.width = data.get('image', {}).get('width')
self.height = data.get('image', {}).get('height')
def __repr__(self):
return '<Image: {0}>'.format(repr(self.caption))
def __unicode__(self):
return '<Image: {0}>'.format(self.caption.encode('utf-8'))
class Episode(object):
def __init__(self, data):
self.imdb_id = data.get('tconst')
self.release_date = data.get('release_date', {}).get('normal')
self.title = data.get('title')
self.series_name = data.get('series_name')
self.type = data.get('type')
self.year = self._extract_year(data)
self.season = self._extract_season_episode(data.get('season'))
self.episode = self._extract_season_episode(data.get('episode'))
def _extract_season_episode(self, value):
return int(value) if value and value != 'unknown' else None
def _extract_year(self, data):
year = data.get('year')
# if there's no year the API returns ????...
if not year or year == '????':
return None
return int(year)
def __repr__(self):
return '<Episode: {0} - {1}>'.format(repr(self.title),
repr(self.imdb_id))
def __unicode__(self):
return '<Episode: {0} - {1}>'.format(self.title, self.imdb_id)
class Review(object):
def __init__(self, data):
self.username = data.get('user_name')
self.text = data.get('text')
self.date = data.get('date')
self.rating = data.get('user_rating')
self.summary = data.get('summary')
self.status = data.get('status')
self.user_location = data.get('user_location')
self.user_score = data.get('user_score')
self.user_score_count = data.get('user_score_count')
def __repr__(self):
return '<Review: {0}>'.format(repr(self.text[:20]))
def __unicode__(self):
return '<Review: {0}>'.format(self.text[:20].encode('utf-8'))
| mit |
Vixionar/django | django/contrib/gis/gdal/__init__.py | 327 | 2635 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import ( # NOQA
GDALException, OGRException, OGRIndexError, SRSException, check_err,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDALRaster', 'GDAL_VERSION', 'SpatialReference', 'CoordTransform',
'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
| bsd-3-clause |
jnerin/ansible | test/units/module_utils/gcp/test_utils.py | 127 | 15724 | # -*- coding: utf-8 -*-
# (c) 2016, Tom Melendez <tom@supertom.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from ansible.compat.tests import mock, unittest
from ansible.module_utils.gcp import check_min_pkg_version, GCPUtils, GCPInvalidURLError
def build_distribution(version):
obj = mock.MagicMock()
obj.version = '0.5.0'
return obj
class GCPUtilsTestCase(unittest.TestCase):
params_dict = {
'url_map_name': 'foo_url_map_name',
'description': 'foo_url_map description',
'host_rules': [
{
'description': 'host rules description',
'hosts': [
'www.example.com',
'www2.example.com'
],
'path_matcher': 'host_rules_path_matcher'
}
],
'path_matchers': [
{
'name': 'path_matcher_one',
'description': 'path matcher one',
'defaultService': 'bes-pathmatcher-one-default',
'pathRules': [
{
'service': 'my-one-bes',
'paths': [
'/',
'/aboutus'
]
}
]
},
{
'name': 'path_matcher_two',
'description': 'path matcher two',
'defaultService': 'bes-pathmatcher-two-default',
'pathRules': [
{
'service': 'my-two-bes',
'paths': [
'/webapp',
'/graphs'
]
}
]
}
]
}
@mock.patch("pkg_resources.get_distribution", side_effect=build_distribution)
def test_check_minimum_pkg_version(self, mockobj):
self.assertTrue(check_min_pkg_version('foobar', '0.4.0'))
self.assertTrue(check_min_pkg_version('foobar', '0.5.0'))
self.assertFalse(check_min_pkg_version('foobar', '0.6.0'))
def test_parse_gcp_url(self):
# region, resource, entity, method
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/regions/us-east1/instanceGroupManagers/my-mig/recreateInstances'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertEquals('us-east1', actual['region'])
self.assertEquals('instanceGroupManagers', actual['resource_name'])
self.assertEquals('my-mig', actual['entity_name'])
self.assertEquals('recreateInstances', actual['method_name'])
# zone, resource, entity, method
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/zones/us-east1-c/instanceGroupManagers/my-mig/recreateInstances'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertEquals('us-east1-c', actual['zone'])
self.assertEquals('instanceGroupManagers', actual['resource_name'])
self.assertEquals('my-mig', actual['entity_name'])
self.assertEquals('recreateInstances', actual['method_name'])
# global, resource
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertTrue('global' in actual)
self.assertTrue(actual['global'])
self.assertEquals('urlMaps', actual['resource_name'])
# global, resource, entity
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/my-url-map'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('myproject', actual['project'])
self.assertTrue('global' in actual)
self.assertTrue(actual['global'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('compute', actual['service'])
# global URL, resource, entity, method_name
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/mybackendservice/getHealth'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertTrue('global' in actual)
self.assertTrue(actual['global'])
self.assertEquals('backendServices', actual['resource_name'])
self.assertEquals('mybackendservice', actual['entity_name'])
self.assertEquals('getHealth', actual['method_name'])
# no location in URL
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy/setUrlMap'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertFalse('global' in actual)
self.assertEquals('targetHttpProxies', actual['resource_name'])
self.assertEquals('mytargetproxy', actual['entity_name'])
self.assertEquals('setUrlMap', actual['method_name'])
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertFalse('global' in actual)
self.assertEquals('targetHttpProxies', actual['resource_name'])
self.assertEquals('mytargetproxy', actual['entity_name'])
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertFalse('global' in actual)
self.assertEquals('targetHttpProxies', actual['resource_name'])
# test exceptions
no_projects_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global/backendServices/mybackendservice/getHealth'
no_resource_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global'
no_resource_no_loc_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject'
with self.assertRaises(GCPInvalidURLError) as cm:
GCPUtils.parse_gcp_url(no_projects_input_url)
self.assertTrue(cm.exception, GCPInvalidURLError)
with self.assertRaises(GCPInvalidURLError) as cm:
GCPUtils.parse_gcp_url(no_resource_input_url)
self.assertTrue(cm.exception, GCPInvalidURLError)
with self.assertRaises(GCPInvalidURLError) as cm:
GCPUtils.parse_gcp_url(no_resource_no_loc_input_url)
self.assertTrue(cm.exception, GCPInvalidURLError)
def test_params_to_gcp_dict(self):
expected = {
'description': 'foo_url_map description',
'hostRules': [
{
'description': 'host rules description',
'hosts': [
'www.example.com',
'www2.example.com'
],
'pathMatcher': 'host_rules_path_matcher'
}
],
'name': 'foo_url_map_name',
'pathMatchers': [
{
'defaultService': 'bes-pathmatcher-one-default',
'description': 'path matcher one',
'name': 'path_matcher_one',
'pathRules': [
{
'paths': [
'/',
'/aboutus'
],
'service': 'my-one-bes'
}
]
},
{
'defaultService': 'bes-pathmatcher-two-default',
'description': 'path matcher two',
'name': 'path_matcher_two',
'pathRules': [
{
'paths': [
'/webapp',
'/graphs'
],
'service': 'my-two-bes'
}
]
}
]
}
actual = GCPUtils.params_to_gcp_dict(self.params_dict, 'url_map_name')
self.assertEqual(expected, actual)
def test_get_gcp_resource_from_methodId(self):
input_data = 'compute.urlMaps.list'
actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
self.assertEqual('urlMaps', actual)
input_data = None
actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
self.assertFalse(actual)
input_data = 666
actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
self.assertFalse(actual)
def test_get_entity_name_from_resource_name(self):
input_data = 'urlMaps'
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual('urlMap', actual)
input_data = 'targetHttpProxies'
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual('targetHttpProxy', actual)
input_data = 'globalForwardingRules'
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual('forwardingRule', actual)
input_data = ''
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual(None, actual)
input_data = 666
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual(None, actual)
def test_are_params_equal(self):
params1 = {'one': 1}
params2 = {'one': 1}
actual = GCPUtils.are_params_equal(params1, params2)
self.assertTrue(actual)
params1 = {'one': 1}
params2 = {'two': 2}
actual = GCPUtils.are_params_equal(params1, params2)
self.assertFalse(actual)
params1 = {'three': 3, 'two': 2, 'one': 1}
params2 = {'one': 1, 'two': 2, 'three': 3}
actual = GCPUtils.are_params_equal(params1, params2)
self.assertTrue(actual)
params1 = {
"creationTimestamp": "2017-04-21T11:19:20.718-07:00",
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
"description": "",
"fingerprint": "ickr_pwlZPU=",
"hostRules": [
{
"description": "",
"hosts": [
"*."
],
"pathMatcher": "path-matcher-one"
}
],
"id": "8566395781175047111",
"kind": "compute#urlMap",
"name": "newtesturlmap-foo",
"pathMatchers": [
{
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
"description": "path matcher one",
"name": "path-matcher-one",
"pathRules": [
{
"paths": [
"/data",
"/aboutus"
],
"service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
}
]
}
],
"selfLink": "https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/newtesturlmap-foo"
}
params2 = {
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
"hostRules": [
{
"description": "",
"hosts": [
"*."
],
"pathMatcher": "path-matcher-one"
}
],
"name": "newtesturlmap-foo",
"pathMatchers": [
{
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
"description": "path matcher one",
"name": "path-matcher-one",
"pathRules": [
{
"paths": [
"/data",
"/aboutus"
],
"service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
}
]
}
],
}
# params1 has fields that are excluded from comparison, params2 doesn't.
# Should be equal.
actual = GCPUtils.are_params_equal(params1, params2)
self.assertTrue(actual)
def test_filter_gcp_fields(self):
input_data = {
u'kind': u'compute#httpsHealthCheck',
u'description': u'',
u'timeoutSec': 5,
u'checkIntervalSec': 5,
u'port': 443,
u'healthyThreshold': 2,
u'host': u'',
u'requestPath': u'/',
u'unhealthyThreshold': 2,
u'creationTimestamp': u'2017-05-16T15:09:36.546-07:00',
u'id': u'8727093129334146639',
u'selfLink': u'https://www.googleapis.com/compute/v1/projects/myproject/global/httpsHealthChecks/myhealthcheck',
u'name': u'myhealthcheck'}
expected = {
'name': 'myhealthcheck',
'checkIntervalSec': 5,
'port': 443,
'unhealthyThreshold': 2,
'healthyThreshold': 2,
'host': '',
'timeoutSec': 5,
'requestPath': '/'}
actual = GCPUtils.filter_gcp_fields(input_data)
self.assertEquals(expected, actual)
| gpl-3.0 |
jonashagstedt/tornado | demos/s3server/s3server.py | 13 | 9651 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server based on local files.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but the server does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib
from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
def start(port, root_directory="/tmp/s3", bucket_depth=0):
"""Starts the mock S3 server on the given port at the given path."""
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.instance().start()
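# A minimal usage sketch: this call blocks inside the IOLoop until the
# process is interrupted (port and path are illustrative):
#
# start(8888, root_directory="/tmp/s3-test")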
class S3Application(web.Application):
"""Implementation of an S3-like storage server based on local files.
If bucket depth is given, we break files up into multiple directories
to prevent hitting file system limits on the number of files in each
directory. 1 means one level of directories, 2 means 2, etc.
"""
def __init__(self, root_directory, bucket_depth=0):
web.Application.__init__(self, [
(r"/", RootHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
])
self.directory = os.path.abspath(root_directory)
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.bucket_depth = bucket_depth
class BaseRequestHandler(web.RequestHandler):
SUPPORTED_METHODS = ("PUT", "GET", "DELETE")
def render_xml(self, value):
assert isinstance(value, dict) and len(value) == 1
self.set_header("Content-Type", "application/xml; charset=UTF-8")
name = value.keys()[0]
parts = []
parts.append('<' + escape.utf8(name) +
' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
self._render_parts(value.values()[0], parts)
parts.append('</' + escape.utf8(name) + '>')
self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
''.join(parts))
def _render_parts(self, value, parts=None):
# use None instead of a mutable default list, which would otherwise be
# shared across calls
if parts is None:
parts = []
if isinstance(value, (unicode, bytes)):
parts.append(escape.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
parts.append(str(value))
elif isinstance(value, datetime.datetime):
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
elif isinstance(value, dict):
for name, subvalue in value.iteritems():
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
parts.append('<' + escape.utf8(name) + '>')
self._render_parts(subsubvalue, parts)
parts.append('</' + escape.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r", value)
def _object_path(self, bucket, object_name):
if self.application.bucket_depth < 1:
return os.path.abspath(os.path.join(
self.application.directory, bucket, object_name))
hash = hashlib.md5(object_name).hexdigest()
path = os.path.abspath(os.path.join(
self.application.directory, bucket))
for i in range(self.application.bucket_depth):
path = os.path.join(path, hash[:2 * (i + 1)])
return os.path.join(path, object_name)
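# Sketch of the sharding scheme above (the hash value is illustrative): with
# bucket_depth=2 and md5(object_name) starting with "ab12", the object lands
# at <root>/mybucket/ab/ab12/mykey -- level i contributes a 2 * (i + 1)
# character prefix of the hash as a directory name.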
class RootHandler(BaseRequestHandler):
def get(self):
names = os.listdir(self.application.directory)
buckets = []
for name in names:
path = os.path.join(self.application.directory, name)
info = os.stat(path)
buckets.append({
"Name": name,
"CreationDate": datetime.datetime.utcfromtimestamp(
info.st_ctime),
})
self.render_xml({"ListAllMyBucketsResult": {
"Buckets": {"Bucket": buckets},
}})
class BucketHandler(BaseRequestHandler):
def get(self, bucket_name):
prefix = self.get_argument("prefix", u"")
marker = self.get_argument("marker", u"")
max_keys = int(self.get_argument("max-keys", 50000))
path = os.path.abspath(os.path.join(self.application.directory,
bucket_name))
terse = int(self.get_argument("terse", 0))
if not path.startswith(self.application.directory) or \
not os.path.isdir(path):
raise web.HTTPError(404)
object_names = []
for root, dirs, files in os.walk(path):
for file_name in files:
object_names.append(os.path.join(root, file_name))
skip = len(path) + 1
for i in range(self.application.bucket_depth):
skip += 2 * (i + 1) + 1
object_names = [n[skip:] for n in object_names]
object_names.sort()
contents = []
start_pos = 0
if marker:
start_pos = bisect.bisect_right(object_names, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(object_names, prefix, start_pos)
truncated = False
for object_name in object_names[start_pos:]:
if not object_name.startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
object_path = self._object_path(bucket_name, object_name)
c = {"Key": object_name}
if not terse:
info = os.stat(object_path)
c.update({
"LastModified": datetime.datetime.utcfromtimestamp(
info.st_mtime),
"Size": info.st_size,
})
contents.append(c)
marker = object_name
self.render_xml({"ListBucketResult": {
"Name": bucket_name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents,
}})
def put(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or \
os.path.exists(path):
raise web.HTTPError(403)
os.makedirs(path)
self.finish()
def delete(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or \
not os.path.isdir(path):
raise web.HTTPError(404)
if len(os.listdir(path)) > 0:
raise web.HTTPError(403)
os.rmdir(path)
self.set_status(204)
self.finish()
class ObjectHandler(BaseRequestHandler):
def get(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if not path.startswith(self.application.directory) or \
not os.path.isfile(path):
raise web.HTTPError(404)
info = os.stat(path)
self.set_header("Content-Type", "application/unknown")
self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
info.st_mtime))
object_file = open(path, "rb")
try:
self.finish(object_file.read())
finally:
object_file.close()
def put(self, bucket, object_name):
object_name = urllib.unquote(object_name)
bucket_dir = os.path.abspath(os.path.join(
self.application.directory, bucket))
if not bucket_dir.startswith(self.application.directory) or \
not os.path.isdir(bucket_dir):
raise web.HTTPError(404)
path = self._object_path(bucket, object_name)
if not path.startswith(bucket_dir) or os.path.isdir(path):
raise web.HTTPError(403)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
object_file = open(path, "w")
object_file.write(self.request.body)
object_file.close()
self.finish()
def delete(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if not path.startswith(self.application.directory) or \
not os.path.isfile(path):
raise web.HTTPError(404)
os.unlink(path)
self.set_status(204)
self.finish()
| apache-2.0 |
DevelopForLizardz/Liber | Liber/dist/Liber.app/Contents/Resources/__main__.py | 2 | 1070 | __author__ = 'Ryan Drew'
import logging
import logging.config
import wx
logging.config.dictConfig({
'version': 1,
'propagate': False, # note: not a recognized top-level dictConfig key; ignored
'disable_existing_loggers': False,
'formatters': {
'file': {
'format': '%(levelname)s:%(asctime)s:%(name)s:%(message)s',
'datefmt': "%m/%d/%Y %I:%M:%S %p"
},
'stream': {
'format': '%(levelname)s:%(name)s:%(message)s'
}
},
'handlers': {
'stream': {
'level': 'INFO',
'formatter': 'stream',
'class': 'logging.StreamHandler'
},
'file': {
'level': 'DEBUG',
'formatter': 'file',
'class': 'logging.FileHandler',
'filename': 'log.txt'
}
},
'loggers': {
'': {
'handlers': ['file', 'stream'],
'level': 'DEBUG',
},
}
})
logger = logging.getLogger(__name__)
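# With the dictConfig above, logger.debug(...) reaches only log.txt (the file
# handler accepts DEBUG), while logger.info(...) and above also reach the
# console via the stream handler.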
wxApp = wx.App()
import GUI
wxFrame = GUI.MainFrame(None)
wxApp.MainLoop()
# OSError raised when ffmpeg cannot be found
| mit |
tensorflow/ranking | tensorflow_ranking/python/metrics_impl_test.py | 1 | 60642 | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ranking metrics implementation."""
import math
import tensorflow as tf
from tensorflow_ranking.python import metrics_impl
def log2p1(x):
return math.log2(1. + x)
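# Worked values used throughout the expectations below: log2p1(1.) == 1.0,
# log2p1(2.) ~= 1.585 and log2p1(3.) == 2.0, so the DCG discount at rank 3 is
# exactly half the discount at rank 1.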
class MRRMetricTest(tf.test.TestCase):
def test_mrr_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 1.]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / 2.]])
def test_mrr_should_be_0_when_no_rel_item(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_mrr_should_be_0_when_no_rel_item_in_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 1.]]
metric = metrics_impl.MRRMetric(name=None, topn=1)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_mrr_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 2., 1.], [3., 2., 1.]]
labels = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]
metric_top1 = metrics_impl.MRRMetric(name=None, topn=1)
metric_top2 = metrics_impl.MRRMetric(name=None, topn=2)
metric_top6 = metrics_impl.MRRMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[1.], [0.], [0.]])
self.assertAllClose(output_top2, [[1.], [1. / 2.], [0.]])
self.assertAllClose(output_top6, [[1.], [1. / 2.], [1. / 3.]])
def test_mrr_should_ignore_padded_labels(self):
with tf.Graph().as_default():
scores = [[1., 2., 3.]]
labels = [[0., 1., -1.]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1.]])
def test_mrr_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 2., 3.]]
labels = [[0., 1., 0.]]
mask = [[True, True, False]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output, [[1.]])
def test_mrr_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 2., 3.], [1., 2.]])
labels = tf.ragged.constant([[0., 1., 0.], [0., 1.]])
metric = metrics_impl.MRRMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[1. / 2.], [1.]])
def test_mrr_should_give_a_value_for_each_list_in_batch_inputs(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 1., 1.]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / 2.], [1.]])
def test_mrr_weights_should_be_average_weight_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[1., 0., 0.], [0., 1., 1.]]
weights = [[2., 5., 1.], [1., 2., 3.]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[2.], [(2. + 3.) / 2.]])
def test_mrr_weights_should_be_0_without_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[2., 5., 1.]]
metric = metrics_impl.MRRMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
def test_mrr_weights_should_be_regardless_of_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [1., 3., 2.]]
labels = [[1., 0., 1.], [0., 1., 1.]]
weights = [[2., 0., 5.], [1., 4., 2.]]
metric = metrics_impl.MRRMetric(name=None, topn=2)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(5. + 2.) / 2.], [(2. + 4.) / 2.]])
class ARPMetricTest(tf.test.TestCase):
def test_arp_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 1.]]
metric = metrics_impl.ARPMetric(name=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[2.]])
def test_arp_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 1., 2.]]
metric = metrics_impl.ARPMetric(name=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[2.],
[((1. * 2.) + (2. * 1.)) / (2. + 1.)]])
def test_arp_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.ARPMetric(name=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_arp_should_ignore_padded_items(self):
with tf.Graph().as_default():
scores = [[1., 5., 4., 3., 2.]]
labels = [[1., -1., 1., -1., 0.]]
metric = metrics_impl.ARPMetric(name=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[2.]])
def test_arp_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 5., 4., 3., 2.]]
labels = [[1., 0., 1., 1., 0.]]
mask = [[True, False, True, False, True]]
metric = metrics_impl.ARPMetric(name=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output, [[2.]])
def test_arp_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 3., 2., 4.], [1., 2., 3.]])
labels = tf.ragged.constant([[0., 0., 1., 0.], [0., 1., 2.]])
metric = metrics_impl.ARPMetric(name=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[3.],
[((1. * 2.) + (2. * 1.)) / (2. + 1.)]])
def test_arp_should_weight_items_with_weights_and_labels(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 1., 2.]]
weights = [[1., 2., 3.], [4., 5., 6.]]
metric = metrics_impl.ARPMetric(name=None)
output, _ = metric.compute(labels, scores, weights)
self.assertAllClose(
output,
[[(2.)], [(2. * (5. / (5. + 6. * 2.)) +
1. * (6. * 2. / (5. + 6. * 2.)))]])
def test_arp_weights_should_be_sum_of_weighted_labels(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 1., 2.]]
weights = [[1., 2., 3.], [4., 5., 6.]]
metric = metrics_impl.ARPMetric(name=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[3.],
[5. + 6. * 2.]])
class RecallMetricTest(tf.test.TestCase):
def test_recall_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 1.]]
metric = metrics_impl.RecallMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1.]])
def test_recall_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.RecallMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_recall_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[5., 4., 3., 2., 1.]]
labels = [[0., 1., 1., 0., 1.]]
mask = [[True, False, True, True, True]]
metric = metrics_impl.RecallMetric(name=None, topn=3)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output, [[1. / 2.]])
def test_recall_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 3., 2., 4.], [1., 2., 3.]])
labels = tf.ragged.constant([[0., 0., 1., 0.], [1., 0., 2.]])
metric = metrics_impl.RecallMetric(name=None, topn=2, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[0. / 1.], [1. / 2.]])
def test_recall_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 1.]]
metric_top1 = metrics_impl.RecallMetric(name=None, topn=1)
metric_top2 = metrics_impl.RecallMetric(name=None, topn=2)
metric_top6 = metrics_impl.RecallMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[0.]])
self.assertAllClose(output_top2, [[1.]])
self.assertAllClose(output_top6, [[1.]])
def test_recall_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 3., 4.]]
labels = [[1., 0., 1.], [0., 1., 1.]]
metric = metrics_impl.RecallMetric(name=None, topn=2)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / 2.], [1.]])
def test_recall_weights_should_be_avg_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 1., 0.]]
weights = [[3., 9., 2.]]
metric = metrics_impl.RecallMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 9.) / 2.]])
def test_recall_weights_should_ignore_graded_relevance(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[4., 0., 2.]]
weights = [[3., 9., 2.]]
metric = metrics_impl.RecallMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 2.) / 2.]])
def test_recall_weights_should_ignore_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 1., 0.]]
weights = [[3., 9., 2.]]
metric = metrics_impl.RecallMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 9.) / 2.]])
def test_recall_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.RecallMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, None)
self.assertAllClose(output_weights, [[0.]])
class PrecisionMetricTest(tf.test.TestCase):
def test_precision_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 1.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / 3.]])
def test_precision_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_precision_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.], [4., 1., 3., 2.]]
labels = [[0., 0., 1., 1.], [0., 0., 1., 0.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[2. / 4.], [1. / 4.]])
def test_precision_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 2., 1.], [3., 2., 1.]]
labels = [[1., 0., 1.], [0., 1., 0.], [0., 0., 1.]]
metric_top1 = metrics_impl.PrecisionMetric(name=None, topn=1)
metric_top2 = metrics_impl.PrecisionMetric(name=None, topn=2)
metric_top6 = metrics_impl.PrecisionMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[1. / 1.], [0. / 1.], [0. / 1.]])
self.assertAllClose(output_top2, [[1. / 2.], [1. / 2.], [0. / 2.]])
self.assertAllClose(output_top6, [[2. / 3.], [1. / 3.], [1. / 3.]])
def test_precision_should_ignore_padded_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.], [4., 1., 3., 2.]]
labels = [[0., 0., 1., -1.], [0., -1., 1., -1.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / 3.], [1. / 2.]])
def test_precision_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.], [4., 1., 3., 2.]]
labels = [[0., 0., 1., 0.], [0., 1., 1., 0.]]
mask = [[True, True, True, False], [True, False, True, False]]
metric = metrics_impl.PrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output, [[1. / 3.], [1. / 2.]])
def test_precision_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 3., 2., 4.], [1., 2., 3.]])
labels = tf.ragged.constant([[0., 0., 1., 0.], [1., 0., 2.]])
metric = metrics_impl.PrecisionMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[1. / 4.], [2. / 3.]])
def test_precision_weights_should_be_avg_of_weights_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[13., 7., 29.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(13. + 29.) / 2.]])
def test_precision_weights_should_ignore_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 1., 0.]]
weights = [[3., 7., 15.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 7.) / 2.]])
def test_precision_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[3., 7., 15.]]
metric = metrics_impl.PrecisionMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
class MeanAveragePrecisionMetricTest(tf.test.TestCase):
def test_map_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 1., 0.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. / 2.) / 1.]])
def test_map_should_treat_graded_relevance_as_binary_relevance(self):
with tf.Graph().as_default():
scores = [[3., 4., 1., 2.]]
labels = [[0., 2., 1., 3.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. + 2. / 3. + 3. / 4.) / 3.]])
def test_map_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_map_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 3., 2.]]
labels = [[0., 0., 1.], [0., 1., 1.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. / 2.) / 1.],
[(1. / 1. + 2. / 2.) / 2.]])
def test_map_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 4., 3., 2.], [1., 3., 2.]])
labels = tf.ragged.constant([[0., 0., 1., 0.], [1., 1., 0.]])
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None,
ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[(1. / 2.) / 1.],
[(1. / 1. + 2. / 3.) / 2.]])
def test_map_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 2., 1.], [3., 2., 1.]]
labels = [[1., 0., 2.], [0., 1., 0.], [0., 0., 1.]]
metric_top1 = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=1)
metric_top2 = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=2)
metric_top6 = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[1. / 2.],
[0. / 1.],
[0. / 1.]])
self.assertAllClose(output_top2, [[1. / 2.],
[(1. / 2.) / 1.],
[0. / 1.]])
self.assertAllClose(output_top6, [[(1. + 2. / 3.) / 2.],
[(1. / 2.) / 1.],
[(1. / 3.) / 1.]])
def test_map_weights_should_be_avg_of_weights_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[13., 7., 29.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(13. + 29.) / 2.]])
def test_map_weights_should_ignore_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 1., 0.]]
weights = [[3., 7., 15.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 7.) / 2.]])
def test_map_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[3., 7., 15.]]
metric = metrics_impl.MeanAveragePrecisionMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
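# The NDCG tests below use the default gain 2**label - 1 and rank discount
# 1 / log2(1 + rank); log2p1 is assumed to be a helper defined earlier in this
# file that computes log2(x + 1). The sketch below (illustrative only, not
# part of metrics_impl) reproduces the per-list value the tests expect; it
# ignores topn, weights, masks, and score ties.
def _reference_ndcg(labels, scores):
  import math
  ranked = [l for _, l in sorted(zip(scores, labels), reverse=True)]
  def dcg(ls):
    # rank r is 1-based, so the discount at position r is 1 / log2(r + 1).
    return sum((2. ** l - 1.) / math.log(r + 2., 2.)
               for r, l in enumerate(ls))
  max_dcg = dcg(sorted(labels, reverse=True))
  return dcg(ranked) / max_dcg if max_dcg > 0. else 0.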
class NDCGMetricTest(tf.test.TestCase):
def test_ndcg_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 1., 0.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
dcg = 1. / log2p1(2.)
max_dcg = 1. / log2p1(1.)
self.assertAllClose(output, [[dcg / max_dcg]])
def test_ndcg_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_ndcg_should_operate_on_graded_relevance(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 3., 1., 0.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
dcg = (2. ** 3. - 1.) / log2p1(2.) + 1. / log2p1(3.)
max_dcg = (2. ** 3. - 1.) / log2p1(1.) + 1. / log2p1(2.)
self.assertAllClose(output, [[dcg / max_dcg]])
def test_ndcg_should_operate_on_graded_relevance_with_custom_gain_fn(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 3., 1., 0.]]
gain_fn = lambda label: label / 2.
metric = metrics_impl.NDCGMetric(name=None, topn=None, gain_fn=gain_fn)
output, _ = metric.compute(labels, scores, None)
dcg = (3. / 2.) / log2p1(2.) + (1. / 2.) / log2p1(3.)
max_dcg = (3. / 2.) / log2p1(1.) + (1. / 2.) / log2p1(2.)
self.assertAllClose(output, [[dcg / max_dcg]])
def test_ndcg_should_use_custom_rank_discount_fn(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 3., 1., 0.]]
rank_discount_fn = lambda rank: 1.0 / (rank + 10.0)
metric = metrics_impl.NDCGMetric(name=None, topn=None,
rank_discount_fn=rank_discount_fn)
output, _ = metric.compute(labels, scores, None)
dcg = (2. ** 3. - 1.) / (2. + 10.) + 1. / (3. + 10.)
max_dcg = (2. ** 3. - 1.) / (1. + 10.) + 1. / (2. + 10.)
self.assertAllClose(output, [[dcg / max_dcg]])
def test_ndcg_should_ignore_padded_items(self):
with tf.Graph().as_default():
scores = [[1., 4., 3., 2.]]
labels = [[2., -1., 1., 0.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
dcg = (2. ** 2. - 1.) / log2p1(3.) + 1. / log2p1(1.)
max_dcg = (2. ** 2. - 1.) / log2p1(1.) + 1. / log2p1(2.)
self.assertAllClose(output, [[dcg / max_dcg]])
def test_ndcg_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 4., 3., 2.]]
labels = [[2., 2., 1., 0.]]
mask = [[True, False, True, True]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
dcg = (2. ** 2. - 1.) / log2p1(3.) + 1. / log2p1(1.)
max_dcg = (2. ** 2. - 1.) / log2p1(1.) + 1. / log2p1(2.)
self.assertAllClose(output, [[dcg / max_dcg]])
def test_ndcg_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[3., 2., 1.], [4., 1., 2., 3.]])
labels = tf.ragged.constant([[0., 1., 0.], [1., 1., 0., 0.]])
metric = metrics_impl.NDCGMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
dcg = [1. / log2p1(2.), 1. / log2p1(1.) + 1. / log2p1(4.)]
max_dcg = [1. / log2p1(1.), 1. / log2p1(1.) + 1. / log2p1(2.)]
self.assertAllClose(output,
[[dcg[0] / max_dcg[0]], [dcg[1] / max_dcg[1]]])
def test_ndcg_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 1., 2.]]
labels = [[0., 1., 0.], [1., 1., 0.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
dcg = [1. / log2p1(2.), 1. / log2p1(1.) + 1. / log2p1(3.)]
max_dcg = [1. / log2p1(1.), 1. / log2p1(1.) + 1. / log2p1(2.)]
self.assertAllClose(output,
[[dcg[0] / max_dcg[0]], [dcg[1] / max_dcg[1]]])
def test_ndcg_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 2., 1.], [3., 2., 1.]]
labels = [[1., 0., 2.], [0., 1., 0.], [0., 0., 1.]]
metric_top1 = metrics_impl.NDCGMetric(name=None, topn=1)
metric_top2 = metrics_impl.NDCGMetric(name=None, topn=2)
metric_top6 = metrics_impl.NDCGMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
max_dcg_top1 = [(2. ** 2. - 1.) / log2p1(1.),
1. / log2p1(1.),
1. / log2p1(1.)]
max_dcg = [(2. ** 2. - 1.) / log2p1(1.) + 1. / log2p1(2.),
1. / log2p1(1.),
1. / log2p1(1.)]
self.assertAllClose(output_top1,
[[(1. / log2p1(1.)) / max_dcg_top1[0]],
[0. / max_dcg_top1[1]],
[0. / max_dcg_top1[2]]])
self.assertAllClose(output_top2,
[[(1. / log2p1(1.)) / max_dcg[0]],
[(1. / log2p1(2.)) / max_dcg[1]],
[0. / max_dcg[2]]])
self.assertAllClose(output_top6,
[[(1. / log2p1(1.) + (2. ** 2. - 1.) / log2p1(3.)) /
max_dcg[0]],
[(1. / log2p1(2.)) / max_dcg[1]],
[(1. / log2p1(3.)) / max_dcg[2]]])
def test_ndcg_weights_should_be_average_of_weighted_gain(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[3., 7., 9.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(
output_weights,
[[(1. * 3. + (2. ** 2. - 1.) * 9.) / (1. + (2. ** 2. - 1.))]])
def test_ndcg_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[2., 4., 4.]]
metric = metrics_impl.NDCGMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
def test_ndcg_weights_should_use_custom_gain_fn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[3., 7., 9.]]
gain_fn = lambda label: label + 5.
metric = metrics_impl.NDCGMetric(name=None, topn=None, gain_fn=gain_fn)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(
output_weights,
[[((1. + 5.) * 3. + (0. + 5.) * 7. + (2. + 5.) * 9.) /
((1. + 5.) + (0. + 5.) + (2. + 5.))]])
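# DCGMetric below is the unnormalized counterpart of NDCG: the same default
# gain and rank discount apply, but the sum is not divided by the ideal DCG.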
class DCGMetricTest(tf.test.TestCase):
def test_dcg_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 1., 0.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / log2p1(2.)]])
def test_dcg_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_dcg_should_operate_on_graded_relevance(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 3., 1., 0.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[(2. ** 3. - 1.) / log2p1(2.) + 1. / log2p1(3.)]])
def test_dcg_should_operate_on_graded_relevance_with_custom_gain_fn(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 3., 1., 0.]]
gain_fn = lambda label: label / 2.
metric = metrics_impl.DCGMetric(name=None, topn=None, gain_fn=gain_fn)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[(3. / 2.) / log2p1(2.) + (1. / 2.) / log2p1(3.)]])
def test_dcg_should_use_custom_rank_discount_fn(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 3., 1., 0.]]
rank_discount_fn = lambda rank: 1.0 / (rank + 10.0)
metric = metrics_impl.DCGMetric(name=None, topn=None,
rank_discount_fn=rank_discount_fn)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[(2. ** 3. - 1.) / (2. + 10.) + 1. / (3. + 10.)]])
def test_dcg_should_ignore_padded_items(self):
with tf.Graph().as_default():
scores = [[1., 4., 3., 2.]]
labels = [[2., -1., 1., 0.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[(2. ** 2. - 1.) / log2p1(3.) + 1. / log2p1(1.)]])
def test_dcg_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 4., 3., 2.]]
labels = [[2., 2., 1., 0.]]
mask = [[True, False, True, True]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output,
[[(2. ** 2. - 1.) / log2p1(3.) + 1. / log2p1(1.)]])
def test_dcg_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[3., 2., 1.], [4., 1., 2., 3.]])
labels = tf.ragged.constant([[0., 1., 0.], [1., 1., 0., 0.]])
metric = metrics_impl.DCGMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[1. / log2p1(2.)],
[1. / log2p1(1.) + 1. / log2p1(4.)]])
def test_dcg_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 1., 2.]]
labels = [[0., 1., 0.], [1., 1., 0.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / log2p1(2.)],
[1. / log2p1(1.) + 1. / log2p1(3.)]])
def test_dcg_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 2., 1.], [3., 2., 1.]]
labels = [[1., 0., 2.], [0., 1., 0.], [0., 0., 1.]]
metric_top1 = metrics_impl.DCGMetric(name=None, topn=1)
metric_top2 = metrics_impl.DCGMetric(name=None, topn=2)
metric_top6 = metrics_impl.DCGMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[(1. / log2p1(1.))], [0.], [0.]])
self.assertAllClose(output_top2,
[[(1. / log2p1(1.))], [(1. / log2p1(2.))], [0.]])
self.assertAllClose(output_top6,
[[(1. / log2p1(1.) + (2. ** 2. - 1.) / log2p1(3.))],
[(1. / log2p1(2.))],
[(1. / log2p1(3.))]])
def test_dcg_weights_should_be_average_of_weighted_gain(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[3., 7., 9.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(
output_weights,
[[(1. * 3. + (2. ** 2. - 1.) * 9.) / (1. + (2. ** 2. - 1.))]])
def test_dcg_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[2., 4., 4.]]
metric = metrics_impl.DCGMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
def test_dcg_weights_should_use_custom_gain_fn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[4., 1., 9.]]
gain_fn = lambda label: label + 3.
metric = metrics_impl.DCGMetric(name=None, topn=None, gain_fn=gain_fn)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(
output_weights,
[[((1. + 3.) * 4. + (0. + 3.) * 1. + (2. + 3.) * 9.) /
((1. + 3.) + (0. + 3.) + (2. + 3.))]])
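# OPA (ordered pair accuracy) below is the fraction of correctly ordered
# pairs: among all pairs (i, j) with labels[i] > labels[j], the share with
# scores[i] > scores[j]. Per the weight tests, each pair carries the weight of
# its more relevant item, and the returned weight is the total (weighted)
# pair count.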
class OPAMetricTest(tf.test.TestCase):
def test_opa_should_return_correct_pair_matrix(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 1., 0.]]
metric = metrics_impl.OPAMetric(name=None)
output, output_weights = metric.compute(labels, scores, None)
# The correctly ordered pair is:
# scores[1] > scores[2]
self.assertAllClose(output, [[1. / 2.]])
self.assertAllClose(output_weights, [[2.]])
def test_opa_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.OPAMetric(name=None)
output, output_weights = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
self.assertAllClose(output_weights, [[0.]])
def test_opa_should_operate_on_graded_relevance(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[1., 3., 0., 1.]]
metric = metrics_impl.OPAMetric(name=None)
output, output_weights = metric.compute(labels, scores, None)
# The correctly ordered pairs are:
# scores[0] > scores[2]
# scores[1] > scores[2], scores[1] > scores[3]
self.assertAllClose(output, [[3. / 5.]])
self.assertAllClose(output_weights, [[5.]])
def test_opa_should_ignore_padded_items(self):
with tf.Graph().as_default():
scores = [[4., 1., 2., 3.]]
labels = [[2., -1., 1., 0.]]
metric = metrics_impl.OPAMetric(name=None)
output, output_weights = metric.compute(labels, scores, None)
# The correctly ordered pairs are:
# scores[0] > scores[2], scores[0] > scores[3]
self.assertAllClose(output, [[2. / 3.]])
self.assertAllClose(output_weights, [[3.]])
def test_opa_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[4., 1., 2., 3.]]
labels = [[2., 1., 1., 0.]]
mask = [[True, False, True, True]]
metric = metrics_impl.OPAMetric(name=None)
output, output_weights = metric.compute(labels, scores, None, mask=mask)
# The correctly ordered pairs are:
# scores[0] > scores[2], scores[0] > scores[3]
self.assertAllClose(output, [[2. / 3.]])
self.assertAllClose(output_weights, [[3.]])
def test_opa_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[3., 2., 1.], [4., 1., 2., 3.]])
labels = tf.ragged.constant([[0., 1., 0.], [1., 0., 1., 0.]])
metric = metrics_impl.OPAMetric(name=None, ragged=True)
output, output_weights = metric.compute(labels, scores)
# The correctly ordered pairs are:
# list 1: scores[1] > scores[2]
# list 2: scores[0] > scores[1], scores[0] > scores[3],
# scores[2] > scores[1]
self.assertAllClose(output, [[1. / 2.], [3. / 4.]])
self.assertAllClose(output_weights, [[2.], [4.]])
def test_opa_should_return_correct_pair_matrix_per_list(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 1., 2.]]
labels = [[0., 1., 0.], [1., 0., 1.]]
metric = metrics_impl.OPAMetric(name=None)
output, output_weights = metric.compute(labels, scores, None)
# The correctly ordered pairs are:
# list 1: scores[1] > scores[2]
# list 2: scores[0] > scores[1], scores[2] > scores[1]
self.assertAllClose(output, [[1. / 2.], [2. / 2.]])
self.assertAllClose(output_weights, [[2.], [2.]])
def test_opa_should_weight_pairs_with_weights(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[3., 7., 9.]]
metric = metrics_impl.OPAMetric(name=None)
output, _ = metric.compute(labels, scores, weights)
# The correctly ordered pair is:
# scores[2] > scores[0] (with weight 9.)
self.assertAllClose(output, [[9. / (9. + 9. + 3.)]])
def test_opa_weights_should_be_sum_of_pair(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[3., 7., 9.]]
metric = metrics_impl.OPAMetric(name=None)
_, output_weights = metric.compute(labels, scores, weights)
# The OPA weights are based on the label pairs:
# labels[0] > labels[1] (with weight 3.)
# labels[2] > labels[0] (with weight 9.)
# labels[2] > labels[1] (with weight 9.)
self.assertAllClose(output_weights, [[9. + 9. + 3.]])
def test_opa_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[2., 4., 4.]]
metric = metrics_impl.OPAMetric(name=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
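# Precision-IA (intent-aware precision) below takes per-subtopic labels of
# shape [batch, list_size, num_subtopics]: it counts relevant
# (item, subtopic) pairs in the top n and divides by n times the number of
# subtopics that have at least one relevant item.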
class PrecisionIAMetricTest(tf.test.TestCase):
def test_precisionia_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0.], [1., 0.], [0., 1.]]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[2. / (2. * 3.)]])
def test_precisionia_should_handle_single_subtopic(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0.], [1.], [0.]]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1. / (1. * 3.)]])
def test_precisionia_should_handle_multiple_subtopics(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0., 1., 0.], [1., 1., 1., 1.], [0., 1., 1., 0.]]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[7. / (4. * 3.)]])
def test_precisionia_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 4., 2.]]
labels = [[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]]
mask = [[True, True, False, True]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output, [[2. / (2. * 3.)]])
def test_precisionia_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 3., 4., 2.],
[1., 3., 2.]])
labels = tf.ragged.constant([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]],
[[0., 0.], [1., 0.], [0., 1.]]],
inner_shape=(2,))
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[1. / 2.],
[2. / (2. * 3.)]])
def test_precisionia_should_ignore_subtopics_without_rel(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.]]
labels = [[[0., 0.], [0., 1.], [0., 1.], [0., 0.]]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[2. / (1. * 4.)]])
def test_precisionia_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0.], [0., 0.], [0., 0.]]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_precisionia_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.], [4., 1., 3., 2.]]
labels = [[[0., 0.], [0., 0.], [1., 1.], [1., 0.]],
[[1., 0.], [1., 1.], [1., 0.], [0., 1.]]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[3. / (2. * 4.)], [5. / (2. * 4.)]])
def test_precisionia_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.], [3., 2., 1.], [3., 2., 1.], [3., 2., 1.]]
labels = [[[1., 1.], [0., 0.], [1., 0.]],
[[0., 0.], [0., 1.], [1., 0.]],
[[0., 1.], [0., 0.], [1., 0.]],
[[1., 1.], [1., 1.], [1., 1.]]]
metric_top1 = metrics_impl.PrecisionIAMetric(name=None, topn=1)
metric_top2 = metrics_impl.PrecisionIAMetric(name=None, topn=2)
metric_top6 = metrics_impl.PrecisionIAMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[2. / (2. * 1.)],
[0. / (2. * 1.)],
[1. / (2. * 1.)],
[2. / (2. * 1.)]])
self.assertAllClose(output_top2, [[2. / (2. * 2.)],
[1. / (2. * 2.)],
[1. / (2. * 2.)],
[4. / (2. * 2.)]])
self.assertAllClose(output_top6, [[3. / (2. * 3.)],
[2. / (2. * 3.)],
[2. / (2. * 3.)],
[6. / (2. * 3.)]])
def test_precisionia_weights_should_be_avg_of_weights_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 1.], [0., 0.], [1., 1.]]]
weights = [[3., 7., 9.]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 9.) / 2.]])
def test_precisionia_weights_should_ignore_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[1., 1.], [1., 0.], [0., 0.]]]
weights = [[3., 4., 5.]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 4.) / 2.]])
def test_precisionia_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0.], [0., 0.], [0., 0.]]]
weights = [[3., 7., 2.]]
metric = metrics_impl.PrecisionIAMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
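# alpha-DCG below rewards subtopic novelty: an item relevant to a subtopic
# contributes (1 - alpha) ** k / log2(1 + rank), where k is how many
# higher-ranked items already covered that subtopic. The default alpha
# exercised by these tests is 0.5.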
class AlphaDCGMetricTest(tf.test.TestCase):
def test_alphadcg_should_be_single_value(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0.], [1., 0.], [0., 1.]]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.)]])
def test_alphadcg_should_handle_single_subtopic(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0.], [0.], [1.]]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. * (1. - 0.5) ** 0.) / log2p1(2.)]])
def test_alphadcg_should_handle_multiple_subtopics(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 1., 0., 0.], [1., 1., 0., 1.], [0., 1., 1., 0.]]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 1.) / log2p1(2.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.) +
(1. * (1. - 0.5) ** 2.) / log2p1(3.)]])
def test_alphadcg_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0.], [0., 0.], [0., 0.]]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_alphadcg_should_ignore_masked_items(self):
with tf.Graph().as_default():
scores = [[1., 4., 3., 2., 2.]]
labels = [[[0., 0.], [1., 1.], [1., 0.], [0., 1.], [1., 0.]]]
mask = [[True, False, True, True, False]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None, mask=mask)
self.assertAllClose(output, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.)]])
def test_alphadcg_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 3., 2., 4.],
[1., 3., 2.]])
labels = tf.ragged.constant([[[1., 0.], [1., 1.], [0., 1.], [1., 0.]],
[[0., 0.], [1., 0.], [0., 1.]]],
inner_shape=(2,))
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.) +
(1. * (1. - 0.5) ** 1.) / log2p1(2.) +
(1. * (1. - 0.5) ** 1.) / log2p1(3.) +
(1. * (1. - 0.5) ** 2.) / log2p1(4.)],
[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.)]])
def test_alphadcg_should_be_single_value_per_list(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.], [4., 1., 3., 2.]]
labels = [[[0., 0.], [0., 0.], [1., 1.], [1., 0.]],
[[1., 0.], [1., 1.], [1., 0.], [0., 1.]]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(3.) +
(1. * (1. - 0.5) ** 1.) / log2p1(3.)],
[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 1.) / log2p1(2.) +
(1. * (1. - 0.5) ** 0.) / log2p1(3.) +
(1. * (1. - 0.5) ** 1.) / log2p1(4.) +
(1. * (1. - 0.5) ** 2.) / log2p1(4.)]])
def test_alphadcg_should_handle_custom_alpha(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.]]
labels = [[[1., 1.], [0., 1.], [0., 1.], [1., 0.]]]
metric_alpha2 = metrics_impl.AlphaDCGMetric(name=None, topn=None,
alpha=0.2)
metric_alpha95 = metrics_impl.AlphaDCGMetric(name=None, topn=None,
alpha=0.95)
output_alpha2, _ = metric_alpha2.compute(labels, scores, None)
output_alpha95, _ = metric_alpha95.compute(labels, scores, None)
self.assertAllClose(output_alpha2,
[[(1. * (1. - 0.2) ** 0.) / log2p1(1.) +
(1. * (1. - 0.2) ** 0.) / log2p1(2.) +
(1. * (1. - 0.2) ** 1.) / log2p1(3.) +
(1. * (1. - 0.2) ** 1.) / log2p1(4.) +
(1. * (1. - 0.2) ** 2.) / log2p1(4.)]])
self.assertAllClose(output_alpha95,
[[(1. * (1. - 0.95) ** 0.) / log2p1(1.) +
(1. * (1. - 0.95) ** 0.) / log2p1(2.) +
(1. * (1. - 0.95) ** 1.) / log2p1(3.) +
(1. * (1. - 0.95) ** 1.) / log2p1(4.) +
(1. * (1. - 0.95) ** 2.) / log2p1(4.)]])
def test_alphadcg_should_handle_custom_rank_discount_fn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2., 4.]]
labels = [[[1., 0.], [1., 1.], [0., 1.], [1., 0.]]]
rank_discount_fn = lambda rank: 1. / (10. + rank)
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None,
rank_discount_fn=rank_discount_fn)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(1. * (1. - 0.5) ** 0.) / (10. + 1.) +
(1. * (1. - 0.5) ** 0.) / (10. + 2.) +
(1. * (1. - 0.5) ** 1.) / (10. + 2.) +
(1. * (1. - 0.5) ** 1.) / (10. + 3.) +
(1. * (1. - 0.5) ** 2.) / (10. + 4.)]])
def test_alphadcg_should_handle_topn(self):
with tf.Graph().as_default():
scores = [[3., 2., 1., 4., 5.]]
labels = [[[1., 0.], [0., 0.], [1., 0.], [1., 1.], [0., 1.]]]
metric_top1 = metrics_impl.AlphaDCGMetric(name=None, topn=1)
metric_top2 = metrics_impl.AlphaDCGMetric(name=None, topn=2)
metric_top6 = metrics_impl.AlphaDCGMetric(name=None, topn=6)
output_top1, _ = metric_top1.compute(labels, scores, None)
output_top2, _ = metric_top2.compute(labels, scores, None)
output_top6, _ = metric_top6.compute(labels, scores, None)
self.assertAllClose(output_top1, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.)]])
self.assertAllClose(output_top2, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.) +
(1. * (1. - 0.5) ** 1.) / log2p1(2.)]])
self.assertAllClose(output_top6, [[(1. * (1. - 0.5) ** 0.) / log2p1(1.) +
(1. * (1. - 0.5) ** 0.) / log2p1(2.) +
(1. * (1. - 0.5) ** 1.) / log2p1(2.) +
(1. * (1. - 0.5) ** 1.) / log2p1(3.) +
(1. * (1. - 0.5) ** 2.) / log2p1(5.)]])
def test_alphadcg_weights_should_be_avg_of_weights_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 1.], [0., 0.], [1., 1.]]]
weights = [[3., 7., 9.]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 9.) / 2.]])
def test_alphadcg_weights_should_ignore_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[1., 1.], [1., 0.], [0., 0.]]]
weights = [[3., 4., 5.]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 4.) / 2.]])
def test_alphadcg_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[[0., 0.], [0., 0.], [0., 0.]]]
weights = [[3., 7., 2.]]
metric = metrics_impl.AlphaDCGMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
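# bpref below measures how often relevant items are ranked above judged
# non-relevant ones. The sketch below (illustrative only, not part of
# metrics_impl) reproduces the per-list value the tests expect; it omits
# topn, weights, and masks, and assumes no score ties.
def _reference_bpref(labels, scores, use_trec_version=True):
  kept = [(s, l) for s, l in zip(scores, labels) if l >= 0.]  # drop padding
  ranked = [l > 0. for _, l in sorted(kept, reverse=True)]  # binarize labels
  num_rel = sum(ranked)
  num_nonrel = len(ranked) - num_rel
  if num_rel == 0:
    return 0.
  # TREC_EVAL-8.0+ divides penalties by min(R, N); the non-TREC variant by R.
  denom = float(min(num_rel, num_nonrel) if use_trec_version else num_rel)
  total, nonrel_above = 0., 0.
  for is_rel in ranked:
    if is_rel:
      total += 1. - (min(nonrel_above, denom) / denom if denom else 0.)
    else:
      nonrel_above += 1.
  return total / num_rel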
class BPrefMetricTest(tf.test.TestCase):
def test_bpref_should_be_a_single_value(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 1., 0., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[1. / 2. * ((1. - 1. / 2.) + (1. - 2. / 2.))]])
def test_bpref_should_convert_graded_relevance_to_binary(self):
with tf.Graph().as_default():
scores = [[4., 3., 2., 1.]]
labels = [[0., 1., 0., 2.]] # should be equivalent to [[0., 1., 0., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[1. / 2. * ((1. - 1. / 2.) + (1. - 2. / 2.))]])
def test_bpref_should_be_zero_when_only_irrelevant_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 0., 0.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_trec_bpref_should_be_one_when_only_relevant_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[1., 1., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1.]])
def test_non_trec_bpref_should_be_one_when_only_relevant_items(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[1., 1., 1.]]
metric = metrics_impl.BPrefMetric(
name=None, topn=None, use_trec_version=False)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[1.]])
def test_bpref_should_be_zero_without_input_items(self):
with tf.Graph().as_default():
scores = [[]]
labels = [[]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_trec_bpref_divides_with_min_and_is_0_when_one_irrelevant_first(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 1., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_bpref_divides_with_r_when_use_trec_version_is_false(self):
with tf.Graph().as_default():
scores = [[3., 2., 1.]]
labels = [[0., 1., 1.]]
metric = metrics_impl.BPrefMetric(
name=None, topn=None, use_trec_version=False)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.5]])
def test_bpref_should_be_0_when_no_rel_item_in_topn_but_relevant_later(self):
with tf.Graph().as_default():
scores = [[3., 2., 1., 0.]]
labels = [[0., 0., 0., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=3)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[0.]])
def test_non_trec_bpref_should_handle_topn(self):
with tf.Graph().as_default():
# This is the example case in bpref_bug in TREC_EVAL-8.0+
scores = [[5., 4., 3., 2., 1., 0., 0., 0., 0., 0.]]
labels = [[0., 1., 1., 1., 1., 0., 0., 0., 1., 1.]]
metric = metrics_impl.BPrefMetric(
name=None, topn=5, use_trec_version=False)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(4. * (1. - 1. / 6.)) / 6.]])
def test_trec_bpref_should_handle_topn(self):
with tf.Graph().as_default():
# This is the example case in bpref_bug in TREC_EVAL-8.0+
scores = [[5., 4., 3., 2., 1., 0., 0., 0., 0., 0.]]
labels = [[0., 1., 1., 1., 1., 0., 0., 0., 1., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=5)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output, [[(4. * (1. - 1. / 4.)) / 6.]]) # = 0.5
def test_bpref_should_ignore_padded_items(self):
with tf.Graph().as_default():
scores = [[6., 5., 4., 3., 2., 1.]]
labels = [[-1., 0., -1., 1., 0., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose(output,
[[1. / 2. * ((1. - 1. / 2.) + (1. - 2. / 2.))]])
def test_bpref_should_handle_ragged_inputs(self):
with tf.Graph().as_default():
scores = tf.ragged.constant([[1., 3., 2., 4.], [1., 2., 3.]])
labels = tf.ragged.constant([[0., 0., 1., 0.], [0., 1., 1.]])
metric = metrics_impl.BPrefMetric(name=None, topn=None, ragged=True)
output, _ = metric.compute(labels, scores)
self.assertAllClose(output, [[0.], [1.]])
def test_bpref_weights_should_be_avg_of_weights_of_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 0., 2.]]
weights = [[13., 7., 29.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(13. + 29.) / 2.]])
def test_bpref_weights_should_ignore_topn(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[1., 1., 0.]]
weights = [[3., 7., 15.]]
metric = metrics_impl.BPrefMetric(name=None, topn=1)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[(3. + 7.) / 2.]])
def test_bpref_weights_should_be_0_when_no_rel_items(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.]]
labels = [[0., 0., 0.]]
weights = [[3., 7., 15.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
_, output_weights = metric.compute(labels, scores, weights)
self.assertAllClose(output_weights, [[0.]])
def test_bpref_should_give_a_value_for_each_list_in_batch_inputs(self):
with tf.Graph().as_default():
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 1., 1.]]
metric = metrics_impl.BPrefMetric(name=None, topn=None)
output, _ = metric.compute(labels, scores, None)
self.assertAllClose([[0.], [1.]], output)
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| apache-2.0 |
ivandevp/django | django/db/backends/mysql/schema.py | 166 | 4774 | from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY "
"(%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
)
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
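    # These templates override the BaseDatabaseSchemaEditor defaults because
    # MySQL alters columns with MODIFY/CHANGE rather than the standard
    # ALTER COLUMN syntax, and drops unique constraints and foreign keys with
    # DROP INDEX / DROP FOREIGN KEY.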
def quote_value(self, value):
# Inner import to allow module to fail to load gracefully
import MySQLdb.converters
return MySQLdb.escape(value, MySQLdb.converters.conversions)
def skip_default(self, field):
"""
MySQL doesn't accept default values for TEXT and BLOB types, and
implicitly treats these columns as nullable.
"""
db_type = field.db_type(self.connection)
return (
db_type is not None and
db_type.lower() in {
'tinyblob', 'blob', 'mediumblob', 'longblob',
'tinytext', 'text', 'mediumtext', 'longtext',
}
)
def add_field(self, model, field):
super(DatabaseSchemaEditor, self).add_field(model, field)
# Simulate the effect of a one-off default.
if self.skip_default(field) and field.default not in {None, NOT_PROVIDED}:
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _model_indexes_sql(self, model):
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
if storage == "InnoDB":
for field in model._meta.local_fields:
if field.db_index and not field.unique and field.get_internal_type() == "ForeignKey":
# Temporarily setting db_index to False (in memory) to disable
# index creation for FKs (index automatically created by MySQL)
field.db_index = False
return super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super(DatabaseSchemaEditor, self)._delete_composed_index(model, fields, *args)
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._rename_field_sql(table, old_field, new_field, new_type)
| bsd-3-clause |
xuerenlv/PaperWork | original_version/get_valid_ip.py | 1 | 15429 | #encoding=utf8
'''
Created on 2014-05-07
@author: cc
'''
import traceback
from my_test import print_to_file
import urllib2
from StringIO import StringIO
import gzip
try:
import threading
import re
import time
from lxml import etree # @UnresolvedImport
import requests # @UnresolvedImport
import datetime
from BeautifulSoup import BeautifulSoup # @UnresolvedImport
from common_conf_manager import SCAN_FREE_DAILI
from conf import PROXIES
except:
s = traceback.format_exc()
print s
HTTP_HEADERS= {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'}
# HTTP_HEADERS = {'User-Agent' : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36"}
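# Order-preserving de-duplication: keeps the first occurrence of each item.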
def removeDuplicate(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def get_page(search_url):
searchreq = urllib2.Request(url = search_url, headers = HTTP_HEADERS)
searchreq.add_header('Accept-encoding','gzip')
webpage = urllib2.urlopen(searchreq)
if webpage.info().get('Content-Encoding') == 'gzip':
buf = StringIO(webpage.read())
f = gzip.GzipFile(fileobj = buf)
searchpage = f.read()
else:
#print 'webpage.info encoding : ',webpage.info().get('Content-Encoding')
searchpage = webpage.read()
return searchpage
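# Collects article links from the youdaili.cn domestic-proxy listing pages,
# then follows each article's own pagination links so every page of a
# multi-page proxy post is fetched.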
def getLinks_youdaili():
links = []
t_links = []
for i in range(1,2):
proxyUrl = 'http://www.youdaili.cn/Daili/guonei/list_'+str(i)+'.html'
# r = requests.get(proxyUrl)
r = get_page(proxyUrl)
# print_to_file("proxy_page.html", r)
proxyPage = etree.HTML(r.encode('utf-8')) # @UndefinedVariable
hrefs = proxyPage.xpath(u"//ul[@class='newslist_line']/*/a")
t_links.extend([href.attrib['href'] for href in hrefs])
for t_link in t_links:
links.append(t_link)
n_r = get_page(t_link)
proxyPage = etree.HTML(n_r.encode('utf-8')) # @UndefinedVariable
hrefs = proxyPage.xpath(u"/descendant::div[@class='content_full']/*/*/div[@class='newsdetail_cont']/div[@class='cont_font']/div[@class='dede_pages']/ul[@class='pagelist']/li/a")
proxy_head = 'http://www.youdaili.cn/Daili/guonei/'
for href in hrefs:
if u'href' in href.attrib:
if href.attrib['href'] != "#" and href.text != "下一页":
links.append(proxy_head + href.attrib['href'])
return links
'''
Returns a list of "IP:port" strings; the list may contain duplicate IPs.
'''
def getIP_youdaili(links):
IPPool = []
for link in links:
# r = requests.get(link)
r = get_page(link)
IPpage = etree.HTML(r.encode('utf-8')) # @UndefinedVariable
IPs = IPpage.xpath(u"//div[@class='cont_font']/p/text()")
IPPool.extend([IP.split('@')[0].replace('\r\n','') for IP in IPs])
return IPPool
'''
Fetches candidate IPs for verification from http://www.youdaili.cn/.
'''
def getIPs_youdaili():
links = getLinks_youdaili()
IPs = getIP_youdaili(links)
return IPs
'''
Fetches candidate IPs for verification from http://cn-proxy.com/.
'''
def getIPs_cn_proxy():
IPs = []
proxy_url = "http://cn-proxy.com/"
try:
r = get_page(proxy_url)
proxyPage = etree.HTML(r.encode('utf-8')) # @UndefinedVariable
IP_tables = proxyPage.xpath(u"//table[@class='sortable']")
for table in IP_tables:
t_IPs = table.xpath(u"tbody/tr")
for IP in t_IPs:
row = IP.xpath(u"td")
ip = row[0].text
port = row[1].text
IPs.append(ip + ":" + str(port))
except:
pass
return IPs
'''
Fetches candidate IPs for verification from http://www.xici.net.co/.
'''
def getIPs_xici():
IPs = []
proxy_url = "http://www.xici.net.co/"
try:
r = get_page(proxy_url)
proxyPage = etree.HTML(r.encode('utf-8')) # @UndefinedVariable
IP_table = proxyPage.xpath(u"//table[@id='ip_list']")[0]
rows = IP_table.xpath(u"//tr[@class='odd']")
for row in rows:
# print etree.tostring(row, encoding='utf8')
ip = row.xpath(u"td")[1].text
port = row.xpath(u"td")[2].text
IPs.append(ip + ":" + str(port))
rows = IP_table.xpath(u"//tr[@class='']")
for row in rows:
# print etree.tostring(row, encoding='utf8')
ip = row.xpath(u"td")[1].text
port = row.xpath(u"td")[2].text
IPs.append(ip + ":" + str(port))
# print ip + ":" + str(port)
except:
t = traceback.format_exc()
print t
return IPs
def getIPs_proxy_digger():
socket_list = []
url = 'http://www.site-digger.com/html/articles/20110516/proxieslist.html'
r = requests.get(url)
soup = BeautifulSoup(r.content)
tbody = soup.find('tbody')
trs = tbody.findAll('tr')
for tr in trs:
tds = tr.findAll('td')
socket_address = tds[0].text
proxy_type = tds[1].text
location = tds[2].text
if proxy_type == 'Anonymous' :#and location == 'China':
ip,port = socket_address.split(':')
socket_list.append(ip+':'+port)
return socket_list
def getIPs_org_pachong():
socket_list = []
url = 'http://pachong.org/anonymous.html'
r = requests.get(url)
soup = BeautifulSoup(r.content)
tbody = soup.find('tbody')
trs = tbody.findAll('tr')
for tr in trs:
tds = tr.findAll('td')
ip = tds[1].text
port = tds[2].text
socket_address = ip + ':' +port
proxy_type = tds[4].a.text
if proxy_type == 'high':
socket_list.append(socket_address)
return socket_list
def getIPs():
IPPool = []
try:
IPPool.extend(getIPs_youdaili())
except:
pass
try:
IPPool.extend(getIPs_cn_proxy())
except:
pass
try:
IPPool.extend(getIPs_xici())
except:
pass
try:
IPPool.extend(getIPs_proxy_digger())
except:
pass
try:
IPPool.extend(getIPs_org_pachong())
except:
pass
# try:
# t_l = getIPs_proxy_digger()
# IPPool.extend(t_l)
# print "getIPs_proxy_digger size:" + str(len(t_l))
# except:
# pass
# t_l = getIPs_proxy_digger()
# IPPool.extend(t_l)
# print "getIPs_proxy_digger size:" + str(len(t_l))
IPPool = removeDuplicate(IPPool)
print "total IPs: " + str(len(IPPool))
return IPPool
##########################################################################################################################################
TEST_URL = 'http://iframe.ip138.com/ic.asp'
try:
import logging
from common_conf_manager import PROXY_IP_UPDATE_INTERVAL,\
GOOD_PROXY_IP_THRESHOLD
# DATABASE_SUFFIX
from storage_manager import ProxyIp
except:
s = traceback.format_exc()
print s
# connect_db(DATABASE_SUFFIX)
proxy_logger = logging.getLogger("proxyLog")
proxy_ip_db_mutex = threading.Lock()
class ProxyVerifier(threading.Thread):
def __init__(self, thread_name, ip_list, proxy_manager):
threading.Thread.__init__(self)
self.name = thread_name
self.ip_list = ip_list
self.proxy_manager = proxy_manager
def run(self):
for ip in self.ip_list:
validity = False
try:
validity = self._verify_ip(ip)
except:
pass
if validity:
self.proxy_manager.add_ip(ip)
global proxy_ip_db_mutex
if proxy_ip_db_mutex.acquire():
new_proxy_ip = ProxyIp()
new_proxy_ip.ip = ip
try:
new_proxy_ip.save()
except:
pass
finally:
proxy_ip_db_mutex.release()
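    # A proxy passes verification only if requests routed through it to two
    # independent IP-echo services both report the proxy's own IP, i.e. the
    # proxy actually forwards traffic and hides the caller's address.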
def _verify_ip(self, socket_address):
proxies = {"http":"http://" + socket_address}
try:
# start = time.time()
# r = requests.get(TEST_URL, proxies=proxies, timeout=GOOD_PROXY_IP_THRESHOLD)
# returnIP = re.findall(r'[0-9]+(?:\.[0-9]+){3}',r.text)[0]
# inputIP = ip.split(':')[0]
#
# end = time.time()
#
# if inputIP == returnIP and (end - start) < GOOD_PROXY_IP_THRESHOLD:
# proxy_logger.info('inputIP: ' + inputIP)
# proxy_logger.info('returnIP:' + returnIP)
# proxy_logger.info("validity time cost:" + str(end - start))
ip = socket_address.split(':')[0]
verify_url1 = 'http://luckist.sinaapp.com/test_ip'
verify_url2 = 'http://members.3322.org/dyndns/getip'
r1 = requests.get(verify_url1,proxies=proxies,timeout=3)
r2 = requests.get(verify_url2,proxies=proxies,timeout=3)
return_ip1 = r1.content.strip()
return_ip2 = r2.content.strip()
if ip == return_ip1 and ip == return_ip2:
return True
else:
return False
except:
return False
threadLock = threading.Lock()
try:
from common_conf_manager import VERIFY_PROXY_IP_NUMBER
except:
s = traceback.format_exc()
print s
class ProxyIPManager(threading.Thread):
"""
Maintains a list of working proxy IPs.
The IPs are scraped from http://www.youdaili.cn and similar sites.
To verify that an IP is valid, a request is sent through it to
http://iframe.ip138.com/ic.asp. The response contains an IP address;
if that address matches the address the request was sent from, the
proxy is valid, otherwise it is not.
"""
def __init__(self, thread_name="proxyIPmanager"):
threading.Thread.__init__(self)
self.name = thread_name
self.ip_list = []
self.mid_ip_list = []
self.cur_index = 0
self.stop_me = False
def run(self):
while not self.stop_me:
verify_threads = []
ProxyIp.drop_collection() # @UndefinedVariable
proxy_logger.info("new proxy ips updating:")
start = time.time()
try:
if SCAN_FREE_DAILI:
IPPool = getIPs()
for thread_id in range(0, VERIFY_PROXY_IP_NUMBER):
ips = []
index = thread_id
while index < len(IPPool):
ips.append(IPPool[index])
index += VERIFY_PROXY_IP_NUMBER
verify_threads.append(ProxyVerifier("verify " + str(thread_id), ips, self))
print "verify thread %d, ip_list'size is %d" % (thread_id, len(ips))
self.mid_ip_list = []
self._add_clear_proxys()
if SCAN_FREE_DAILI:
for verify_thread in verify_threads:
verify_thread.start()
for verify_thread in verify_threads:
verify_thread.join()
proxy_logger.info("all verify threads end!")
self.ip_list = self.mid_ip_list
end = time.time()
proxy_logger.info("end proxy ips updating: {total time: " + str(end - start) + ", nowtime: " + str(datetime.datetime.now()) + ", current ip_list siz: " + str(len(self.ip_list)) + "}")
proxy_logger.info("current available ips: ")
for ip in self.ip_list:
proxy_logger.info(ip)
except :
s = traceback.format_exc()
print s
pass
self._sleep_for_next_update()
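    # Hands out verified proxies round-robin via a wrapping cursor index.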
def get_ip(self):
if not self.has_enough_ips():
return ""
list_size = len(self.ip_list)
ip = ""
try:
ip = self.ip_list[self.cur_index % list_size]
self.cur_index = (self.cur_index + 1) % list_size
except:
self.cur_index = 0
pass
return ip
def _add_clear_proxys(self):
clear_proxys = []
if SCAN_FREE_DAILI is False:
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62011")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62012")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62013")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62014")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62015")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62016")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62017")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62018")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62019")
# clear_proxys.append("benxiaohai:1328257811@106.186.18.67:62020")
clear_proxys.extend(PROXIES)
global proxy_ip_db_mutex
if proxy_ip_db_mutex.acquire():
for ip in clear_proxys:
new_proxy_ip = ProxyIp()
new_proxy_ip.ip = ip
try:
new_proxy_ip.save()
self.add_ip(ip)
except:
pass
proxy_ip_db_mutex.release()
def _sleep_for_next_update(self):
time.sleep(PROXY_IP_UPDATE_INTERVAL)
def has_enough_ips(self):
if len(self.ip_list) >= 10:
return True
else:
return False
def get_ip_number(self):
return len(self.ip_list)
def add_ip(self, new_ip):
global threadLock
threadLock.acquire()
try:
self.mid_ip_list.append(new_ip)
except:
pass
finally:
threadLock.release()
def stop(self):
    # Renamed from stop_me(): the boolean attribute self.stop_me set in
    # __init__ shadowed a method of the same name, so the original
    # stop_me() was never callable on instances.
    self.stop_me = True
proxy_ip_manager = ProxyIPManager()
if __name__ == '__main__':
proxy_ip_manager.start()
start = time.time()
while True:
if proxy_ip_manager.has_enough_ips():
print "has enough ips"
end = time.time()
print str(end - start) + "s"
else:
print "does not have enough ips"
time.sleep(10)
# getIPs_cn_proxy()
# getIPs_xici()
| apache-2.0 |