repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
lisael/pg-django | tests/regressiontests/generic_views/dates.py | 25 | 21109 | from __future__ import absolute_import
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from .models import Book
class ArchiveIndexViewTests(TestCase):
    """Tests for the ArchiveIndexView date-based generic view."""

    fixtures = ['generic-views-test-data.json']  # supplies the Book rows asserted below
    urls = 'regressiontests.generic_views.urls'

    def _make_books(self, n, base_date):
        """Create ``n`` Book rows dated relative to ``base_date``.

        NOTE(review): every book gets ``pubdate = base_date - 1 day``; if the
        intent was to spread books over multiple dates the delta should depend
        on ``i`` — confirm against what the paginated tests actually need.
        """
        for i in range(n):
            Book.objects.create(
                name='Book %d' % i,
                slug='book-%d' % i,
                pages=100+i,
                pubdate=base_date - datetime.timedelta(days=1))

    def test_archive_view(self):
        """Plain archive index: date list, object list and default template."""
        res = self.client.get('/dates/books/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
        self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
        self.assertTemplateUsed(res, 'generic_views/book_archive.html')

    def test_archive_view_context_object_name(self):
        """context_object_name renames the object list in the context."""
        res = self.client.get('/dates/books/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
        self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
        self.assertFalse('latest' in res.context)
        self.assertTemplateUsed(res, 'generic_views/book_archive.html')

    def test_empty_archive_view(self):
        """With no books and allow_empty=False, the index 404s."""
        Book.objects.all().delete()
        res = self.client.get('/dates/books/')
        self.assertEqual(res.status_code, 404)

    def test_allow_empty_archive_view(self):
        """allow_empty=True renders an empty archive instead of a 404."""
        Book.objects.all().delete()
        res = self.client.get('/dates/books/allow_empty/')
        self.assertEqual(res.status_code, 200)
        # Fixed: the original asserted the same (empty) date_list twice.
        self.assertEqual(list(res.context['date_list']), [])
        self.assertTemplateUsed(res, 'generic_views/book_archive.html')

    def test_archive_view_template(self):
        """template_name overrides the auto-derived template."""
        res = self.client.get('/dates/books/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
        self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
        self.assertTemplateUsed(res, 'generic_views/list.html')

    def test_archive_view_template_suffix(self):
        """template_name_suffix changes the derived template name."""
        res = self.client.get('/dates/books/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
        self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
        self.assertTemplateUsed(res, 'generic_views/book_detail.html')

    def test_archive_view_invalid(self):
        """A misconfigured archive view raises ImproperlyConfigured."""
        self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')

    def test_paginated_archive_view(self):
        """Pagination slices 'latest' into pages of 10."""
        self._make_books(20, base_date=datetime.date.today())
        res = self.client.get('/dates/books/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
        self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
        self.assertTemplateUsed(res, 'generic_views/book_archive.html')
        res = self.client.get('/dates/books/paginated/?page=2')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 2)
        self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
class YearArchiveViewTests(TestCase):
    """Tests for YearArchiveView: context contents, allow_empty,
    allow_future, pagination and URL-pattern validation."""

    fixtures = ['generic-views-test-data.json']  # supplies the Book rows asserted below
    urls = 'regressiontests.generic_views.urls'

    def test_year_view(self):
        """A populated year renders with its date list and template."""
        res = self.client.get('/dates/books/2008/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
        self.assertEqual(res.context['year'], '2008')
        self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')

    def test_year_view_make_object_list(self):
        """make_object_list=True exposes the year's books as both
        'book_list' and 'object_list'."""
        res = self.client.get('/dates/books/2006/make_object_list/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['date_list']), [datetime.datetime(2006, 5, 1)])
        self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
        self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
        self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')

    def test_year_view_empty(self):
        """An empty year 404s by default; allow_empty yields empty lists."""
        res = self.client.get('/dates/books/1999/')
        self.assertEqual(res.status_code, 404)
        res = self.client.get('/dates/books/1999/allow_empty/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['date_list']), [])
        self.assertEqual(list(res.context['book_list']), [])

    def test_year_view_allow_future(self):
        """Future years are hidden (404 / empty) unless allow_future is on."""
        # Create a new book in the future
        year = datetime.date.today().year + 1
        b = Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
        res = self.client.get('/dates/books/%s/' % year)
        self.assertEqual(res.status_code, 404)
        res = self.client.get('/dates/books/%s/allow_empty/' % year)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), [])
        res = self.client.get('/dates/books/%s/allow_future/' % year)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['date_list']), [datetime.datetime(year, 1, 1)])

    def test_year_view_paginated(self):
        """A paginated year archive still exposes both list context keys."""
        res = self.client.get('/dates/books/2006/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
        self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
        self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')

    def test_year_view_invalid_pattern(self):
        """A non-numeric year segment is a 404."""
        res = self.client.get('/dates/books/no_year/')
        self.assertEqual(res.status_code, 404)
class MonthArchiveViewTests(TestCase):
    """Tests for MonthArchiveView: context, allow_empty/allow_future,
    pagination, custom month formats and next/previous month links."""

    fixtures = ['generic-views-test-data.json']  # supplies the Book rows asserted below
    urls = 'regressiontests.generic_views.urls'

    def test_month_view(self):
        """A populated month renders its books and month context."""
        res = self.client.get('/dates/books/2008/oct/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
        self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
        self.assertEqual(list(res.context['book_list']),
                         list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
        self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
        # Since allow_empty=False, next/prev months must be valid (#7164)
        self.assertEqual(res.context['next_month'], None)
        self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))

    def test_month_view_allow_empty(self):
        """allow_empty toggles between 404 and empty-month rendering."""
        # allow_empty = False, empty month
        res = self.client.get('/dates/books/2000/jan/')
        self.assertEqual(res.status_code, 404)
        # allow_empty = True, empty month
        res = self.client.get('/dates/books/2000/jan/allow_empty/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['date_list']), [])
        self.assertEqual(list(res.context['book_list']), [])
        self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
        # Since it's allow empty, next/prev are allowed to be empty months (#7164)
        self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
        self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
        # allow_empty but not allow_future: next_month should be empty (#7164)
        url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['next_month'], None)

    def test_month_view_allow_future(self):
        """allow_future exposes future months and adjusts next/prev links."""
        future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
        urlbit = future.strftime('%Y/%b').lower()
        b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
        # allow_future = False, future month
        res = self.client.get('/dates/books/%s/' % urlbit)
        self.assertEqual(res.status_code, 404)
        # allow_future = True, valid future month
        res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['date_list'][0].date(), b.pubdate)
        self.assertEqual(list(res.context['book_list']), [b])
        self.assertEqual(res.context['month'], future)
        # Since it's allow_future but not allow_empty, next/prev are not
        # allowed to be empty months (#7164)
        self.assertEqual(res.context['next_month'], None)
        self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
        # allow_future, but not allow_empty, with a current month. So next
        # should be in the future (yup, #7164, again)
        res = self.client.get('/dates/books/2008/oct/allow_future/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['next_month'], future)
        self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))

    def test_month_view_paginated(self):
        """A paginated month archive exposes both list context keys."""
        res = self.client.get('/dates/books/2008/oct/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
        self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
        self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')

    def test_custom_month_format(self):
        """A numeric month segment is accepted when the URL conf allows it."""
        res = self.client.get('/dates/books/2008/10/')
        self.assertEqual(res.status_code, 200)

    def test_month_view_invalid_pattern(self):
        """An unparseable month segment is a 404."""
        res = self.client.get('/dates/books/2007/no_month/')
        self.assertEqual(res.status_code, 404)

    def test_previous_month_without_content(self):
        "Content can exist on any day of the previous month. Refs #14711"
        self.pubdate_list = [
            datetime.date(2010, month, day)
            for month,day in ((9,1), (10,2), (11,3))
        ]
        for pubdate in self.pubdate_list:
            name = str(pubdate)
            Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
        res = self.client.get('/dates/books/2010/nov/allow_empty/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
        # The following test demonstrates the bug
        res = self.client.get('/dates/books/2010/nov/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
        # The bug does not occur here because a Book with pubdate of Sep 1 exists
        res = self.client.get('/dates/books/2010/oct/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['previous_month'], datetime.date(2010,9,1))
class WeekArchiveViewTests(TestCase):
    """Tests for WeekArchiveView: week lookup, allow_empty, allow_future,
    pagination and the week-start-day option."""

    fixtures = ['generic-views-test-data.json']  # supplies the Book rows asserted below
    urls = 'regressiontests.generic_views.urls'

    def test_week_view(self):
        """A populated week renders its books; 'week' is the week's start date."""
        res = self.client.get('/dates/books/2008/week/39/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
        self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
        self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))

    def test_week_view_allow_empty(self):
        """An empty week 404s by default; allow_empty yields an empty list."""
        res = self.client.get('/dates/books/2008/week/12/')
        self.assertEqual(res.status_code, 404)
        res = self.client.get('/dates/books/2008/week/12/allow_empty/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), [])

    def test_week_view_allow_future(self):
        """Future weeks are hidden unless allow_future is enabled."""
        # January 7th always falls in week 1, given Python's definition of week numbers
        future = datetime.date(datetime.date.today().year + 1, 1, 7)
        b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
        res = self.client.get('/dates/books/%s/week/1/' % future.year)
        self.assertEqual(res.status_code, 404)
        res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), [b])

    def test_week_view_paginated(self):
        """The week archive exposes the week's books as both context keys."""
        week_start = datetime.date(2008, 9, 28)
        week_end = week_start + datetime.timedelta(days=7)
        res = self.client.get('/dates/books/2008/week/39/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
        self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
        self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')

    def test_week_view_invalid_pattern(self):
        """A non-numeric week segment is a 404."""
        res = self.client.get('/dates/books/2007/week/no_week/')
        self.assertEqual(res.status_code, 404)

    def test_week_start_Monday(self):
        """Weeks may start on Monday instead of the default Sunday."""
        # Regression for #14752
        res = self.client.get('/dates/books/2008/week/39/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
        res = self.client.get('/dates/books/2008/week/39/monday/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
class DayArchiveViewTests(TestCase):
    """Tests for DayArchiveView (and TodayArchiveView): context,
    allow_empty/allow_future, pagination and next/previous day links."""

    fixtures = ['generic-views-test-data.json']  # supplies the Book rows asserted below
    urls = 'regressiontests.generic_views.urls'

    def test_day_view(self):
        """A populated day renders its books and day context."""
        res = self.client.get('/dates/books/2008/oct/01/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
        self.assertEqual(list(res.context['book_list']),
                         list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
        self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
        # Since allow_empty=False, next/prev days must be valid.
        self.assertEqual(res.context['next_day'], None)
        self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))

    def test_day_view_allow_empty(self):
        """allow_empty toggles between 404 and empty-day rendering."""
        # allow_empty = False, empty month
        res = self.client.get('/dates/books/2000/jan/1/')
        self.assertEqual(res.status_code, 404)
        # allow_empty = True, empty month
        res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), [])
        self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
        # Since it's allow empty, next/prev are allowed to be empty months (#7164)
        self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
        self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
        # allow_empty but not allow_future: next_month should be empty (#7164)
        url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['next_day'], None)

    def test_day_view_allow_future(self):
        """allow_future exposes future days and adjusts next/prev links."""
        future = (datetime.date.today() + datetime.timedelta(days=60))
        urlbit = future.strftime('%Y/%b/%d').lower()
        b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
        # allow_future = False, future month
        res = self.client.get('/dates/books/%s/' % urlbit)
        self.assertEqual(res.status_code, 404)
        # allow_future = True, valid future month
        res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), [b])
        self.assertEqual(res.context['day'], future)
        # allow_future but not allow_empty, next/prev must be valid
        self.assertEqual(res.context['next_day'], None)
        self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
        # allow_future, but not allow_empty, with a current month.
        res = self.client.get('/dates/books/2008/oct/01/allow_future/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['next_day'], future)
        self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))

    def test_day_view_paginated(self):
        """A paginated day archive exposes both list context keys."""
        res = self.client.get('/dates/books/2008/oct/1/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
        self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
        self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')

    def test_next_prev_context(self):
        """The template can render the next/previous context values."""
        res = self.client.get('/dates/books/2008/oct/01/')
        # NOTE(review): comparing res.content (bytes on Python 3) to a str
        # literal only works on Python 2 — confirm the target runtime.
        self.assertEqual(res.content, "Archive for Oct. 1, 2008. Previous day is May 1, 2006")

    def test_custom_month_format(self):
        """A numeric month segment is accepted when the URL conf allows it."""
        res = self.client.get('/dates/books/2008/10/01/')
        self.assertEqual(res.status_code, 200)

    def test_day_view_invalid_pattern(self):
        """An unparseable day segment is a 404."""
        res = self.client.get('/dates/books/2007/oct/no_day/')
        self.assertEqual(res.status_code, 404)

    def test_today_view(self):
        """TodayArchiveView 404s when empty unless allow_empty is set."""
        res = self.client.get('/dates/books/today/')
        self.assertEqual(res.status_code, 404)
        res = self.client.get('/dates/books/today/allow_empty/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['day'], datetime.date.today())
class DateDetailViewTests(TestCase):
    """Tests for DateDetailView: lookup by pk/slug, custom month formats,
    allow_future, and custom querysets in get_object()."""

    fixtures = ['generic-views-test-data.json']  # supplies the Book rows asserted below
    urls = 'regressiontests.generic_views.urls'

    def test_date_detail_by_pk(self):
        """Lookup by primary key populates 'object' and 'book'."""
        res = self.client.get('/dates/books/2008/oct/01/1/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], Book.objects.get(pk=1))
        self.assertEqual(res.context['book'], Book.objects.get(pk=1))
        self.assertTemplateUsed(res, 'generic_views/book_detail.html')

    def test_date_detail_by_slug(self):
        """Lookup by slug works as well as by pk."""
        res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))

    def test_date_detail_custom_month_format(self):
        """A numeric month segment is accepted when the URL conf allows it."""
        res = self.client.get('/dates/books/2008/10/01/1/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['book'], Book.objects.get(pk=1))

    def test_date_detail_allow_future(self):
        """Future-dated objects are hidden unless allow_future is enabled."""
        future = (datetime.date.today() + datetime.timedelta(days=60))
        urlbit = future.strftime('%Y/%b/%d').lower()
        b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
        res = self.client.get('/dates/books/%s/new-new/' % urlbit)
        self.assertEqual(res.status_code, 404)
        res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['book'], b)
        self.assertTemplateUsed(res, 'generic_views/book_detail.html')

    def test_invalid_url(self):
        """A URL with neither pk nor slug raises AttributeError."""
        self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")

    def test_get_object_custom_queryset(self):
        """
        Ensure that custom querysets are used when provided to
        BaseDateDetailView.get_object()
        Refs #16918.
        """
        res = self.client.get(
            '/dates/books/get_object_custom_queryset/2006/may/01/2/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], Book.objects.get(pk=2))
        self.assertEqual(res.context['book'], Book.objects.get(pk=2))
        self.assertTemplateUsed(res, 'generic_views/book_detail.html')
        res = self.client.get(
            '/dates/books/get_object_custom_queryset/2008/oct/01/1/')
        self.assertEqual(res.status_code, 404)
| bsd-3-clause |
salvacarrion/orange3-educational | orangecontrib/educational/optimizers/sgd_optimizers.py | 1 | 17972 | import numpy as np
import copy
__all__ = ['SGD', 'Momentum', 'NesterovMomentum', 'AdaGrad', 'RMSProp',
'AdaDelta', 'Adam', 'Adamax', 'create_opt']
def create_opt(opt2copy, learning_rate=None):
    """Clone an optimizer, optionally overriding its learning rate.

    Args:
        opt2copy: optimizer instance to duplicate.
        learning_rate: if given (including a legitimate 0.0), the clone's
            learning rate is replaced with this value; if None the copied
            rate is kept.

    Returns:
        A shallow copy of ``opt2copy``.  Note: because the copy is shallow,
        accumulator state (velocity, moment estimates, ...) stored in numpy
        arrays is shared with the original.
    """
    opt = copy.copy(opt2copy)  # Shallow copy
    # Fixed: compare against None explicitly so a rate of 0.0 (falsy)
    # still overrides the copied value instead of being ignored.
    if learning_rate is not None:
        opt.learning_rate = learning_rate
    return opt
class SGD:
    """Plain stochastic gradient descent.

    Update rule: ``param := param - learning_rate * gradient``.

    Args:
        learning_rate: step size applied to every update (default 1.0).
    """

    def __init__(self, learning_rate=1.0):
        self.learning_rate = learning_rate
        self.name = 'Stochastic Gradient Descent'

    def update(self, grads, params, indices=None):
        """Apply one SGD step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        params[selected] -= self.learning_rate * grads

    def __str__(self):
        return self.name
class Momentum:
    """SGD with classical (heavy-ball) momentum.

    Update rule:
        ``velocity := momentum * velocity - learning_rate * gradient``
        ``param    := param + velocity``

    Args:
        learning_rate: step size (default 1.0).
        momentum: decay factor for the running velocity; larger values
            smooth over more steps (default 0.9).

    Note:
        Higher momentum also enlarges the effective step; scaling the
        learning rate by ``1 - momentum`` compensates.
    """

    def __init__(self, learning_rate=1.0, momentum=0.9):
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.velocity = None  # allocated lazily to match params.shape
        self.name = 'Momentum'

    def update(self, grads, params, indices=None):
        """Apply one momentum step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.velocity is None:
            self.velocity = np.zeros(params.shape)
        fresh = (self.momentum * self.velocity[selected]
                 - self.learning_rate * grads)
        self.velocity[selected] = fresh
        params[selected] += fresh

    def __str__(self):
        return self.name
class NesterovMomentum:
    """SGD with Nesterov momentum, in the Pylearn2 formulation.

    Equivalent to the classic look-ahead scheme but evaluated at the current
    parameters:
        ``v_prev   := velocity``
        ``velocity := momentum * velocity - learning_rate * gradient``
        ``param    := param - momentum * v_prev + (1 + momentum) * velocity``

    Args:
        learning_rate: step size (default 1.0).
        momentum: decay factor for the running velocity (default 0.9).

    Note:
        Higher momentum also enlarges the effective step; scaling the
        learning rate by ``1 - momentum`` compensates.  See
        https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
        for the reformulation used here.
    """

    def __init__(self, learning_rate=1.0, momentum=0.9):
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.velocity = None  # allocated lazily to match params.shape
        self.name = "Nesterov's Accelerated Momentum"

    def update(self, grads, params, indices=None):
        """Apply one Nesterov-momentum step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.velocity is None:
            self.velocity = np.zeros(params.shape)
        # Fancy indexing copies, so `previous` is safe after the overwrite.
        previous = self.velocity[selected]
        fresh = self.momentum * previous - self.learning_rate * grads
        self.velocity[selected] = fresh
        params[selected] += (1 + self.momentum) * fresh - self.momentum * previous

    def __str__(self):
        return self.name
class AdaGrad:
    """AdaGrad: per-parameter learning rates from accumulated squared grads.

    Each step divides the learning rate by the square root of the running
    sum of squared gradients, so the effective rate is monotonically
    decreasing (Duchi, Hazan & Singer, 2011, JMLR 12:2121-2159).

    Args:
        learning_rate: base step size (default 1.0).
        epsilon: small constant added inside the square root for numerical
            stability (default 1e-6); not part of the textbook formula.
    """

    def __init__(self, learning_rate=1.0, epsilon=1e-6):
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.accu = None  # running sum of squared gradients, lazily allocated
        self.name = 'AdaGrad'

    def update(self, grads, params, indices=None):
        """Apply one AdaGrad step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.accu is None:
            self.accu = np.zeros(params.shape)
        self.accu[selected] += grads ** 2
        scale = self.learning_rate / np.sqrt(self.accu[selected] + self.epsilon)
        params[selected] -= scale * grads

    def __str__(self):
        return self.name
class RMSProp:
    """RMSProp: scale steps by a moving average of squared gradients.

    ``r_t = rho * r_{t-1} + (1 - rho) * g^2`` and the step is
    ``learning_rate * g / sqrt(r_t + epsilon)`` (Tieleman & Hinton, 2012,
    Coursera Lecture 6.5).

    Args:
        learning_rate: base step size (default 1.0).
        rho: decay factor of the moving average; values near 1 decay slowly,
            values near 0 decay fast (default 0.9).
        epsilon: small constant for numerical stability (default 1e-6).
    """

    def __init__(self, learning_rate=1.0, rho=0.9, epsilon=1e-6):
        self.learning_rate = learning_rate
        self.rho = rho
        self.epsilon = epsilon
        self.accu = None  # moving average of squared gradients, lazily allocated
        self.name = 'RMSProp'

    def update(self, grads, params, indices=None):
        """Apply one RMSProp step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.accu is None:
            self.accu = np.zeros(params.shape)
        running = self.rho * self.accu[selected] + (1 - self.rho) * grads ** 2
        self.accu[selected] = running
        params[selected] -= (self.learning_rate * grads
                             / np.sqrt(running + self.epsilon))

    def __str__(self):
        return self.name
class AdaDelta:
    """AdaDelta: scale steps by the ratio of accumulated deltas to grads.

    Maintains moving averages of squared gradients and of squared parameter
    updates; the step multiplies the gradient by
    ``sqrt(delta_accu + eps) / sqrt(accu + eps)`` (Zeiler, 2012,
    arXiv:1212.5701).  The paper uses no learning rate (i.e. 1.0), which is
    usually best left unchanged; rho=0.95 and epsilon=1e-6 are the suggested
    defaults.  Epsilon also keeps the very first update nonzero.

    Args:
        learning_rate: extra multiplier on the AdaDelta step (default 1.0).
        rho: decay factor of both moving averages (default 0.95).
        epsilon: small constant for numerical stability (default 1e-6).
    """

    def __init__(self, learning_rate=1.0, rho=0.95, epsilon=1e-6):
        self.learning_rate = learning_rate
        self.rho = rho
        self.epsilon = epsilon
        self.accu = None        # moving average of squared gradients
        self.delta_accu = None  # moving average of squared updates
        self.name = 'AdaDelta'

    def update(self, grads, params, indices=None):
        """Apply one AdaDelta step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.

        Returns:
            The (mutated) ``params`` array, for convenience.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.accu is None or self.delta_accu is None:
            self.accu = np.zeros(params.shape)
            self.delta_accu = np.zeros(params.shape)
        grad_avg = self.rho * self.accu[selected] + (1 - self.rho) * grads ** 2
        self.accu[selected] = grad_avg
        # Step uses the *previous* delta accumulator, per the paper.
        step = (grads * np.sqrt(self.delta_accu[selected] + self.epsilon)
                / np.sqrt(grad_avg + self.epsilon))
        params[selected] -= self.learning_rate * step
        # Fold this step into the delta accumulator for the next iteration.
        self.delta_accu[selected] = (self.rho * self.delta_accu[selected]
                                     + (1 - self.rho) * step ** 2)
        return params

    def __str__(self):
        return self.name
class Adam:
    """Adam optimizer (Kingma & Ba, 2014, arXiv:1412.6980).

    Keeps exponentially decayed first and second moment estimates of the
    gradients and applies a bias-corrected step.  The paper's extra
    hyperparameter lambda is only needed for the convergence proof and is
    omitted here.

    Args:
        learning_rate: base step size (default 0.001).
        beta1: decay rate of the first-moment estimate (default 0.9).
        beta2: decay rate of the second-moment estimate (default 0.999).
        epsilon: constant for numerical stability (default 1e-8).
    """

    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
                 epsilon=1e-8):
        self.learning_rate = learning_rate
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.t_prev = 0       # number of completed steps
        self.m_prev = None    # first-moment estimate, lazily allocated
        self.v_prev = None    # second-moment estimate, lazily allocated
        self.name = 'Adam'

    def update(self, grads, params, indices=None):
        """Apply one Adam step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.m_prev is None or self.v_prev is None:
            self.m_prev = np.zeros(params.shape)
            self.v_prev = np.zeros(params.shape)
        t = self.t_prev + 1
        # Both bias corrections (for m and v being initialized at zero) are
        # folded into a single scalar step size; to see their shape, plot
        # sqrt(1-0.999^x)*(1-0.9^x).
        step_size = (self.learning_rate * np.sqrt(1 - self.beta2 ** t)
                     / (1 - self.beta1 ** t))
        m = self.beta1 * self.m_prev[selected] + (1 - self.beta1) * grads
        v = self.beta2 * self.v_prev[selected] + (1 - self.beta2) * grads ** 2
        self.m_prev[selected] = m
        self.v_prev[selected] = v
        params[selected] -= step_size * m / (np.sqrt(v) + self.epsilon)
        self.t_prev = t

    def __str__(self):
        return self.name
class Adamax:
    """Adamax: the infinity-norm variant of Adam (Kingma & Ba, 2014,
    arXiv:1412.6980).

    Replaces Adam's second-moment average with an exponentially decayed
    infinity norm of past gradients.

    Args:
        learning_rate: base step size (default 0.001).
        beta1: decay rate of the first-moment estimate (default 0.9).
        beta2: decay rate of the infinity-norm estimate (default 0.999).
        epsilon: constant for numerical stability (default 1e-8).
    """

    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
                 epsilon=1e-8):
        self.learning_rate = learning_rate
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.t_prev = 0       # number of completed steps
        self.m_prev = None    # first-moment estimate, lazily allocated
        self.u_prev = None    # decayed infinity norm, lazily allocated
        self.name = 'Adamax'

    def update(self, grads, params, indices=None):
        """Apply one Adamax step to ``params`` in place.

        Args:
            grads: gradients for the selected entries.
            params: parameter array, mutated in place.
            indices: positions in ``params`` to update; all by default.
        """
        selected = np.arange(len(params)) if indices is None else indices
        if self.m_prev is None or self.u_prev is None:
            self.m_prev = np.zeros(params.shape)
            self.u_prev = np.zeros(params.shape)
        t = self.t_prev + 1
        # Only the first moment needs bias correction here.
        step_size = self.learning_rate / (1 - self.beta1 ** t)
        m = self.beta1 * self.m_prev[selected] + (1 - self.beta1) * grads
        u = np.maximum(self.beta2 * self.u_prev[selected], np.abs(grads))
        self.m_prev[selected] = m
        self.u_prev[selected] = u
        params[selected] -= step_size * m / (u + self.epsilon)
        self.t_prev = t

    def __str__(self):
        return self.name
trinerdi/icpc-notebook | lib/googletest/test/gtest_env_var_test.py | 343 | 4036 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, after printing both values, unless they match."""
  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print('  Actual: %s' % (actual,))
  raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Remove the variable if present; a no-op otherwise.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  # Pass the flag on the command line only when one was given.
  cmd = [COMMAND] + ([flag] if flag is not None else [])
  return gtest_test_utils.Subprocess(cmd, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_%s' % flag.upper()
  # First with the variable set (expect test_val), then unset (expect default).
  for value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Checks the GTEST_* environment variables against their flags."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    for case in (('break_on_failure', '1', '0'),
                 ('color', 'yes', 'auto'),
                 ('filter', 'FooTest.Bar', '*')):
      TestFlag(*case)
    # Clear $XML_OUTPUT_FILE before exercising the 'output' flag so its
    # default is observed as empty (see testXmlOutputFile below).
    SetEnvVar('XML_OUTPUT_FILE', None)
    for case in (('output', 'xml:tmp/foo.xml', ''),
                 ('print_time', '0', '1'),
                 ('repeat', '999', '1'),
                 ('throw_on_failure', '1', '0'),
                 ('death_test_style', 'threadsafe', 'fast'),
                 ('catch_exceptions', '0', '1')):
      TestFlag(*case)
    if IS_LINUX:
      for case in (('death_test_use_fork', '1', '0'),
                   ('stack_trace_depth', '0', '100')):
        TestFlag(*case)

  def testXmlOutputFile(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""
    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))

  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT"""
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))
# Standard gtest-script entry point: delegates to the shared test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| mit |
vollib/py_vollib | py_vollib/ref_python/black/implied_volatility.py | 1 | 2865 | # -*- coding: utf-8 -*-
"""
py_vollib.ref_python.black.implied_volatility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A library for option pricing, implied volatility, and
greek calculation. py_vollib is based on lets_be_rational,
a Python wrapper for LetsBeRational by Peter Jaeckel as
described below.
:copyright: © 2017 Gammon Capital LLC
:license: MIT, see LICENSE for more details.
py_vollib.ref_python is a pure python version of py_vollib without any dependence on LetsBeRational. It is provided purely as a reference implementation for sanity checking. It is not recommended for industrial use.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
# Related third party imports
from scipy.optimize import brentq
# Local application/library specific imports
from py_vollib.ref_python.black import black
# -----------------------------------------------------------------------------
# FUNCTIONS - IMPLIED VOLATILITY
def implied_volatility(price, F, K, r, t, flag):
    """Return the Black (1976) implied volatility for a futures option price.
    :param price: discounted option price to invert for volatility
    :type price: float
    :param F: underlying futures price
    :type F: float
    :param K: strike price
    :type K: float
    :param r: annual risk-free interest rate
    :type r: float
    :param t: time to expiration in years
    :type t: float
    :param flag: 'c' or 'p' for call or put.
    :type flag: str
    :returns: float
    >>> F = 101.0
    >>> K = 102.0
    >>> t = .5
    >>> r = .01
    >>> flag = 'p'
    >>> sigma_in = 0.2
    >>> price = black(flag, F, K, t, r, sigma_in)
    >>> expected_price = 6.20451158097
    >>> abs(expected_price - price) < 0.00001
    True
    >>> sigma_out = implied_volatility(price, F, K, r, t, flag)
    >>> sigma_in == sigma_out or abs(sigma_in - sigma_out) < 0.00001
    True
    >>> F = 100
    >>> K = 100
    >>> sigma = .2
    >>> flag = 'c'
    >>> t = .5
    >>> r = .02
    >>> discounted_call_price = black(flag, F, K, t, r, sigma)
    >>> iv = implied_volatility(discounted_call_price, F, K, r, t, flag)
    >>> expected_discounted_call_price = 5.5811067246
    >>> expected_iv = 0.2
    >>> abs(expected_discounted_call_price - discounted_call_price) < 0.00001
    True
    >>> abs(expected_iv - iv) < 0.00001
    True
    """
    # Objective function: zero exactly when sigma reproduces the given price.
    f = lambda sigma: price - black(flag, F, K, t, r, sigma)
    # Root-find sigma over the bracket [1e-12, 100] (effectively 0%..10000%).
    return brentq(
        f,
        a=1e-12,
        b=100,
        xtol=1e-15,
        rtol=1e-15,
        maxiter=1000,
        full_output=False
    )
if __name__ == "__main__":
from py_vollib.helpers.doctest_helper import run_doctest
run_doctest()
| mit |
coruus/pylibtiff | libtiff/tests/test_tiff_file.py | 11 | 1135 |
import os
import atexit
from tempfile import mktemp
from numpy import *
from libtiff import TIFF
from libtiff import TIFFfile, TIFFimage
def test_write_read():
    """Round-trips 2x3 arrays of every supported dtype through TIFF files.

    Each temporary file is scheduled for deletion at interpreter exit.
    """
    for compression in [None, 'lzw']:
        for itype in [uint8, uint16, uint32, uint64,
                      int8, int16, int32, int64,
                      float32, float64,
                      complex64, complex128]:
            image = array([[1, 2, 3], [4, 5, 6]], itype)
            fn = mktemp('.tif')
            # Disabled alternative path kept for reference: write via the
            # low-level TIFF wrapper instead of TIFFimage.
            if 0:
                tif = TIFF.open(fn, 'w')
                tif.write_image(image, compression=compression)
                tif.close()
            else:
                tif = TIFFimage(image)
                tif.write_file(fn, compression=compression)
                del tif
            tif = TIFFfile(fn)
            data, names = tif.get_samples()
            assert names == ['sample0'], repr(names)
            assert len(data) == 1, repr(len(data))
            assert image.dtype == data[0].dtype, repr((image.dtype, data[0].dtype))
            assert (image == data[0]).all()
            #os.remove(fn)
            atexit.register(os.remove, fn)
| bsd-3-clause |
Bollegala/MLIB | cluster/seqclust.py | 1 | 5463 | #! /usr/bin/python
#! coding: utf-8
"""
Performs sequential coclustering.
Given a matrix,theta (column threshold), and phi (row threshold),
produces a set of clusterings (row clusters and column clusters)
"""
import sys, math, re, getopt
sys.path.append("../..")
from MLIB.cluster.matrix import MATRIX
from MLIB.utils.ProgBar import TerminalController,ProgressBar
class SEQCLUST:
    """Sequential (one-pass) clustering of matrix rows (Python 2 code).
    Rows are visited in decreasing total-frequency order; each row joins the
    most similar existing cluster if that similarity exceeds theta, and
    otherwise seeds a new singleton cluster.
    """
    def __init__(self):
        pass
    def patsort(self, A, B):
        # cmp-style comparator (Python 2 list.sort): orders
        # (pat_id, total_frequency) pairs by frequency, descending.
        # NOTE(review): returns 1 for ties, so the order of equal-frequency
        # rows is unspecified -- confirm that is acceptable.
        if A[1] > B[1]:
            return(-1)
        return(1)
    def sim(self, c, v):
        # Dot product between cluster centroid c.wpairs and sparse row v.
        # With L2-normalized rows and centroids this is cosine similarity.
        sim = 0
        for wpair in v:
            if wpair in c.wpairs:
                sim += float(v[wpair]*c.wpairs[wpair])
        return(sim)
    def cluster(self, m, theta):
        """Cluster the rows of matrix m; returns a list of SEQ_CLUST_DATA.
        theta is the similarity threshold for joining an existing cluster.
        """
        #first sort patterns according to the total frequency
        #of all word-pairs in which they appear.
        pats = [] # (pat_id, total_frequency_in_wpairs)
        for pat in m.get_row_id_list():
            row = m.get_row(pat)
            total = 0
            for k in row:
                total += row[k]
            pats.append((pat, total))
        N = len(pats)
        pats.sort(self.patsort)
        #initialize clusters.
        clusts = []
        count = 0
        # Normalize rows so self.sim computes cosine similarity.
        m.L2_normalize_rows()
        term = TerminalController()
        progress = ProgressBar(term, "Clustering total rows = %d" %N)
        for (pat, total) in pats:
            maxsim = 0
            maxclust = None
            count += 1
            # Linear scan for the best-matching existing cluster.
            for c in clusts:
                v = m.get_row(pat)
                s = self.sim(c, v)
                if s > maxsim:
                    maxsim = s
                    maxclust = c
            if maxsim > theta:
                progress.update(float(count)/N,
                                "MERGED %d: row = %d freq = %d clusts = %d" \
                                % (count, pat, total, len(clusts)))
                maxclust.merge(pat, m.get_row(pat))
            else:
                progress.update(float(count)/N,
                                "   NEW %d: %s freq = %d clusts = %d" \
                                % (count, pat, total, len(clusts)))
                clusts.append(SEQ_CLUST_DATA(pat, m.get_row(pat)))
        return(clusts)
    def write_clusters(self, clusts, theta, fname):
        """
        format.
        total_no_clusts sparsity singletons theta comma_sep_lists
        sparsity = singletons/total_no_clusts
        """
        F = open(fname, "w")
        # A singleton is a cluster containing exactly one row.
        singletons = 0
        for c in clusts:
            if len(c.pats) == 1:
                singletons += 1
        sparsity = float(singletons)/float(len(clusts))
        print "Total Clusters =", len(clusts)
        print "singletons =", singletons
        print "sparsity =", sparsity
        print "theta =", theta
        F.write("TOTAL_CLUSTERS=%d SINGLETONS=%d SPARSITY=%f THETA=%f "\
                % (len(clusts), singletons, sparsity, theta))
        for c in clusts:
            F.write("%s " % ",".join([str(x) for x in c.pats]))
        F.close()
        pass
class SEQ_CLUST_DATA:
    """A single cluster: member row ids plus an L2-normalized centroid."""
    def __init__(self, pat, v):
        # Seed the cluster with one row, keeping only non-zero components.
        self.pats = [pat]
        self.wpairs = dict((key, val) for (key, val) in v.items()
                           if val != 0)
    def normalize(self):
        # Scale the centroid vector to unit L2 length.
        norm = math.sqrt(float(sum(val ** 2 for val in self.wpairs.values())))
        for key in self.wpairs:
            self.wpairs[key] = float(self.wpairs[key]) / norm
    def merge(self, pat, v):
        # Absorb row 'pat': accumulate its non-zero components into the
        # centroid, then renormalize.
        self.pats.append(pat)
        for key in v:
            if v[key] != 0:
                self.wpairs[key] = self.wpairs.get(key, 0) + v[key]
        self.normalize()
def usage():
    """Print command-line usage for seqclust.py to stderr."""
    sys.stderr.write("""python seqclust.py -i <input_matrix_file>
                        -o <output_clusters_file>
                        -t <threshold>\n""")
    pass
def process_command_line():
    """
    Get the command line arguments and validate.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:o:t:u",\
                                   ["help", "input=","output=",
                                    "theta=", "transpose"])
    # Python 2 'except X, err' syntax.
    except getopt.GetoptError, err:
        print str(err)
        usage()
        sys.exit(2)
    # parameter values.
    matrix_fname = None
    clust_fname = None
    theta = 0
    transpose = True
    for opt, val in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(1)
        if opt in ("-i", "--input"):
            matrix_fname = val
        if opt in ("-o", "--output"):
            clust_fname = val
        if opt in ("-t", "--theta"):
            theta = float(val)
        # NOTE(review): -u/--transpose DISABLES transposition (default is
        # transpose=True) -- confirm the flag name matches the intent.
        if opt in ("-u", "--transpose"):
            transpose = False
    # Silently does nothing when required arguments are missing or theta
    # is out of [0, 1]; only usage-on-error paths exit explicitly above.
    if matrix_fname and clust_fname and (theta >= 0) and (theta <=1):
        perform_sequential_clustering(matrix_fname, clust_fname,
                                      theta, transpose)
    pass
def perform_sequential_clustering(matrix_fname, clust_fname, theta, transpose):
    """Load the matrix, optionally transpose it, cluster rows, save results."""
    M = MATRIX(True)
    M.read_matrix(matrix_fname)
    # Cluster either the matrix itself or its transpose.
    target = M.transpose() if transpose else M
    algo = SEQCLUST()
    clusters = algo.cluster(target, theta)
    algo.write_clusters(clusters, theta, clust_fname)
    sys.stderr.write("Clustering Finished....Terminating\n")
if __name__ == "__main__":
process_command_line()
| bsd-3-clause |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/build/gyp/test/mac/gyptest-app.py | 75 | 4193 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import os
import plistlib
import subprocess
import sys
def GetStdout(cmdlist):
  """Runs cmdlist and returns its stdout with the trailing newline stripped."""
  proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
  out = proc.communicate()[0]
  return out.rstrip('\n')
def ExpectEq(expected, actual):
  # Non-fatal equality check: on mismatch, report both values (Python 2
  # print-to-stderr syntax) and mark the module-level 'test' fixture failed.
  if expected != actual:
    print >>sys.stderr, 'Expected "%s", got "%s"' % (expected, actual)
    test.fail_test()
def ls(path):
  '''Returns a list of all files in a directory, relative to the directory.'''
  prefix_len = len(path) + 1  # strip 'path' plus the following separator
  found = []
  for root, _, filenames in os.walk(path):
    found.extend(os.path.join(root, name)[prefix_len:] for name in filenames)
  return found
def XcodeVersion():
  """Returns the Xcode version as a 4-character, zero-padded digit string."""
  raw = subprocess.check_output(['xcodebuild', '-version'])
  # First line looks like 'Xcode 5.0.2'; keep the digits only.
  digits = raw.splitlines()[0].split()[-1].replace('.', '')
  return (digits + '0' * (3 - len(digits))).zfill(4)
# Top-level test body: only meaningful on macOS, where the app bundle can be
# built and its layout/plist inspected.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  test.run_gyp('test.gyp', chdir='app-bundle')
  test.build('test.gyp', test.ALL, chdir='app-bundle')
  # Binary
  test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
                             chdir='app-bundle')
  # Info.plist
  info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
                                    chdir='app-bundle')
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'com.google.Test-App-Gyp') # Variable expansion
  test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}');
  if test.format != 'make':
    # TODO: Synthesized plist entries aren't hooked up in the make generator.
    plist = plistlib.readPlist(info_plist)
    ExpectEq(GetStdout(['sw_vers', '-buildVersion']),
             plist['BuildMachineOSBuild'])
    # Prior to Xcode 5.0.0, SDKROOT (and thus DTSDKName) was only defined if
    # set in the Xcode project file. Starting with that version, it is always
    # defined.
    expected = ''
    if XcodeVersion() >= '0500':
      version = GetStdout(['xcodebuild', '-version', '-sdk', '', 'SDKVersion'])
      expected = 'macosx' + version
    ExpectEq(expected, plist['DTSDKName'])
    sdkbuild = GetStdout(
        ['xcodebuild', '-version', '-sdk', '', 'ProductBuildVersion'])
    if not sdkbuild:
      # Above command doesn't work in Xcode 4.2.
      sdkbuild = plist['BuildMachineOSBuild']
    ExpectEq(sdkbuild, plist['DTSDKBuild'])
    xcode, build = GetStdout(['xcodebuild', '-version']).splitlines()
    xcode = xcode.split()[-1].replace('.', '')
    xcode = (xcode + '0' * (3 - len(xcode))).zfill(4)
    build = build.split()[-1]
    ExpectEq(xcode, plist['DTXcode'])
    ExpectEq(build, plist['DTXcodeBuild'])
  # Resources
  strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
  for f in strings_files:
    strings = test.built_file_path(
        os.path.join('Test App Gyp.app/Contents/Resources/English.lproj', f),
        chdir='app-bundle')
    test.must_exist(strings)
    # Xcodes writes UTF-16LE with BOM.
    contents = open(strings, 'rb').read()
    if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
      test.fail_test()
  test.built_file_must_exist(
      'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
      chdir='app-bundle')
  # Packaging
  test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
                             chdir='app-bundle')
  test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
                             chdir='app-bundle')
  # Check that no other files get added to the bundle.
  if set(ls(test.built_file_path('Test App Gyp.app', chdir='app-bundle'))) != \
     set(['Contents/MacOS/Test App Gyp',
          'Contents/Info.plist',
          'Contents/Resources/English.lproj/MainMenu.nib',
          'Contents/PkgInfo',
          ] +
         [os.path.join('Contents/Resources/English.lproj', f)
          for f in strings_files]):
    test.fail_test()
  test.pass_test()
| apache-2.0 |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/inline_formsets/tests.py | 51 | 6157 | from django.forms.models import inlineformset_factory
from django.test import TestCase
from regressiontests.inline_formsets.models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
    """Tests deleting and skipping objects through inline formsets."""
    def test_deletion(self):
        """A form marked DELETE removes the underlying object on save()."""
        PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
        poet = Poet.objects.create(name='test')
        poem = poet.poem_set.create(name='test poem')
        data = {
            'poem_set-TOTAL_FORMS': u'1',
            'poem_set-INITIAL_FORMS': u'1',
            'poem_set-MAX_NUM_FORMS': u'0',
            'poem_set-0-id': str(poem.pk),
            'poem_set-0-poet': str(poet.pk),
            'poem_set-0-name': u'test',
            'poem_set-0-DELETE': u'on',
        }
        formset = PoemFormSet(data, instance=poet)
        # Validate before saving (previously asserted after save(), so an
        # invalid formset would raise inside save() instead of producing a
        # clean test failure, and the assertion was vacuous).
        self.assertTrue(formset.is_valid())
        formset.save()
        self.assertEqual(Poem.objects.count(), 0)
    def test_add_form_deletion_when_invalid(self):
        """
        Make sure that an add form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
        poet = Poet.objects.create(name='test')
        data = {
            'poem_set-TOTAL_FORMS': u'1',
            'poem_set-INITIAL_FORMS': u'0',
            'poem_set-MAX_NUM_FORMS': u'0',
            'poem_set-0-id': u'',
            'poem_set-0-poem': u'1',
            'poem_set-0-name': u'x' * 1000,
        }
        formset = PoemFormSet(data, instance=poet)
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poem.objects.count(), 0)
        # Then make sure that it *does* pass validation and delete the object,
        # even though the data isn't actually valid.
        data['poem_set-0-DELETE'] = 'on'
        formset = PoemFormSet(data, instance=poet)
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poem.objects.count(), 0)
    def test_change_form_deletion_when_invalid(self):
        """
        Make sure that a change form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
        poet = Poet.objects.create(name='test')
        poem = poet.poem_set.create(name='test poem')
        data = {
            'poem_set-TOTAL_FORMS': u'1',
            'poem_set-INITIAL_FORMS': u'1',
            'poem_set-MAX_NUM_FORMS': u'0',
            'poem_set-0-id': unicode(poem.id),
            'poem_set-0-poem': unicode(poem.id),
            'poem_set-0-name': u'x' * 1000,
        }
        formset = PoemFormSet(data, instance=poet)
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poem.objects.count(), 1)
        # Then make sure that it *does* pass validation and delete the object,
        # even though the data isn't actually valid.
        data['poem_set-0-DELETE'] = 'on'
        formset = PoemFormSet(data, instance=poet)
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poem.objects.count(), 0)
    def test_save_new(self):
        """
        Make sure inlineformsets respect commit=False
        regression for #10750
        """
        # exclude some required field from the forms
        ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
        school = School.objects.create(name=u'test')
        mother = Parent.objects.create(name=u'mother')
        father = Parent.objects.create(name=u'father')
        data = {
            'child_set-TOTAL_FORMS': u'1',
            'child_set-INITIAL_FORMS': u'0',
            'child_set-MAX_NUM_FORMS': u'0',
            'child_set-0-name': u'child',
        }
        formset = ChildFormSet(data, instance=school)
        self.assertEqual(formset.is_valid(), True)
        # commit=False returns unsaved instances; fill in the excluded
        # required FKs before saving each one.
        objects = formset.save(commit=False)
        for obj in objects:
            obj.mother = mother
            obj.father = father
            obj.save()
        self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
    """Tests argument validation performed by inlineformset_factory."""
    def test_inline_formset_factory(self):
        """
        These should both work without a problem.
        """
        inlineformset_factory(Parent, Child, fk_name='mother')
        inlineformset_factory(Parent, Child, fk_name='father')
    def test_exception_on_unspecified_foreign_key(self):
        """
        Child has two ForeignKeys to Parent, so if we don't specify which one
        to use for the inline formset, we should get an exception.
        """
        self.assertRaisesRegexp(Exception,
            "<class 'regressiontests.inline_formsets.models.Child'> has more than 1 ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
            inlineformset_factory, Parent, Child
        )
    def test_fk_name_not_foreign_key_field_from_child(self):
        """
        If we specify fk_name, but it isn't a ForeignKey from the child model
        to the parent model, we should get an exception.
        """
        # Fixed: previously used assertRaises with the message string in the
        # callable position; calling a str raises TypeError, which
        # assertRaises(Exception, ...) accepted, so the test passed vacuously.
        self.assertRaisesRegexp(Exception,
            "fk_name 'school' is not a ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
            inlineformset_factory, Parent, Child, fk_name='school'
        )
    def test_non_foreign_key_field(self):
        """
        If the field specified in fk_name is not a ForeignKey, we should get an
        exception.
        """
        self.assertRaisesRegexp(Exception,
            "<class 'regressiontests.inline_formsets.models.Child'> has no field named 'test'",
            inlineformset_factory, Parent, Child, fk_name='test'
        )
    def test_any_iterable_allowed_as_argument_to_exclude(self):
        # Regression test for #9171.
        inlineformset_factory(
            Parent, Child, exclude=['school'], fk_name='mother'
        )
        inlineformset_factory(
            Parent, Child, exclude=('school',), fk_name='mother'
        )
| gpl-3.0 |
atsnyder/ITK | Wrapping/Generators/Python/itkTemplate.py | 7 | 16335 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import types
import inspect
import os
import warnings
import itkConfig
from itkTypes import itkCType
def itkFormatWarning(msg, *a):
    """Format the warnings issued by itk to display only the message.

    This will ignore the filename and the linenumber where the warning was
    triggered. The message is returned to the warnings module.
    """
    # Fixed: the docstring opened with four quotes ('""""Format...'), which
    # made the rendered docstring start with a stray '"' character.
    return str(msg) + '\n'
# Redefine the global warnings format so itk warnings show only the message
# text (no filename/line number); see itkFormatWarning above.
warnings.formatwarning = itkFormatWarning
def registerNoTpl(name, cl):
    """Register a class without template

    It can seem not useful to register classes without template (and it wasn't
    useful until the SmartPointer template was generated), but those classes
    can be used as template argument of classes with template.
    """
    # Normalize first so lookups are insensitive to spaces and '*'.
    key = normalizeName(name)
    itkTemplate.__templates__[key] = cl
def normalizeName(name):
    """Normalize the class name to remove ambiguity.

    Strips all spaces and pointer markers ('*'), neither of which carries
    meaning on the Python side.
    """
    return name.replace(" ", "").replace("*", "")
class itkTemplate(object):
    """This class manages access to available template arguments of a C++ class.
    There are two ways to access types:
    1. With a dict interface. The user can manipulate template parameters
    similarly to C++, with the exception that the available parameters sets are
    chosen at compile time. It is also possible, with the dict interface, to
    explore the available parameters sets.
    2. With object attributes. The user can easily find the available parameters
    sets by pressing tab in interperter like ipython
    """
    __templates__ = {}
    __class_to_template__ = {}
    __named_templates__ = {}
    __doxygen_root__ = itkConfig.doxygen_root
    def __new__(cls, name):
        # Singleton pattern: we only make a single instance of any Template of
        # a given name. If we have already made the instance, just return it
        # as-is.
        if name not in cls.__named_templates__:
            new_instance = object.__new__(cls)
            new_instance.__name__ = name
            new_instance.__template__ = {}
            cls.__named_templates__[name] = new_instance
        return cls.__named_templates__[name]
    def __add__(self, paramSetString, cl):
        """Add a new argument set and the resulting class to the template.
        paramSetString is the C++ string which defines the parameters set.
        cl is the class which corresponds to the couple template-argument set.
        """
        # recreate the full name and normalize it to avoid ambiguity
        normFullName = normalizeName(
            self.__name__ + "<" + paramSetString + ">")
        # the full class should not be already registered. If it is, there is a
        # problem somewhere so warn the user so he can fix the problem
        if normFullName in itkTemplate.__templates__:
            message = (
                "Template %s\n already defined as %s\n is redefined "
                "as %s") % (normFullName, self.__templates__[normFullName], cl)
            warnings.warn(message)
        # register the class
        itkTemplate.__templates__[normFullName] = cl
        # __find_param__ will parse the paramSetString and produce a list of
        # the same parameters transformed in corresponding python classes.
        # we transform this list in tuple to make it usable as key of the dict
        param = tuple(self.__find_param__(paramSetString))
        # once again, warn the user if the tuple of parameter is already
        # defined so he can fix the problem
        if param in self.__template__:
            message = "Warning: template already defined '%s'" % normFullName
            warnings.warn(message)
        # and register the parameter tuple
        self.__template__[param] = cl
        # add in __class_to_template__ dictionary
        itkTemplate.__class_to_template__[cl] = (self, param)
        # now populate the template
        # 2 cases:
        # - the template is a SmartPointer. In that case, the attribute name
        # will be the full real name of the class without the itk prefix and
        # _Pointer suffix
        # - the template is not a SmartPointer. In that case, we keep only the
        # end of the real class name which is a short string discribing the
        # template arguments (for example IUC2)
        if cl.__name__.startswith("itk"):
            if cl.__name__.endswith("_Pointer"):
                # it's a SmartPointer
                attributeName = cl.__name__[len("itk"):-len("_Pointer")]
            else:
                # it's not a SmartPointer
                # we need to now the size of the name to keep only the suffix
                # short name does not contain :: and nested namespace
                # itk::Numerics::Sample -> itkSample
                import re
                shortNameSize = len(re.sub(r':.*:', '', self.__name__))
                attributeName = cl.__name__[shortNameSize:]
        elif cl.__name__.startswith("vcl_complex"):
            # C++ name is likely to be std::complex here, instead of the
            # expected vcl_complex
            attributeName = cl.__name__[len("vcl_complex"):]
        else:
            import re
            shortNameSize = len(re.sub(r'.*::', '', self.__name__))
            attributeName = cl.__name__[shortNameSize:]
        if attributeName.isdigit():
            # the attribute name can't be a number
            # add a single undescore before it to build a valid name
            attributeName = "_" + attributeName
        # add the attribute to this object
        self.__dict__[attributeName] = cl
    def __find_param__(self, paramSetString):
        """Find the parameters of the template.
        paramSetString is the C++ string which defines the parameters set.
        __find_param__ returns a list of itk classes, itkCType, and/or numbers
        which correspond to the parameters described in paramSetString.
        The parameters MUST have been registered before calling this method,
        or __find_param__ will return a string and not the wanted object, and
        will display a warning. Registration order is important.
        This method is not static only to be able to display the template name
        in the warning.
        """
        # split the string in a list of parameters, tracking '<...>' nesting
        # so commas inside nested template arguments don't split.
        paramStrings = []
        inner = 0
        part = paramSetString.split(",")
        for elt in part:
            if inner == 0:
                paramStrings.append(elt)
            else:
                paramStrings[-1] += "," + elt
            inner += elt.count("<") - elt.count(">")
        # convert all string parameters into classes (if possible)
        parameters = []
        for param in paramStrings:
            # the parameter need to be normalized several time below
            # do it once here
            param = param.strip()
            paramNorm = normalizeName(param)
            if paramNorm in itkTemplate.__templates__:
                # the parameter is registered.
                # just get the really class form the dictionary
                param = itkTemplate.__templates__[paramNorm]
            elif itkCType.GetCType(param):
                # the parameter is a c type
                # just get the itkCtype instance
                param = itkCType.GetCType(param)
            elif paramNorm.isdigit():
                # the parameter is a number
                # convert the string to a number !
                param = int(param)
            elif paramNorm == "true":
                param = True
            elif paramNorm == "false":
                param = False
            else:
                # unable to convert the parameter
                # use it without changes, but display a warning message, to
                # incite developer to fix the problem
                message = (
                    "Warning: Unknown parameter '%s' in "
                    "template '%s'" % (param, self.__name__))
                warnings.warn(message)
            parameters.append(param)
        return parameters
    def __getitem__(self, parameters):
        """Return the class which corresponds to the given template parameters.
        parameters can be:
        - a single parameter (Ex: itk.Index[2])
        - a list of elements (Ex: itk.Image[itk.UC, 2])
        """
        isin = isinstance(parameters, types.TupleType)
        if not isin and not isinstance(parameters, types.ListType):
            # parameters is a single element.
            # include it in a list to manage the 2 cases in the same way
            parameters = [parameters]
        cleanParameters = []
        for param in parameters:
            # In the case of itk class instance, get the class
            name = param.__class__.__name__
            isclass = inspect.isclass(param)
            if not isclass and name[:3] == 'itk' and name != "itkCType":
                param = param.__class__
            # append the parameter to the list. If it's not a supported type,
            # it is not in the dictionary and we will raise an exception below
            cleanParameters.append(param)
        try:
            return(self.__template__[tuple(cleanParameters)])
        # Narrowed from a bare 'except:' (which also swallowed SystemExit and
        # KeyboardInterrupt); lookup failures (KeyError, or TypeError for
        # unhashable parameters) are still translated into a KeyError.
        except Exception:
            raise KeyError(
                'itkTemplate : No template %s for the %s class' %
                (str(parameters), self.__name__))
    def __repr__(self):
        return '<itkTemplate %s>' % self.__name__
    def __getattribute__(self, attr):
        """Support for reading doxygen man pages to produce __doc__ strings."""
        root = itkTemplate.__doxygen_root__
        indoc = (attr == '__doc__')
        if indoc and root != "" and self.__name__.startswith('itk'):
            try:
                import commands
                doxyname = self.__name__.replace("::", "_")
                man_path = "%s/man3/%s.3" % (root, doxyname)
                bzman_path = "%s/man3/%s.3.bz2" % (root, doxyname)
                if os.path.exists(bzman_path):
                    return (
                        commands.getoutput(
                            "bunzip2 --stdout '" + bzman_path +
                            "' | groff -mandoc -Tascii -c"))
                elif os.path.exists(man_path):
                    # Use groff here instead of man because man dies when it is
                    # passed paths with spaces (!) groff does not.
                    return (
                        commands.getoutput(
                            "groff -mandoc -Tascii -c '" +
                            man_path + "'"))
                else:
                    return (
                        "Cannot find man page for %s: %s"
                        % (self.__name__, man_path + "[.bz2]"))
            except Exception as e:
                return (
                    "Cannot display man page for %s due to exception: %s."
                    % (self.__name__, e))
        else:
            return object.__getattribute__(self, attr)
    def New(self, *args, **kargs):
        """TODO: some doc! Don't call it __call__ as it break the __doc__
        attribute feature in ipython"""
        import itk
        keys = self.keys()
        if len(args) != 0:
            # try to find a type suitable for the input provided
            input_types = [output(f).__class__ for f in args]
            keys = [k for k in self.keys() if k[0] == input_types[0]]
        cur = itk.auto_pipeline.current
        if cur is not None and len(cur) != 0:
            # try to find a type suitable for the input provided
            input_type = output(cur).__class__
            keys = [k for k in self.keys() if k[0] == input_type]
        if len(keys) == 0:
            raise RuntimeError("No suitable template parameter can be found.")
        return self[keys[0]].New(*args, **kargs)
    def keys(self):
        return self.__template__.keys()
    # everything after this comment is for dict interface
    # and is a copy/paste from DictMixin
    # only methods to edit dictionary are not there
    def __iter__(self):
        for k in self.keys():
            yield k
    def __contains__(self, key):
        # Fixed: this previously read 'return key in self', which invokes
        # __contains__ recursively and always hit the recursion limit.
        # Membership is defined by the registered parameter tuples.
        return key in self.keys()
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __len__(self):
        return len(self.keys())
    def GetTypes(self):
        """Helper method which prints out the available template parameters."""
        print("<itkTemplate %s>" % self.__name__)
        print("Options:")
        for tp in self.GetTypesAsList():
            print("  " + str(tp).replace("(", "[").replace(")", "]"))
    def GetTypesAsList(self):
        """Helper method which returns the available template parameters."""
        # Make a list of allowed types, and sort them
        ctypes = []
        classes = []
        for key_tuple in self.__template__:
            key = str(key_tuple)
            if "itkCType" in key:
                ctypes.append(key)
            elif "class" in key:
                classes.append(key)
        # Sort the lists
        ctypes = sorted(ctypes)
        classes = sorted(classes)
        return ctypes + classes
# create a new New function which accepts parameters
def New(self, *args, **kargs):
    """Wire inputs into the filter, attach progress reporting, and connect
    the filter to the active auto_pipeline.

    Returns *self* so the call can be chained.

    Fixes: membership was tested against ``kargs.keys()`` (redundant list /
    view construction), and the observer hook used a bare ``except:`` which
    would also swallow KeyboardInterrupt/SystemExit; narrowed to
    ``except Exception``.
    """
    import itk
    itk.set_inputs(self, args, kargs)
    # now, try to add observer to display progress
    if "auto_progress" in kargs:
        if kargs["auto_progress"] in [True, 1]:
            callback = itk.terminal_progress_callback
        elif kargs["auto_progress"] == 2:
            callback = itk.simple_progress_callback
        else:
            callback = None
    elif itkConfig.ProgressCallback:
        callback = itkConfig.ProgressCallback
    else:
        callback = None
    if callback:
        try:
            name = self.__class__.__name__

            def progress():
                # self and callback are kept referenced with a closure
                callback(name, self.GetProgress())

            self.AddObserver(itk.ProgressEvent(), progress)
        except Exception:
            # as this feature is designed for prototyping, it's not really a
            # problem if an object doesn't have a progress reporter, so adding
            # the reporter can silently fail
            pass
    if itkConfig.NotInPlace and "SetInPlace" in dir(self):
        self.SetInPlace(False)
    if itk.auto_pipeline.current is not None:
        itk.auto_pipeline.current.connect(self)
    return self
def output(input):
    """Return the output of a filter-like object; pass through anything
    that has no ``GetOutput`` method (e.g. an image)."""
    try:
        return input.GetOutput()
    except AttributeError:
        return input
def image(input):
    """Deprecated alias: warn on stderr, then forward to output()."""
    import sys
    warning = ("WrapITK warning: itk.image() is deprecated. "
               "Use itk.output() instead.")
    print(warning, file=sys.stderr)
    return output(input)
| apache-2.0 |
dhenrygithub/QGIS | python/plugins/processing/algs/gdal/OgrAlgorithm.py | 3 | 1400 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OgrAlgorithm.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools import vector
class OgrAlgorithm(GdalAlgorithm):
    """Base class for OGR-backed GDAL algorithms.

    Thin delegation layer over the helpers in ``processing.tools.vector``.
    """

    def ogrConnectionString(self, uri):
        """Return the OGR connection string for *uri*."""
        connection = vector.ogrConnectionString(uri)
        return connection

    def ogrLayerName(self, uri):
        """Return the OGR layer name for *uri*."""
        layer = vector.ogrLayerName(uri)
        return layer
| gpl-2.0 |
t794104/ansible | test/units/modules/storage/netapp/test_na_ontap_net_port.py | 38 | 6401 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_net_port \
import NetAppOntapNetPort as port_module # module under test
# Skip every test in this module when the NetApp ZAPI library is not
# importable; the module under test cannot function without it.
if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json; caught by the test case."""
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json; caught by the test case."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over exit_json; package return data into an exception"""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over fail_json; package return data into an exception"""
    kwargs.update(failed=True)
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    """Mock server connection to an ONTAP host.

    Records each request in ``xml_in`` and replays canned data from
    ``xml_out`` depending on the configured *kind*.
    """

    def __init__(self, kind=None, data=None):
        """Remember what kind of reply to fabricate and from which data."""
        self.type = kind
        self.data = data
        self.xml_in = None
        self.xml_out = None

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        """Record the request and return the canned reply."""
        self.xml_in = xml
        reply = xml
        if self.type == 'port':
            reply = self.build_port_info(self.data)
        self.xml_out = reply
        return reply

    @staticmethod
    def build_port_info(port_details):
        """Build an NaElement mimicking a net-port-get-iter reply."""
        port_info = {
            # 'port': port_details['port'],
            'mtu': port_details['mtu'],
            'is-administrative-auto-negotiate': 'true',
            'ipspace': 'default',
            'administrative-flowcontrol': port_details['flowcontrol_admin'],
            'node': port_details['node'],
        }
        reply = netapp_utils.zapi.NaElement('xml')
        reply.translate_struct({
            'num-records': 1,
            'attributes-list': {'net-port-info': port_info},
        })
        return reply
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # Patch exit_json/fail_json on AnsibleModule so module exits become
        # catchable AnsibleExitJson/AnsibleFailJson exceptions.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # Canned port attributes shared by every test in this class.
        self.mock_port = {
            'node': 'test',
            'ports': 'a1',
            'flowcontrol_admin': 'something',
            'mtu': '1000'
        }

    def mock_args(self):
        # Full module argument dict built from the canned port data.
        return {
            'node': self.mock_port['node'],
            'flowcontrol_admin': self.mock_port['flowcontrol_admin'],
            'ports': [self.mock_port['ports']],
            'mtu': self.mock_port['mtu'],
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!'
        }

    def get_port_mock_object(self, kind=None, data=None):
        """
        Helper method to return an na_ontap_net_port object
        :param kind: passes this param to MockONTAPConnection()
        :return: na_ontap_net_port object
        """
        obj = port_module()
        # Silence the EMS autosupport call; it would hit the (mock) server.
        obj.autosupport_log = Mock(return_value=None)
        if data is None:
            data = self.mock_port
        obj.server = MockONTAPConnection(kind=kind, data=data)
        return obj

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            port_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    def test_get_nonexistent_port(self):
        ''' Test if get_net_port returns None for non-existent port '''
        # Default MockONTAPConnection (kind=None) echoes the request,
        # so no port record is found.
        set_module_args(self.mock_args())
        result = self.get_port_mock_object().get_net_port('test')
        assert result is None

    def test_get_existing_port(self):
        ''' Test if get_net_port returns details for existing port '''
        set_module_args(self.mock_args())
        result = self.get_port_mock_object('port').get_net_port('test')
        assert result['mtu'] == self.mock_port['mtu']
        assert result['flowcontrol_admin'] == self.mock_port['flowcontrol_admin']

    def test_successful_modify(self):
        ''' Test modify_net_port '''
        # Request a different MTU than the canned record to force a change.
        data = self.mock_args()
        data['mtu'] = '2000'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_port_mock_object('port').apply()
        assert exc.value.args[0]['changed']

    def test_successful_modify_multiple_ports(self):
        ''' Test modify_net_port '''
        data = self.mock_args()
        data['ports'] = ['a1', 'a2']
        data['mtu'] = '2000'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_port_mock_object('port').apply()
        assert exc.value.args[0]['changed']

    @patch('ansible.modules.storage.netapp.na_ontap_net_port.NetAppOntapNetPort.get_net_port')
    def test_get_called(self, get_port):
        ''' Test get_net_port '''
        # With two ports configured, get_net_port must be queried once per port.
        data = self.mock_args()
        data['ports'] = ['a1', 'a2']
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_port_mock_object('port').apply()
        assert get_port.call_count == 2
| gpl-3.0 |
munyirik/python | cpython/Doc/includes/email-dir.py | 43 | 3984 | #!/usr/bin/env python3
"""Send the contents of a directory as a MIME message."""
import os
import sys
import smtplib
# For guessing MIME type based on file name extension
import mimetypes
from argparse import ArgumentParser
from email import encoders
from email.message import Message
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
COMMASPACE = ', '


def _parse_args():
    """Parse the command line; -s and at least one -r are required."""
    parser = ArgumentParser(description="""\
Send the contents of a directory as a MIME message.
Unless the -o option is given, the email is sent by forwarding to your local
SMTP server, which then does the normal delivery process. Your local machine
must be running an SMTP server.
""")
    parser.add_argument('-d', '--directory',
                        help="""Mail the contents of the specified directory,
                        otherwise use the current directory. Only the regular
                        files in the directory are sent, and we don't recurse to
                        subdirectories.""")
    parser.add_argument('-o', '--output',
                        metavar='FILE',
                        help="""Print the composed message to FILE instead of
                        sending the message to the SMTP server.""")
    parser.add_argument('-s', '--sender', required=True,
                        help='The value of the From: header (required)')
    parser.add_argument('-r', '--recipient', required=True,
                        action='append', metavar='RECIPIENT',
                        default=[], dest='recipients',
                        help='A To: header value (at least one required)')
    return parser.parse_args()


def _make_attachment(path, filename):
    """Build a MIME part for one regular file, guessing its content type."""
    # Guess the content type based on the file's extension.  Encoding
    # will be ignored, although we should check for simple things like
    # gzip'd or compressed files.
    ctype, encoding = mimetypes.guess_type(path)
    if ctype is None or encoding is not None:
        # No guess could be made, or the file is encoded (compressed), so
        # use a generic bag-of-bits type.
        ctype = 'application/octet-stream'
    maintype, subtype = ctype.split('/', 1)
    if maintype == 'text':
        with open(path) as fp:
            # Note: we should handle calculating the charset
            msg = MIMEText(fp.read(), _subtype=subtype)
    elif maintype == 'image':
        with open(path, 'rb') as fp:
            msg = MIMEImage(fp.read(), _subtype=subtype)
    elif maintype == 'audio':
        with open(path, 'rb') as fp:
            msg = MIMEAudio(fp.read(), _subtype=subtype)
    else:
        with open(path, 'rb') as fp:
            msg = MIMEBase(maintype, subtype)
            msg.set_payload(fp.read())
        # Encode the payload using Base64
        encoders.encode_base64(msg)
    # Set the filename parameter
    msg.add_header('Content-Disposition', 'attachment', filename=filename)
    return msg


def main():
    """Compose the multipart message and either save it or send it.

    Refactored from one monolithic function into argument parsing
    (_parse_args), attachment construction (_make_attachment), and this
    orchestration step; behavior and CLI are unchanged.
    """
    args = _parse_args()
    directory = args.directory
    if not directory:
        directory = '.'
    # Create the enclosing (outer) message
    outer = MIMEMultipart()
    outer['Subject'] = 'Contents of directory %s' % os.path.abspath(directory)
    outer['To'] = COMMASPACE.join(args.recipients)
    outer['From'] = args.sender
    outer.preamble = 'You will not see this in a MIME-aware mail reader.\n'
    for filename in os.listdir(directory):
        path = os.path.join(directory, filename)
        if not os.path.isfile(path):
            continue
        outer.attach(_make_attachment(path, filename))
    # Now send or store the message
    composed = outer.as_string()
    if args.output:
        with open(args.output, 'w') as fp:
            fp.write(composed)
    else:
        with smtplib.SMTP('localhost') as s:
            s.sendmail(args.sender, args.recipients, composed)


if __name__ == '__main__':
    main()
| bsd-3-clause |
nicobustillos/odoo | addons/hr_expense/report/hr_expense_report.py | 287 | 5652 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
    """Read-only reporting model backed by a PostgreSQL view that
    aggregates expense lines per expense sheet, product and analytic
    account."""
    _name = "hr.expense.report"
    _description = "Expenses Statistics"
    # _auto = False: no table is created by the ORM; init() below builds
    # the backing SQL view instead.
    _auto = False
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date ', readonly=True),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
        'product_qty': fields.float('Product Quantity', readonly=True),
        'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
        'date_confirm': fields.date('Confirmation Date', readonly=True),
        'date_valid': fields.date('Validation Date', readonly=True),
        'department_id': fields.many2one('hr.department', 'Department', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'user_id': fields.many2one('res.users', 'Validation User', readonly=True),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
        'price_total': fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
        # Delays are measured in days (see epoch/86400 arithmetic below).
        'delay_valid': fields.float('Delay to Valid', readonly=True),
        'delay_confirm': fields.float('Delay to Confirm', readonly=True),
        'analytic_account': fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
        'price_average': fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
        'nbr': fields.integer('# of Lines', readonly=True),  # TDE FIXME master: rename into nbr_lines
        'no_of_products': fields.integer('# of Products', readonly=True),
        'no_of_account': fields.integer('# of Accounts', readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('confirm', 'Waiting confirmation'),
            ('accepted', 'Accepted'),
            ('done', 'Done'),
            ('cancelled', 'Cancelled')],
            'Status', readonly=True),
    }
    _order = 'date desc'

    def init(self, cr):
        # (Re)create the backing view each time the module is loaded.
        tools.drop_view_if_exists(cr, 'hr_expense_report')
        cr.execute("""
            create or replace view hr_expense_report as (
                select
                    min(l.id) as id,
                    s.date as date,
                    s.create_date as create_date,
                    s.employee_id,
                    s.journal_id,
                    s.currency_id,
                    s.date_confirm as date_confirm,
                    s.date_valid as date_valid,
                    s.user_valid as user_id,
                    s.department_id,
                    avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
                    avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
                    l.product_id as product_id,
                    l.analytic_account as analytic_account,
                    sum(l.unit_quantity * u.factor) as product_qty,
                    s.company_id as company_id,
                    sum(l.unit_quantity*l.unit_amount) as price_total,
                    (sum(l.unit_quantity*l.unit_amount)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
                    count(*) as nbr,
                    (select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
                    (select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
                    s.state
                from hr_expense_line l
                left join hr_expense_expense s on (s.id=l.expense_id)
                left join product_uom u on (u.id=l.uom_id)
                group by
                    s.date,
                    s.create_date,
                    s.date_confirm,
                    s.date_valid,
                    l.product_id,
                    l.analytic_account,
                    s.currency_id,
                    s.user_valid,
                    s.department_id,
                    l.uom_id,
                    l.id,
                    s.state,
                    s.journal_id,
                    s.company_id,
                    s.employee_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
naresh21/synergetics-edx-platform | openedx/core/djangoapps/course_groups/migrations/0001_initial.py | 20 | 4087 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
class Migration(migrations.Migration):
    # NOTE: auto-generated initial Django migration for the course_groups
    # (cohorts) app.  Operations must stay in sync with the migration
    # history; do not edit the schema definitions by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='CohortMembership',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', CourseKeyField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='CourseCohort',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('assignment_type', models.CharField(default=b'manual', max_length=20, choices=[(b'random', b'Random'), (b'manual', b'Manual')])),
            ],
        ),
        migrations.CreateModel(
            name='CourseCohortsSettings',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_cohorted', models.BooleanField(default=False)),
                ('course_id', CourseKeyField(help_text=b'Which course are these settings associated with?', unique=True, max_length=255, db_index=True)),
                ('_cohorted_discussions', models.TextField(null=True, db_column=b'cohorted_discussions', blank=True)),
                ('always_cohort_inline_discussions', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='CourseUserGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text=b'What is the name of this group? Must be unique within a course.', max_length=255)),
                ('course_id', CourseKeyField(help_text=b'Which course is this group associated with?', max_length=255, db_index=True)),
                ('group_type', models.CharField(max_length=20, choices=[(b'cohort', b'Cohort')])),
                ('users', models.ManyToManyField(help_text=b'Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)),
            ],
        ),
        migrations.CreateModel(
            name='CourseUserGroupPartitionGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('partition_id', models.IntegerField(help_text=b'contains the id of a cohorted partition in this course')),
                ('group_id', models.IntegerField(help_text=b'contains the id of a specific group within the cohorted partition')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup')),
            ],
        ),
        migrations.AddField(
            model_name='coursecohort',
            name='course_user_group',
            field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup'),
        ),
        migrations.AddField(
            model_name='cohortmembership',
            name='course_user_group',
            field=models.ForeignKey(to='course_groups.CourseUserGroup'),
        ),
        migrations.AddField(
            model_name='cohortmembership',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterUniqueTogether(
            name='courseusergroup',
            unique_together=set([('name', 'course_id')]),
        ),
        migrations.AlterUniqueTogether(
            name='cohortmembership',
            unique_together=set([('user', 'course_id')]),
        ),
    ]
| agpl-3.0 |
tfmorris/freebase-python-samples | simple/mqlwrite.py | 1 | 1270 | from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
import json
from urllib import urlencode
import httplib2
import os
# Read the registered application's OAuth2 credentials from dotfiles in $HOME.
CLIENT_ID = open(os.environ['HOME'] + "/.freebase_client_id").read()
CLIENT_SECRET = open(os.environ['HOME'] + "/.freebase_client_secret").read()
def authenticated_http():
    """Return an httplib2.Http client wrapped with valid OAuth2 credentials,
    running the browser flow when the cached token is missing or stale."""
    store = Storage('freebase.dat')
    creds = store.get()
    needs_auth = creds is None or creds.invalid == True
    if needs_auth:
        flow = OAuth2WebServerFlow(
            client_id=CLIENT_ID,
            client_secret=CLIENT_SECRET,
            scope='https://www.googleapis.com/auth/freebase',
            user_agent='freebase-cmdline-sample/1.0',
            xoauth_displayname='Freebase Client Example App')
        creds = run(flow, store)
    return creds.authorize(httplib2.Http())
# Acquire an OAuth2-authorized HTTP client once for the whole script.
http = authenticated_http()

# MQL write: unconditionally create a new /location/location named "Nowhere".
query = {"create":"unconditional","id":None,"name":"Nowhere","type":"/location/location"}
data = dict(query=json.dumps(query))
# The mqlwrite call is semantically a GET with the query in the URL;
# X-HTTP-Method-Override signals that despite the form content type.
headers = {
    'X-HTTP-Method-Override': 'GET',
    'Content-Type': 'application/x-www-form-urlencoded'
}
url = 'https://www.googleapis.com/freebase/v1sandbox/mqlwrite' + '?' + urlencode(data)
resp, content = http.request(url, "GET", headers=headers)
print content
| bsd-3-clause |
mobify/python-driver | cassandra/query.py | 6 | 35337 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module holds classes for working with prepared statements and
specifying consistency levels and retry policies for individual
queries.
"""
from collections import namedtuple
from datetime import datetime, timedelta
import re
import struct
import time
import six
from six.moves import range, zip
from cassandra import ConsistencyLevel, OperationTimedOut
from cassandra.util import unix_time_from_uuid1
from cassandra.encoder import Encoder
import cassandra.encoder
from cassandra.protocol import _UNSET_VALUE
from cassandra.util import OrderedDict
import logging
log = logging.getLogger(__name__)
UNSET_VALUE = _UNSET_VALUE
"""
Specifies an unset value when binding a prepared statement.
Unset values are ignored, allowing prepared statements to be used without specify
See https://issues.apache.org/jira/browse/CASSANDRA-7304 for further details on semantics.
.. versionadded:: 2.6.0
Only valid when using native protocol v4+
"""
NON_ALPHA_REGEX = re.compile('[^a-zA-Z0-9]')
START_BADCHAR_REGEX = re.compile('^[^a-zA-Z0-9]*')
END_BADCHAR_REGEX = re.compile('[^a-zA-Z0-9_]*$')

# Memoizes cleaned names; the same column names repeat for every result set.
_clean_name_cache = {}


def _clean_column_name(name):
    """Sanitize *name* into a usable namedtuple field name (memoized)."""
    cached = _clean_name_cache.get(name)
    if cached is not None:
        return cached
    # Strip invalid trailing then leading runs, then replace remaining
    # non-alphanumerics with underscores.
    cleaned = END_BADCHAR_REGEX.sub("", name)
    cleaned = START_BADCHAR_REGEX.sub("", cleaned)
    cleaned = NON_ALPHA_REGEX.sub("_", cleaned)
    _clean_name_cache[name] = cleaned
    return cleaned
def tuple_factory(colnames, rows):
    """
    Returns each row as a tuple

    Example::

        >>> from cassandra.query import tuple_factory
        >>> session = cluster.connect('mykeyspace')
        >>> session.row_factory = tuple_factory
        >>> rows = session.execute("SELECT name, age FROM users LIMIT 1")
        >>> print rows[0]
        ('Bob', 42)

    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    # Rows already arrive as tuples from the protocol layer; nothing to do.
    return rows
def named_tuple_factory(colnames, rows):
    """
    Returns each row as a `namedtuple <https://docs.python.org/2/library/collections.html#collections.namedtuple>`_.
    This is the default row factory.

    Example::

        >>> from cassandra.query import named_tuple_factory
        >>> session = cluster.connect('mykeyspace')
        >>> session.row_factory = named_tuple_factory
        >>> rows = session.execute("SELECT name, age FROM users LIMIT 1")
        >>> user = rows[0]

        >>> # you can access field by their name:
        >>> print "name: %s, age: %d" % (user.name, user.age)
        name: Bob, age: 42

        >>> # or you can access fields by their position (like a tuple)
        >>> name, age = user
        >>> print "name: %s, age: %d" % (name, age)
        name: Bob, age: 42

    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    # Materialize the cleaned names: on Python 3 ``map`` returns a one-shot
    # iterator, so without ``list`` the fallback path below (and the warning
    # message formatting) would see an already-exhausted iterable and build
    # a Row type with no fields.
    clean_column_names = list(map(_clean_column_name, colnames))
    try:
        Row = namedtuple('Row', clean_column_names)
    except Exception:
        log.warning("Failed creating named tuple for results with column names %s (cleaned: %s) "
                    "(see Python 'namedtuple' documentation for details on name rules). "
                    "Results will be returned with positional names. "
                    "Avoid this by choosing different names, using SELECT \"<col name>\" AS aliases, "
                    "or specifying a different row_factory on your Session" %
                    (colnames, clean_column_names))
        # rename=True replaces invalid/duplicate names with positional _N names.
        Row = namedtuple('Row', clean_column_names, rename=True)

    return [Row(*row) for row in rows]
def dict_factory(colnames, rows):
    """
    Returns each row as a dict.

    Example::

        >>> from cassandra.query import dict_factory
        >>> session = cluster.connect('mykeyspace')
        >>> session.row_factory = dict_factory
        >>> rows = session.execute("SELECT name, age FROM users LIMIT 1")
        >>> print rows[0]
        {u'age': 42, u'name': u'Bob'}

    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    return [{name: value for name, value in zip(colnames, row)} for row in rows]
def ordered_dict_factory(colnames, rows):
    """
    Like :meth:`~cassandra.query.dict_factory`, but returns each row as an OrderedDict,
    so the order of the columns is preserved.

    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    pairs_per_row = (zip(colnames, row) for row in rows)
    return [OrderedDict(pairs) for pairs in pairs_per_row]
# Sentinel meaning "no per-statement fetch size was configured".
FETCH_SIZE_UNSET = object()


class Statement(object):
    """
    An abstract class representing a single query. There are three subclasses:
    :class:`.SimpleStatement`, :class:`.BoundStatement`, and :class:`.BatchStatement`.
    These can be passed to :meth:`.Session.execute()`.
    """

    retry_policy = None
    """
    An instance of a :class:`cassandra.policies.RetryPolicy` or one of its
    subclasses. This controls when a query will be retried and how it
    will be retried.
    """

    trace = None
    """
    If :meth:`.Session.execute()` is run with `trace` set to :const:`True`,
    this will be set to a :class:`.QueryTrace` instance.
    """

    trace_id = None
    """
    If :meth:`.Session.execute()` is run with `trace` set to :const:`True`,
    this will be set to the tracing ID from the server.
    """

    consistency_level = None
    """
    The :class:`.ConsistencyLevel` to be used for this operation. Defaults
    to :const:`None`, which means that the default consistency level for
    the Session this is executed in will be used.
    """

    fetch_size = FETCH_SIZE_UNSET
    """
    How many rows will be fetched at a time. This overrides the default
    of :attr:`.Session.default_fetch_size`

    This only takes effect when protocol version 2 or higher is used.
    See :attr:`.Cluster.protocol_version` for details.

    .. versionadded:: 2.0.0
    """

    keyspace = None
    """
    The string name of the keyspace this query acts on. This is used when
    :class:`~.TokenAwarePolicy` is configured for
    :attr:`.Cluster.load_balancing_policy`

    It is set implicitly on :class:`.BoundStatement`, and :class:`.BatchStatement`,
    but must be set explicitly on :class:`.SimpleStatement`.

    .. versionadded:: 2.1.3
    """

    custom_payload = None
    """
    :ref:`custom_payload` to be passed to the server.

    These are only allowed when using protocol version 4 or higher.

    .. versionadded:: 2.6.0
    """

    _serial_consistency_level = None
    _routing_key = None

    def __init__(self, retry_policy=None, consistency_level=None, routing_key=None,
                 serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None,
                 custom_payload=None):
        self.retry_policy = retry_policy
        if consistency_level is not None:
            self.consistency_level = consistency_level
        self._routing_key = routing_key
        if serial_consistency_level is not None:
            self.serial_consistency_level = serial_consistency_level
        if fetch_size is not FETCH_SIZE_UNSET:
            self.fetch_size = fetch_size
        if keyspace is not None:
            self.keyspace = keyspace
        if custom_payload is not None:
            self.custom_payload = custom_payload

    def _get_routing_key(self):
        return self._routing_key

    def _set_routing_key(self, key):
        if isinstance(key, (list, tuple)):
            # Composite routing key: each component is encoded as a two-byte
            # big-endian length, the raw component bytes, then a zero byte.
            # Bug fix: the previous format string "HsB" used native byte
            # order/alignment and, because 's' without a count packs exactly
            # one byte, silently truncated every component to a single byte,
            # producing corrupt routing keys for multi-byte components.
            self._routing_key = b"".join(
                struct.pack(">H%dsB" % len(component), len(component), component, 0)
                for component in key)
        else:
            self._routing_key = key

    def _del_routing_key(self):
        self._routing_key = None

    routing_key = property(
        _get_routing_key,
        _set_routing_key,
        _del_routing_key,
        """
        The :attr:`~.TableMetadata.partition_key` portion of the primary key,
        which can be used to determine which nodes are replicas for the query.

        If the partition key is a composite, a list or tuple must be passed in.
        Each key component should be in its packed (binary) format, so all
        components should be strings.
        """)

    def _get_serial_consistency_level(self):
        return self._serial_consistency_level

    def _set_serial_consistency_level(self, serial_consistency_level):
        # Only the two SERIAL variants (or None) are meaningful for the
        # paxos phase of conditional updates.
        acceptable = (None, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL)
        if serial_consistency_level not in acceptable:
            raise ValueError(
                "serial_consistency_level must be either ConsistencyLevel.SERIAL "
                "or ConsistencyLevel.LOCAL_SERIAL")
        self._serial_consistency_level = serial_consistency_level

    def _del_serial_consistency_level(self):
        self._serial_consistency_level = None

    serial_consistency_level = property(
        _get_serial_consistency_level,
        _set_serial_consistency_level,
        _del_serial_consistency_level,
        """
        The serial consistency level is only used by conditional updates
        (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` condition). For
        those, the ``serial_consistency_level`` defines the consistency level of
        the serial phase (or "paxos" phase) while the normal
        :attr:`~.consistency_level` defines the consistency for the "learn" phase,
        i.e. what type of reads will be guaranteed to see the update right away.

        For example, if a conditional write has a :attr:`~.consistency_level` of
        :attr:`~.ConsistencyLevel.QUORUM` (and is successful), then a
        :attr:`~.ConsistencyLevel.QUORUM` read is guaranteed to see that write.
        But if the regular :attr:`~.consistency_level` of that write is
        :attr:`~.ConsistencyLevel.ANY`, then only a read with a
        :attr:`~.consistency_level` of :attr:`~.ConsistencyLevel.SERIAL` is
        guaranteed to see it (even a read with consistency
        :attr:`~.ConsistencyLevel.ALL` is not guaranteed to be enough).

        The serial consistency can only be one of :attr:`~.ConsistencyLevel.SERIAL`
        or :attr:`~.ConsistencyLevel.LOCAL_SERIAL`. While ``SERIAL`` guarantees full
        linearizability (with other ``SERIAL`` updates), ``LOCAL_SERIAL`` only
        guarantees it in the local data center.

        The serial consistency level is ignored for any query that is not a
        conditional update. Serial reads should use the regular
        :attr:`consistency_level`.

        Serial consistency levels may only be used against Cassandra 2.0+
        and the :attr:`~.Cluster.protocol_version` must be set to 2 or higher.

        .. versionadded:: 2.0.0
        """)
class SimpleStatement(Statement):
    """
    A simple, un-prepared query.
    """

    def __init__(self, query_string, *args, **kwargs):
        """
        `query_string` should be a literal CQL statement with the exception
        of parameter placeholders that will be filled through the
        `parameters` argument of :meth:`.Session.execute()`.

        All arguments to :class:`Statement` apply to this class as well
        """
        super(SimpleStatement, self).__init__(*args, **kwargs)
        self._query_string = query_string

    @property
    def query_string(self):
        """The literal CQL statement wrapped by this object (read-only)."""
        return self._query_string

    def __str__(self):
        level_name = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set')
        return (u'<SimpleStatement query="%s", consistency=%s>' %
                (self.query_string, level_name))
    __repr__ = __str__
class PreparedStatement(object):
    """
    A statement that has been prepared against at least one Cassandra node.
    Instances of this class should not be created directly, but through
    :meth:`.Session.prepare()`.
    A :class:`.PreparedStatement` should be prepared only once. Re-preparing a statement
    may affect performance (as the operation requires a network roundtrip).
    """
    # Column specs for the bind parameters, in bind order.
    column_metadata = None
    # Server-assigned identifier used when executing this statement.
    query_id = None
    # The original CQL string that was prepared.
    query_string = None
    keyspace = None  # change to prepared_keyspace in major release
    # Indexes (into the bind parameters) that form the partition key, or
    # None when they could not be determined.
    routing_key_indexes = None
    # Lazily-built set form of routing_key_indexes (see is_routing_key_index).
    _routing_key_index_set = None
    consistency_level = None
    serial_consistency_level = None
    # Native protocol version the statement was prepared with.
    protocol_version = None
    fetch_size = FETCH_SIZE_UNSET
    custom_payload = None
    def __init__(self, column_metadata, query_id, routing_key_indexes, query,
                 keyspace, protocol_version):
        self.column_metadata = column_metadata
        self.query_id = query_id
        self.routing_key_indexes = routing_key_indexes
        self.query_string = query
        self.keyspace = keyspace
        self.protocol_version = protocol_version
    @classmethod
    def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, query, prepared_keyspace, protocol_version):
        """Build a PreparedStatement from a protocol PREPARED response.

        The routing-key indexes come either from the server-supplied
        ``pk_indexes`` (when available) or are derived from the cluster's
        schema metadata; if neither works, they are left as None.
        """
        if not column_metadata:
            # No bind parameters at all, so no routing key can be derived.
            return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version)
        if pk_indexes:
            routing_key_indexes = pk_indexes
        else:
            routing_key_indexes = None
            first_col = column_metadata[0]
            ks_meta = cluster_metadata.keyspaces.get(first_col.keyspace_name)
            if ks_meta:
                table_meta = ks_meta.tables.get(first_col.table_name)
                if table_meta:
                    partition_key_columns = table_meta.partition_key
                    # make a map of {column_name: index} for each column in the statement
                    statement_indexes = dict((c.name, i) for i, c in enumerate(column_metadata))
                    # a list of which indexes in the statement correspond to partition key items
                    try:
                        routing_key_indexes = [statement_indexes[c.name]
                                               for c in partition_key_columns]
                    except KeyError:  # we're missing a partition key component in the prepared
                        pass  # statement; just leave routing_key_indexes as None
        return PreparedStatement(column_metadata, query_id, routing_key_indexes,
                                 query, prepared_keyspace, protocol_version)
    def bind(self, values):
        """
        Creates and returns a :class:`BoundStatement` instance using `values`.
        See :meth:`BoundStatement.bind` for rules on input ``values``.
        """
        return BoundStatement(self).bind(values)
    def is_routing_key_index(self, i):
        # True when bind-parameter position `i` is part of the routing key.
        # The set is built lazily on first use and cached.
        if self._routing_key_index_set is None:
            self._routing_key_index_set = set(self.routing_key_indexes) if self.routing_key_indexes else set()
        return i in self._routing_key_index_set
    def __str__(self):
        consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set')
        return (u'<PreparedStatement query="%s", consistency=%s>' %
                (self.query_string, consistency))
    __repr__ = __str__
class BoundStatement(Statement):
    """
    A prepared statement that has been bound to a particular set of values.
    These may be created directly or through :meth:`.PreparedStatement.bind()`.
    """
    prepared_statement = None
    """
    The :class:`PreparedStatement` instance that this was created from.
    """
    values = None
    """
    The sequence of values that were bound to the prepared statement.
    """
    def __init__(self, prepared_statement, *args, **kwargs):
        """
        `prepared_statement` should be an instance of :class:`PreparedStatement`.
        All arguments to :class:`Statement` apply to this class as well
        """
        self.prepared_statement = prepared_statement
        # Inherit execution defaults from the prepared statement; explicit
        # kwargs handled by Statement.__init__ below can still override them.
        self.consistency_level = prepared_statement.consistency_level
        self.serial_consistency_level = prepared_statement.serial_consistency_level
        self.fetch_size = prepared_statement.fetch_size
        self.custom_payload = prepared_statement.custom_payload
        self.values = []
        meta = prepared_statement.column_metadata
        if meta:
            # NOTE(review): assumes every bind column lives in the same
            # keyspace as the first one — confirm against schema handling.
            self.keyspace = meta[0].keyspace_name
        Statement.__init__(self, *args, **kwargs)
    def bind(self, values):
        """
        Binds a sequence of values for the prepared statement parameters
        and returns this instance. Note that `values` *must* be:
        * a sequence, even if you are only binding one value, or
        * a dict that relates 1-to-1 between dict keys and columns
        .. versionchanged:: 2.6.0
        :data:`~.UNSET_VALUE` was introduced. These can be bound as positional parameters
        in a sequence, or by name in a dict. Additionally, when using protocol v4+:
        * short sequences will be extended to match bind parameters with UNSET_VALUE
        * names may be omitted from a dict with UNSET_VALUE implied.
        """
        if values is None:
            values = ()
        proto_version = self.prepared_statement.protocol_version
        col_meta = self.prepared_statement.column_metadata
        col_meta_len = len(col_meta)
        value_len = len(values)
        # special case for binding dicts
        if isinstance(values, dict):
            unbound_values = values.copy()
            values = []
            # sort values accordingly
            for col in col_meta:
                try:
                    values.append(unbound_values.pop(col.name))
                except KeyError:
                    if proto_version >= 4:
                        # v4+ permits omitted names: bind them as UNSET
                        values.append(UNSET_VALUE)
                    else:
                        raise KeyError(
                            'Column name `%s` not found in bound dict.' %
                            (col.name))
            value_len = len(values)
            if unbound_values:
                # leftover keys matched no bind column
                raise ValueError("Unexpected arguments provided to bind(): %s" % unbound_values.keys())
        if value_len > col_meta_len:
            raise ValueError(
                "Too many arguments provided to bind() (got %d, expected %d)" %
                (len(values), len(col_meta)))
        # this is fail-fast for clarity pre-v4. When v4 can be assumed,
        # the error will be better reported when UNSET_VALUE is implicitly added.
        if proto_version < 4 and self.prepared_statement.routing_key_indexes and \
           value_len < len(self.prepared_statement.routing_key_indexes):
            raise ValueError(
                "Too few arguments provided to bind() (got %d, required %d for routing key)" %
                (value_len, len(self.prepared_statement.routing_key_indexes)))
        self.raw_values = values
        self.values = []
        for value, col_spec in zip(values, col_meta):
            if value is None:
                self.values.append(None)
            elif value is UNSET_VALUE:
                if proto_version >= 4:
                    self._append_unset_value()
                else:
                    raise ValueError("Attempt to bind UNSET_VALUE while using unsuitable protocol version (%d < 4)" % proto_version)
            else:
                try:
                    self.values.append(col_spec.type.serialize(value, proto_version))
                except (TypeError, struct.error) as exc:
                    actual_type = type(value)
                    message = ('Received an argument of invalid type for column "%s". '
                               'Expected: %s, Got: %s; (%s)' % (col_spec.name, col_spec.type, actual_type, exc))
                    raise TypeError(message)
        if proto_version >= 4:
            # pad any remaining (unbound) trailing parameters with UNSET_VALUE
            diff = col_meta_len - len(self.values)
            if diff:
                for _ in range(diff):
                    self._append_unset_value()
        return self
    def _append_unset_value(self):
        # Routing-key components may never be left unset.
        next_index = len(self.values)
        if self.prepared_statement.is_routing_key_index(next_index):
            col_meta = self.prepared_statement.column_metadata[next_index]
            raise ValueError("Cannot bind UNSET_VALUE as a part of the routing key '%s'" % col_meta.name)
        self.values.append(UNSET_VALUE)
    @property
    def routing_key(self):
        if not self.prepared_statement.routing_key_indexes:
            return None
        if self._routing_key is not None:
            # cached from a previous computation
            return self._routing_key
        routing_indexes = self.prepared_statement.routing_key_indexes
        if len(routing_indexes) == 1:
            # single-column partition key: the serialized value is the key
            self._routing_key = self.values[routing_indexes[0]]
        else:
            # composite partition key: each component is length-prefixed
            # (big-endian uint16) and terminated by a zero byte
            components = []
            for statement_index in routing_indexes:
                val = self.values[statement_index]
                l = len(val)
                components.append(struct.pack(">H%dsB" % l, l, val, 0))
            self._routing_key = b"".join(components)
        return self._routing_key
    def __str__(self):
        consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set')
        return (u'<BoundStatement query="%s", values=%s, consistency=%s>' %
                (self.prepared_statement.query_string, self.raw_values, consistency))
    __repr__ = __str__
class BatchType(object):
    """
    Controls the atomicity guarantees of a :class:`.BatchStatement`
    operation.
    .. versionadded:: 2.0.0
    """
    LOGGED = None
    """
    Atomic batch operation.
    """
    UNLOGGED = None
    """
    Non-atomic batch operation.
    """
    COUNTER = None
    """
    Batches of counter operations.
    """
    def __init__(self, name, value):
        # `name` is the human-readable label; `value` is the protocol code.
        self.name = name
        self.value = value
    def __str__(self):
        return self.name
    def __repr__(self):
        return "BatchType." + self.name
BatchType.LOGGED = BatchType("LOGGED", 0)
BatchType.UNLOGGED = BatchType("UNLOGGED", 1)
BatchType.COUNTER = BatchType("COUNTER", 2)
class BatchStatement(Statement):
    """
    A protocol-level batch of operations which are applied atomically
    by default.
    .. versionadded:: 2.0.0
    """
    batch_type = None
    """
    The :class:`.BatchType` for the batch operation. Defaults to
    :attr:`.BatchType.LOGGED`.
    """
    serial_consistency_level = None
    """
    The same as :attr:`.Statement.serial_consistency_level`, but is only
    supported when using protocol version 3 or higher.
    """
    # list of (is_prepared, query_id_or_query_string, values) triples
    _statements_and_parameters = None
    _session = None
    def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None,
                 consistency_level=None, serial_consistency_level=None,
                 session=None, custom_payload=None):
        """
        `batch_type` specifies The :class:`.BatchType` for the batch operation.
        Defaults to :attr:`.BatchType.LOGGED`.
        `retry_policy` should be a :class:`~.RetryPolicy` instance for
        controlling retries on the operation.
        `consistency_level` should be a :class:`~.ConsistencyLevel` value
        to be used for all operations in the batch.
        `custom_payload` is a :ref:`custom_payload` passed to the server.
        Note: as Statement objects are added to the batch, this map is
        updated with any values found in their custom payloads. These are
        only allowed when using protocol version 4 or higher.
        Example usage:
        .. code-block:: python
            insert_user = session.prepare("INSERT INTO users (name, age) VALUES (?, ?)")
            batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
            for (name, age) in users_to_insert:
                batch.add(insert_user, (name, age))
            session.execute(batch)
        You can also mix different types of operations within a batch:
        .. code-block:: python
            batch = BatchStatement()
            batch.add(SimpleStatement("INSERT INTO users (name, age) VALUES (%s, %s)"), (name, age))
            batch.add(SimpleStatement("DELETE FROM pending_users WHERE name=%s"), (name,))
            session.execute(batch)
        .. versionadded:: 2.0.0
        .. versionchanged:: 2.1.0
            Added `serial_consistency_level` as a parameter
        .. versionchanged:: 2.6.0
            Added `custom_payload` as a parameter
        """
        self.batch_type = batch_type
        self._statements_and_parameters = []
        self._session = session
        Statement.__init__(self, retry_policy=retry_policy, consistency_level=consistency_level,
                           serial_consistency_level=serial_consistency_level, custom_payload=custom_payload)
    def add(self, statement, parameters=None):
        """
        Adds a :class:`.Statement` and optional sequence of parameters
        to be used with the statement to the batch.
        Like with other statements, parameters must be a sequence, even
        if there is only one item.
        """
        if isinstance(statement, six.string_types):
            # Raw CQL string: inline any parameters client-side.
            if parameters:
                encoder = Encoder() if self._session is None else self._session.encoder
                statement = bind_params(statement, parameters, encoder)
            self._statements_and_parameters.append((False, statement, ()))
        elif isinstance(statement, PreparedStatement):
            query_id = statement.query_id
            bound_statement = statement.bind(() if parameters is None else parameters)
            self._update_state(bound_statement)
            self._statements_and_parameters.append(
                (True, query_id, bound_statement.values))
        elif isinstance(statement, BoundStatement):
            if parameters:
                raise ValueError(
                    "Parameters cannot be passed with a BoundStatement "
                    "to BatchStatement.add()")
            self._update_state(statement)
            self._statements_and_parameters.append(
                (True, statement.prepared_statement.query_id, statement.values))
        else:
            # it must be a SimpleStatement
            query_string = statement.query_string
            if parameters:
                encoder = Encoder() if self._session is None else self._session.encoder
                query_string = bind_params(query_string, parameters, encoder)
            self._update_state(statement)
            self._statements_and_parameters.append((False, query_string, ()))
        return self
    def add_all(self, statements, parameters):
        """
        Adds a sequence of :class:`.Statement` objects and a matching sequence
        of parameters to the batch. Statements and parameters are paired up
        positionally; :const:`None` can be used in place of parameters when no
        parameters are needed.
        """
        for statement, value in zip(statements, parameters):
            # Bug fix: previously this passed the entire `parameters` sequence
            # for every statement instead of the statement's own `value`.
            self.add(statement, value)
    def _maybe_set_routing_attributes(self, statement):
        # Adopt the first statement's routing key/keyspace for the batch.
        if self.routing_key is None:
            if statement.keyspace and statement.routing_key:
                self.routing_key = statement.routing_key
                self.keyspace = statement.keyspace
    def _update_custom_payload(self, statement):
        # Merge the statement's custom payload into the batch-level payload.
        if statement.custom_payload:
            if self.custom_payload is None:
                self.custom_payload = {}
            self.custom_payload.update(statement.custom_payload)
    def _update_state(self, statement):
        self._maybe_set_routing_attributes(statement)
        self._update_custom_payload(statement)
    def __str__(self):
        consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set')
        return (u'<BatchStatement type=%s, statements=%d, consistency=%s>' %
                (self.batch_type, len(self._statements_and_parameters), consistency))
    __repr__ = __str__
# Re-export of :class:`cassandra.encoder.ValueSequence` so that callers can
# continue to import it from this module.
ValueSequence = cassandra.encoder.ValueSequence
"""
A wrapper class that is used to specify that a sequence of values should
be treated as a CQL list of values instead of a single column collection when used
as part of the `parameters` argument for :meth:`.Session.execute()`.
This is typically needed when supplying a list of keys to select.
For example::
>>> my_user_ids = ('alice', 'bob', 'charles')
>>> query = "SELECT * FROM users WHERE user_id IN %s"
>>> session.execute(query, parameters=[ValueSequence(my_user_ids)])
"""
def bind_params(query, params, encoder):
    """Interpolate `params` into `query`, CQL-encoding every value first.

    `params` may be a mapping (for named ``%(name)s`` placeholders) or any
    other sequence (for positional ``%s`` placeholders).
    """
    if isinstance(params, dict):
        encoded = dict((key, encoder.cql_encode_all_types(value))
                       for key, value in six.iteritems(params))
    else:
        encoded = tuple(encoder.cql_encode_all_types(value) for value in params)
    return query % encoded
class TraceUnavailable(Exception):
    """Raised when complete trace details cannot be fetched from Cassandra."""
class QueryTrace(object):
    """
    A trace of the duration and events that occurred when executing
    an operation.
    """
    trace_id = None
    """
    :class:`uuid.UUID` unique identifier for this tracing session. Matches
    the ``session_id`` column in ``system_traces.sessions`` and
    ``system_traces.events``.
    """
    request_type = None
    """
    A string that very generally describes the traced operation.
    """
    duration = None
    """
    A :class:`datetime.timedelta` measure of the duration of the query.
    """
    client = None
    """
    The IP address of the client that issued this request
    This is only available when using Cassandra 2.2+
    """
    coordinator = None
    """
    The IP address of the host that acted as coordinator for this request.
    """
    parameters = None
    """
    A :class:`dict` of parameters for the traced operation, such as the
    specific query string.
    """
    started_at = None
    """
    A UTC :class:`datetime.datetime` object describing when the operation
    was started.
    """
    events = None
    """
    A chronologically sorted list of :class:`.TraceEvent` instances
    representing the steps the traced operation went through. This
    corresponds to the rows in ``system_traces.events`` for this tracing
    session.
    """
    _session = None
    _SELECT_SESSIONS_FORMAT = "SELECT * FROM system_traces.sessions WHERE session_id = %s"
    _SELECT_EVENTS_FORMAT = "SELECT * FROM system_traces.events WHERE session_id = %s"
    # Base delay (seconds) for the exponential backoff used while polling
    # for trace data in populate().
    _BASE_RETRY_SLEEP = 0.003
    def __init__(self, trace_id, session):
        # `session` is retained so populate() can issue system_traces queries.
        self.trace_id = trace_id
        self._session = session
    def populate(self, max_wait=2.0):
        """
        Retrieves the actual tracing details from Cassandra and populates the
        attributes of this instance. Because tracing details are stored
        asynchronously by Cassandra, this may need to retry the session
        detail fetch. If the trace is still not available after `max_wait`
        seconds, :exc:`.TraceUnavailable` will be raised; if `max_wait` is
        :const:`None`, this will retry forever.
        """
        attempt = 0
        start = time.time()
        while True:
            time_spent = time.time() - start
            if max_wait is not None and time_spent >= max_wait:
                raise TraceUnavailable(
                    "Trace information was not available within %f seconds. Consider raising Session.max_trace_wait." % (max_wait,))
            log.debug("Attempting to fetch trace info for trace ID: %s", self.trace_id)
            session_results = self._execute(
                self._SELECT_SESSIONS_FORMAT, (self.trace_id,), time_spent, max_wait)
            # A missing row or NULL duration means the server has not finished
            # persisting the trace yet: back off exponentially and retry.
            if not session_results or session_results[0].duration is None:
                time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt))
                attempt += 1
                continue
            log.debug("Fetched trace info for trace ID: %s", self.trace_id)
            session_row = session_results[0]
            self.request_type = session_row.request
            self.duration = timedelta(microseconds=session_row.duration)
            self.started_at = session_row.started_at
            self.coordinator = session_row.coordinator
            self.parameters = session_row.parameters
            # since C* 2.2
            self.client = getattr(session_row, 'client', None)
            log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id)
            time_spent = time.time() - start
            event_results = self._execute(
                self._SELECT_EVENTS_FORMAT, (self.trace_id,), time_spent, max_wait)
            log.debug("Fetched trace events for trace ID: %s", self.trace_id)
            self.events = tuple(TraceEvent(r.activity, r.event_id, r.source, r.source_elapsed, r.thread)
                                for r in event_results)
            break
    def _execute(self, query, parameters, time_spent, max_wait):
        # Run a system_traces query with whatever time budget remains;
        # a timeout is translated into TraceUnavailable for the caller.
        timeout = (max_wait - time_spent) if max_wait is not None else None
        future = self._session._create_response_future(query, parameters, trace=False, custom_payload=None, timeout=timeout)
        # in case the user switched the row factory, set it to namedtuple for this query
        future.row_factory = named_tuple_factory
        future.send_request()
        try:
            return future.result()
        except OperationTimedOut:
            raise TraceUnavailable("Trace information was not available within %f seconds" % (max_wait,))
    def __str__(self):
        return "%s [%s] coordinator: %s, started at: %s, duration: %s, parameters: %s" \
               % (self.request_type, self.trace_id, self.coordinator, self.started_at,
                  self.duration, self.parameters)
class TraceEvent(object):
    """
    A single step recorded in ``system_traces.events`` for a query trace.
    """
    description = None
    """
    A brief description of the event.
    """
    datetime = None
    """
    A UTC :class:`datetime.datetime` marking when the event occurred.
    """
    source = None
    """
    The IP address of the node this event occurred on.
    """
    source_elapsed = None
    """
    A :class:`datetime.timedelta` measuring the amount of time until
    this event occurred starting from when :attr:`.source` first
    received the query.
    """
    thread_name = None
    """
    The name of the thread that this event occurred on.
    """
    def __init__(self, description, timeuuid, source, source_elapsed, thread_name):
        self.description = description
        # Derive the wall-clock time of the event from its v1 time-UUID.
        self.datetime = datetime.utcfromtimestamp(unix_time_from_uuid1(timeuuid))
        self.source = source
        self.source_elapsed = (timedelta(microseconds=source_elapsed)
                               if source_elapsed is not None else None)
        self.thread_name = thread_name
    def __str__(self):
        return "%s on %s[%s] at %s" % (self.description, self.source, self.thread_name, self.datetime)
| apache-2.0 |
eaplatanios/jelly-bean-world | api/python/src/jbw/permissions.py | 1 | 1899 | # Copyright 2019, The Jelly Bean World Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import, division, print_function
__all__ = ['Permissions', 'GRANT_ALL_PERMISSIONS', 'DENY_ALL_PERMISSIONS']
class Permissions:
    """Permissions that can control/limit the behavior of MPI clients."""
    # Every permission defaults to denied; instances flip them on per-client.
    add_agent = False
    remove_agent = False
    remove_client = False
    set_active = False
    get_map = False
    get_agent_ids = False
    get_agent_states = False
    manage_semaphores = False
    get_semaphores = False
# Convenience instances with every permission granted or denied.
GRANT_ALL_PERMISSIONS = Permissions()
DENY_ALL_PERMISSIONS = Permissions()
for _permission in ("add_agent", "remove_agent", "remove_client", "set_active",
                    "get_map", "get_agent_ids", "get_agent_states",
                    "manage_semaphores", "get_semaphores"):
    setattr(GRANT_ALL_PERMISSIONS, _permission, True)
    setattr(DENY_ALL_PERMISSIONS, _permission, False)
del _permission
| apache-2.0 |
elastic-coders/aiohttp | demos/polls/aiohttpdemo_polls/middlewares.py | 12 | 1370 | import aiohttp_jinja2
from aiohttp import web
async def handle_404(request, response):
    """Render the custom 404 template in place of the default error page."""
    return aiohttp_jinja2.render_template('404.html', request, {})
async def handle_500(request, response):
    """Render the custom 500 template in place of the default error page."""
    return aiohttp_jinja2.render_template('500.html', request, {})
def error_pages(overrides):
    """Build an aiohttp middleware that swaps responses/exceptions whose
    status appears in `overrides` for the mapped handler's output.

    `overrides` maps an HTTP status code to an async callable
    ``(request, response_or_exception) -> response``.
    """
    async def middleware(app, handler):
        async def middleware_handler(request):
            try:
                resp = await handler(request)
                custom = overrides.get(resp.status)
                # Note: a custom handler raising HTTPException is still
                # caught below, matching the original control flow.
                return resp if custom is None else await custom(request, resp)
            except web.HTTPException as exc:
                custom = overrides.get(exc.status)
                if custom is None:
                    raise
                return await custom(request, exc)
        return middleware_handler
    return middleware
def setup_middlewares(app):
    """Install the custom error-page middleware onto `app`."""
    app.middlewares.append(error_pages({404: handle_404,
                                        500: handle_500}))
| apache-2.0 |
zhengyongbo/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py | 124 | 2790 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.port import builders
from webkitpy.tool.commands.rebaseline import AbstractRebaseliningCommand
from webkitpy.tool.servers.gardeningserver import GardeningHTTPServer
class GardenOMatic(AbstractRebaseliningCommand):
    """webkit-patch command that launches the garden-o-matic web UI,
    serving it from a local HTTP server that can modify the working copy.
    (Python 2 codebase: note the print statements.)"""
    name = "garden-o-matic"
    help_text = "Command for gardening the WebKit tree."
    def __init__(self):
        # Reuse the shared rebaselining options plus the extras this UI needs.
        super(GardenOMatic, self).__init__(options=(self.platform_options + [
            self.move_overwritten_baselines_option,
            self.results_directory_option,
            self.no_optimize_option,
        ]))
    def execute(self, options, args, tool):
        print "This command runs a local HTTP server that changes your working copy"
        print "based on the actions you take in the web-based UI."
        # Query-string arguments forwarded to the web UI.
        args = {}
        if options.platform:
            # FIXME: This assumes that the port implementation (chromium-, gtk-, etc.) is the first part of options.platform.
            args['platform'] = options.platform.split('-')[0]
            builder = builders.builder_name_for_port_name(options.platform)
            if builder:
                args['builder'] = builder
        if options.results_directory:
            args['useLocalResults'] = "true"
        httpd = GardeningHTTPServer(httpd_port=8127, config={'tool': tool, 'options': options})
        self._tool.user.open_url(httpd.url(args))
        print "Local HTTP server started."
        # Blocks until the server is shut down externally.
        httpd.serve_forever()
| bsd-3-clause |
sphax3d/gedit-plugins | plugins/commander/modules/shell.py | 1 | 5675 | # -*- coding: utf-8 -*-
#
# shell.py - shell commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import subprocess
import fcntl
import os
import tempfile
import signal
from gi.repository import GLib, GObject, Gio
import commander.commands as commands
import commander.commands.exceptions
import commander.commands.result
__commander_module__ = True
__root__ = ['!', '!!', '!&']
class Process:
    """Watches a spawned shell command and routes its output.

    Output is either streamed line-by-line to the commander info area or,
    when `replace` is set, collected and used to replace the current
    selection/document once the process finishes.
    """
    def __init__(self, entry, pipe, replace, background, tmpin, stdout, suspend):
        # `pipe` is the subprocess.Popen object; `suspend` is the commander
        # suspend token resumed in stop() (None for background commands).
        self.pipe = pipe
        self.replace = replace
        self.tmpin = tmpin
        self.entry = entry
        self.suspend = suspend
        if replace:
            # Lock the view while streaming; re-enabled in stop().
            self.entry.view().set_editable(False)
        if not background:
            # Non-blocking reads so the GLib watch callback never stalls the UI.
            fcntl.fcntl(stdout, fcntl.F_SETFL, os.O_NONBLOCK)
            conditions = GLib.IOCondition.IN | GLib.IOCondition.PRI | GLib.IOCondition.ERR | GLib.IOCondition.HUP
            self.watch = GLib.io_add_watch(stdout, conditions, self.collect_output)
            self._buffer = ''
        else:
            stdout.close()
    def update(self):
        # Flush all complete lines to the info area, keeping any trailing
        # partial line buffered for the next read.
        parts = self._buffer.split("\n")
        for p in parts[:-1]:
            self.entry.info_show(p)
        self._buffer = parts[-1]
    def collect_output(self, fd, condition):
        # GLib watch callback; returning False removes the watch.
        if condition & (GLib.IOCondition.IN | GLib.IOCondition.PRI):
            try:
                ret = fd.read()
                # This seems to happen on OS X...
                if ret == '':
                    # An empty non-blocking read means EOF: treat as hangup.
                    # Bug fix: this previously referenced the misspelled
                    # GLib.IOConditiom, raising AttributeError (which the old
                    # bare `except:` then silently swallowed).
                    condition = condition | GLib.IOCondition.HUP
                else:
                    self._buffer += ret
                    if not self.replace:
                        self.update()
            except Exception:
                # Bug fix: narrowed from a bare `except:` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                self.entry.info_show(self._buffer.strip("\n"))
                self.stop()
                return False
        if condition & (GLib.IOCondition.ERR | GLib.IOCondition.HUP):
            if self.replace:
                # Replace the selection (or whole document) with the output.
                buf = self.entry.view().get_buffer()
                buf.begin_user_action()
                bounds = buf.get_selection_bounds()
                if bounds:
                    buf.delete(bounds[0], bounds[1])
                buf.insert_at_cursor(self._buffer)
                buf.end_user_action()
            else:
                self.entry.info_show(self._buffer.strip("\n"))
            self.stop()
            return False
        return True
    def stop(self):
        # stop() may be invoked more than once (watch callback and cancel);
        # the cleared `suspend` token makes subsequent calls no-ops.
        if not self.suspend:
            return
        if hasattr(self.pipe, 'kill'):
            self.pipe.kill()
        # NOTE(review): GLib.source_remove is the modern spelling; kept as
        # GObject.source_remove to match the file's imports.
        GObject.source_remove(self.watch)
        if self.replace:
            self.entry.view().set_editable(True)
        if self.tmpin:
            # NOTE(review): the temp file was created with delete=False and is
            # only closed here, not unlinked — confirm whether it should be.
            self.tmpin.close()
        sus = self.suspend
        self.suspend = None
        sus.resume()
def _run_command(entry, replace, background, argstr):
    """Spawn `argstr` as a shell command (generator used by the commander).

    `replace` routes the output into the document instead of the info area;
    `background` detaches the command and hides the entry immediately.
    A `<!` token in `argstr` is substituted with a temp file holding the
    current selection (or the whole document when nothing is selected).
    """
    tmpin = None
    cwd = None
    # Run relative to the document's directory when it is a saved local file.
    doc = entry.view().get_buffer()
    if not doc.is_untitled() and doc.is_local():
        gfile = doc.get_location()
        cwd = os.path.dirname(gfile.get_path())
    if '<!' in argstr:
        bounds = entry.view().get_buffer().get_selection_bounds()
        if not bounds:
            bounds = entry.view().get_buffer().get_bounds()
        inp = bounds[0].get_text(bounds[1])
        # Write to temporary file (delete=False: closed later in Process.stop)
        tmpin = tempfile.NamedTemporaryFile(delete=False)
        tmpin.write(inp)
        tmpin.flush()
        # Replace with temporary file
        argstr = argstr.replace('<!', '< "' + tmpin.name + '"')
    try:
        p = subprocess.Popen(argstr, shell=True, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout = p.stdout
    except Exception as e:
        # Bug fix: `'...' + e` concatenated a str with an exception object,
        # raising TypeError and masking the real failure; use str(e).
        # (`except ... as` is valid on Python 2.6+ and forward-compatible.)
        raise commander.commands.exceptions.Execute('Failed to execute: ' + str(e))
    suspend = None
    if not background:
        suspend = commander.commands.result.Suspend()
    proc = Process(entry, p, replace, background, tmpin, stdout, suspend)
    if not background:
        # Hand control back to commander until the process finishes or the
        # user cancels; Process.stop() resumes us.
        yield suspend
        # Cancelled or simply done
        proc.stop()
        yield commander.commands.result.DONE
    else:
        yield commander.commands.result.HIDE
# NOTE: the docstrings of the command functions below are user-visible help
# text in the commander UI (including the <b>...</b> markup), so they are
# part of runtime behavior and must not be reworded casually.
def __default__(entry, argstr):
    """Run shell command: ! <command>
    You can use <b><!</b> as a special input meaning the current selection or current
    document."""
    # Foreground command; output goes to the info area.
    return _run_command(entry, False, False, argstr)
def background(entry, argstr):
    """Run shell command in the background: !& <command>
    You can use <b><!</b> as a special input meaning the current selection or current
    document."""
    # Fire-and-forget; the commander entry is hidden immediately.
    return _run_command(entry, False, True, argstr)
def replace(entry, argstr):
    """Run shell command and place output in document: !! <command>
    You can use <b><!</b> as a special input meaning the current selection or current
    document."""
    # Foreground command; output replaces the selection/document.
    return _run_command(entry, True, False, argstr)
# Register the commands under the punctuation aliases declared in __root__
# (these names are not valid Python identifiers, hence locals()).
locals()['!'] = __default__
locals()['!!'] = replace
locals()['!&'] = background
# vi:ex:ts=4:et
| gpl-2.0 |
ryuunosukeyoshi/PartnerPoi-Bot | lib/youtube_dl/extractor/urort.py | 64 | 2249 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
unified_strdate,
)
class UrortIE(InfoExtractor):
    """Extractor for band pages on NRK P3 Urørt (urort.p3.no)."""
    IE_DESC = 'NRK P3 Urørt'
    _VALID_URL = r'https?://(?:www\.)?urort\.p3\.no/#!/Band/(?P<id>[^/]+)$'
    _TEST = {
        'url': 'https://urort.p3.no/#!/Band/Gerilja',
        'md5': '5ed31a924be8a05e47812678a86e127b',
        'info_dict': {
            'id': '33124-24',
            'ext': 'mp3',
            'title': 'The Bomb',
            'thumbnail': r're:^https?://.+\.jpg',
            'uploader': 'Gerilja',
            'uploader_id': 'Gerilja',
            'upload_date': '20100323',
        },
        'params': {
            'matchtitle': '^The Bomb$',  # To test, we want just one video
        }
    }
    def _real_extract(self, url):
        # The band name from the URL fragment doubles as the playlist id.
        playlist_id = self._match_id(url)
        # OData $filter selecting all tracks belonging to this band.
        fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
        json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
        songs = self._download_json(json_url, playlist_id)
        entries = []
        for s in songs:
            # One format per available file; mp3 is preferred via `preference`.
            formats = [{
                'tbr': f.get('Quality'),
                'ext': f['FileType'],
                'format_id': '%s-%s' % (f['FileType'], f.get('Quality', '')),
                'url': 'http://p3urort.blob.core.windows.net/tracks/%s' % f['FileRef'],
                'preference': 3 if f['FileType'] == 'mp3' else 2,
            } for f in s['Files']]
            self._sort_formats(formats)
            e = {
                # Combine band id and the OData row id into a unique track id.
                'id': '%d-%s' % (s['BandId'], s['$id']),
                'title': s['Title'],
                'uploader_id': playlist_id,
                'uploader': s.get('BandName', playlist_id),
                'thumbnail': 'http://urort.p3.no/cloud/images/%s' % s['Image'],
                'upload_date': unified_strdate(s.get('Released')),
                'formats': formats,
            }
            entries.append(e)
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_id,
            'entries': entries,
        }
| gpl-3.0 |
mvdriel/ansible-modules-core | cloud/linode/linode.py | 142 | 18004 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: linode
short_description: create / delete / stop / restart an instance in Linode Public Cloud
description:
- creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'.
version_added: "1.3"
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted']
default: present
api_key:
description:
- Linode API key
default: null
name:
description:
- Name to give the instance (alphanumeric, dashes, underscore)
- To keep sanity on the Linode Web Console, name is prepended with LinodeID_
default: null
type: string
linode_id:
description:
- Unique ID of a linode server
aliases: lid
default: null
type: integer
plan:
description:
- plan to use for the instance (Linode plan)
default: null
type: integer
payment_term:
description:
- payment term to use for the instance (payment term in months)
default: 1
type: integer
choices: [1, 12, 24]
password:
description:
- root password to apply to a new server (auto generated if missing)
default: null
type: string
ssh_pub_key:
description:
- SSH public key applied to root user
default: null
type: string
swap:
description:
- swap size in MB
default: 512
type: integer
distribution:
description:
- distribution to use for the instance (Linode Distribution)
default: null
type: integer
datacenter:
description:
- datacenter to create an instance in (Linode Datacenter)
default: null
type: integer
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements:
- "python >= 2.6"
- "linode-python"
- "pycurl"
author: "Vincent Viallet (@zbal)"
notes:
- LINODE_API_KEY env variable can be used instead
'''
EXAMPLES = '''
# Create a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Ensure a running server (create if missing)
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Delete a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: absent
# Stop a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: stopped
# Reboot a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: restarted
'''
import time
import os
try:
import pycurl
HAS_PYCURL = True
except ImportError:
HAS_PYCURL = False
try:
from linode import api as linode_api
HAS_LINODE = True
except ImportError:
HAS_LINODE = False
def randompass():
    '''
    Generate a long random password that comply to Linode requirements
    '''
    # Linode API currently requires the following:
    # It must contain at least two of these four character classes:
    # lower case letters - upper case letters - numbers - punctuation
    # we play it safe :)
    import random
    import string
    # SystemRandom draws from os.urandom(), which is suitable for secrets,
    # unlike the default seedable Mersenne Twister PRNG (available since
    # Python 2.4, so within this module's "python >= 2.6" requirement).
    rng = random.SystemRandom()
    # Six characters from each class guarantees the two-class requirement,
    # for a 24-character password overall.
    lower = ''.join(rng.choice(string.ascii_lowercase) for x in range(6))
    upper = ''.join(rng.choice(string.ascii_uppercase) for x in range(6))
    number = ''.join(rng.choice(string.digits) for x in range(6))
    punct = ''.join(rng.choice(string.punctuation) for x in range(6))
    p = lower + upper + number + punct
    # Shuffle so the class groups are not in a predictable order.
    return ''.join(rng.sample(p, len(p)))
def getInstanceDetails(api, server):
    '''
    Return the details of an instance, populating IPs, etc.
    '''
    details = {'id': server['LINODEID'],
               'name': server['LABEL'],
               'public': [],
               'private': []}

    # Walk every address attached to the instance and sort it into the
    # public/private buckets.
    for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
        record = {'ipv4': ip['IPADDRESS'],
                  'fqdn': ip['RDNS_NAME'],
                  'ip_id': ip['IPADDRESSID']}
        if ip['ISPUBLIC']:
            # The first public address seen becomes the instance's primary
            # 'ipv4'/'fqdn' entry.
            if 'ipv4' not in details:
                details['ipv4'] = ip['IPADDRESS']
                details['fqdn'] = ip['RDNS_NAME']
            details['public'].append(record)
        else:
            details['private'].append(record)
    return details
def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
                  payment_term, password, ssh_pub_key, swap, wait, wait_timeout):
    """Converge a Linode instance to the requested *state*.

    Depending on *state* this creates the entity/disks/config, boots, stops,
    restarts or deletes the instance.  It always terminates through
    ``module.exit_json``/``module.fail_json``, so it never returns normally.

    Fixes vs. the original implementation (interface unchanged):
    - ``state in ('stopped')`` / ``('restarted')`` were substring tests
      against a plain string, not tuple membership; now explicit tuples.
    - required-argument checks no longer use ``eval(arg)``.
    - ``except E, e`` replaced by ``except E as e`` (valid on 2.6+/3.x).
    """
    instances = []
    changed = False
    new_server = False
    servers = []
    disks = []
    configs = []
    jobs = []

    # See if we can match an existing server details with the provided linode_id
    if linode_id:
        # For the moment we only consider linode_id as criteria for match
        # Later we can use more (size, name, etc.) and update existing
        servers = api.linode_list(LinodeId=linode_id)
        # Attempt to fetch details about disks and configs only if servers are
        # found with linode_id
        if servers:
            disks = api.linode_disk_list(LinodeId=linode_id)
            configs = api.linode_config_list(LinodeId=linode_id)

    # Act on the state
    if state in ('active', 'present', 'started'):
        # TODO: validate all the plan / distribution / datacenter are valid

        # Multi step process/validation:
        #  - need linode_id (entity)
        #  - need disk_id for linode_id - create disk from distrib
        #  - need config_id for linode_id - create config (need kernel)

        # Any create step triggers a job that need to be waited for.
        if not servers:
            # Explicit (name, value) pairs replace the original eval(arg)
            # lookup: same order, no dynamic evaluation.
            for arg, value in (('name', name), ('plan', plan),
                               ('distribution', distribution),
                               ('datacenter', datacenter)):
                if not value:
                    module.fail_json(msg='%s is required for active state' % arg)

            # Create linode entity
            new_server = True
            try:
                res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
                                        PaymentTerm=payment_term)
                linode_id = res['LinodeID']
                # Update linode Label to match name
                api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name))
                # Save server
                servers = api.linode_list(LinodeId=linode_id)
            except Exception as e:
                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])

        if not disks:
            for arg, value in (('name', name), ('linode_id', linode_id),
                               ('distribution', distribution)):
                if not value:
                    module.fail_json(msg='%s is required for active state' % arg)
            # Create disks (1 from distrib, 1 for SWAP)
            new_server = True
            try:
                if not password:
                    # Password is required on creation, if not provided generate one
                    password = randompass()
                if not swap:
                    swap = 512
                # Create data disk: everything not reserved for swap.
                size = servers[0]['TOTALHD'] - swap

                if ssh_pub_key:
                    res = api.linode_disk_createfromdistribution(
                        LinodeId=linode_id, DistributionID=distribution,
                        rootPass=password, rootSSHKey=ssh_pub_key,
                        Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
                else:
                    res = api.linode_disk_createfromdistribution(
                        LinodeId=linode_id, DistributionID=distribution, rootPass=password,
                        Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
                jobs.append(res['JobID'])
                # Create SWAP disk
                res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
                                             Label='%s swap disk (lid: %s)' % (name, linode_id),
                                             Size=swap)
                jobs.append(res['JobID'])
            except Exception as e:
                # TODO: destroy linode ?
                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])

        if not configs:
            for arg, value in (('name', name), ('linode_id', linode_id),
                               ('distribution', distribution)):
                if not value:
                    module.fail_json(msg='%s is required for active state' % arg)

            # Check architecture of the chosen distribution.
            for distrib in api.avail_distributions():
                if distrib['DISTRIBUTIONID'] != distribution:
                    continue
                arch = '32'
                if distrib['IS64BIT']:
                    arch = '64'
                break

            # Get latest kernel matching arch
            for kernel in api.avail_kernels():
                if not kernel['LABEL'].startswith('Latest %s' % arch):
                    continue
                kernel_id = kernel['KERNELID']
                break

            # Get disk list; the ext3 data disk must come first in the config.
            disks_id = []
            for disk in api.linode_disk_list(LinodeId=linode_id):
                if disk['TYPE'] == 'ext3':
                    disks_id.insert(0, str(disk['DISKID']))
                    continue
                disks_id.append(str(disk['DISKID']))
            # Trick to get the 9 items in the list
            while len(disks_id) < 9:
                disks_id.append('')
            disks_list = ','.join(disks_id)

            # Create config
            new_server = True
            try:
                api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
                                         Disklist=disks_list, Label='%s config' % name)
                configs = api.linode_config_list(LinodeId=linode_id)
            except Exception as e:
                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])

        # Start / Ensure servers are running
        for server in servers:
            # Refresh server state
            server = api.linode_list(LinodeId=server['LINODEID'])[0]
            # Ensure existing servers are up and running, boot if necessary
            if server['STATUS'] != 1:
                res = api.linode_boot(LinodeId=linode_id)
                jobs.append(res['JobID'])
                changed = True

        # wait here until the instances are up
        wait_timeout = time.time() + wait_timeout
        while wait and wait_timeout > time.time():
            # refresh the server details
            server = api.linode_list(LinodeId=server['LINODEID'])[0]
            # status:
            #  -2: Boot failed
            #   1: Running
            if server['STATUS'] in (-2, 1):
                break
            time.sleep(5)
        if wait and wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg='Timeout waiting on %s (lid: %s)' %
                             (server['LABEL'], server['LINODEID']))
        # Get a fresh copy of the server details
        server = api.linode_list(LinodeId=server['LINODEID'])[0]
        if server['STATUS'] == -2:
            module.fail_json(msg='%s (lid: %s) failed to boot' %
                             (server['LABEL'], server['LINODEID']))
        # From now on we know the task is a success
        # Build instance report
        instance = getInstanceDetails(api, server)
        # depending on wait flag select the status
        if wait:
            instance['status'] = 'Running'
        else:
            instance['status'] = 'Starting'

        # Return the root password if this is a new box and no SSH key
        # has been provided
        if new_server and not ssh_pub_key:
            instance['password'] = password
        instances.append(instance)

    elif state in ('stopped',):
        for arg, value in (('name', name), ('linode_id', linode_id)):
            if not value:
                module.fail_json(msg='%s is required for active state' % arg)

        if not servers:
            module.fail_json(msg='Server %s (lid: %s) not found' % (name, linode_id))

        for server in servers:
            instance = getInstanceDetails(api, server)
            # STATUS 2 == powered off; only shut down running instances.
            if server['STATUS'] != 2:
                try:
                    res = api.linode_shutdown(LinodeId=linode_id)
                except Exception as e:
                    module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
                instance['status'] = 'Stopping'
                changed = True
            else:
                instance['status'] = 'Stopped'
            instances.append(instance)

    elif state in ('restarted',):
        for arg, value in (('name', name), ('linode_id', linode_id)):
            if not value:
                module.fail_json(msg='%s is required for active state' % arg)

        if not servers:
            module.fail_json(msg='Server %s (lid: %s) not found' % (name, linode_id))

        for server in servers:
            instance = getInstanceDetails(api, server)
            try:
                res = api.linode_reboot(LinodeId=server['LINODEID'])
            except Exception as e:
                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
            instance['status'] = 'Restarting'
            changed = True
            instances.append(instance)

    elif state in ('absent', 'deleted'):
        for server in servers:
            instance = getInstanceDetails(api, server)
            try:
                api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
            except Exception as e:
                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
            instance['status'] = 'Deleting'
            changed = True
            instances.append(instance)

    # Ease parsing if only 1 instance
    if len(instances) == 1:
        module.exit_json(changed=changed, instance=instances[0])

    module.exit_json(changed=changed, instances=instances)
def main():
    """Parse module arguments, authenticate against the Linode API and hand
    control to linodeServers() (which exits the module for us).

    Fixes: Python-2-only ``except E, e`` syntax and the ``e.message``
    attribute (removed in Python 3) are replaced with 2.6+/3.x-compatible
    equivalents.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['active', 'present', 'started',
                                                     'deleted', 'absent', 'stopped',
                                                     'restarted']),
            api_key = dict(),
            name = dict(type='str'),
            plan = dict(type='int'),
            distribution = dict(type='int'),
            datacenter = dict(type='int'),
            linode_id = dict(type='int', aliases=['lid']),
            payment_term = dict(type='int', default=1, choices=[1, 12, 24]),
            password = dict(type='str'),
            ssh_pub_key = dict(type='str'),
            swap = dict(type='int', default=512),
            # NOTE(review): DOCUMENTATION advertises wait default "no", but
            # the code has always defaulted to True; kept for compatibility.
            wait = dict(type='bool', default=True),
            wait_timeout = dict(default=300),
        )
    )

    if not HAS_PYCURL:
        module.fail_json(msg='pycurl required for this module')
    if not HAS_LINODE:
        module.fail_json(msg='linode-python required for this module')

    state = module.params.get('state')
    api_key = module.params.get('api_key')
    name = module.params.get('name')
    plan = module.params.get('plan')
    distribution = module.params.get('distribution')
    datacenter = module.params.get('datacenter')
    linode_id = module.params.get('linode_id')
    payment_term = module.params.get('payment_term')
    password = module.params.get('password')
    ssh_pub_key = module.params.get('ssh_pub_key')
    swap = module.params.get('swap')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    # Setup the api_key: fall back to the LINODE_API_KEY env variable.
    if not api_key:
        try:
            api_key = os.environ['LINODE_API_KEY']
        except KeyError as e:
            # str(e) replaces the Python-2-only e.message attribute.
            module.fail_json(msg='Unable to load %s' % str(e))

    # setup the auth
    try:
        api = linode_api.Api(api_key)
        api.test_echo()
    except Exception as e:
        module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])

    linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
                  payment_term, password, ssh_pub_key, swap, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
tonk/ansible | test/ansible_test/validate-modules-unit/test_validate_modules_regex.py | 68 | 1796 | """Tests for validate-modules regexes."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from validate_modules.main import TYPE_REGEX
# Each case is [source line, whether TYPE_REGEX should flag it]: the regex
# is meant to catch `type(x) is/== T` comparisons while letting legitimate
# uses of type() pass.
@pytest.mark.parametrize('cstring,cexpected', [
    ['if type(foo) is Bar', True],
    ['if Bar is type(foo)', True],
    ['if type(foo) is not Bar', True],
    ['if Bar is not type(foo)', True],
    ['if type(foo) == Bar', True],
    ['if Bar == type(foo)', True],
    ['if type(foo)==Bar', True],
    ['if Bar==type(foo)', True],
    ['if type(foo) != Bar', True],
    ['if Bar != type(foo)', True],
    ['if type(foo)!=Bar', True],
    ['if Bar!=type(foo)', True],
    ['if foo or type(bar) != Bar', True],
    ['x = type(foo)', False],
    ["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False],
    # cloud/amazon/ec2_group.py
    ["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False],
    # files/patch.py
    ["p = type('Params', (), module.params)", False],  # files/patch.py
    # system/osx_defaults.py
    ["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True],
    # system/osx_defaults.py
    ['raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)', False],
    # network/nxos/nxos_interface.py
    ["if get_interface_type(interface) == 'svi':", False],
])
def test_type_regex(cstring, cexpected):  # type: (str, bool) -> None
    """Check TYPE_REGEX against various examples to verify it correctly matches or does not match."""
    match = TYPE_REGEX.match(cstring)
    if cexpected and not match:
        assert False, "%s should have matched" % cstring
    elif not cexpected and match:
        assert False, "%s should not have matched" % cstring
| gpl-3.0 |
Eficent/hr | hr_holidays_compute_days/models/hr_holidays.py | 1 | 4855 | # -*- coding: utf-8 -*-
# © 2015 iDT LABS (http://www.@idtlabs.sl)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
from openerp.exceptions import ValidationError
from dateutil.relativedelta import relativedelta
import math
class HrHolidays(models.Model):
    """Extends hr.holidays so that leave duration excludes public holidays
    and the employee's rest days (depending on the leave type's settings)."""
    _inherit = 'hr.holidays'

    @api.model
    def _check_date_helper(self, employee_id, date):
        """Return False when *date* falls on a day the employee does not
        work (public holiday / rest day, per the leave type's flags).

        Returns True when employee or leave type is unknown, i.e. the check
        is permissive by default.
        """
        # Leave type may come from the record or from the onchange context.
        status_id = self.holiday_status_id.id or self.env.context.get(
            'holiday_status_id',
            False)
        if employee_id and status_id:
            employee = self.env['hr.employee'].browse(employee_id)
            status = self.env['hr.holidays.status'].browse(status_id)
            if (not employee.work_scheduled_on_day(
                    fields.Date.from_string(date),
                    public_holiday=status.exclude_public_holidays,
                    schedule=status.exclude_rest_days)):
                return False
        return True

    @api.multi
    def onchange_employee(self, employee_id):
        """Revalidate both boundary dates and recompute the leave duration
        when the employee changes (old-API onchange)."""
        res = super(HrHolidays, self).onchange_employee(employee_id)
        date_from = self.date_from or self.env.context.get('date_from')
        date_to = self.date_to or self.env.context.get('date_to')
        if (date_to and date_from) and (date_from <= date_to):
            if not self._check_date_helper(employee_id, date_from):
                raise ValidationError(_("You cannot schedule the start date "
                                        "on a public holiday or employee's "
                                        "rest day"))
            if not self._check_date_helper(employee_id, date_to):
                raise ValidationError(_("You cannot schedule the end date "
                                        "on a public holiday or employee's "
                                        "rest day"))
            duration = self._compute_number_of_days(employee_id,
                                                    date_to,
                                                    date_from)
            res['value']['number_of_days_temp'] = duration
        return res

    @api.multi
    def onchange_date_from(self, date_to, date_from):
        """Validate the new start date and refresh the duration."""
        res = super(HrHolidays, self).onchange_date_from(date_to, date_from)
        employee_id = self.employee_id.id or self.env.context.get(
            'employee_id',
            False)
        if not self._check_date_helper(employee_id, date_from):
            raise ValidationError(_("You cannot schedule the start date on "
                                    "a public holiday or employee's rest day"))
        if (date_to and date_from) and (date_from <= date_to):
            diff_day = self._compute_number_of_days(employee_id,
                                                    date_to,
                                                    date_from)
            res['value']['number_of_days_temp'] = diff_day
        return res

    @api.multi
    def onchange_date_to(self, date_to, date_from):
        """Validate the new end date and refresh the duration."""
        res = super(HrHolidays, self).onchange_date_to(date_to, date_from)
        employee_id = self.employee_id.id or self.env.context.get(
            'employee_id',
            False)
        if not self._check_date_helper(employee_id, date_to):
            raise ValidationError(_("You cannot schedule the end date on "
                                    "a public holiday or employee's rest day"))
        if (date_to and date_from) and (date_from <= date_to):
            diff_day = self._compute_number_of_days(employee_id,
                                                    date_to,
                                                    date_from)
            res['value']['number_of_days_temp'] = diff_day
        return res

    def _compute_number_of_days(self, employee_id, date_to, date_from):
        """Inclusive day count between the two dates, minus days the
        employee is not scheduled to work (per the leave type's flags)."""
        days = self._get_number_of_days(date_from, date_to)
        if days or date_to == date_from:
            # Inclusive count: floor the fractional span, then add one.
            days = round(math.floor(days))+1
        status_id = self.holiday_status_id.id or self.env.context.get(
            'holiday_status_id',
            False)
        if employee_id and date_from and date_to and status_id:
            employee = self.env['hr.employee'].browse(employee_id)
            status = self.env['hr.holidays.status'].browse(status_id)
            date_from = fields.Date.from_string(date_from)
            date_to = fields.Date.from_string(date_to)
            date_dt = date_from
            while date_dt <= date_to:
                # if public holiday or rest day let us skip
                if not employee.work_scheduled_on_day(
                        date_dt,
                        status.exclude_public_holidays,
                        status.exclude_rest_days
                ):
                    days -= 1
                date_dt += relativedelta(days=1)
        return days
| agpl-3.0 |
RockySteveJobs/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/codec.py | 361 | 2771 | """Classes for reading/writing binary data (such as TLS records)."""
from compat import *
class Writer:
    """Big-endian binary serializer.

    With length == 0 no buffer is allocated and only ``self.index`` advances,
    so a first "trial run" pass can measure the required length before a
    second pass actually writes bytes.
    """
    def __init__(self, length=0):
        #If length is zero, then this is just a "trial run" to determine length
        self.index = 0
        # NOTE(review): createByteArrayZeros comes from the compat module;
        # presumably it returns a mutable, falsy-when-empty byte buffer —
        # the trial-run branch below relies on that. Verify in compat.py.
        self.bytes = createByteArrayZeros(length)

    def add(self, x, length):
        """Append integer *x* as *length* bytes, most significant first."""
        if self.bytes:
            # Fill from the last byte backwards, peeling 8 bits at a time.
            newIndex = self.index+length-1
            while newIndex >= self.index:
                self.bytes[newIndex] = x & 0xFF
                x >>= 8
                newIndex -= 1
        # Cursor advances even in trial-run mode (no buffer).
        self.index += length

    def addFixSeq(self, seq, length):
        """Append each element of *seq* as a fixed *length*-byte integer."""
        if self.bytes:
            for e in seq:
                self.add(e, length)
        else:
            # Trial run: just account for the space.
            self.index += len(seq)*length

    def addVarSeq(self, seq, length, lengthLength):
        """Append *seq* prefixed with its total byte length
        (*lengthLength* bytes)."""
        if self.bytes:
            self.add(len(seq)*length, lengthLength)
            for e in seq:
                self.add(e, length)
        else:
            # Trial run: prefix plus payload size.
            self.index += lengthLength + (len(seq)*length)
class Parser:
    """Sequential reader for binary records in network (big-endian) order.

    Wraps an indexable byte sequence and tracks a read cursor.  Malformed
    input is reported with ``SyntaxError``, following the historical tlslite
    convention.
    """
    def __init__(self, bytes):
        # The parameter name shadows the builtin but is kept for
        # interface compatibility with existing callers.
        self.bytes = bytes
        self.index = 0

    def get(self, length):
        """Read an unsigned big-endian integer occupying *length* bytes."""
        if self.index + length > len(self.bytes):
            raise SyntaxError()
        value = 0
        for _ in range(length):
            value = (value << 8) | self.bytes[self.index]
            self.index += 1
        return value

    def getFixBytes(self, lengthBytes):
        """Return the next *lengthBytes* bytes as a slice of the buffer."""
        chunk = self.bytes[self.index : self.index+lengthBytes]
        self.index += lengthBytes
        return chunk

    def getVarBytes(self, lengthLength):
        """Read a *lengthLength*-byte length prefix, then that many bytes."""
        return self.getFixBytes(self.get(lengthLength))

    def getFixList(self, length, lengthList):
        """Read *lengthList* integers of *length* bytes each."""
        return [self.get(length) for _ in range(lengthList)]

    def getVarList(self, length, lengthLength):
        """Read a length-prefixed list of *length*-byte integers."""
        totalBytes = self.get(lengthLength)
        if totalBytes % length != 0:
            raise SyntaxError()
        return [self.get(length) for _ in range(totalBytes // length)]

    def startLengthCheck(self, lengthLength):
        """Read a length prefix and mark the cursor for stopLengthCheck()."""
        self.lengthCheck = self.get(lengthLength)
        self.indexCheck = self.index

    def setLengthCheck(self, length):
        """Mark the cursor with an externally supplied expected length."""
        self.lengthCheck = length
        self.indexCheck = self.index

    def stopLengthCheck(self):
        """Require that exactly lengthCheck bytes were consumed since the mark."""
        if (self.index - self.indexCheck) != self.lengthCheck:
            raise SyntaxError()

    def atLengthCheck(self):
        """True once the checked region is fully consumed; error on overrun."""
        consumed = self.index - self.indexCheck
        if consumed < self.lengthCheck:
            return False
        if consumed == self.lengthCheck:
            return True
        raise SyntaxError()
punalpatel/st2 | st2exporter/st2exporter/worker.py | 10 | 5056 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Queue
import eventlet
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import (LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED,
LIVEACTION_STATUS_CANCELED)
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.db.execution import ActionExecutionDB
from st2common.persistence.execution import ActionExecution
from st2common.persistence.marker import DumperMarker
from st2common.transport import consumers, execution, publishers
from st2common.transport import utils as transport_utils
from st2common.util import isotime
from st2exporter.exporter.dumper import Dumper
__all__ = [
'ExecutionsExporter'
]
COMPLETION_STATUSES = [LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED,
LIVEACTION_STATUS_CANCELED]
LOG = logging.getLogger(__name__)
EXPORTER_WORK_Q = execution.get_queue(
'st2.exporter.work', routing_key=publishers.UPDATE_RK)
class ExecutionsExporter(consumers.MessageHandler):
    """Message handler that exports completed action executions to disk.

    Consumes execution-update messages, keeps only executions in a terminal
    state (COMPLETION_STATUSES) and feeds the resulting ActionExecutionAPI
    objects to a Dumper which serializes them under
    ``cfg.CONF.exporter.dump_dir``.

    Fix vs. original: the three bare ``except:`` clauses are narrowed to
    ``except Exception:`` so SystemExit / KeyboardInterrupt / GreenletExit
    are no longer swallowed.
    """
    message_type = ActionExecutionDB

    def __init__(self, connection, queues):
        super(ExecutionsExporter, self).__init__(connection, queues)
        # In-memory hand-off queue between this consumer and the Dumper.
        self.pending_executions = Queue.Queue()
        self._dumper = Dumper(queue=self.pending_executions,
                              export_dir=cfg.CONF.exporter.dump_dir)
        self._consumer_thread = None

    def start(self, wait=False):
        """Bootstrap missed executions from the DB, then start consuming."""
        LOG.info('Bootstrapping executions from db...')
        try:
            self._bootstrap()
        except Exception:
            LOG.exception('Unable to bootstrap executions from db. Aborting.')
            raise
        self._consumer_thread = eventlet.spawn(super(ExecutionsExporter, self).start, wait=True)
        self._dumper.start()
        if wait:
            self.wait()

    def wait(self):
        """Block until both the consumer thread and the dumper finish."""
        self._consumer_thread.wait()
        self._dumper.wait()

    def shutdown(self):
        """Stop the dumper first so no new work is written during teardown."""
        self._dumper.stop()
        super(ExecutionsExporter, self).shutdown()

    def process(self, execution):
        """Queue one execution for export if it reached a terminal state."""
        LOG.debug('Got execution from queue: %s', execution)
        if execution.status not in COMPLETION_STATUSES:
            return
        execution_api = ActionExecutionAPI.from_model(execution, mask_secrets=True)
        self.pending_executions.put_nowait(execution_api)
        LOG.debug("Added execution to queue.")

    def _bootstrap(self):
        # Re-queue completed executions persisted after the last export
        # marker so nothing is lost across process restarts.
        marker = self._get_export_marker_from_db()
        LOG.info('Using marker %s...' % marker)
        missed_executions = self._get_missed_executions_from_db(export_marker=marker)
        LOG.info('Found %d executions not exported yet...', len(missed_executions))

        for missed_execution in missed_executions:
            if missed_execution.status not in COMPLETION_STATUSES:
                continue
            execution_api = ActionExecutionAPI.from_model(missed_execution, mask_secrets=True)
            try:
                LOG.debug('Missed execution %s', execution_api)
                self.pending_executions.put_nowait(execution_api)
            except Exception:
                # One bad record should not abort the whole bootstrap.
                LOG.exception('Failed adding execution to in-memory queue.')
                continue
        LOG.info('Bootstrapped executions...')

    def _get_export_marker_from_db(self):
        """Return the timestamp of the last exported execution, or None."""
        try:
            markers = DumperMarker.get_all()
        except Exception:
            # Any lookup failure means "no marker yet": fall back to a
            # full export rather than crash.
            return None
        else:
            if len(markers) >= 1:
                marker = markers[0]
                return isotime.parse(marker.marker)
            else:
                return None

    def _get_missed_executions_from_db(self, export_marker=None):
        """Executions persisted after *export_marker* (all, if no marker)."""
        if not export_marker:
            return self._get_all_executions_from_db()

        # XXX: Should adapt this query to get only executions with status
        # in COMPLETION_STATUSES.
        filters = {'end_timestamp__gt': export_marker}
        LOG.info('Querying for executions with filters: %s', filters)
        return ActionExecution.query(**filters)

    def _get_all_executions_from_db(self):
        return ActionExecution.get_all()  # XXX: Paginated call.
def get_worker():
    """Build an ExecutionsExporter bound to a fresh messaging connection.

    NOTE(review): the kombu Connection's context manager releases the
    connection when the ``with`` block exits, yet the returned exporter
    still holds a reference to it — presumably kombu re-establishes
    channels lazily; verify against the other st2 worker factories.
    """
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ExecutionsExporter(conn, [EXPORTER_WORK_Q])
| apache-2.0 |
joanma100/Sigil | src/Resource_Files/python_pkg/linux_python_gather.py | 3 | 5657 | #!/usr/bin/env python3
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import sys, os, inspect, shutil, platform, textwrap, py_compile, site
from python_paths import py_ver, py_lib, py_exe, py_inc, py_dest, tmp_prefix
# Python standard modules location
# (derived from wherever the running interpreter's own os module lives)
srcdir = os.path.dirname(inspect.getfile(os))

# Where we're going to copy stuff
py_dir = os.path.join(py_dest, 'lib', os.path.basename(srcdir))
print ('py_dir', py_dir)
app_dir = os.path.dirname(py_dest)
print ('app_dir', app_dir)
# Final runtime prefix: the staging tmp_prefix is stripped so pyvenv.cfg
# can point at the installed (not staged) location.
pyhome_dir = os.path.join(app_dir.replace(tmp_prefix, ''), os.path.basename(py_dest))
print ('pyhome_dir', pyhome_dir)
site_dest = os.path.join(py_dir, 'site-packages')

# Cherry-picked additional and/or modified modules
# Each entry is (name, kind): 'd' = package directory, 'f' = single file.
site_packages = [ ('lxml', 'd'),
                  ('six.py', 'f'),
                  ('bs4', 'd'),
                  ('html5lib','d'),
                  ('PIL', 'd'),
                  ('regex.py','f'),
                  ('_regex.cpython-34m.so','f'),
                  ('_regex_core.py','f'),
                  ('test_regex.py', 'f')]
def copy_site_packages(packages, dest):
    """Copy each (name, kind) entry of *packages* from the system
    site-packages directories into *dest*.

    kind 'd' copies a package directory (pruned via ignore_in_dirs);
    otherwise a single file is copied.  Only the first match across
    site.getsitepackages() is taken for each entry.

    Fix vs. original: the ``dest`` parameter is now honoured — the original
    ignored it and wrote to the module-level ``site_dest`` global (the sole
    call site passes ``site_dest``, so behavior at that call is unchanged).
    """
    for pkg, typ in packages:
        found = False
        for path in site.getsitepackages():
            if found:
                # Already copied from an earlier site-packages dir.
                break
            for entry in os.listdir(path):
                if entry != pkg:
                    continue
                src = os.path.join(path, entry)
                if typ == 'd' and os.path.isdir(src):
                    shutil.copytree(src, os.path.join(dest, entry),
                                    ignore=ignore_in_dirs)
                    found = True
                elif os.path.isfile(src):
                    shutil.copy2(src, os.path.join(dest, entry))
                    found = True
                break
def ignore_in_dirs(base, items, ignored_dirs=None):
    """shutil.copytree ignore-callback: entries of *base* to skip.

    Skips VCS/test/cache directories, directories without an __init__.py
    (i.e. not Python packages), and files that are neither .py nor .so.
    """
    if ignored_dirs is None:
        ignored_dirs = {'.svn', '.bzr', '.git', 'test', 'tests', 'testing', '__pycache__'}
    skipped = []
    for entry in items:
        full = os.path.join(base, entry)
        if os.path.isdir(full):
            is_package = os.path.exists(os.path.join(full, '__init__.py'))
            if entry in ignored_dirs or not is_package:
                skipped.append(entry)
        elif entry.rpartition('.')[-1] not in ('so', 'py'):
            # Non-directory entries: keep only Python sources and
            # extension modules.
            skipped.append(entry)
    return skipped
def copy_pylib():
    """Copy the shared libpython next to the application binary, and install
    the interpreter executable as "sigil-python3" in the bundle's bin dir."""
    shutil.copy2(py_lib, app_dir)
    shutil.copy2(py_exe, os.path.join(py_dest, 'bin', "sigil-python3"))
def copy_python():
    """Assemble the bundled Python runtime under py_dir.

    Copies the standard library (minus tests/dev tooling), the cherry-picked
    site-packages, writes the custom site.py and pyvenv.cfg, then
    byte-compiles every .py to an optimized .pyo and removes the sources.

    Fix vs. original: the bare ``except:`` around byte-compilation is
    narrowed to ``except Exception:`` so Ctrl-C still aborts the build.
    """
    if not os.path.exists(py_dir):
        os.mkdir(py_dir)
    for x in os.listdir(srcdir):
        y = os.path.join(srcdir, x)
        ext = os.path.splitext(x)[1]
        # Copy stdlib packages, skipping tests and developer tooling.
        if os.path.isdir(y) and x not in ('test', 'hotshot', 'distutils',
                'site-packages', 'idlelib', 'lib2to3', 'dist-packages', '__pycache__'):
            shutil.copytree(y, os.path.join(py_dir, x),
                            ignore=ignore_in_dirs)
        # Top-level stdlib modules: sources and extension modules only.
        if os.path.isfile(y) and ext in ('.py', '.so'):
            shutil.copy2(y, py_dir)
    copy_site_packages(site_packages, site_dest)
    create_site_py()
    create_pyvenv()
    # Byte-compile everything to .pyo (optimize=2) and drop .py/.pyc so the
    # bundle ships compiled modules only.
    for x in os.walk(py_dir):
        for f in x[-1]:
            if f.endswith('.py'):
                y = os.path.join(x[0], f)
                rel = os.path.relpath(y, py_dir)
                try:
                    py_compile.compile(y, cfile=y+'o', dfile=rel, doraise=True, optimize=2)
                    os.remove(y)
                    z = y+'c'
                    if os.path.exists(z):
                        os.remove(z)
                except Exception:
                    print ('Failed to byte-compile', y)
def create_site_py():
    """Write a minimal replacement site.py into the bundled stdlib.

    The embedded source below is the runtime payload written verbatim into
    the bundle; it must stay byte-exact, so only this docstring comments it:
    it fixes sys.path for the relocated prefix and installs help().
    """
    with open(os.path.join(py_dir, 'site.py'), 'wb') as f:
        f.write(bytes(textwrap.dedent('''\
            import sys
            import builtins
            import os
            import _sitebuiltins

            def set_helper():
                builtins.help = _sitebuiltins._Helper()

            def fix_sys_path():
                if os.sep == '/':
                    sys.path.append(os.path.join(sys.prefix, "lib",
                                    "python" + sys.version[:3],
                                    "site-packages"))
                else:
                    for path in sys.path:
                        py_ver = "".join(map(str, sys.version_info[:2]))
                        if os.path.basename(path) == "python" + py_ver + ".zip":
                            sys.path.remove(path)
                    sys.path.append(os.path.join(sys.prefix, "lib", "site-packages"))

            def main():
                try:
                    fix_sys_path()
                    set_helper()
                except SystemExit as err:
                    if err.code is None:
                        return 0
                    if isinstance(err.code, int):
                        return err.code
                    print (err.code)
                    return 1
                except:
                    import traceback
                    traceback.print_exc()
                    return 1

            if not sys.flags.no_site:
                main()
            '''), 'UTF-8'))
def create_pyvenv():
    """Write the bundle's pyvenv.cfg with `home` pointing at the relocated
    prefix (pyhome_dir) so the bundled interpreter finds its own stdlib."""
    with open(os.path.join(py_dest, 'pyvenv.cfg'), 'wb') as f:
        f.write(bytes(textwrap.dedent('''\
            home = %s
            include-system-site-packages = false
            version = 3.4.0
            ''') % pyhome_dir, 'UTF-8'))
if __name__ == '__main__':
copy_pylib()
copy_python()
| gpl-3.0 |
disnesquick/ripley | py/interface.py | 1 | 3569 | from unstuck import *
class ExposedCall:
    """Wrapper marking a plain function as exposed for remote invocation.

    The wrapped callable is kept on ``self.call``; invoking the wrapper
    directly is deliberately unsupported.
    """
    def __init__(self, func):
        # Keep a reference to the underlying function for the dispatch layer.
        self.call = func

    def __call__(self, *args, **kwargs):
        # Direct invocation is not part of this object's contract.
        raise NotImplementedError
class ExposedObject:
    """Base class for objects whose methods are exposed remotely.

    ``exposedMethods`` is a class-level registry (intentionally shared by
    subclasses that do not override it); direct invocation is unsupported.
    """
    exposedMethods = {}

    def __call__(self, *args, **kwargs):
        # Instances are not directly callable; dispatch happens elsewhere.
        raise NotImplementedError
class BoundMethod:
    """Pairs a proxied remote method with the instance it was accessed on.

    NOTE(review): ``await`` and ``async`` here are helper *functions* from
    unstuck's star-import, not keywords; ``def async`` makes this file a
    SyntaxError on Python 3.7+, so the codebase presumably targets <= 3.6.
    """
    def __init__(self, instance, proxy):
        self.instance = instance
        self.proxy = proxy

    def __call__(self, *args):
        # Synchronous call: block until the remote call completes.
        return await(self.proxy.handleCall(self.instance, args))

    def async(self, *args):
        # Fire the call without blocking; returns whatever unstuck's
        # async() yields (presumably a future) — verify in unstuck.
        return async(self.proxy.handleCall(self.instance, args))

    def coro(self, *args):
        # Return the raw coroutine for the caller to schedule itself.
        return self.proxy.handleCall(self.instance, args)
class BoundCall:
    """Pairs a proxied free-standing remote call with a destination route.

    Mirrors BoundMethod, but binds a route rather than an object instance.
    NOTE(review): ``await``/``async`` are unstuck helper functions; see
    BoundMethod for the Python-version caveat.
    """
    def __init__(self, route, proxy):
        self.route = route
        self.proxy = proxy

    def __call__(self, *args):
        # Synchronous call: block until the remote call completes.
        return await(self.proxy.handleCall(self.route, args))

    def async(self, *args):
        # Non-blocking variant via unstuck's async() helper.
        return async(self.proxy.handleCall(self.route, args))

    def coro(self, *args):
        # Return the raw coroutine for the caller to schedule itself.
        return self.proxy.handleCall(self.route, args)
class MethodProxy:
    """Descriptor producing per-instance bound wrappers for a remote method,
    identified by its transverse ID."""
    def __init__(self, transverseID):
        self.transverseID = transverseID

    def __get__(self, instance, owner):
        # Emulate ordinary method binding: pair the accessed instance with
        # this proxy so the eventual call can be routed through it.
        binding = BoundMethod(instance, self)
        return binding
class CallProxy:
    """Descriptor that binds a remote free function to the destination route
    of the instance it is accessed through (returns a BoundCall)."""

    def __init__(self, transverseID):
        # Identifier used to resolve the remote callable.
        self.transverseID = transverseID

    def __get__(self, instance, owner):
        # assumes the owning instance exposes a ``destination`` route —
        # TODO confirm against the gateway classes that use this proxy.
        return BoundCall(instance.destination, self)
class NotificationProxy(CallProxy):
    """Call proxy for fire-and-forget notifications (no response stream)."""

    @asynchronous
    def handleCall(self, route, args):
        connection = route.connection
        # Resolve the transverse ID to a remote object ID before notifying.
        objectID = yield from connection.transceiveResolve(
            route, self.transverseID)
        outStream = connection.transmitNotify(route, objectID)
        # serializeArguments is not defined in this file — presumably mixed
        # in by a generated subclass; confirm.
        self.serializeArguments(connection, args, outStream)
        outStream.commit()
class MethodNotificationProxy(MethodProxy):
    """Method proxy for fire-and-forget notifications (no response stream)."""

    @asynchronous
    def handleCall(self, instance, args):
        route = instance.destination
        connection = route.connection
        # Resolve the transverse ID to a remote object ID before notifying.
        objectID = yield from connection.transceiveResolve(
            route, self.transverseID)
        outStream = connection.transmitNotify(route, objectID)
        # serializeArguments is not defined in this file — presumably mixed
        # in by a generated subclass; confirm.
        self.serializeArguments(connection, instance, args, outStream)
        outStream.commit()
class EvaluationProxy(CallProxy):
    """Call proxy that transmits a call and waits for its evaluated result."""

    @asynchronous
    def handleCall(self, route, args):
        connection = route.connection
        # Resolve the TransverseID to a CallID
        objectID = yield from connection.transceiveResolve(
            route, self.transverseID)
        # Transmit the remote call
        outStream, responseFuture = connection.transceiveEval(route, objectID)
        # serializeArguments/deserializeReturn are not defined in this file —
        # presumably mixed in by a generated subclass; confirm.
        self.serializeArguments(connection, args, outStream)
        outStream.commit()
        # Wait for the reply and deserialize the return or throw an exception
        # if this failed.
        inStream = yield from responseFuture
        return self.deserializeReturn(connection, inStream)
class MethodEvaluationProxy(MethodProxy):
    """Method proxy that transmits a call and waits for its evaluated result."""

    @asynchronous
    def handleCall(self, instance, args):
        """ Sends the argument-bound call to a specific gateway for execution
            on the remote end.
        """
        route = instance.destination
        connection = route.connection
        # Resolve the TransverseID to a CallID
        objectID = yield from connection.transceiveResolve(
            route, self.transverseID)
        # Transmit the remote call
        outStream, responseFuture = connection.transceiveEval(route, objectID)
        # serializeArguments/deserializeReturn are not defined in this file —
        # presumably mixed in by a generated subclass; confirm.
        self.serializeArguments(connection, instance, args, outStream)
        outStream.commit()
        # Wait for the reply and deserialize the return or throw an exception
        # if this failed.
        inStream = yield from responseFuture
        return self.deserializeReturn(connection, inStream)
| gpl-2.0 |
lightslife/aliyun-cli | aliyuncli/aliyunCliParser.py | 9 | 5683 | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import sys
import paramOptimize
class aliyunCliParser():
    """Parses aliyun CLI arguments (``sys.argv[1:]``) of the shape::

        <command> [<operation> ...] [--key [value ...]] ...

    into the command, operation list, and ``--key -> [values]`` mappings.

    Fixes vs. the original: ``dict.has_key()`` (removed in Python 3) is
    replaced by ``in``; locals no longer shadow the ``len`` builtin; the
    duplicated ``getOpenApiKeyValues`` bodies share one implementation.
    """

    def __init__(self):
        # Everything after the program name.
        self.args = sys.argv[1:]

    def getCliCmd(self):
        """Return the top-level command (first argument) lower-cased, or None."""
        if len(self.args) >= 1:
            return self.args[0].lower()

    def getCliOperation(self):
        """Return the operation (second argument) as given, or None."""
        if len(self.args) >= 2:
            return self.args[1]

    def _getCommand(self):
        """Return the first argument without lower-casing, or None."""
        if len(self.args) >= 1:
            return self.args[0]

    def _getOperations(self):
        """Return the operation tokens before the first ``--`` flag, or None."""
        operations = []
        for arg in self.args[1:]:
            if arg.strip().startswith('--'):
                break
            operations.append(arg)
        return operations if operations else None

    def _getKeyValues(self):
        """Collect stripped ``--key value ...`` pairs (keys keep their ``--``
        prefix), run them through paramOptimize, and return the mapping."""
        keyValues = dict()
        total = len(self.args)
        pos = 1
        while pos < total:
            token = self.args[pos].strip()
            if token.startswith('--'):
                values = list()
                scan = pos + 1
                # Consume value tokens until the next flag.
                while scan < total and not self.args[scan].strip().startswith('--'):
                    values.append(self.args[scan].strip())
                    scan += 1
                keyValues[token] = values
                pos = scan
            else:
                pos += 1
        paramOptimize._paramOptimize(keyValues)
        return keyValues

    def getCliKeyValues(self):
        """Parse ``--key value ...`` pairs after the command/operation.

        Historical quirk preserved: a token *containing* ``--`` anywhere
        (not just as a prefix) is treated as a key.
        """
        keyValues = dict()
        total = len(self.args)
        if total >= 3:
            start = 2
            if '--' in self.args[1]:
                start = 1
            for index in range(start, total):
                token = self.args[index]
                if '--' in token:
                    values = list()
                    scan = index + 1
                    while scan < total and '--' not in self.args[scan]:
                        values.append(self.args[scan])
                        scan += 1
                    keyValues[token] = values
        return keyValues

    def getTempKeyAndSecret(self):
        """Return ``(key, secret)`` passed via --AccessKeyId/--AccessKeySecret,
        or ``(None, None)`` when absent."""
        keyValues = dict()
        total = len(self.args)
        keystr = "--AccessKeyId"
        secretstr = "--AccessKeySecret"
        _key = None
        _secret = None
        if total >= 3:
            for index in range(2, total):
                token = self.args[index]
                if '--' in token:
                    values = list()
                    scan = index + 1
                    while scan < total and '--' not in self.args[scan]:
                        values.append(self.args[scan])
                        scan += 1
                    keyValues[token] = values
        # ``in`` replaces dict.has_key(), which no longer exists on Python 3.
        if keystr in keyValues and len(keyValues[keystr]) > 0:
            _key = keyValues[keystr][0]
        if secretstr in keyValues and len(keyValues[secretstr]) > 0:
            _secret = keyValues[secretstr][0]
        return _key, _secret

    def getAllExtensionCommands(self):
        """Return the commands handled by the CLI itself rather than OpenAPI."""
        return ['help', '-h', '--help', ]

    def getOpenApiKeyValues(self, map):
        """Return a copy of ``map`` with every ``--`` removed from its keys.

        ``map`` shadows the builtin; the name is kept for API compatibility.
        """
        return self._getOpenApiKeyValues(map)

    def _getOpenApiKeyValues(self, map):
        """Single implementation shared with getOpenApiKeyValues()."""
        newMap = dict()
        for key, value in map.items():
            # replace() intentionally removes '--' anywhere in the key,
            # matching the original behavior.
            newMap[key.replace('--', '')] = value
        return newMap

    def getExtensionKeyValues(self, map):
        """Not implemented: filter keys belonging to extension commands."""
        pass

    def getOutPutFormat(self, map):
        """Return the value list passed via --output, or None."""
        return map.get('--output')
| apache-2.0 |
Marqin/godot | misc/scripts/makeargs.py | 22 | 2047 |
# C-preprocessor macro templates for the VisualServer thread command queue
# forwarders.  Placeholders filled in by the loop below: $num = argument
# count, $argt = template type list, $argtp = typed parameter list,
# $argp = call argument list.  Suffixes: R = returns a value, S = pushes
# synchronously, C = const method.
text = """
#define FUNC$numR(m_r,m_func,$argt)\\
virtual m_r m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
m_r ret;\\
command_queue.push_and_ret( visual_server, &VisualServer::m_func,$argp,&ret);\\
return ret;\\
} else {\\
return visual_server->m_func($argp);\\
}\\
}
#define FUNC$numRC(m_r,m_func,$argt)\\
virtual m_r m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
m_r ret;\\
command_queue.push_and_ret( visual_server, &VisualServer::m_func,$argp,&ret);\\
return ret;\\
} else {\\
return visual_server->m_func($argp);\\
}\\
}
#define FUNC$numS(m_func,$argt)\\
virtual void m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push_and_sync( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$numSC(m_func,$argt)\\
virtual void m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push_and_sync( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$num(m_func,$argt)\\
virtual void m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$numC(m_func,$argt)\\
virtual void m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
"""
# Emit the FUNC<N> macro family for 1..7 forwarded arguments.
for arg_count in range(1, 8):
    ordinals = range(1, arg_count + 1)
    typed_params = ", ".join("m_arg%d p%d" % (j, j) for j in ordinals)
    call_args = ", ".join("p%d" % j for j in ordinals)
    type_list = ", ".join("m_arg%d" % j for j in ordinals)
    # Replacement order matters: "$argtp" must be substituted before
    # "$argt", which is a prefix of it.
    expanded = (text.replace("$argtp", typed_params)
                    .replace("$argp", call_args)
                    .replace("$argt", type_list)
                    .replace("$num", str(arg_count)))
    print(expanded)
| mit |
aweinstock314/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/tokenizertotree.py | 483 | 1965 | from __future__ import absolute_import, division, unicode_literals
import sys
import os
import json
import re
import html5lib
from . import support
from . import test_tokenizer
# Shared parser instance reused for every converted test.
p = html5lib.HTMLParser()
# Substitution that strips the namespace attributes from the serialized
# "| <html ...>" root line (see make_test below).
unnamespaceExpected = re.compile(r"^(\|\s*)<html ([^>]+)>", re.M).sub
def main(out_path):
    """Convert every tokenizer *.test file into a tree-construction .dat
    file under out_path; exit(1) if out_path does not exist."""
    if not os.path.exists(out_path):
        sys.stderr.write("Path %s does not exist" % out_path)
        sys.exit(1)
    for filename in support.get_data_files('tokenizer', '*.test'):
        run_file(filename, out_path)
def run_file(filename, out_path):
    """Convert one tokenizer .test file (JSON) into a tree-construction
    .dat file written under out_path.

    Fix vs. original: both the input and output files were opened without
    ever being closed; use context managers so the handles are released
    even when parsing or conversion raises.
    """
    try:
        with open(filename, "r") as test_file:
            tests_data = json.load(test_file)
    except ValueError:
        sys.stderr.write("Failed to load %s\n" % filename)
        return
    name = os.path.splitext(os.path.split(filename)[1])[0]
    out_name = os.path.join(out_path, "tokenizer_%s.dat" % name)
    with open(out_name, "w") as output_file:
        if 'tests' in tests_data:
            for test_data in tests_data['tests']:
                if 'initialStates' not in test_data:
                    test_data["initialStates"] = ["Data state"]
                for initial_state in test_data["initialStates"]:
                    if initial_state != "Data state":
                        # don't support this yet
                        continue
                    test = make_test(test_data)
                    output_file.write(test)
def make_test(test_data):
    """Render one tokenizer test as a tree-construction test block
    (#data / #errors / expected tree).

    NOTE(review): rv mixes encoded bytes with the str produced by
    unnamespaceExpected before "\n".join — this only works on Python 2;
    confirm before running under Python 3.
    """
    if 'doubleEscaped' in test_data:
        test_data = test_tokenizer.unescape_test(test_data)
    rv = []
    rv.append("#data")
    rv.append(test_data["input"].encode("utf8"))
    rv.append("#errors")
    tree = p.parse(test_data["input"])
    output = p.tree.testSerializer(tree)
    # Trim the serializer's child indentation down by one character.
    output = "\n".join(("| " + line[3:]) if line.startswith("| ") else line
                       for line in output.split("\n"))
    # Drop the namespace attributes from the serialized <html> root line.
    output = unnamespaceExpected(r"\1<\2>", output)
    rv.append(output.encode("utf8"))
    rv.append("")
    return "\n".join(rv)
# CLI usage: python tokenizertotree.py <output-directory>
if __name__ == "__main__":
    main(sys.argv[1])
| mpl-2.0 |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/telemetry/telemetry/web_components/web_components_project.py | 45 | 1359 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from trace_viewer import trace_viewer_project
def _FindAllFilesRecursive(source_paths, pred):
all_filenames = set()
for source_path in source_paths:
for dirpath, _, filenames in os.walk(source_path):
for f in filenames:
if f.startswith('.'):
continue
x = os.path.abspath(os.path.join(dirpath, f))
if pred(x):
all_filenames.add(x)
return all_filenames
class WebComponentsProject(trace_viewer_project.TraceViewerProject):
    """TraceViewerProject rooted at the telemetry directory.

    HTML files under docs/, unittest_data/ and support/ (plus the generated
    results.html) are registered as non-module HTML so the project does not
    treat them as importable components.
    """

    telemetry_path = os.path.abspath(util.GetTelemetryDir())

    def __init__(self, *args, **kwargs):
        super(WebComponentsProject, self).__init__(*args, **kwargs)
        # Directories whose .html files are documentation or fixtures,
        # not web components.
        exclude_paths = [os.path.join(self.telemetry_path, 'docs'),
                         os.path.join(self.telemetry_path, 'unittest_data'),
                         os.path.join(self.telemetry_path, 'support')]
        excluded_html_files = _FindAllFilesRecursive(
            exclude_paths,
            lambda x: x.endswith('.html'))
        self.non_module_html_files.extend(excluded_html_files)
        # appendRel presumably resolves the name relative to the first
        # argument — confirm against TraceViewerProject.
        self.non_module_html_files.appendRel(self.telemetry_path, 'results.html')
        self.source_paths.append(self.telemetry_path)
| bsd-3-clause |
alertby/mbed | workspace_tools/host_tests/host_tests_plugins/module_copy_shell.py | 11 | 2473 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from os.path import join, basename
from host_test_plugins import HostTestPluginBase
class HostTestPluginCopyMethod_Shell(HostTestPluginBase):
    """Copy plugin that transfers a firmware image to the target's mount
    point using a shell copy tool (cp / copy / xcopy)."""

    # Plugin interface
    name = 'HostTestPluginCopyMethod_Shell'
    type = 'CopyMethod'
    stable = True
    capabilities = ['shell', 'cp', 'copy', 'xcopy']
    required_parameters = ['image_path', 'destination_disk']

    def setup(self, *args, **kwargs):
        """ Configure plugin, this function should be called before plugin execute() method is used.
        """
        # Plain shell copies need no configuration.
        return True

    def execute(self, capabilitity, *args, **kwargs):
        """ Executes capability by name.

            Each capability may directly just call some command line
            program or execute building pythonic function.

            NOTE(review): the parameter name 'capabilitity' is misspelled
            but kept as-is for API compatibility with callers.
        """
        result = False
        if self.check_parameters(capabilitity, *args, **kwargs) is True:
            image_path = kwargs['image_path']
            destination_disk = kwargs['destination_disk']
            # Wait for mount point to be ready
            self.check_mount_point_ready(destination_disk)  # Blocking
            # Prepare correct command line parameter values
            image_base_name = basename(image_path)
            destination_path = join(destination_disk, image_base_name)
            if capabilitity == 'shell':
                # Map the generic 'shell' capability onto the platform's tool.
                if os.name == 'nt': capabilitity = 'copy'
                elif os.name == 'posix': capabilitity = 'cp'
            if capabilitity == 'cp' or capabilitity == 'copy' or capabilitity == 'xcopy':
                copy_method = capabilitity
                cmd = [copy_method, image_path, destination_path]
                # 'copy'/'xcopy' are cmd.exe builtins and need a shell;
                # 'cp' is a real executable and does not.
                shell = not capabilitity == 'cp'
                result = self.run_command(cmd, shell=shell)
        return result
def load_plugin():
    """ Returns plugin available in this module
    """
    # Factory hook used by the host-test plugin registry.
    return HostTestPluginCopyMethod_Shell()
| apache-2.0 |
Nextdoor/buckle | buckle/message.py | 1 | 2006 | from __future__ import print_function
import os
import sys
# ANSI escape sequences used to colorize terminal output.
GREEN = '\033[32m'
YELLOW = '\033[33m'
RED = '\033[31m'
# Resets all terminal attributes back to the default.
EXIT = '\033[0m'

# Message severity levels.
INFO = 'info'
WARNING = 'warning'
ERROR = 'error'

# Color applied to each severity level when rendering.
LEVEL_COLOR_MAP = {
    INFO: GREEN,
    WARNING: YELLOW,
    ERROR: RED
}
class Sender(object):
    """Formats and prints namespaced, color-coded status messages to stderr.

    Informational messages render green, warnings yellow, errors red; color
    escapes are only emitted when the $TERM environment variable is set.
    """

    def __init__(self, prefix=None):
        # Namespace string prepended to every message (e.g. the tool name).
        self._prefix = prefix

    def format(self, msg, level):
        """Return ``msg`` tagged with its upper-cased level and the sender's
        prefix, wrapped in the level's ANSI color when $TERM is set."""
        tagged = level.upper() + ': ' + self._prefix + ' ' + msg
        if not os.getenv('TERM'):
            return tagged
        return LEVEL_COLOR_MAP[level] + tagged + EXIT

    def write(self, msg, level):
        """Format ``msg`` for ``level`` and print it to stderr."""
        print(self.format(msg, level), file=sys.stderr)

    def info(self, msg, **kwargs):
        self.write(msg, INFO, **kwargs)

    def warning(self, msg, **kwargs):
        self.write(msg, WARNING, **kwargs)

    def error(self, msg, **kwargs):
        self.write(msg, ERROR, **kwargs)

    def format_error(self, msg, **kwargs):
        return self.format(msg, ERROR, **kwargs)
| bsd-2-clause |
dmmcquay/kubernetes | cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py | 365 | 1084 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)
    """
    command = '{} --version'.format(bin_name).split()
    raw_output = subprocess.check_output(command).decode('utf-8')
    # Take at most the first three integer groups: major, minor, patch.
    numbers = re.findall(r"[0-9]+", raw_output)[:3]
    return tuple(int(part) for part in numbers)
| apache-2.0 |
ar7z1/ansible | lib/ansible/module_utils/facts/other/ohai.py | 232 | 2307 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class OhaiFactCollector(BaseFactCollector):
    '''This is a subclass of Facts for including information gathered from Ohai.'''
    name = 'ohai'
    _fact_ids = set()

    def __init__(self, collectors=None, namespace=None):
        # All ohai-provided facts are exposed under the 'ohai_' prefix.
        namespace = PrefixFactNamespace(namespace_name='ohai',
                                        prefix='ohai_')
        super(OhaiFactCollector, self).__init__(collectors=collectors,
                                                namespace=namespace)

    def find_ohai(self, module):
        """Return the path to the ohai executable, or None if not installed."""
        ohai_path = module.get_bin_path('ohai')
        return ohai_path

    def run_ohai(self, module, ohai_path,):
        """Run ohai and return its (rc, stdout, stderr) triple."""
        rc, out, err = module.run_command(ohai_path)
        return rc, out, err

    def get_ohai_output(self, module):
        """Return ohai's stdout, or None when ohai is missing or exits non-zero."""
        ohai_path = self.find_ohai(module)
        if not ohai_path:
            return None
        rc, out, err = self.run_ohai(module, ohai_path)
        if rc != 0:
            return None
        return out

    def collect(self, module=None, collected_facts=None):
        """Return ohai's facts as a dict; empty dict on any failure."""
        ohai_facts = {}
        if not module:
            return ohai_facts
        ohai_output = self.get_ohai_output(module)
        if ohai_output is None:
            return ohai_facts
        try:
            ohai_facts = json.loads(ohai_output)
        except Exception:
            # FIXME: useful error, logging, something...
            pass
        return ohai_facts
| gpl-3.0 |
utkbansal/kuma | vendor/packages/translate/convert/test_po2prop.py | 26 | 13240 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.convert import po2prop, test_convert
from translate.misc import wStringIO
from translate.storage import po
class TestPO2Prop:
    """Tests merging PO translations back into .properties templates,
    covering the java/mozilla/skype/strings/gaia personalities.

    NOTE(review): blank separator lines inside the multi-line PO literals
    may have been lost in transit — verify against the upstream file.
    """

    def po2prop(self, posource):
        """helper that converts po source to .properties source without requiring files"""
        inputfile = wStringIO.StringIO(posource)
        inputpo = po.pofile(inputfile)
        convertor = po2prop.po2prop()
        outputprop = convertor.convertstore(inputpo)
        return outputprop

    def merge2prop(self, propsource, posource, personality="java", remove_untranslated=False):
        """helper that merges po translations to .properties source without requiring files"""
        inputfile = wStringIO.StringIO(posource)
        inputpo = po.pofile(inputfile)
        templatefile = wStringIO.StringIO(propsource)
        #templateprop = properties.propfile(templatefile)
        convertor = po2prop.reprop(templatefile, inputpo, personality=personality, remove_untranslated=remove_untranslated)
        outputprop = convertor.convertstore()
        print(outputprop)
        return outputprop

    def test_merging_simple(self):
        """check the simplest case of merging a translation"""
        posource = '''#: prop\nmsgid "value"\nmsgstr "waarde"\n'''
        proptemplate = '''prop=value\n'''
        propexpected = '''prop=waarde\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_merging_untranslated(self):
        """check the simplest case of merging an untranslated unit"""
        posource = '''#: prop\nmsgid "value"\nmsgstr ""\n'''
        proptemplate = '''prop=value\n'''
        propexpected = proptemplate
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_hard_newlines_preserved(self):
        """check that we preserver hard coded newlines at the start and end of sentence"""
        posource = '''#: prop\nmsgid "\\nvalue\\n\\n"\nmsgstr "\\nwaarde\\n\\n"\n'''
        proptemplate = '''prop=\\nvalue\\n\\n\n'''
        propexpected = '''prop=\\nwaarde\\n\\n\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_space_preservation(self):
        """check that we preserve any spacing in properties files when merging"""
        posource = '''#: prop\nmsgid "value"\nmsgstr "waarde"\n'''
        proptemplate = '''prop = value\n'''
        propexpected = '''prop = waarde\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_merging_blank_entries(self):
        """check that we can correctly merge entries that are blank in the template"""
        posource = r'''#: accesskey-accept
msgid ""
"_: accesskey-accept\n"
""
msgstr ""'''
        proptemplate = 'accesskey-accept=\n'
        propexpected = 'accesskey-accept=\n'
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_merging_fuzzy(self):
        """check merging a fuzzy translation"""
        posource = '''#: prop\n#, fuzzy\nmsgid "value"\nmsgstr "waarde"\n'''
        proptemplate = '''prop=value\n'''
        propexpected = '''prop=value\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_accesskeys(self):
        """check merging Mozilla accesskeys"""
        posource = '''#: prop.label prop.accesskey
msgid "&Value"
msgstr "&Waarde"
#: key.label key.accesskey
msgid "&Key"
msgstr "&Sleutel"
'''
        proptemplate = '''prop.label=Value
prop.accesskey=V
key.label=Key
key.accesskey=K
'''
        propexpected = '''prop.label=Waarde
prop.accesskey=W
key.label=Sleutel
key.accesskey=S
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_accesskeys_missing_accesskey(self):
        """check merging Mozilla accesskeys"""
        posource = '''#: prop.label prop.accesskey
# No accesskey because we forgot or language doesn't do accesskeys
msgid "&Value"
msgstr "Waarde"
'''
        proptemplate = '''prop.label=Value
prop.accesskey=V
'''
        propexpected = '''prop.label=Waarde
prop.accesskey=V
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_margin_whitespace(self):
        """Check handling of Mozilla leading and trailing spaces"""
        posource = '''#: sepAnd
msgid " and "
msgstr " و "
#: sepComma
msgid ", "
msgstr "، "
'''
        proptemplate = r'''sepAnd = \u0020and\u0020
sepComma = ,\u20
'''
        propexpected = r'''sepAnd = \u0020و\u0020
sepComma = ،\u0020
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_all_whitespace(self):
        """Check for all white-space Mozilla hack, remove when the
        corresponding code is removed."""
        posource = '''#: accesskey-accept
msgctxt "accesskey-accept"
msgid ""
msgstr " "
#: accesskey-help
msgid "H"
msgstr "م"
'''
        proptemplate = '''accesskey-accept=
accesskey-help=H
'''
        propexpected = '''accesskey-accept=
accesskey-help=م
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_merging_propertyless_template(self):
        """check that when merging with a template with no property values that we copy the template"""
        posource = ""
        proptemplate = "# A comment\n"
        propexpected = proptemplate
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_delimiters(self):
        """test that we handle different delimiters."""
        posource = '''#: prop\nmsgid "value"\nmsgstr "translated"\n'''
        proptemplate = '''prop %s value\n'''
        propexpected = '''prop %s translated\n'''
        for delim in ['=', ':', '']:
            print("testing '%s' as delimiter" % delim)
            propfile = self.merge2prop(proptemplate % delim, posource)
            print(propfile)
            assert propfile == propexpected % delim

    def test_empty_value(self):
        """test that we handle an value in the template"""
        posource = '''#: key
msgctxt "key"
msgid ""
msgstr "translated"
'''
        proptemplate = '''key\n'''
        propexpected = '''key = translated\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_personalities(self):
        """test that we output correctly for Java and Mozilla style property files. Mozilla uses Unicode, while Java uses escaped Unicode"""
        posource = u'''#: prop\nmsgid "value"\nmsgstr "ṽḁḽṻḝ"\n'''
        proptemplate = u'''prop = value\n'''
        propexpectedjava = u'''prop = \\u1E7D\\u1E01\\u1E3D\\u1E7B\\u1E1D\n'''
        propfile = self.merge2prop(proptemplate, posource)
        assert propfile == propexpectedjava
        propexpectedmozilla = u'''prop = ṽḁḽṻḝ\n'''.encode('utf-8')
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        assert propfile == propexpectedmozilla
        proptemplate = u'''prop = value\n'''.encode('utf-16')
        propexpectedskype = u'''prop = ṽḁḽṻḝ\n'''.encode('utf-16')
        propfile = self.merge2prop(proptemplate, posource, personality="skype")
        assert propfile == propexpectedskype
        proptemplate = u'''"prop" = "value";\n'''.encode('utf-16')
        propexpectedstrings = u'''"prop" = "ṽḁḽṻḝ";\n'''.encode('utf-16')
        propfile = self.merge2prop(proptemplate, posource, personality="strings")
        assert propfile == propexpectedstrings

    def test_merging_untranslated_simple(self):
        """check merging untranslated entries in two 1) use English 2) drop key, value pair"""
        posource = '''#: prop\nmsgid "value"\nmsgstr ""\n'''
        proptemplate = '''prop = value\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == proptemplate  # We use the existing values
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == ''  # We drop the key

    def test_merging_untranslated_multiline(self):
        """check merging untranslated entries with multiline values"""
        posource = '''#: prop\nmsgid "value1 value2"\nmsgstr ""\n'''
        proptemplate = '''prop = value1 \
value2
'''
        propexpected = '''prop = value1 value2\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected  # We use the existing values
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == ''  # We drop the key

    def test_merging_untranslated_comments(self):
        """check merging untranslated entries with comments"""
        posource = '''#: prop\nmsgid "value"\nmsgstr ""\n'''
        proptemplate = '''# A comment\nprop = value\n'''
        propexpected = '# A comment\nprop = value\n'
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected  # We use the existing values
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        # FIXME ideally we should drop the comment as well as the unit
        assert propfile == '# A comment\n'  # We drop the key

    def test_merging_untranslated_unchanged(self):
        """check removing untranslated entries but keeping unchanged ones"""
        posource = '''#: prop
msgid "value"
msgstr ""
#: prop2
msgid "value2"
msgstr "value2"
'''
        proptemplate = '''prop=value
prop2=value2
'''
        propexpected = '''prop2=value2\n'''
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == propexpected

    def test_merging_blank(self):
        """We always merge in a blank translation for a blank source"""
        posource = '''#: prop
msgctxt "prop"
msgid ""
msgstr "value"
#: prop2
msgctxt "prop2"
msgid ""
msgstr ""
'''
        proptemplate = '''prop=
prop2=
'''
        propexpected = '''prop=value
prop2=
'''
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=False)
        print(propfile)
        assert propfile == propexpected
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == propexpected

    def test_gaia_plurals(self):
        """Test back conversion of gaia plural units."""
        proptemplate = '''
message-multiedit-header={[ plural(n) ]}
message-multiedit-header[zero]=Edit
message-multiedit-header[one]={{ n }} selected
message-multiedit-header[two]={{ n }} selected
message-multiedit-header[few]={{ n }} selected
message-multiedit-header[many]={{ n }} selected
message-multiedit-header[other]={{ n }} selected
'''
        posource = r'''#: message-multiedit-header[zero]
msgctxt "message-multiedit-header[zero]"
msgid "Edit"
msgstr "Redigeer"
#: message-multiedit-header
msgctxt "message-multiedit-header"
msgid "Edit"
msgid_plural "{{ n }} selected"
msgstr[0] "xxxRedigeerxxx"
msgstr[1] "{{ n }} gekies"
msgstr[2] "{{ n }} gekies"
msgstr[3] "{{ n }} gekies"
msgstr[4] "{{ n }} gekies"
msgstr[5] "{{ n }} gekies"
'''
        propexpected = '''
message-multiedit-header={[ plural(n) ]}
message-multiedit-header[zero]=Redigeer
message-multiedit-header[one]={{ n }} gekies
message-multiedit-header[two]={{ n }} gekies
message-multiedit-header[few]={{ n }} gekies
message-multiedit-header[many]={{ n }} gekies
message-multiedit-header[other]={{ n }} gekies
'''
        propfile = self.merge2prop(proptemplate, posource, personality="gaia")
        assert propfile == propexpected
class TestPO2PropCommand(test_convert.TestConvertCommand, TestPO2Prop):
    """Tests running actual po2prop commands on files"""
    convertmodule = po2prop
    defaultoptions = {"progress": "none"}

    def test_help(self):
        """tests getting help"""
        options = test_convert.TestConvertCommand.test_help(self)
        # Every supported command-line option must be documented in --help.
        options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
        options = self.help_check(options, "--fuzzy")
        options = self.help_check(options, "--threshold=PERCENT")
        options = self.help_check(options, "--personality=TYPE")
        options = self.help_check(options, "--encoding=ENCODING")
        options = self.help_check(options, "--removeuntranslated")
        options = self.help_check(options, "--nofuzzy", last=True)
| mpl-2.0 |
rapidpro/expressions | python/setup.py | 1 | 1901 | from setuptools import setup, find_packages
from collections import defaultdict
extra_packages = defaultdict(list)
def _is_requirement(line):
"""Returns whether the line is a valid package requirement."""
line = line.strip()
return line and not (line.startswith("-r") or line.startswith("#"))
def _read_requirements(filename, extra_packages):
    """Read package requirements from ``filename``.

    Plain requirements are returned as a list; lines carrying an
    environment marker (``dep ; condition``) are grouped into
    ``extra_packages`` keyed by the condition.

    Fixes vs. original: the requirements file is closed via a context
    manager (it was previously leaked), and ``setdefault`` is used so the
    function works with any dict, not only the module-level defaultdict.

    :returns: (hard_requirements, extra_packages)
    """
    with open(filename) as requirements_file:
        contents = requirements_file.read()
    hard_requirements = []
    for line in contents.splitlines():
        if _is_requirement(line):
            if line.find(';') > -1:
                dep, condition = tuple(line.split(';'))
                extra_packages.setdefault(condition.strip(), []).append(dep.strip())
            else:
                hard_requirements.append(line.strip())
    return hard_requirements, extra_packages
required_packages, extra_packages = _read_requirements("requirements/base.txt", extra_packages)
test_packages, extra_packages = _read_requirements("requirements/tests.txt", extra_packages)

setup(
    name='rapidpro-expressions',
    version='1.8',
    description='Python implementation of the RapidPro expression and templating system',
    url='https://github.com/rapidpro/expressions',
    author='Nyaruka',
    author_email='code@nyaruka.com',
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
    ],
    keywords='rapidpro templating',
    packages=find_packages(),
    package_data={'temba_expressions': ['month.aliases']},
    install_requires=required_packages,
    # BUG FIX: setuptools' keyword is 'extras_require'; the former
    # 'extra_packages=' was not a recognized setup() argument, so all
    # environment-conditional dependencies were silently dropped.
    extras_require=extra_packages,
    test_suite='nose.collector',
    tests_require=required_packages + test_packages,
)
| bsd-3-clause |
cloakedcode/CouchPotatoServer | libs/enzyme/ogm.py | 180 | 10836 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import struct
import re
import stat
import os
import logging
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
# Ogg packet-type flag values found in the first byte of a packet.
# NOTE(review): the "PACKED_" spellings below look like typos for "PACKET_",
# but they are referenced by that name elsewhere in this module, so the
# names must not be changed here.
PACKET_TYPE_HEADER = 0x01
PACKED_TYPE_METADATA = 0x03
PACKED_TYPE_SETUP = 0x05
# Mask used to extract the packet type bits from the flags byte.
PACKET_TYPE_BITS = 0x07
PACKET_IS_SYNCPOINT = 0x08
#VORBIS_VIDEO_PACKET_INFO = 'video'
# struct format strings for the "Direct Show" style stream headers
# (little-endian; see _parseHeader for the field layout).
STREAM_HEADER_VIDEO = '<4sIQQIIHII'
STREAM_HEADER_AUDIO = '<4sIQQIIHHHI'
# Mapping from upper-cased VORBISCOMMENT keys to the attribute names used
# by core.AVContainer (consumed through Ogm.table_mapping).
VORBISCOMMENT = { 'TITLE': 'title',
                  'ALBUM': 'album',
                  'ARTIST': 'artist',
                  'COMMENT': 'comment',
                  'ENCODER': 'encoder',
                  'TRACKNUMBER': 'trackno',
                  'LANGUAGE': 'language',
                  'GENRE': 'genre',
                }
# FIXME: check VORBISCOMMENT date and convert to timestamp
# Deactived tag: 'DATE': 'date',
# Maximum number of OggS pages scanned from the start of the file before
# giving up on finding the beginning of the stream body.
MAXITERATIONS = 30
class Ogm(core.AVContainer):
    """Parser for OGG/OGM container files.

    Scans OggS pages from the start of the file (and near the end, to get
    length information), collects stream headers and VORBISCOMMENT metadata,
    and exposes the results through the ``core.AVContainer`` attributes
    (``video``, ``audio``, ``subtitles``, ``chapters``, ...).

    NOTE(review): this is Python 2 era code -- it operates on ``str`` byte
    strings via ``ord()`` and uses ``dict.has_key()`` / ``unicode()``.
    """
    # Translation table consumed by core.AVContainer._appendtable().
    table_mapping = { 'VORBISCOMMENT' : VORBISCOMMENT }
    def __init__(self, file):
        core.AVContainer.__init__(self)
        # Placeholder; the real samplerate is taken from the audio header.
        self.samplerate = 1
        self.all_streams = [] # used to add meta data to streams
        self.all_header = []
        # Parse pages from the start until the data body begins (granule
        # position > 0) or we give up after MAXITERATIONS pages.
        for i in range(MAXITERATIONS):
            granule, nextlen = self._parseOGGS(file)
            if granule == None:
                if i == 0:
                    # oops, bad file
                    raise ParseError()
                break
            elif granule > 0:
                # ok, file started
                break
        # seek to the end of the stream, to avoid scanning the whole file
        if (os.stat(file.name)[stat.ST_SIZE] > 50000):
            file.seek(os.stat(file.name)[stat.ST_SIZE] - 49000)
        # read the rest of the file into a buffer
        h = file.read()
        # find last OggS to get length info
        if len(h) > 200:
            idx = h.find('OggS')
            pos = -49000 + idx
            if idx:
                file.seek(os.stat(file.name)[stat.ST_SIZE] + pos)
                # Re-parse the trailing pages so _parseOGGS can update the
                # per-stream length from the final granule positions.
                while 1:
                    granule, nextlen = self._parseOGGS(file)
                    if not nextlen:
                        break
        # Copy metadata to the streams
        # NOTE(review): this pairing assumes headers and streams were
        # appended in the same order -- verify for multi-stream files.
        if len(self.all_header) == len(self.all_streams):
            for i in range(len(self.all_header)):
                # get meta info
                for key in self.all_streams[i].keys():
                    if self.all_header[i].has_key(key):
                        self.all_streams[i][key] = self.all_header[i][key]
                        del self.all_header[i][key]
                    if self.all_header[i].has_key(key.upper()):
                        asi = self.all_header[i][key.upper()]
                        self.all_streams[i][key] = asi
                        del self.all_header[i][key.upper()]
                # Chapter parser: consume CHAPTERnn / CHAPTERnnNAME pairs
                # until the sequence has a gap.
                if self.all_header[i].has_key('CHAPTER01') and \
                       not self.chapters:
                    while 1:
                        s = 'CHAPTER%02d' % (len(self.chapters) + 1)
                        if self.all_header[i].has_key(s) and \
                               self.all_header[i].has_key(s + 'NAME'):
                            pos = self.all_header[i][s]
                            try:
                                pos = int(pos)
                            except ValueError:
                                # Position given as "HH:MM:SS(.ms)" -- fold
                                # the colon-separated parts into seconds.
                                new_pos = 0
                                for v in pos.split(':'):
                                    new_pos = new_pos * 60 + float(v)
                                pos = int(new_pos)
                            c = self.all_header[i][s + 'NAME']
                            c = core.Chapter(c, pos)
                            del self.all_header[i][s + 'NAME']
                            del self.all_header[i][s]
                            self.chapters.append(c)
                        else:
                            break
        # If there are no video streams in this ogg container, it
        # must be an audio file. Raise an exception to cause the
        # factory to fall back to audio.ogg.
        if len(self.video) == 0:
            raise ParseError
        # Copy Metadata from tables into the main set of attributes
        for header in self.all_header:
            self._appendtable('VORBISCOMMENT', header)
    def _parseOGGS(self, file):
        """Parse a single OggS page starting at the current file position.

        Returns ``(granulepos, total_page_length)``; ``(None, None)`` on
        end-of-file or unsupported data. Also dispatches the first packet of
        the page to _parseHeader/_parseMeta and updates stream lengths.
        """
        h = file.read(27)
        if len(h) == 0:
            # Regular File end
            return None, None
        elif len(h) < 27:
            log.debug(u'%d Bytes of Garbage found after End.' % len(h))
            return None, None
        if h[:4] != "OggS":
            log.debug(u'Invalid Ogg')
            raise ParseError()
        version = ord(h[4])
        if version != 0:
            log.debug(u'Unsupported OGG/OGM Version %d' % version)
            return None, None
        # Fixed 27-byte page header: flags, granule position, serial,
        # page sequence number, checksum, segment count.
        head = struct.unpack('<BQIIIB', h[5:])
        headertype, granulepos, serial, pageseqno, checksum, \
                    pageSegCount = head
        self.mime = 'application/ogm'
        self.type = 'OGG Media'
        # Segment table: page body length is the sum of the lacing values.
        tab = file.read(pageSegCount)
        nextlen = 0
        for i in range(len(tab)):
            nextlen += ord(tab[i])
        else:
            # NOTE(review): a for-loop ``else`` runs whenever the loop ends
            # without ``break`` -- this one never breaks, so the block below
            # always executes after the lacing values are summed.
            h = file.read(1)
            packettype = ord(h[0]) & PACKET_TYPE_BITS
            if packettype == PACKET_TYPE_HEADER:
                h += file.read(nextlen - 1)
                self._parseHeader(h, granulepos)
            elif packettype == PACKED_TYPE_METADATA:
                h += file.read(nextlen - 1)
                self._parseMeta(h)
            else:
                # Data packet: skip over the page body.
                file.seek(nextlen - 1, 1)
        # NOTE(review): ``serial`` is the bitstream serial number, not a
        # list index; using it to index all_streams only works when serials
        # happen to be small sequential integers -- verify.
        if len(self.all_streams) > serial:
            stream = self.all_streams[serial]
            if hasattr(stream, 'samplerate') and \
                   stream.samplerate:
                stream.length = granulepos / stream.samplerate
            elif hasattr(stream, 'bitrate') and \
                     stream.bitrate:
                stream.length = granulepos / stream.bitrate
        return granulepos, nextlen + 27 + pageSegCount
    def _parseMeta(self, h):
        """Parse a Vorbis comment packet and store the key/value pairs as a
        dict (upper-cased keys) in self.all_header."""
        flags = ord(h[0])  # packet flags byte (currently unused)
        headerlen = len(h)
        if headerlen >= 7 and h[1:7] == 'vorbis':
            header = {}
            # Vendor/encoder string precedes the comment list.
            nextlen, self.encoder = self._extractHeaderString(h[7:])
            numItems = struct.unpack('<I', h[7 + nextlen:7 + nextlen + 4])[0]
            start = 7 + 4 + nextlen
            for _ in range(numItems):
                (nextlen, s) = self._extractHeaderString(h[start:])
                start += nextlen
                if s:
                    # Comments have the form 'KEY=value'.
                    # NOTE(review): re.split keeps every '='-separated part
                    # but only a[1] is stored, so values containing '=' are
                    # truncated at the second '='.
                    a = re.split('=', s)
                    header[(a[0]).upper()] = a[1]
            # Put Header fields into info fields
            self.type = 'OGG Vorbis'
            self.subtype = ''
            self.all_header.append(header)
    def _parseHeader(self, header, granule):
        """Parse a stream header packet and append the resulting
        Audio/Video/Subtitle stream object to the container."""
        headerlen = len(header)
        flags = ord(header[0])
        if headerlen >= 30 and header[1:7] == 'vorbis':
            # Vorbis audio identification header.
            ai = core.AudioStream()
            ai.version, ai.channels, ai.samplerate, bitrate_max, ai.bitrate, \
                bitrate_min, blocksize, framing = \
                struct.unpack('<IBIiiiBB', header[7:7 + 23])
            ai.codec = 'Vorbis'
            #ai.granule = granule
            #ai.length = granule / ai.samplerate
            self.audio.append(ai)
            self.all_streams.append(ai)
        elif headerlen >= 7 and header[1:7] == 'theora':
            # Theora Header
            # XXX Finish Me
            vi = core.VideoStream()
            vi.codec = 'theora'
            self.video.append(vi)
            self.all_streams.append(vi)
        elif headerlen >= 142 and \
                 header[1:36] == 'Direct Show Samples embedded in Ogg':
            # Old Directshow format
            # XXX Finish Me
            vi = core.VideoStream()
            vi.codec = 'dshow'
            self.video.append(vi)
            self.all_streams.append(vi)
        elif flags & PACKET_TYPE_BITS == PACKET_TYPE_HEADER and \
                 headerlen >= struct.calcsize(STREAM_HEADER_VIDEO) + 1:
            # New Directshow Format
            htype = header[1:9]
            if htype[:5] == 'video':
                sh = header[9:struct.calcsize(STREAM_HEADER_VIDEO) + 9]
                streamheader = struct.unpack(STREAM_HEADER_VIDEO, sh)
                vi = core.VideoStream()
                (type, ssize, timeunit, samplerate, vi.length, buffersize, \
                 vi.bitrate, vi.width, vi.height) = streamheader
                # Width/height are stored as 16.16 fixed point.
                vi.width /= 65536
                vi.height /= 65536
                # XXX length, bitrate are very wrong
                vi.codec = type
                # timeunit is in 100ns ticks per frame.
                vi.fps = 10000000 / timeunit
                self.video.append(vi)
                self.all_streams.append(vi)
            elif htype[:5] == 'audio':
                sha = header[9:struct.calcsize(STREAM_HEADER_AUDIO) + 9]
                streamheader = struct.unpack(STREAM_HEADER_AUDIO, sha)
                ai = core.AudioStream()
                # NOTE(review): ai.bitrate appears twice in this unpack;
                # the second assignment wins.
                (type, ssize, timeunit, ai.samplerate, ai.length, buffersize, \
                 ai.bitrate, ai.channels, bloc, ai.bitrate) = streamheader
                self.samplerate = ai.samplerate
                log.debug(u'Samplerate %d' % self.samplerate)
                self.audio.append(ai)
                self.all_streams.append(ai)
            elif htype[:4] == 'text':
                subtitle = core.Subtitle()
                # FIXME: add more info
                self.subtitles.append(subtitle)
                self.all_streams.append(subtitle)
        else:
            log.debug(u'Unknown Header')
    def _extractHeaderString(self, header):
        """Read a 32-bit-length-prefixed UTF-8 string from ``header``.

        Returns ``(bytes_consumed, text_or_None)``.
        NOTE(review): the local ``len`` shadows the builtin -- kept as-is.
        """
        len = struct.unpack('<I', header[:4])[0]
        try:
            return (len + 4, unicode(header[4:4 + len], 'utf-8'))
        except (KeyError, IndexError, UnicodeDecodeError):
            return (len + 4, None)
# Exported entry point: enzyme's factory looks up ``Parser`` in this module.
Parser = Ogm
| gpl-3.0 |
kenwang815/KodiPlugins | script.module.youtube.dl/lib/youtube_dl/extractor/camdemy.py | 10 | 5439 | # coding: utf-8
from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
parse_iso8601,
str_to_int,
)
class CamdemyIE(InfoExtractor):
    """Extractor for single camdemy.com media pages.

    Handles both self-hosted media (resolved through the site's oEmbed
    endpoint plus a fileList.xml manifest) and pages that merely embed an
    external source, which are delegated via url_result().
    """
    _VALID_URL = r'https?://(?:www\.)?camdemy\.com/media/(?P<id>\d+)'
    _TESTS = [{
        # single file
        'url': 'http://www.camdemy.com/media/5181/',
        'md5': '5a5562b6a98b37873119102e052e311b',
        'info_dict': {
            'id': '5181',
            'ext': 'mp4',
            'title': 'Ch1-1 Introduction, Signals (02-23-2012)',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': '',
            'creator': 'ss11spring',
            'upload_date': '20130114',
            'timestamp': 1358154556,
            'view_count': int,
        }
    }, {
        # With non-empty description
        'url': 'http://www.camdemy.com/media/13885',
        'md5': '4576a3bb2581f86c61044822adbd1249',
        'info_dict': {
            'id': '13885',
            'ext': 'mp4',
            'title': 'EverCam + Camdemy QuickStart',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'md5:050b62f71ed62928f8a35f1a41e186c9',
            'creator': 'evercam',
            'upload_date': '20140620',
            'timestamp': 1403271569,
        }
    }, {
        # External source
        'url': 'http://www.camdemy.com/media/14842',
        'md5': '50e1c3c3aa233d3d7b7daa2fa10b1cf7',
        'info_dict': {
            'id': '2vsYQzNIsJo',
            'ext': 'mp4',
            'upload_date': '20130211',
            'uploader': 'Hun Kim',
            'description': 'Excel 2013 Tutorial for Beginners - How to add Password Protection',
            'uploader_id': 'hunkimtutorials',
            'title': 'Excel 2013 Tutorial - How to add Password Protection',
        }
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)
        # Pages embedding external media link the original source; hand such
        # URLs off to whichever extractor matches them.
        src_from = self._html_search_regex(
            r"<div class='srcFrom'>Source: <a title='([^']+)'", page,
            'external source', default=None)
        if src_from:
            return self.url_result(src_from)
        # Self-hosted media: the oEmbed endpoint provides title/author/
        # duration and a thumbnail URL that also locates the video folder.
        oembed_obj = self._download_json(
            'http://www.camdemy.com/oembed/?format=json&url=' + url, video_id)
        thumb_url = oembed_obj['thumbnail_url']
        video_folder = compat_urlparse.urljoin(thumb_url, 'video/')
        file_list_doc = self._download_xml(
            compat_urlparse.urljoin(video_folder, 'fileList.xml'),
            video_id, 'Filelist XML')
        file_name = file_list_doc.find('./video/item/fileName').text
        video_url = compat_urlparse.urljoin(video_folder, file_name)
        # Site timestamps are given in UTC+8.
        timestamp = parse_iso8601(self._html_search_regex(
            r"<div class='title'>Posted\s*:</div>\s*<div class='value'>([^<>]+)<",
            page, 'creation time', fatal=False),
            delimiter=' ', timezone=datetime.timedelta(hours=8))
        view_count = str_to_int(self._html_search_regex(
            r"<div class='title'>Views\s*:</div>\s*<div class='value'>([^<>]+)<",
            page, 'view count', fatal=False))
        return {
            'id': video_id,
            'url': video_url,
            'title': oembed_obj['title'],
            'thumbnail': thumb_url,
            'description': self._html_search_meta('description', page),
            'creator': oembed_obj['author_name'],
            'duration': oembed_obj['duration'],
            'timestamp': timestamp,
            'view_count': view_count,
        }
class CamdemyFolderIE(InfoExtractor):
    """Extractor for camdemy.com folder (playlist) pages."""

    # BUG FIX: the dots were unescaped (matching any character) and the
    # "www." prefix was mandatory; align the pattern with CamdemyIE above.
    _VALID_URL = r'https?://(?:www\.)?camdemy\.com/folder/(?P<id>\d+)'
    _TESTS = [{
        # links with trailing slash
        'url': 'http://www.camdemy.com/folder/450',
        'info_dict': {
            'id': '450',
            'title': '信號與系統 2012 & 2011 (Signals and Systems)',
        },
        'playlist_mincount': 145
    }, {
        # links without trailing slash
        # and multi-page
        'url': 'http://www.camdemy.com/folder/853',
        'info_dict': {
            'id': '853',
            'title': '科學計算 - 使用 Matlab'
        },
        'playlist_mincount': 20
    }, {
        # with displayMode parameter. For testing the codes to add parameters
        'url': 'http://www.camdemy.com/folder/853/?displayMode=defaultOrderByOrg',
        'info_dict': {
            'id': '853',
            'title': '科學計算 - 使用 Matlab'
        },
        'playlist_mincount': 20
    }]

    def _real_extract(self, url):
        folder_id = self._match_id(url)
        # Add displayMode=list so that all links are displayed in a single page
        parsed_url = list(compat_urlparse.urlparse(url))
        query = dict(compat_urlparse.parse_qsl(parsed_url[4]))
        query.update({'displayMode': 'list'})
        parsed_url[4] = compat_urllib_parse_urlencode(query)
        final_url = compat_urlparse.urlunparse(parsed_url)
        page = self._download_webpage(final_url, folder_id)
        # Collect every media link on the (now single-page) folder listing.
        matches = re.findall(r"href='(/media/\d+/?)'", page)
        entries = [self.url_result('http://www.camdemy.com' + media_path)
                   for media_path in matches]
        # The folder title is exposed through the page's keywords meta tag.
        folder_title = self._html_search_meta('keywords', page)
        return self.playlist_result(entries, folder_id, folder_title)
| gpl-2.0 |
jetskijoe/headphones | lib/beetsplug/metasync/__init__.py | 13 | 4437 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Heinz Wiesinger.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from music player libraries
"""
from __future__ import division, absolute_import, print_function
from abc import abstractmethod, ABCMeta
from importlib import import_module
from beets.util.confit import ConfigValueError
from beets import ui
from beets.plugins import BeetsPlugin
import six
# Package that hosts the individual meta-source implementation modules.
METASYNC_MODULE = 'beetsplug.metasync'
# Dictionary to map the MODULE and the CLASS NAME of meta sources
SOURCES = {
    'amarok': 'Amarok',
    'itunes': 'Itunes',
}
class MetaSource(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for a music-player metadata source.

    Concrete subclasses are discovered via SOURCES/load_meta_sources and
    must implement sync_from_source().
    """
    def __init__(self, config, log):
        # Per-source flexible field types; subclasses may override the
        # class-level attribute (load_item_types reads it from the class).
        self.item_types = {}
        self.config = config
        self._log = log
    @abstractmethod
    def sync_from_source(self, item):
        """Update *item* in place with metadata from this source."""
        pass
def load_meta_sources():
    """Return a mapping of lower-cased class name to MetaSource subclass.

    E.g., {'itunes': Itunes} with issubclass(Itunes, MetaSource) true.
    """
    return {
        class_name.lower(): getattr(
            import_module(METASYNC_MODULE + '.' + module_path), class_name)
        for module_path, class_name in SOURCES.items()
    }
# Resolve all known meta-source classes once at import time.
META_SOURCES = load_meta_sources()
def load_item_types():
    """Return the union of the ``item_types`` dicts of all meta sources."""
    merged = {}
    for source_cls in META_SOURCES.values():
        merged.update(source_cls.item_types)
    return merged
class MetaSyncPlugin(BeetsPlugin):
    """Beets plugin providing the ``metasync`` command, which copies
    metadata from external music-player libraries onto matching items."""

    item_types = load_item_types()

    def __init__(self):
        super(MetaSyncPlugin, self).__init__()

    def commands(self):
        """Expose the ``metasync`` subcommand and its options."""
        cmd = ui.Subcommand('metasync',
                            help='update metadata from music player libraries')
        cmd.parser.add_option('-p', '--pretend', action='store_true',
                              help='show all changes but do nothing')
        cmd.parser.add_option('-s', '--source', default=[],
                              action='append', dest='sources',
                            help='comma-separated list of sources to sync')
        cmd.parser.add_format_option()
        cmd.func = self.func
        return [cmd]

    def func(self, lib, opts, args):
        """Command handler for the metasync function.
        """
        pretend = opts.pretend
        query = ui.decargs(args)

        sources = []
        for source in opts.sources:
            sources.extend(source.split(','))
        # Fall back to the configured source list when none given on the CLI.
        sources = sources or self.config['source'].as_str_seq()

        meta_source_instances = {}
        items = lib.items(query)

        # Avoid needlessly instantiating meta sources (can be expensive)
        if not items:
            self._log.info(u'No items found matching query')
            return

        # Instantiate the meta sources
        for player in sources:
            try:
                cls = META_SOURCES[player]
            except KeyError:
                self._log.error(u'Unknown metadata source \'{0}\''.format(
                    player))
                # BUG FIX: without this ``continue`` the loop fell through
                # and instantiated a stale ``cls`` from a previous iteration
                # (or raised NameError on the first one).
                continue

            try:
                meta_source_instances[player] = cls(self.config, self._log)
            except (ImportError, ConfigValueError) as e:
                self._log.error(u'Failed to instantiate metadata source '
                                u'\'{0}\': {1}'.format(player, e))

        # Avoid needlessly iterating over items
        if not meta_source_instances:
            self._log.error(u'No valid metadata sources found')
            return

        # Sync the items with all of the meta sources
        for item in items:
            for meta_source in meta_source_instances.values():
                meta_source.sync_from_source(item)

            changed = ui.show_model_changes(item)

            if changed and not pretend:
                item.store()
| gpl-3.0 |
ndingwall/scikit-learn | examples/feature_selection/plot_feature_selection.py | 18 | 3371 | """
============================
Univariate Feature Selection
============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest, f_classif
# #############################################################################
# Import some data to play with
# The iris dataset
X, y = load_iris(return_X_y=True)
# Some noisy data not correlated
E = np.random.RandomState(42).uniform(0, 0.1, size=(X.shape[0], 20))
# Add the noisy data to the informative features
X = np.hstack((X, E))
# Split dataset to select feature and evaluate the classifier
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, random_state=0
)
plt.figure(1)
plt.clf()
# One bar-group position per feature (4 informative + 20 noise columns).
X_indices = np.arange(X.shape[-1])
# #############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function to select the four
# most significant features
selector = SelectKBest(f_classif, k=4)
selector.fit(X_train, y_train)
# Rescale -log10(p) so the bars are comparable with the SVM weights.
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
        label=r'Univariate score ($-Log(p_{value})$)')
# #############################################################################
# Compare to the weights of an SVM
clf = make_pipeline(MinMaxScaler(), LinearSVC())
clf.fit(X_train, y_train)
print('Classification accuracy without selecting features: {:.3f}'
      .format(clf.score(X_test, y_test)))
# Per-feature importance: sum of absolute coefficients, normalized to sum 1.
svm_weights = np.abs(clf[-1].coef_).sum(axis=0)
svm_weights /= svm_weights.sum()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight')
clf_selected = make_pipeline(
        SelectKBest(f_classif, k=4), MinMaxScaler(), LinearSVC()
)
clf_selected.fit(X_train, y_train)
print('Classification accuracy after univariate feature selection: {:.3f}'
      .format(clf_selected.score(X_test, y_test)))
svm_weights_selected = np.abs(clf_selected[-1].coef_).sum(axis=0)
svm_weights_selected /= svm_weights_selected.sum()
# Plot only at the positions of the features kept by the selector.
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
        width=.2, label='SVM weights after selection')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
vFense/vFenseAgent-nix | agent/deps/rpm6/Python-2.7.5/lib/python2.7/sched.py | 175 | 5088 | """A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are packed in a sequence).
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
Event = namedtuple('Event', 'time, priority, action, argument')
class scheduler:
    """A generic event scheduler.

    Each instance keeps its own priority queue of Event tuples and is
    driven by two caller-supplied callables: one returning the current
    time and one that sleeps for a given delay.
    """

    def __init__(self, timefunc, delayfunc):
        """Initialize a new instance, passing the time and delay
        functions"""
        self._queue = []
        self.timefunc = timefunc
        self.delayfunc = delayfunc

    def enterabs(self, time, priority, action, argument):
        """Enter a new event in the queue at an absolute time.

        Returns an ID for the event which can be used to remove it,
        if necessary.
        """
        new_event = Event(time, priority, action, argument)
        heapq.heappush(self._queue, new_event)
        return new_event  # The ID

    def enter(self, delay, priority, action, argument):
        """A variant that specifies the time as a relative time.

        This is actually the more commonly used interface.
        """
        return self.enterabs(self.timefunc() + delay, priority, action,
                             argument)

    def cancel(self, event):
        """Remove an event from the queue.

        This must be presented the ID as returned by enter().
        If the event is not in the queue, this raises ValueError.
        """
        self._queue.remove(event)
        heapq.heapify(self._queue)

    def empty(self):
        """Check whether the queue is empty."""
        return not self._queue

    def run(self):
        """Execute events until the queue is empty.

        When the head event is still in the future, the delay function is
        called and the event stays queued; otherwise it is popped and its
        action is invoked with its argument sequence. Both callbacks may
        modify the queue or raise; the scheduler's state stays consistent
        so run() may be called again. After each action a zero delay is
        issued so other threads get a chance to run.
        """
        # Bind to locals for speed and for thread-safety of the snapshot.
        pending = self._queue
        wait = self.delayfunc
        current_time = self.timefunc
        while pending:
            head = pending[0]
            now = current_time()
            if now < head.time:
                wait(head.time - now)
                continue
            popped = heapq.heappop(pending)
            # Another thread may have altered the queue between peeking at
            # pending[0] and popping; only run the event we actually saw.
            if popped is head:
                head.action(*head.argument)
                wait(0)  # Let other threads run
            else:
                heapq.heappush(pending, popped)

    @property
    def queue(self):
        """An ordered list of upcoming events.

        Events are named tuples with fields for:
            time, priority, action, arguments
        """
        # Drain a *copy* of the heap rather than sorting it, so that events
        # scheduled for the same time appear in actual retrieval order.
        snapshot = self._queue[:]
        return map(heapq.heappop, [snapshot] * len(snapshot))
| lgpl-3.0 |
TimYi/django | django/test/testcases.py | 49 | 58069 | from __future__ import unicode_literals
import difflib
import errno
import json
import os
import posixpath
import socket
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
    """
    Puts value into a list if it's not already one.
    Returns an empty list if value is None.
    """
    if value is None:
        return []
    return value if isinstance(value, list) else [value]
def assert_and_parse_html(self, html, user_msg, msg):
    """Parse *html* into a DOM; on a parse error, fail the test with a
    message combining *msg* and the parser's error."""
    try:
        return parse_html(html)
    except HTMLParseError as e:
        self.fail(self._formatMessage(user_msg, '%s\n%s' % (msg, e.msg)))
class _AssertNumQueriesContext(CaptureQueriesContext):
    """Context manager asserting that exactly ``num`` queries run while the
    ``with`` block executes (skipped when the block raised)."""

    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num
        super(_AssertNumQueriesContext, self).__init__(connection)

    def __exit__(self, exc_type, exc_value, traceback):
        super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
        if exc_type is not None:
            # An exception escaped the block; don't pile on a count failure.
            return
        actual = len(self)
        captured_sql = '\n'.join(
            query['sql'] for query in self.captured_queries
        )
        self.test_case.assertEqual(
            actual, self.num,
            "%d queries executed, %d expected\nCaptured queries were:\n%s" % (
                actual, self.num, captured_sql
            )
        )
class _AssertTemplateUsedContext(object):
    """Context manager that records every template rendered inside the
    ``with`` block and fails the test unless ``template_name`` was used."""

    def __init__(self, test_case, template_name):
        self.test_case = test_case
        self.template_name = template_name
        self.rendered_templates = []
        self.rendered_template_names = []
        self.context = ContextList()

    def on_template_render(self, sender, signal, template, context, **kwargs):
        # Signal receiver: record the template object, its name, and a copy
        # of the rendering context.
        self.rendered_templates.append(template)
        self.rendered_template_names.append(template.name)
        self.context.append(copy(context))

    def test(self):
        return self.template_name in self.rendered_template_names

    def message(self):
        return '%s was not rendered.' % self.template_name

    def __enter__(self):
        template_rendered.connect(self.on_template_render)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        template_rendered.disconnect(self.on_template_render)
        if exc_type is not None:
            # Let the block's own exception propagate untouched.
            return
        if self.test():
            return
        message = self.message()
        if self.rendered_templates:
            message += ' Following templates were rendered: %s' % (
                ', '.join(self.rendered_template_names))
        else:
            message += ' No template was rendered.'
        self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
    """Inverse check: fail if ``template_name`` WAS rendered in the block."""
    def test(self):
        return self.template_name not in self.rendered_template_names
    def message(self):
        return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
    """Base Django test case: wires up the test client, settings overrides,
    and (by default) blocks all database access."""
    # The class we'll use for the test client self.client.
    # Can be overridden in derived classes.
    client_class = Client
    # Settings overrides applied for the whole class (see setUpClass).
    _overridden_settings = None
    _modified_settings = None
    # Tests shouldn't be allowed to query the database since
    # this base class doesn't enforce any isolation.
    allow_database_queries = False
    @classmethod
    def setUpClass(cls):
        """Enable class-level setting overrides and, unless database queries
        are allowed, replace every connection's cursor with a guard that
        raises AssertionError (undone in tearDownClass)."""
        super(SimpleTestCase, cls).setUpClass()
        if cls._overridden_settings:
            cls._cls_overridden_context = override_settings(**cls._overridden_settings)
            cls._cls_overridden_context.enable()
        if cls._modified_settings:
            cls._cls_modified_context = modify_settings(cls._modified_settings)
            cls._cls_modified_context.enable()
        if not cls.allow_database_queries:
            for alias in connections:
                connection = connections[alias]
                connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
    @classmethod
    def tearDownClass(cls):
        """Undo everything setUpClass installed: restore the real cursor
        methods and disable any class-level setting overrides."""
        if not cls.allow_database_queries:
            for alias in connections:
                connection = connections[alias]
                # _CursorFailure keeps the original method in .wrapped.
                connection.cursor = connection.cursor.wrapped
        if hasattr(cls, '_cls_modified_context'):
            cls._cls_modified_context.disable()
            delattr(cls, '_cls_modified_context')
        if hasattr(cls, '_cls_overridden_context'):
            cls._cls_overridden_context.disable()
            delattr(cls, '_cls_overridden_context')
        super(SimpleTestCase, cls).tearDownClass()
    def __call__(self, result=None):
        """
        Wrapper around default __call__ method to perform common Django test
        set up. This means that user-defined Test Cases aren't required to
        include a call to super().setUp().
        """
        testMethod = getattr(self, self._testMethodName)
        skipped = (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False))

        if not skipped:
            try:
                self._pre_setup()
            except Exception:
                # Record setup failures as errors on this test instead of
                # letting them abort the whole run.
                result.addError(self, sys.exc_info())
                return
        super(SimpleTestCase, self).__call__(result)
        if not skipped:
            try:
                self._post_teardown()
            except Exception:
                result.addError(self, sys.exc_info())
                return
    def _pre_setup(self):
        """Performs any pre-test setup. This includes:
        * Creating a test client.
        * If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
        * Clearing the mail test outbox.
        """
        self.client = self.client_class()
        self._urlconf_setup()
        # Each test starts with an empty captured-mail outbox.
        mail.outbox = []
    def _urlconf_setup(self):
        """Swap in ``self.urls`` as ROOT_URLCONF for the duration of the
        test (deprecated mechanism; a deprecation warning is emitted)."""
        if hasattr(self, 'urls'):
            warnings.warn(
                "SimpleTestCase.urls is deprecated and will be removed in "
                "Django 1.10. Use @override_settings(ROOT_URLCONF=...) "
                "in %s instead." % self.__class__.__name__,
                RemovedInDjango110Warning, stacklevel=2)
            set_urlconf(None)
            # Remember the real setting so _urlconf_teardown can restore it.
            self._old_root_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = self.urls
            clear_url_caches()
    def _post_teardown(self):
        """Performs any post-test things. This includes:
        * Putting back the original ROOT_URLCONF if it was changed.
        """
        self._urlconf_teardown()
    def _urlconf_teardown(self):
        """Restore ROOT_URLCONF if _urlconf_setup replaced it."""
        if hasattr(self, '_old_root_urlconf'):
            set_urlconf(None)
            settings.ROOT_URLCONF = self._old_root_urlconf
            clear_url_caches()
    def settings(self, **kwargs):
        """
        A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
        """
        return override_settings(**kwargs)
    def modify_settings(self, **kwargs):
        """
        A context manager that temporarily applies changes a list setting and
        reverts back to the original value when exiting the context.
        """
        return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
                    target_status_code=200, host=None, msg_prefix='',
                    fetch_redirect_response=True):
    """Asserts that a response redirected to a specific URL, and that the
    redirect URL can be loaded.

    Note that assertRedirects won't work for external links since it uses
    TestClient to do a request (use fetch_redirect_response=False to check
    such links without fetching them).
    """
    if host is not None:
        warnings.warn(
            "The host argument is deprecated and no longer used by assertRedirects",
            RemovedInDjango20Warning, stacklevel=2
        )
    if msg_prefix:
        msg_prefix += ": "
    if hasattr(response, 'redirect_chain'):
        # The request was a followed redirect (client used follow=True):
        # validate the first hop's status and the final landed status.
        self.assertTrue(len(response.redirect_chain) > 0,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
            (response.status_code, status_code))

        self.assertEqual(response.redirect_chain[0][1], status_code,
            msg_prefix + "Initial response didn't redirect as expected:"
            " Response code was %d (expected %d)" %
            (response.redirect_chain[0][1], status_code))

        # The last entry in the chain is where we finally ended up.
        url, status_code = response.redirect_chain[-1]
        scheme, netloc, path, query, fragment = urlsplit(url)

        self.assertEqual(response.status_code, target_status_code,
            msg_prefix + "Response didn't redirect as expected: Final"
            " Response code was %d (expected %d)" %
            (response.status_code, target_status_code))

    else:
        # Not a followed redirect
        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
            (response.status_code, status_code))

        url = response.url
        scheme, netloc, path, query, fragment = urlsplit(url)

        if fetch_redirect_response:
            redirect_response = response.client.get(path, QueryDict(query),
                                                    secure=(scheme == 'https'))

            # Get the redirection page, using the same client that was used
            # to obtain the original response.
            self.assertEqual(redirect_response.status_code, target_status_code,
                msg_prefix + "Couldn't retrieve redirection page '%s':"
                " response code was %d (expected %d)" %
                (path, redirect_response.status_code, target_status_code))

    if url != expected_url:
        # For temporary backwards compatibility, try to compare with a relative url
        e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
        relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
        if url == relative_url:
            warnings.warn(
                "assertRedirects had to strip the scheme and domain from the "
                "expected URL, as it was always added automatically to URLs "
                "before Django 1.9. Please update your expected URLs by "
                "removing the scheme and domain.",
                RemovedInDjango20Warning, stacklevel=2)
            expected_url = relative_url

    self.assertEqual(url, expected_url,
        msg_prefix + "Response redirected to '%s', expected '%s'" %
        (url, expected_url))
def _assert_contains(self, response, text, status_code, msg_prefix, html):
    # Shared implementation for assertContains()/assertNotContains().
    # Returns (text_repr, real_count, msg_prefix) for the caller to assert on.
    # If the response supports deferred rendering and hasn't been rendered
    # yet, then ensure that it does get rendered before proceeding further.
    if (hasattr(response, 'render') and callable(response.render)
            and not response.is_rendered):
        response.render()

    if msg_prefix:
        msg_prefix += ": "

    self.assertEqual(response.status_code, status_code,
        msg_prefix + "Couldn't retrieve content: Response code was %d"
        " (expected %d)" % (response.status_code, status_code))

    # Streaming responses hold their payload in an iterator, not .content.
    if response.streaming:
        content = b''.join(response.streaming_content)
    else:
        content = response.content
    if not isinstance(text, bytes) or html:
        # Compare as text: decode the body with the response's charset so
        # the count is done on equal representations.
        text = force_text(text, encoding=response.charset)
        content = content.decode(response.charset)
        text_repr = "'%s'" % text
    else:
        # Both sides stay as bytes.
        text_repr = repr(text)
    if html:
        # Compare parsed DOM fragments rather than raw markup.
        content = assert_and_parse_html(self, content, None,
            "Response's content is not valid HTML:")
        text = assert_and_parse_html(self, text, None,
            "Second argument is not valid HTML:")
    real_count = content.count(text)
    return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200,
                   msg_prefix='', html=False):
    """
    Asserts that a response indicates that some content was retrieved
    successfully, (i.e., the HTTP status code was as expected), and that
    ``text`` occurs ``count`` times in the content of the response.
    If ``count`` is None, the count doesn't matter - the assertion is true
    if the text occurs at least once in the response.
    """
    # All status/charset/HTML handling lives in the shared helper; it hands
    # back what we need to phrase the assertion.
    text_repr, real_count, msg_prefix = self._assert_contains(
        response, text, status_code, msg_prefix, html)

    if count is None:
        # Any occurrence at all is enough.
        self.assertTrue(real_count != 0,
            msg_prefix + "Couldn't find %s in response" % text_repr)
    else:
        # An exact occurrence count was requested.
        self.assertEqual(real_count, count,
            msg_prefix + "Found %d instances of %s in response"
            " (expected %d)" % (real_count, text_repr, count))
def assertNotContains(self, response, text, status_code=200,
                      msg_prefix='', html=False):
    """
    Asserts that a response indicates that some content was retrieved
    successfully, (i.e., the HTTP status code was as expected), and that
    ``text`` doesn't occur in the content of the response.
    """
    # Reuse the shared counting helper and require zero matches.
    text_repr, real_count, msg_prefix = self._assert_contains(
        response, text, status_code, msg_prefix, html)

    self.assertEqual(real_count, 0,
        msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
    """
    Asserts that a form used to render the response has a specific field
    error.

    Pass ``field=None`` (falsy) to check the form's non-field errors
    instead of a specific field's errors.
    """
    if msg_prefix:
        msg_prefix += ": "

    # Put context(s) into a list to simplify processing.
    contexts = to_list(response.context)
    if not contexts:
        self.fail(msg_prefix + "Response did not use any contexts to "
                  "render the response")

    # Put error(s) into a list to simplify processing.
    errors = to_list(errors)

    # Search all contexts for the error.
    found_form = False
    for i, context in enumerate(contexts):
        if form not in context:
            continue
        found_form = True
        for err in errors:
            if field:
                if field in context[form].errors:
                    # The field exists and has errors; the expected one
                    # must be among them.
                    field_errors = context[form].errors[field]
                    self.assertTrue(err in field_errors,
                        msg_prefix + "The field '%s' on form '%s' in"
                        " context %d does not contain the error '%s'"
                        " (actual errors: %s)" %
                        (field, form, i, err, repr(field_errors)))
                elif field in context[form].fields:
                    # The field exists but carries no errors at all.
                    self.fail(msg_prefix + "The field '%s' on form '%s'"
                              " in context %d contains no errors" %
                              (field, form, i))
                else:
                    # The form doesn't even declare such a field.
                    self.fail(msg_prefix + "The form '%s' in context %d"
                              " does not contain the field '%s'" %
                              (form, i, field))
            else:
                # No field given: check the form-level (non-field) errors.
                non_field_errors = context[form].non_field_errors()
                self.assertTrue(err in non_field_errors,
                    msg_prefix + "The form '%s' in context %d does not"
                    " contain the non-field error '%s'"
                    " (actual errors: %s)" %
                    (form, i, err, non_field_errors))
    if not found_form:
        self.fail(msg_prefix + "The form '%s' was not used to render the"
                  " response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
                       msg_prefix=''):
    """
    Asserts that a formset used to render the response has a specific error.

    For field errors, specify the ``form_index`` and the ``field``.
    For non-field errors, specify the ``form_index`` and the ``field`` as
    None.
    For non-form errors, specify ``form_index`` as None and the ``field``
    as None.
    """
    # Add punctuation to msg_prefix
    if msg_prefix:
        msg_prefix += ": "

    # Put context(s) into a list to simplify processing.
    contexts = to_list(response.context)
    if not contexts:
        self.fail(msg_prefix + 'Response did not use any contexts to '
                  'render the response')

    # Put error(s) into a list to simplify processing.
    errors = to_list(errors)

    # Search all contexts for the error.
    found_formset = False
    for i, context in enumerate(contexts):
        if formset not in context:
            continue
        found_formset = True
        for err in errors:
            if field is not None:
                # Field-level error on a specific member form.
                if field in context[formset].forms[form_index].errors:
                    field_errors = context[formset].forms[form_index].errors[field]
                    self.assertTrue(err in field_errors,
                        msg_prefix + "The field '%s' on formset '%s', "
                        "form %d in context %d does not contain the "
                        "error '%s' (actual errors: %s)" %
                        (field, formset, form_index, i, err,
                         repr(field_errors)))
                elif field in context[formset].forms[form_index].fields:
                    # Field exists but has no errors.
                    self.fail(msg_prefix + "The field '%s' "
                              "on formset '%s', form %d in "
                              "context %d contains no errors" %
                              (field, formset, form_index, i))
                else:
                    # The member form has no such field.
                    self.fail(msg_prefix + "The formset '%s', form %d in "
                              "context %d does not contain the field '%s'" %
                              (formset, form_index, i, field))
            elif form_index is not None:
                # Non-field error on a specific member form.
                non_field_errors = context[formset].forms[form_index].non_field_errors()
                self.assertFalse(len(non_field_errors) == 0,
                    msg_prefix + "The formset '%s', form %d in "
                    "context %d does not contain any non-field "
                    "errors." % (formset, form_index, i))
                self.assertTrue(err in non_field_errors,
                    msg_prefix + "The formset '%s', form %d "
                    "in context %d does not contain the "
                    "non-field error '%s' "
                    "(actual errors: %s)" %
                    (formset, form_index, i, err,
                     repr(non_field_errors)))
            else:
                # Non-form error on the formset itself.
                non_form_errors = context[formset].non_form_errors()
                self.assertFalse(len(non_form_errors) == 0,
                    msg_prefix + "The formset '%s' in "
                    "context %d does not contain any "
                    "non-form errors." % (formset, i))
                self.assertTrue(err in non_form_errors,
                    msg_prefix + "The formset '%s' in context "
                    "%d does not contain the "
                    "non-form error '%s' (actual errors: %s)" %
                    (formset, i, err, repr(non_form_errors)))
    if not found_formset:
        self.fail(msg_prefix + "The formset '%s' was not used to render "
                  "the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
    # Shared implementation for assertTemplateUsed()/assertTemplateNotUsed().
    # Returns (context_mgr_template, template_names, msg_prefix): exactly one
    # of the first two is non-None — the first signals "use as a context
    # manager", the second carries the template names to assert against.
    if response is None and template_name is None:
        raise TypeError('response and/or template_name argument must be provided')

    if msg_prefix:
        msg_prefix += ": "

    if template_name is not None and response is not None and not hasattr(response, 'templates'):
        raise ValueError(
            "assertTemplateUsed() and assertTemplateNotUsed() are only "
            "usable on responses fetched using the Django test Client."
        )

    if not hasattr(response, 'templates') or (response is None and template_name):
        # Context-manager mode: the single positional argument (if any) is
        # actually the template name, not a response.
        if response:
            template_name = response
            response = None
        # use this template with context manager
        return template_name, None, msg_prefix

    # Direct-assertion mode: collect the names of templates the test
    # client recorded while rendering the response.
    template_names = [t.name for t in response.templates if t.name is not
                      None]
    return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
    """
    Asserts that the template with the provided name was used in rendering
    the response. Also usable as context manager.

    If ``count`` is given, the template must have been rendered exactly
    that many times.
    """
    context_mgr_template, template_names, msg_prefix = self._assert_template_used(
        response, template_name, msg_prefix)

    if context_mgr_template:
        # Use assertTemplateUsed as context manager.
        return _AssertTemplateUsedContext(self, context_mgr_template)

    if not template_names:
        self.fail(msg_prefix + "No templates used to render the response")
    self.assertTrue(template_name in template_names,
        msg_prefix + "Template '%s' was not a template used to render"
        " the response. Actual template(s) used: %s" %
        (template_name, ', '.join(template_names)))

    if count is not None:
        # Enforce the exact number of renders, not just presence.
        self.assertEqual(template_names.count(template_name), count,
            msg_prefix + "Template '%s' was expected to be rendered %d "
            "time(s) but was actually rendered %d time(s)." %
            (template_name, count, template_names.count(template_name)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
    """
    Asserts that the template with the provided name was NOT used in
    rendering the response. Also usable as context manager.
    """
    context_mgr_template, template_names, msg_prefix = self._assert_template_used(
        response, template_name, msg_prefix)

    if context_mgr_template:
        # Use assertTemplateNotUsed as context manager.
        return _AssertTemplateNotUsedContext(self, context_mgr_template)

    self.assertFalse(template_name in template_names,
        msg_prefix + "Template '%s' was used unexpectedly in rendering"
        " the response" % template_name)
@contextmanager
def _assert_raises_message_cm(self, expected_exception, expected_message):
    # Generator-based context manager backing assertRaisesMessage():
    # delegate exception capture to assertRaises, then — once the with
    # block has exited — verify the message substring.
    with self.assertRaises(expected_exception) as cm:
        yield cm
    self.assertIn(expected_message, str(cm.exception))
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
    """
    Asserts that expected_message is found in the message of a raised
    exception.

    Usable either as a context manager or by passing a callable (plus its
    arguments) directly.

    Args:
        expected_exception: Exception class expected to be raised.
        expected_message: expected error message string value.
        args: Function to be called and extra positional args.
        kwargs: Extra kwargs.
    """
    # callable_obj was a documented kwarg in Django 1.8 and older.
    callable_obj = kwargs.pop('callable_obj', None)
    if callable_obj:
        warnings.warn(
            'The callable_obj kwarg is deprecated. Pass the callable '
            'as a positional argument instead.', RemovedInDjango20Warning
        )
    elif len(args):
        # The first positional argument is the callable; the rest are
        # forwarded to it.
        callable_obj = args[0]
        args = args[1:]

    cm = self._assert_raises_message_cm(expected_exception, expected_message)
    # Assertion used in context manager fashion.
    if callable_obj is None:
        return cm
    # Assertion was passed a callable.
    with cm:
        callable_obj(*args, **kwargs)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
                      field_kwargs=None, empty_value=''):
    """
    Asserts that a form field behaves correctly with various inputs.

    Args:
        fieldclass: the class of the field to be tested.
        valid: a dictionary mapping valid inputs to their expected
            cleaned values.
        invalid: a dictionary mapping invalid inputs to one or more
            raised error messages.
        field_args: the args passed to instantiate the field
        field_kwargs: the kwargs passed to instantiate the field
        empty_value: the expected clean output for inputs in empty_values
    """
    if field_args is None:
        field_args = []
    if field_kwargs is None:
        field_kwargs = {}
    # Exercise both a required and an optional instance of the field so
    # the empty-value behaviour of each is covered.
    required = fieldclass(*field_args, **field_kwargs)
    optional = fieldclass(*field_args,
                          **dict(field_kwargs, required=False))
    # test valid inputs
    for input, output in valid.items():
        self.assertEqual(required.clean(input), output)
        self.assertEqual(optional.clean(input), output)
    # test invalid inputs
    for input, errors in invalid.items():
        with self.assertRaises(ValidationError) as context_manager:
            required.clean(input)
        self.assertEqual(context_manager.exception.messages, errors)

        with self.assertRaises(ValidationError) as context_manager:
            optional.clean(input)
        self.assertEqual(context_manager.exception.messages, errors)
    # test required inputs
    error_required = [force_text(required.error_messages['required'])]
    for e in required.empty_values:
        # A required field must reject each of its empty values; an
        # optional one must clean them to ``empty_value``.
        with self.assertRaises(ValidationError) as context_manager:
            required.clean(e)
        self.assertEqual(context_manager.exception.messages,
                         error_required)
        self.assertEqual(optional.clean(e), empty_value)
    # test that max_length and min_length are always accepted
    if issubclass(fieldclass, CharField):
        field_kwargs.update({'min_length': 2, 'max_length': 20})
        self.assertIsInstance(fieldclass(*field_args, **field_kwargs),
                              fieldclass)
def assertHTMLEqual(self, html1, html2, msg=None):
    """
    Asserts that two HTML snippets are semantically the same.
    Whitespace in most cases is ignored, and attribute ordering is not
    significant. The passed-in arguments must be valid HTML.
    """
    # Parse both sides into DOM objects; parsing failures fail the test
    # with the message given here.
    dom1 = assert_and_parse_html(self, html1, msg,
        'First argument is not valid HTML:')
    dom2 = assert_and_parse_html(self, html2, msg,
        'Second argument is not valid HTML:')

    if dom1 != dom2:
        # Produce a unified-style diff of the normalized HTML so the
        # failure output pinpoints where the documents diverge.
        standardMsg = '%s != %s' % (
            safe_repr(dom1, True), safe_repr(dom2, True))
        diff = ('\n' + '\n'.join(difflib.ndiff(
            six.text_type(dom1).splitlines(),
            six.text_type(dom2).splitlines())))
        standardMsg = self._truncateMessage(standardMsg, diff)
        self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
    """Asserts that two HTML snippets differ semantically."""
    # Both snippets must parse; invalid HTML fails the test right here.
    first_dom = assert_and_parse_html(self, html1, msg,
        'First argument is not valid HTML:')
    second_dom = assert_and_parse_html(self, html2, msg,
        'Second argument is not valid HTML:')

    if first_dom != second_dom:
        return
    # Semantically identical documents: report both representations.
    standardMsg = '%s == %s' % (
        safe_repr(first_dom, True), safe_repr(second_dom, True))
    self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
    # Parse both fragments so containment is checked on DOM structure,
    # not on raw strings.
    parsed_needle = assert_and_parse_html(self, needle, None,
        'First argument is not valid HTML:')
    parsed_haystack = assert_and_parse_html(self, haystack, None,
        'Second argument is not valid HTML:')
    real_count = parsed_haystack.count(parsed_needle)
    if count is None:
        # Presence is enough when no exact count was requested.
        self.assertTrue(real_count != 0,
            msg_prefix + "Couldn't find '%s' in response" % parsed_needle)
    else:
        self.assertEqual(real_count, count,
            msg_prefix + "Found %d instances of '%s' in response"
            " (expected %d)" % (real_count, parsed_needle, count))
def assertJSONEqual(self, raw, expected_data, msg=None):
    """
    Asserts that the JSON fragments raw and expected_data are equal.
    Usual JSON non-significant whitespace rules apply as the heavyweight
    is delegated to the json library.
    """
    try:
        parsed = json.loads(raw)
    except ValueError:
        self.fail("First argument is not valid JSON: %r" % raw)
    # A string expectation is itself JSON and gets decoded before comparing;
    # anything else is assumed to already be a Python value.
    expected = expected_data
    if isinstance(expected, six.string_types):
        try:
            expected = json.loads(expected)
        except ValueError:
            self.fail("Second argument is not valid JSON: %r" % expected)
    self.assertEqual(parsed, expected, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
    """
    Asserts that the JSON fragments raw and expected_data are not equal.
    Usual JSON non-significant whitespace rules apply as the heavyweight
    is delegated to the json library.
    """
    try:
        parsed = json.loads(raw)
    except ValueError:
        self.fail("First argument is not valid JSON: %r" % raw)
    # Decode a string expectation as JSON; leave other values untouched.
    expected = expected_data
    if isinstance(expected, six.string_types):
        try:
            expected = json.loads(expected)
        except ValueError:
            self.fail("Second argument is not valid JSON: %r" % expected)
    self.assertNotEqual(parsed, expected, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
    """
    Asserts that two XML snippets are semantically the same.
    Whitespace in most cases is ignored, and attribute ordering is not
    significant. The passed-in arguments must be valid XML.
    """
    try:
        result = compare_xml(xml1, xml2)
    except Exception as e:
        # Either side failed to parse.
        standardMsg = 'First or second argument is not valid XML\n%s' % e
        self.fail(self._formatMessage(msg, standardMsg))
    else:
        if not result:
            # Include a line-level diff of the raw XML in the failure.
            standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
            diff = ('\n' + '\n'.join(
                difflib.ndiff(
                    six.text_type(xml1).splitlines(),
                    six.text_type(xml2).splitlines(),
                )
            ))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
    """
    Asserts that two XML snippets are not semantically equivalent.
    Whitespace in most cases is ignored, and attribute ordering is not
    significant. The passed-in arguments must be valid XML.
    """
    try:
        equivalent = compare_xml(xml1, xml2)
    except Exception as e:
        # Either side failed to parse; fail immediately with the parse error.
        self.fail(self._formatMessage(
            msg, 'First or second argument is not valid XML\n%s' % e))
    if equivalent:
        # Semantically equal documents violate the "not equal" claim.
        self.fail(self._formatMessage(
            msg, '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))))
class TransactionTestCase(SimpleTestCase):
    """
    Test case that resets database state after each test by flushing the
    tables, rather than rolling back a transaction, so the test itself may
    exercise transactional behavior.
    """

    # Subclasses can ask for resetting of auto increment sequence before each
    # test case
    reset_sequences = False

    # Subclasses can enable only a subset of apps for faster tests
    available_apps = None

    # Subclasses can define fixtures which will be automatically installed.
    fixtures = None

    # If transactions aren't available, Django will serialize the database
    # contents into a fixture during setup and flush and reload them
    # during teardown (as flush does not restore data from migrations).
    # This can be slow; this flag allows enabling on a per-case basis.
    serialized_rollback = False

    # Since tests will be wrapped in a transaction, or serialized if they
    # are not available, we allow queries to be run.
    allow_database_queries = True

    def _pre_setup(self):
        """Performs any pre-test setup. This includes:

        * If the class has an 'available_apps' attribute, restricting the app
          registry to these applications, then firing post_migrate -- it must
          run with the correct set of applications for the test case.
        * If the class has a 'fixtures' attribute, installing these fixtures.
        """
        super(TransactionTestCase, self)._pre_setup()
        if self.available_apps is not None:
            apps.set_available_apps(self.available_apps)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting='INSTALLED_APPS',
                                 value=self.available_apps,
                                 enter=True)
            for db_name in self._databases_names(include_mirrors=False):
                emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
        try:
            self._fixture_setup()
        except Exception:
            # Undo the app-registry restriction on failure so subsequent
            # tests start from a clean state, then re-raise.
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(sender=settings._wrapped.__class__,
                                     setting='INSTALLED_APPS',
                                     value=settings.INSTALLED_APPS,
                                     enter=False)
            raise

    @classmethod
    def _databases_names(cls, include_mirrors=True):
        # If the test case has a multi_db=True flag, act on all databases,
        # including mirrors or not. Otherwise, just on the default DB.
        if getattr(cls, 'multi_db', False):
            return [alias for alias in connections
                    if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']]
        else:
            return [DEFAULT_DB_ALIAS]

    def _reset_sequences(self, db_name):
        # Reset auto-increment sequences on ``db_name`` where the backend
        # supports it; all statements run inside a single transaction.
        conn = connections[db_name]
        if conn.features.supports_sequence_reset:
            sql_list = conn.ops.sequence_reset_by_name_sql(
                no_style(), conn.introspection.sequence_list())
            if sql_list:
                with transaction.atomic(using=db_name):
                    cursor = conn.cursor()
                    for sql in sql_list:
                        cursor.execute(sql)

    def _fixture_setup(self):
        for db_name in self._databases_names(include_mirrors=False):
            # Reset sequences
            if self.reset_sequences:
                self._reset_sequences(db_name)

            # If we need to provide replica initial data from migrated apps,
            # then do so.
            if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
                # Deserialization must see all apps, so temporarily lift
                # any available_apps restriction around it.
                if self.available_apps is not None:
                    apps.unset_available_apps()
                connections[db_name].creation.deserialize_db_from_string(
                    connections[db_name]._test_serialized_contents
                )
                if self.available_apps is not None:
                    apps.set_available_apps(self.available_apps)

            if self.fixtures:
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures,
                             **{'verbosity': 0, 'database': db_name})

    def _should_reload_connections(self):
        # Subclass hook; TestCase overrides this when transactions are used.
        return True

    def _post_teardown(self):
        """Performs any post-test things. This includes:

        * Flushing the contents of the database, to leave a clean slate. If
          the class has an 'available_apps' attribute, post_migrate isn't fired.
        * Force-closing the connection, so the next test gets a clean cursor.
        """
        try:
            self._fixture_teardown()
            super(TransactionTestCase, self)._post_teardown()
            if self._should_reload_connections():
                # Some DB cursors include SQL statements as part of cursor
                # creation. If you have a test that does a rollback, the effect
                # of these statements is lost, which can affect the operation of
                # tests (e.g., losing a timezone setting causing objects to be
                # created with the wrong time). To make sure this doesn't
                # happen, get a clean connection at the start of every test.
                for conn in connections.all():
                    conn.close()
        finally:
            # Always lift the app-registry restriction, even if teardown fails.
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(sender=settings._wrapped.__class__,
                                     setting='INSTALLED_APPS',
                                     value=settings.INSTALLED_APPS,
                                     enter=False)

    def _fixture_teardown(self):
        # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
        # when flushing only a subset of the apps
        for db_name in self._databases_names(include_mirrors=False):
            # Flush the database
            inhibit_post_migrate = (
                self.available_apps is not None
                or (
                    # Inhibit the post_migrate signal when using serialized
                    # rollback to avoid trying to recreate the serialized data.
                    self.serialized_rollback and
                    hasattr(connections[db_name], '_test_serialized_contents')
                )
            )
            call_command('flush', verbosity=0, interactive=False,
                         database=db_name, reset_sequences=False,
                         allow_cascade=self.available_apps is not None,
                         inhibit_post_migrate=inhibit_post_migrate)

    def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
        """
        Asserts that the queryset ``qs``, mapped through ``transform``,
        matches ``values`` (ignoring order when ``ordered=False``).
        """
        items = six.moves.map(transform, qs)
        if not ordered:
            # Compare as multisets so ordering differences don't matter.
            return self.assertEqual(Counter(items), Counter(values), msg=msg)
        values = list(values)
        # For example qs.iterator() could be passed as qs, but it does not
        # have 'ordered' attribute.
        if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
            raise ValueError("Trying to compare non-ordered queryset "
                             "against more than one ordered values")
        return self.assertEqual(list(items), values, msg=msg)

    def assertNumQueries(self, num, func=None, *args, **kwargs):
        """
        Asserts that ``func`` (or the wrapped block, when used as a context
        manager) executes exactly ``num`` database queries on ``using``.
        """
        using = kwargs.pop("using", DEFAULT_DB_ALIAS)
        conn = connections[using]

        context = _AssertNumQueriesContext(self, num, conn)
        if func is None:
            # Context-manager usage: the caller runs the block themselves.
            return context

        with context:
            func(*args, **kwargs)
def connections_support_transactions():
    """
    Returns True if all connections support transactions.
    """
    # Bail out on the first backend lacking transaction support.
    for conn in connections.all():
        if not conn.features.supports_transactions:
            return False
    return True
class TestCase(TransactionTestCase):
    """
    Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
    test isolation.

    In most situation, TestCase should be prefered to TransactionTestCase as
    it allows faster execution. However, there are some situations where using
    TransactionTestCase might be necessary (e.g. testing some transactional
    behavior).

    On database backends with no transaction support, TestCase behaves as
    TransactionTestCase.
    """

    @classmethod
    def _enter_atomics(cls):
        """Helper method to open atomic blocks for multiple databases"""
        atomics = {}
        for db_name in cls._databases_names():
            atomics[db_name] = transaction.atomic(using=db_name)
            atomics[db_name].__enter__()
        return atomics

    @classmethod
    def _rollback_atomics(cls, atomics):
        """Rollback atomic blocks opened through the previous method"""
        # Exit in reverse order of entry so nesting unwinds correctly.
        for db_name in reversed(cls._databases_names()):
            transaction.set_rollback(True, using=db_name)
            atomics[db_name].__exit__(None, None, None)

    @classmethod
    def setUpClass(cls):
        super(TestCase, cls).setUpClass()
        if not connections_support_transactions():
            # Fall back to per-test flushing (TransactionTestCase behavior).
            return
        # Open a class-wide atomic block; fixtures and setUpTestData run
        # inside it and are rolled back in tearDownClass.
        cls.cls_atomics = cls._enter_atomics()

        if cls.fixtures:
            for db_name in cls._databases_names(include_mirrors=False):
                try:
                    call_command('loaddata', *cls.fixtures, **{
                        'verbosity': 0,
                        'commit': False,
                        'database': db_name,
                    })
                except Exception:
                    # Don't leave dangling atomics if fixture loading fails.
                    cls._rollback_atomics(cls.cls_atomics)
                    raise
        try:
            cls.setUpTestData()
        except Exception:
            cls._rollback_atomics(cls.cls_atomics)
            raise

    @classmethod
    def tearDownClass(cls):
        if connections_support_transactions():
            cls._rollback_atomics(cls.cls_atomics)
            for conn in connections.all():
                conn.close()
        super(TestCase, cls).tearDownClass()

    @classmethod
    def setUpTestData(cls):
        """Load initial data for the TestCase"""
        pass

    def _should_reload_connections(self):
        # Connections stay usable across tests when everything runs inside
        # rolled-back atomic blocks.
        if connections_support_transactions():
            return False
        return super(TestCase, self)._should_reload_connections()

    def _fixture_setup(self):
        if not connections_support_transactions():
            # If the backend does not support transactions, we should reload
            # class data before each test
            self.setUpTestData()
            return super(TestCase, self)._fixture_setup()

        assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
        # Per-test atomic block nested inside the class-wide one.
        self.atomics = self._enter_atomics()

    def _fixture_teardown(self):
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_teardown()
        self._rollback_atomics(self.atomics)
class CheckCondition(object):
    """Descriptor that defers evaluation of a skip condition.

    Each attribute access re-invokes the stored callable, so the condition
    is evaluated at lookup time rather than at decoration time.
    """

    def __init__(self, cond_func):
        # Zero-argument callable evaluated on every attribute access.
        self.cond_func = cond_func

    def __get__(self, obj, objtype):
        return self.cond_func()
def _deferredSkip(condition, reason):
    # Build a skip decorator whose ``condition`` callable is evaluated at
    # test-run time (via CheckCondition) rather than at import time.
    def decorator(test_func):
        if not (isinstance(test_func, type) and
                issubclass(test_func, unittest.TestCase)):
            # Decorating a plain function/method: wrap it so the condition
            # is checked on each call.
            @wraps(test_func)
            def skip_wrapper(*args, **kwargs):
                if condition():
                    raise unittest.SkipTest(reason)
                return test_func(*args, **kwargs)
            test_item = skip_wrapper
        else:
            # Assume a class is decorated
            test_item = test_func
            # unittest reads __unittest_skip__ as an attribute; the
            # descriptor defers the check until the attribute is read.
            test_item.__unittest_skip__ = CheckCondition(condition)
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIfDBFeature(*features):
    """
    Skip a test if a database has at least one of the named features.
    """
    reason = "Database has feature(s) %s" % ", ".join(features)

    def condition():
        # Missing attributes count as the feature being absent.
        return any(getattr(connection.features, feature, False)
                   for feature in features)

    return _deferredSkip(condition, reason)
def skipUnlessDBFeature(*features):
    """
    Skip a test unless a database has all the named features.
    """
    reason = "Database doesn't support feature(s): %s" % ", ".join(features)

    def condition():
        # Skip when at least one requested feature is missing.
        return not all(getattr(connection.features, feature, False)
                       for feature in features)

    return _deferredSkip(condition, reason)
def skipUnlessAnyDBFeature(*features):
    """
    Skip a test unless a database has any of the named features.
    """
    reason = "Database doesn't support any of the feature(s): %s" % ", ".join(features)

    def condition():
        # Skip only when none of the requested features are present.
        return not any(getattr(connection.features, feature, False)
                       for feature in features)

    return _deferredSkip(condition, reason)
class QuietWSGIRequestHandler(WSGIRequestHandler):
    """
    Just a regular WSGIRequestHandler except it doesn't log to the standard
    output any of the requests received, so as to not clutter the output for
    the tests' results.
    """

    def log_message(*args):
        # Intentionally a no-op: suppress the per-request log lines the
        # base handler would otherwise emit.
        pass
class FSFilesHandler(WSGIHandler):
    """
    WSGI middleware that intercepts calls to a directory, as defined by one of
    the *_ROOT settings, and serves those files, publishing them under *_URL.

    Subclasses provide get_base_dir() and get_base_url().
    """

    def __init__(self, application):
        # The wrapped WSGI application handles everything we don't.
        self.application = application
        self.base_url = urlparse(self.get_base_url())
        super(FSFilesHandler, self).__init__()

    def _should_handle(self, path):
        """
        Checks if the path should be handled. Ignores the path if:

        * the host is provided as part of the base_url
        * the request's path isn't under the media path (or equal)
        """
        # base_url[2] is the path component, base_url[1] the netloc.
        return path.startswith(self.base_url[2]) and not self.base_url[1]

    def file_path(self, url):
        """
        Returns the relative path to the file on disk for the given URL.
        """
        relative_url = url[len(self.base_url[2]):]
        return url2pathname(relative_url)

    def get_response(self, request):
        from django.http import Http404

        if self._should_handle(request.path):
            try:
                return self.serve(request)
            except Http404:
                # File not found under our directory: fall through to the
                # regular Django response machinery below.
                pass
        return super(FSFilesHandler, self).get_response(request)

    def serve(self, request):
        os_rel_path = self.file_path(request.path)
        os_rel_path = posixpath.normpath(unquote(os_rel_path))
        # Emulate behavior of django.contrib.staticfiles.views.serve() when it
        # invokes staticfiles' finders functionality.
        # TODO: Modify if/when that internal API is refactored
        final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
        return serve(request, final_rel_path, document_root=self.get_base_dir())

    def __call__(self, environ, start_response):
        # Delegate to the wrapped application unless the request targets
        # a path under our base URL.
        if not self._should_handle(get_path_info(environ)):
            return self.application(environ, start_response)
        return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
    """
    Handler for serving static files. A private class that is meant to be used
    solely as a convenience by LiveServerThread.
    """

    def get_base_dir(self):
        # Directory the files are served from.
        return settings.STATIC_ROOT

    def get_base_url(self):
        # URL prefix the files are published under.
        return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
    """
    Handler for serving the media files. A private class that is meant to be
    used solely as a convenience by LiveServerThread.
    """

    def get_base_dir(self):
        # Directory the files are served from.
        return settings.MEDIA_ROOT

    def get_base_url(self):
        # URL prefix the files are published under.
        return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
    """
    Thread for running a live http server while the tests are running.
    """

    def __init__(self, host, possible_ports, static_handler, connections_override=None):
        self.host = host
        # Filled in by run() once a free port from possible_ports is bound.
        self.port = None
        self.possible_ports = possible_ports
        # Signalled once the server is accepting requests (or failed, in
        # which case self.error holds the exception).
        self.is_ready = threading.Event()
        self.error = None
        self.static_handler = static_handler
        self.connections_override = connections_override
        super(LiveServerThread, self).__init__()

    def run(self):
        """
        Sets up the live server and databases, and then loops over handling
        http requests.
        """
        if self.connections_override:
            # Override this thread's database connections with the ones
            # provided by the main thread.
            for alias, conn in self.connections_override.items():
                connections[alias] = conn
        try:
            # Create the handler for serving static and media files
            handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))

            # Go through the list of possible ports, hoping that we can find
            # one that is free to use for the WSGI server.
            for index, port in enumerate(self.possible_ports):
                try:
                    self.httpd = self._create_server(port)
                except socket.error as e:
                    if (index + 1 < len(self.possible_ports) and
                            e.errno == errno.EADDRINUSE):
                        # This port is already in use, so we go on and try with
                        # the next one in the list.
                        continue
                    else:
                        # Either none of the given ports are free or the error
                        # is something else than "Address already in use". So
                        # we let that error bubble up to the main thread.
                        raise
                else:
                    # A free port was found.
                    self.port = port
                    break

            self.httpd.set_app(handler)
            self.is_ready.set()
            # Blocks until terminate() calls shutdown().
            self.httpd.serve_forever()
        except Exception as e:
            # Expose the failure to the main thread, which waits on is_ready.
            self.error = e
            self.is_ready.set()

    def _create_server(self, port):
        # Bind a WSGI server on (host, port); raises socket.error if taken.
        return WSGIServer((self.host, port), QuietWSGIRequestHandler)

    def terminate(self):
        if hasattr(self, 'httpd'):
            # Stop the WSGI server
            self.httpd.shutdown()
            self.httpd.server_close()
class LiveServerTestCase(TransactionTestCase):
    """
    Does basically the same as TransactionTestCase but also launches a live
    http server in a separate thread so that the tests may use another testing
    framework, such as Selenium for example, instead of the built-in dummy
    client.
    Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless if using in-memory
    sqlite) and each thread needs to commit all their transactions so that the
    other thread can see the changes.
    """
    static_handler = _StaticFilesHandler
    @classproperty
    def live_server_url(cls):
        # Base URL of the running live server, e.g. "http://localhost:8081".
        return 'http://%s:%s' % (
            cls.server_thread.host, cls.server_thread.port)
    @classmethod
    def setUpClass(cls):
        super(LiveServerTestCase, cls).setUpClass()
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
                # Explicitly enable thread-shareability for this connection
                conn.allow_thread_sharing = True
                connections_override[conn.alias] = conn
        # Launch the live server's thread
        specified_address = os.environ.get(
            'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081-8179')
        # The specified ports may be of the form '8000-8010,8080,9200-9300'
        # i.e. a comma-separated list of ports or ranges of ports, so we break
        # it down into a detailed list of all possible ports.
        possible_ports = []
        try:
            host, port_ranges = specified_address.split(':')
            for port_range in port_ranges.split(','):
                # A port range can be of either form: '8000' or '8000-8010'.
                extremes = list(map(int, port_range.split('-')))
                assert len(extremes) in [1, 2]
                if len(extremes) == 1:
                    # Port range of the form '8000'
                    possible_ports.append(extremes[0])
                else:
                    # Port range of the form '8000-8010'
                    for port in range(extremes[0], extremes[1] + 1):
                        possible_ports.append(port)
        except Exception:
            # Any parsing failure (bad split, non-integer port, bad range) is
            # reported as ImproperlyConfigured with the original traceback.
            msg = 'Invalid address ("%s") for live server.' % specified_address
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
        cls.server_thread = cls._create_server_thread(host, possible_ports, connections_override)
        # Daemonize so a failed test run cannot leave the process hanging.
        cls.server_thread.daemon = True
        cls.server_thread.start()
        # Wait for the live server to be ready
        cls.server_thread.is_ready.wait()
        if cls.server_thread.error:
            # Clean up behind ourselves, since tearDownClass won't get called in
            # case of errors.
            cls._tearDownClassInternal()
            raise cls.server_thread.error
    @classmethod
    def _create_server_thread(cls, host, possible_ports, connections_override):
        # Factory hook so subclasses can substitute their own thread class.
        return LiveServerThread(
            host,
            possible_ports,
            cls.static_handler,
            connections_override=connections_override,
        )
    @classmethod
    def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() for some
        # reasons has raised an exception.
        if hasattr(cls, 'server_thread'):
            # Terminate the live server's thread
            cls.server_thread.terminate()
            cls.server_thread.join()
        # Restore sqlite in-memory database connections' non-shareability
        for conn in connections.all():
            if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
                conn.allow_thread_sharing = False
    @classmethod
    def tearDownClass(cls):
        cls._tearDownClassInternal()
        super(LiveServerTestCase, cls).tearDownClass()
class SerializeMixin(object):
    """
    Enforce serialization of TestCases that share a common resource.

    Define a common 'lockfile' for each set of TestCases to serialize. This
    file must exist on the filesystem.
    Place it early in the MRO in order to isolate setUpClass / tearDownClass.
    """
    lockfile = None
    @classmethod
    def setUpClass(cls):
        if cls.lockfile is None:
            raise ValueError(
                "{}.lockfile isn't set. Set it to a unique value "
                "in the base class.".format(cls.__name__))
        # Take an exclusive lock on the shared file for the whole duration of
        # this class's tests; other serialized TestCases block here.
        cls._lockfile = open(cls.lockfile)
        locks.lock(cls._lockfile, locks.LOCK_EX)
        super(SerializeMixin, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(SerializeMixin, cls).tearDownClass()
        # Closing the file releases the lock taken in setUpClass().
        cls._lockfile.close()
| bsd-3-clause |
mtb-za/fatiando | fatiando/gravmag/polyprism.py | 3 | 23452 | r"""
Calculate the potential fields of the 3D prism with polygonal crossection using
the formula of Plouff (1976).
**Gravity**
First and second derivatives of the gravitational potential:
* :func:`~fatiando.gravmag.polyprism.gz`
* :func:`~fatiando.gravmag.polyprism.gxx`
* :func:`~fatiando.gravmag.polyprism.gxy`
* :func:`~fatiando.gravmag.polyprism.gxz`
* :func:`~fatiando.gravmag.polyprism.gyy`
* :func:`~fatiando.gravmag.polyprism.gyz`
* :func:`~fatiando.gravmag.polyprism.gzz`
**Magnetic**
There are functions to calculate the total-field anomaly and the 3 components
of magnetic induction:
* :func:`~fatiando.gravmag.polyprism.tf`
* :func:`~fatiando.gravmag.polyprism.bx`
* :func:`~fatiando.gravmag.polyprism.by`
* :func:`~fatiando.gravmag.polyprism.bz`
**Auxiliary Functions**
Calculates the second derivatives of the function
.. math::
\phi(x,y,z) = \int\int\int \frac{1}{r}
\mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
with respect to the variables :math:`x`, :math:`y`, and :math:`z`.
In this equation,
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}
and :math:`\nu`, :math:`\eta`, :math:`\zeta` are the Cartesian
coordinates of an element inside the volume of a 3D prism with
polygonal crossection. These second derivatives are used to calculate
the total field anomaly and the gravity gradient tensor
components produced by a 3D prism with polygonal crossection.
* :func:`~fatiando.gravmag.polyprism.kernelxx`
* :func:`~fatiando.gravmag.polyprism.kernelxy`
* :func:`~fatiando.gravmag.polyprism.kernelxz`
* :func:`~fatiando.gravmag.polyprism.kernelyy`
* :func:`~fatiando.gravmag.polyprism.kernelyz`
* :func:`~fatiando.gravmag.polyprism.kernelzz`
**References**
Plouff, D. , 1976, Gravity and magnetic fields of polygonal prisms and
applications to magnetic terrain corrections, Geophysics, 41(4), 727-741.
----
"""
from __future__ import division
import numpy
from numpy import arctan2, log, sqrt
from .. import utils
from ..constants import SI2MGAL, SI2EOTVOS, G, CM, T2NT
try:
    from . import _polyprism
except ImportError:
    # The compiled extension is optional at import time; if it is missing,
    # _polyprism is None and any call into it below will fail at runtime.
    _polyprism = None
def tf(xp, yp, zp, prisms, inc, dec, pmag=None):
    r"""
    Calculate the total-field anomaly of polygonal prisms.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: Input units are SI. Output is in nT
    Parameters:
    * xp, yp, zp : arrays
        Arrays with the x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the total field anomaly.
        Prisms without the physical property ``'magnetization'`` will
        be ignored.
    * inc : float
        The inclination of the regional field (in degrees)
    * dec : float
        The declination of the regional field (in degrees)
    * pmag : [mx, my, mz] or None
        A magnetization vector. If not None, will use this value instead of the
        ``'magnetization'`` property of the prisms. Use this, e.g., for
        sensitivity matrix building.
    Returns:
    * res : array
        The field calculated on xp, yp, zp
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The original chained comparison (xp.shape != yp.shape != zp.shape) only
    # raised when BOTH adjacent pairs differed, silently accepting e.g.
    # xp.shape == yp.shape != zp.shape. Check all three explicitly.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # Calculate the 3 components of the unit vector in the direction of the
    # regional field
    fx, fy, fz = utils.dircos(inc, dec)
    if pmag is not None:
        # A scalar pmag means "intensity along the regional field direction".
        if isinstance(pmag, (float, int)):
            pmx, pmy, pmz = pmag * fx, pmag * fy, pmag * fz
        else:
            pmx, pmy, pmz = pmag
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or ('magnetization' not in prism.props
                             and pmag is None):
            continue
        if pmag is None:
            mag = prism.props['magnetization']
            # Scalar magnetization is taken along the regional field.
            if isinstance(mag, (float, int)):
                mx, my, mz = mag * fx, mag * fy, mag * fz
            else:
                mx, my, mz = mag
        else:
            mx, my, mz = pmx, pmy, pmz
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        # Accumulates the prism's contribution into res in place.
        _polyprism.tf(xp, yp, zp, x, y, z1, z2, mx, my, mz, fx, fy, fz, res)
    res *= CM * T2NT
    return res
def bx(xp, yp, zp, prisms):
    """
    Calculates the x component of the magnetic induction produced by 3D
    prisms with polygonal crosssection.
    .. note:: Input units are SI. Output is in nT
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the total field anomaly.
        Prisms without the physical property ``'magnetization'`` will
        be ignored. The ``'magnetization'`` must be a vector.
    Returns:
    * bx: array
        The x component of the magnetic induction
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or ('magnetization' not in prism.props):
            continue
        # Get the magnetization vector components
        mx, my, mz = prism.props['magnetization']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.bx(xp, yp, zp, x, y, z1, z2, mx, my, mz, res)
    res *= CM * T2NT
    return res
def by(xp, yp, zp, prisms):
    """
    Calculates the y component of the magnetic induction produced by 3D
    prisms with polygonal crosssection.
    .. note:: Input units are SI. Output is in nT
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the total field anomaly.
        Prisms without the physical property ``'magnetization'`` will
        be ignored. The ``'magnetization'`` must be a vector.
    Returns:
    * by: array
        The y component of the magnetic induction
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or ('magnetization' not in prism.props):
            continue
        # Get the magnetization vector components
        mx, my, mz = prism.props['magnetization']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.by(xp, yp, zp, x, y, z1, z2, mx, my, mz, res)
    res *= CM * T2NT
    return res
def bz(xp, yp, zp, prisms):
    """
    Calculates the z component of the magnetic induction produced by 3D
    prisms with polygonal crosssection.
    .. note:: Input units are SI. Output is in nT
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the total field anomaly.
        Prisms without the physical property ``'magnetization'`` will
        be ignored. The ``'magnetization'`` must be a vector.
    Returns:
    * bz: array
        The z component of the magnetic induction
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or ('magnetization' not in prism.props):
            continue
        # Get the magnetization vector components
        mx, my, mz = prism.props['magnetization']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.bz(xp, yp, zp, x, y, z1, z2, mx, my, mz, res)
    res *= CM * T2NT
    return res
def gz(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{z}` gravity acceleration component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in mGal!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    # (The unused locals 'dummy' and 'size' from the original were dropped.)
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        density = prism.props['density']
        _polyprism.gz(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2MGAL
    return res
def gxx(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{xx}` gravity gradient tensor component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        # The original assigned 'density' twice; once is enough.
        density = prism.props['density']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.gxx(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2EOTVOS
    return res
def gxy(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{xy}` gravity gradient tensor component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        # The original assigned 'density' twice; once is enough.
        density = prism.props['density']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.gxy(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2EOTVOS
    return res
def gxz(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{xz}` gravity gradient tensor component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        # The original assigned 'density' twice; once is enough.
        density = prism.props['density']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.gxz(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2EOTVOS
    return res
def gyy(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{yy}` gravity gradient tensor component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        # The original assigned 'density' twice; once is enough.
        density = prism.props['density']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.gyy(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2EOTVOS
    return res
def gyz(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{yz}` gravity gradient tensor component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        # The original assigned 'density' twice; once is enough.
        density = prism.props['density']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.gyz(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2EOTVOS
    return res
def gzz(xp, yp, zp, prisms):
    r"""
    Calculates the :math:`g_{zz}` gravity gradient tensor component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI units and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the field.
        Prisms without the physical property ``'density'`` will be
        ignored.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    for prism in prisms:
        if prism is None or 'density' not in prism.props:
            continue
        # The original assigned 'density' twice; once is enough.
        density = prism.props['density']
        x, y = prism.x, prism.y
        z1, z2 = prism.z1, prism.z2
        _polyprism.gzz(xp, yp, zp, x, y, z1, z2, density, res)
    res *= G * SI2EOTVOS
    return res
def kernelxx(xp, yp, zp, prism):
    r"""
    Calculates the function
    .. math::
        \frac{\partial^2 \phi(x,y,z)}{\partial x^2},
    where
    .. math::
        \phi(x,y,z) = \int \int \int \frac{1}{r}
        \mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
    and
    .. math::
        r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input and output values in SI!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prism : object of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the function.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    x, y = prism.x, prism.y
    z1, z2 = prism.z1, prism.z2
    # Unit density gives the purely geometric kernel.
    _polyprism.gxx(xp, yp, zp, x, y, z1, z2, 1, res)
    return res
def kernelxy(xp, yp, zp, prism):
    r"""
    Calculates the function
    .. math::
        \frac{\partial^2 \phi(x,y,z)}{\partial x \partial y},
    where
    .. math::
        \phi(x,y,z) = \int \int \int \frac{1}{r}
        \mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
    and
    .. math::
        r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input and output values in SI!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prism : object of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the function.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    x, y = prism.x, prism.y
    z1, z2 = prism.z1, prism.z2
    # Unit density gives the purely geometric kernel.
    _polyprism.gxy(xp, yp, zp, x, y, z1, z2, 1, res)
    return res
def kernelxz(xp, yp, zp, prism):
    r"""
    Calculates the function
    .. math::
        \frac{\partial^2 \phi(x,y,z)}{\partial x \partial z},
    where
    .. math::
        \phi(x,y,z) = \int \int \int \frac{1}{r}
        \mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
    and
    .. math::
        r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input and output values in SI!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prism : object of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the function.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    x, y = prism.x, prism.y
    z1, z2 = prism.z1, prism.z2
    # Unit density gives the purely geometric kernel.
    _polyprism.gxz(xp, yp, zp, x, y, z1, z2, 1, res)
    return res
def kernelyy(xp, yp, zp, prism):
    r"""
    Calculates the function
    .. math::
        \frac{\partial^2 \phi(x,y,z)}{\partial y^2},
    where
    .. math::
        \phi(x,y,z) = \int \int \int \frac{1}{r}
        \mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
    and
    .. math::
        r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input and output values in SI!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prism : object of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the function.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    x, y = prism.x, prism.y
    z1, z2 = prism.z1, prism.z2
    # Unit density gives the purely geometric kernel.
    _polyprism.gyy(xp, yp, zp, x, y, z1, z2, 1, res)
    return res
def kernelyz(xp, yp, zp, prism):
    r"""
    Calculates the function
    .. math::
        \frac{\partial^2 \phi(x,y,z)}{\partial y \partial z},
    where
    .. math::
        \phi(x,y,z) = \int \int \int \frac{1}{r}
        \mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
    and
    .. math::
        r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input and output values in SI!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prism : object of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the function.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    x, y = prism.x, prism.y
    z1, z2 = prism.z1, prism.z2
    # Unit density gives the purely geometric kernel.
    _polyprism.gyz(xp, yp, zp, x, y, z1, z2, 1, res)
    return res
def kernelzz(xp, yp, zp, prism):
    r"""
    Calculates the function
    .. math::
        \frac{\partial^2 \phi(x,y,z)}{\partial z^2},
    where
    .. math::
        \phi(x,y,z) = \int \int \int \frac{1}{r}
        \mathrm{d}\nu \mathrm{d}\eta \mathrm{d}\zeta
    and
    .. math::
        r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input and output values in SI!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates of the computation points.
    * prism : object of :class:`fatiando.mesher.PolygonalPrism`
        The model used to calculate the function.
    Returns:
    * res : array
        The effect calculated on the computation points.
    Raises:
    * ValueError
        If xp, yp, and zp do not all have the same shape.
    """
    # The chained comparison (xp.shape != yp.shape != zp.shape) missed the
    # case where only one pair of shapes differs; compare all three.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
    res = numpy.zeros(len(xp), dtype=float)
    x, y = prism.x, prism.y
    z1, z2 = prism.z1, prism.z2
    # Unit density gives the purely geometric kernel.
    _polyprism.gzz(xp, yp, zp, x, y, z1, z2, 1, res)
    return res
| bsd-3-clause |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/encodings/aliases.py | 418 | 14848 | """ Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {
# Please keep this list sorted alphabetically by value !
# ascii codec
'646' : 'ascii',
'ansi_x3.4_1968' : 'ascii',
'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
'ansi_x3.4_1986' : 'ascii',
'cp367' : 'ascii',
'csascii' : 'ascii',
'ibm367' : 'ascii',
'iso646_us' : 'ascii',
'iso_646.irv_1991' : 'ascii',
'iso_ir_6' : 'ascii',
'us' : 'ascii',
'us_ascii' : 'ascii',
# base64_codec codec
'base64' : 'base64_codec',
'base_64' : 'base64_codec',
# big5 codec
'big5_tw' : 'big5',
'csbig5' : 'big5',
# big5hkscs codec
'big5_hkscs' : 'big5hkscs',
'hkscs' : 'big5hkscs',
# bz2_codec codec
'bz2' : 'bz2_codec',
# cp037 codec
'037' : 'cp037',
'csibm037' : 'cp037',
'ebcdic_cp_ca' : 'cp037',
'ebcdic_cp_nl' : 'cp037',
'ebcdic_cp_us' : 'cp037',
'ebcdic_cp_wt' : 'cp037',
'ibm037' : 'cp037',
'ibm039' : 'cp037',
# cp1026 codec
'1026' : 'cp1026',
'csibm1026' : 'cp1026',
'ibm1026' : 'cp1026',
# cp1140 codec
'1140' : 'cp1140',
'ibm1140' : 'cp1140',
# cp1250 codec
'1250' : 'cp1250',
'windows_1250' : 'cp1250',
# cp1251 codec
'1251' : 'cp1251',
'windows_1251' : 'cp1251',
# cp1252 codec
'1252' : 'cp1252',
'windows_1252' : 'cp1252',
# cp1253 codec
'1253' : 'cp1253',
'windows_1253' : 'cp1253',
# cp1254 codec
'1254' : 'cp1254',
'windows_1254' : 'cp1254',
# cp1255 codec
'1255' : 'cp1255',
'windows_1255' : 'cp1255',
# cp1256 codec
'1256' : 'cp1256',
'windows_1256' : 'cp1256',
# cp1257 codec
'1257' : 'cp1257',
'windows_1257' : 'cp1257',
# cp1258 codec
'1258' : 'cp1258',
'windows_1258' : 'cp1258',
# cp424 codec
'424' : 'cp424',
'csibm424' : 'cp424',
'ebcdic_cp_he' : 'cp424',
'ibm424' : 'cp424',
# cp437 codec
'437' : 'cp437',
'cspc8codepage437' : 'cp437',
'ibm437' : 'cp437',
# cp500 codec
'500' : 'cp500',
'csibm500' : 'cp500',
'ebcdic_cp_be' : 'cp500',
'ebcdic_cp_ch' : 'cp500',
'ibm500' : 'cp500',
# cp775 codec
'775' : 'cp775',
'cspc775baltic' : 'cp775',
'ibm775' : 'cp775',
# cp850 codec
'850' : 'cp850',
'cspc850multilingual' : 'cp850',
'ibm850' : 'cp850',
# cp852 codec
'852' : 'cp852',
'cspcp852' : 'cp852',
'ibm852' : 'cp852',
# cp855 codec
'855' : 'cp855',
'csibm855' : 'cp855',
'ibm855' : 'cp855',
# cp857 codec
'857' : 'cp857',
'csibm857' : 'cp857',
'ibm857' : 'cp857',
# cp858 codec
'858' : 'cp858',
'csibm858' : 'cp858',
'ibm858' : 'cp858',
# cp860 codec
'860' : 'cp860',
'csibm860' : 'cp860',
'ibm860' : 'cp860',
# cp861 codec
'861' : 'cp861',
'cp_is' : 'cp861',
'csibm861' : 'cp861',
'ibm861' : 'cp861',
# cp862 codec
'862' : 'cp862',
'cspc862latinhebrew' : 'cp862',
'ibm862' : 'cp862',
# cp863 codec
'863' : 'cp863',
'csibm863' : 'cp863',
'ibm863' : 'cp863',
# cp864 codec
'864' : 'cp864',
'csibm864' : 'cp864',
'ibm864' : 'cp864',
# cp865 codec
'865' : 'cp865',
'csibm865' : 'cp865',
'ibm865' : 'cp865',
# cp866 codec
'866' : 'cp866',
'csibm866' : 'cp866',
'ibm866' : 'cp866',
# cp869 codec
'869' : 'cp869',
'cp_gr' : 'cp869',
'csibm869' : 'cp869',
'ibm869' : 'cp869',
# cp932 codec
'932' : 'cp932',
'ms932' : 'cp932',
'mskanji' : 'cp932',
'ms_kanji' : 'cp932',
# cp949 codec
'949' : 'cp949',
'ms949' : 'cp949',
'uhc' : 'cp949',
# cp950 codec
'950' : 'cp950',
'ms950' : 'cp950',
# euc_jis_2004 codec
'jisx0213' : 'euc_jis_2004',
'eucjis2004' : 'euc_jis_2004',
'euc_jis2004' : 'euc_jis_2004',
# euc_jisx0213 codec
'eucjisx0213' : 'euc_jisx0213',
# euc_jp codec
'eucjp' : 'euc_jp',
'ujis' : 'euc_jp',
'u_jis' : 'euc_jp',
# euc_kr codec
'euckr' : 'euc_kr',
'korean' : 'euc_kr',
'ksc5601' : 'euc_kr',
'ks_c_5601' : 'euc_kr',
'ks_c_5601_1987' : 'euc_kr',
'ksx1001' : 'euc_kr',
'ks_x_1001' : 'euc_kr',
# gb18030 codec
'gb18030_2000' : 'gb18030',
# gb2312 codec
'chinese' : 'gb2312',
'csiso58gb231280' : 'gb2312',
'euc_cn' : 'gb2312',
'euccn' : 'gb2312',
'eucgb2312_cn' : 'gb2312',
'gb2312_1980' : 'gb2312',
'gb2312_80' : 'gb2312',
'iso_ir_58' : 'gb2312',
# gbk codec
'936' : 'gbk',
'cp936' : 'gbk',
'ms936' : 'gbk',
# hex_codec codec
'hex' : 'hex_codec',
# hp_roman8 codec
'roman8' : 'hp_roman8',
'r8' : 'hp_roman8',
'csHPRoman8' : 'hp_roman8',
# hz codec
'hzgb' : 'hz',
'hz_gb' : 'hz',
'hz_gb_2312' : 'hz',
# iso2022_jp codec
'csiso2022jp' : 'iso2022_jp',
'iso2022jp' : 'iso2022_jp',
'iso_2022_jp' : 'iso2022_jp',
# iso2022_jp_1 codec
'iso2022jp_1' : 'iso2022_jp_1',
'iso_2022_jp_1' : 'iso2022_jp_1',
# iso2022_jp_2 codec
'iso2022jp_2' : 'iso2022_jp_2',
'iso_2022_jp_2' : 'iso2022_jp_2',
# iso2022_jp_2004 codec
'iso_2022_jp_2004' : 'iso2022_jp_2004',
'iso2022jp_2004' : 'iso2022_jp_2004',
# iso2022_jp_3 codec
'iso2022jp_3' : 'iso2022_jp_3',
'iso_2022_jp_3' : 'iso2022_jp_3',
# iso2022_jp_ext codec
'iso2022jp_ext' : 'iso2022_jp_ext',
'iso_2022_jp_ext' : 'iso2022_jp_ext',
# iso2022_kr codec
'csiso2022kr' : 'iso2022_kr',
'iso2022kr' : 'iso2022_kr',
'iso_2022_kr' : 'iso2022_kr',
# iso8859_10 codec
'csisolatin6' : 'iso8859_10',
'iso_8859_10' : 'iso8859_10',
'iso_8859_10_1992' : 'iso8859_10',
'iso_ir_157' : 'iso8859_10',
'l6' : 'iso8859_10',
'latin6' : 'iso8859_10',
# iso8859_11 codec
'thai' : 'iso8859_11',
'iso_8859_11' : 'iso8859_11',
'iso_8859_11_2001' : 'iso8859_11',
# iso8859_13 codec
'iso_8859_13' : 'iso8859_13',
'l7' : 'iso8859_13',
'latin7' : 'iso8859_13',
# iso8859_14 codec
'iso_8859_14' : 'iso8859_14',
'iso_8859_14_1998' : 'iso8859_14',
'iso_celtic' : 'iso8859_14',
'iso_ir_199' : 'iso8859_14',
'l8' : 'iso8859_14',
'latin8' : 'iso8859_14',
# iso8859_15 codec
'iso_8859_15' : 'iso8859_15',
'l9' : 'iso8859_15',
'latin9' : 'iso8859_15',
# iso8859_16 codec
'iso_8859_16' : 'iso8859_16',
'iso_8859_16_2001' : 'iso8859_16',
'iso_ir_226' : 'iso8859_16',
'l10' : 'iso8859_16',
'latin10' : 'iso8859_16',
# iso8859_2 codec
'csisolatin2' : 'iso8859_2',
'iso_8859_2' : 'iso8859_2',
'iso_8859_2_1987' : 'iso8859_2',
'iso_ir_101' : 'iso8859_2',
'l2' : 'iso8859_2',
'latin2' : 'iso8859_2',
# iso8859_3 codec
'csisolatin3' : 'iso8859_3',
'iso_8859_3' : 'iso8859_3',
'iso_8859_3_1988' : 'iso8859_3',
'iso_ir_109' : 'iso8859_3',
'l3' : 'iso8859_3',
'latin3' : 'iso8859_3',
# iso8859_4 codec
'csisolatin4' : 'iso8859_4',
'iso_8859_4' : 'iso8859_4',
'iso_8859_4_1988' : 'iso8859_4',
'iso_ir_110' : 'iso8859_4',
'l4' : 'iso8859_4',
'latin4' : 'iso8859_4',
# iso8859_5 codec
'csisolatincyrillic' : 'iso8859_5',
'cyrillic' : 'iso8859_5',
'iso_8859_5' : 'iso8859_5',
'iso_8859_5_1988' : 'iso8859_5',
'iso_ir_144' : 'iso8859_5',
# iso8859_6 codec
'arabic' : 'iso8859_6',
'asmo_708' : 'iso8859_6',
'csisolatinarabic' : 'iso8859_6',
'ecma_114' : 'iso8859_6',
'iso_8859_6' : 'iso8859_6',
'iso_8859_6_1987' : 'iso8859_6',
'iso_ir_127' : 'iso8859_6',
# iso8859_7 codec
'csisolatingreek' : 'iso8859_7',
'ecma_118' : 'iso8859_7',
'elot_928' : 'iso8859_7',
'greek' : 'iso8859_7',
'greek8' : 'iso8859_7',
'iso_8859_7' : 'iso8859_7',
'iso_8859_7_1987' : 'iso8859_7',
'iso_ir_126' : 'iso8859_7',
# iso8859_8 codec
'csisolatinhebrew' : 'iso8859_8',
'hebrew' : 'iso8859_8',
'iso_8859_8' : 'iso8859_8',
'iso_8859_8_1988' : 'iso8859_8',
'iso_ir_138' : 'iso8859_8',
# iso8859_9 codec
'csisolatin5' : 'iso8859_9',
'iso_8859_9' : 'iso8859_9',
'iso_8859_9_1989' : 'iso8859_9',
'iso_ir_148' : 'iso8859_9',
'l5' : 'iso8859_9',
'latin5' : 'iso8859_9',
# johab codec
'cp1361' : 'johab',
'ms1361' : 'johab',
# koi8_r codec
'cskoi8r' : 'koi8_r',
# latin_1 codec
#
# Note that the latin_1 codec is implemented internally in C and a
# lot faster than the charmap codec iso8859_1 which uses the same
# encoding. This is why we discourage the use of the iso8859_1
# codec and alias it to latin_1 instead.
#
'8859' : 'latin_1',
'cp819' : 'latin_1',
'csisolatin1' : 'latin_1',
'ibm819' : 'latin_1',
'iso8859' : 'latin_1',
'iso8859_1' : 'latin_1',
'iso_8859_1' : 'latin_1',
'iso_8859_1_1987' : 'latin_1',
'iso_ir_100' : 'latin_1',
'l1' : 'latin_1',
'latin' : 'latin_1',
'latin1' : 'latin_1',
# mac_cyrillic codec
'maccyrillic' : 'mac_cyrillic',
# mac_greek codec
'macgreek' : 'mac_greek',
# mac_iceland codec
'maciceland' : 'mac_iceland',
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
'macroman' : 'mac_roman',
# mac_turkish codec
'macturkish' : 'mac_turkish',
# mbcs codec
'dbcs' : 'mbcs',
# ptcp154 codec
'csptcp154' : 'ptcp154',
'pt154' : 'ptcp154',
'cp154' : 'ptcp154',
'cyrillic_asian' : 'ptcp154',
# quopri_codec codec
'quopri' : 'quopri_codec',
'quoted_printable' : 'quopri_codec',
'quotedprintable' : 'quopri_codec',
# rot_13 codec
'rot13' : 'rot_13',
# shift_jis codec
'csshiftjis' : 'shift_jis',
'shiftjis' : 'shift_jis',
'sjis' : 'shift_jis',
's_jis' : 'shift_jis',
# shift_jis_2004 codec
'shiftjis2004' : 'shift_jis_2004',
'sjis_2004' : 'shift_jis_2004',
's_jis_2004' : 'shift_jis_2004',
# shift_jisx0213 codec
'shiftjisx0213' : 'shift_jisx0213',
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
# tactis codec
'tis260' : 'tactis',
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
'tis_620_2529_0' : 'tis_620',
'tis_620_2529_1' : 'tis_620',
'iso_ir_166' : 'tis_620',
# utf_16 codec
'u16' : 'utf_16',
'utf16' : 'utf_16',
# utf_16_be codec
'unicodebigunmarked' : 'utf_16_be',
'utf_16be' : 'utf_16_be',
# utf_16_le codec
'unicodelittleunmarked' : 'utf_16_le',
'utf_16le' : 'utf_16_le',
# utf_32 codec
'u32' : 'utf_32',
'utf32' : 'utf_32',
# utf_32_be codec
'utf_32be' : 'utf_32_be',
# utf_32_le codec
'utf_32le' : 'utf_32_le',
# utf_7 codec
'u7' : 'utf_7',
'utf7' : 'utf_7',
'unicode_1_1_utf_7' : 'utf_7',
# utf_8 codec
'u8' : 'utf_8',
'utf' : 'utf_8',
'utf8' : 'utf_8',
'utf8_ucs2' : 'utf_8',
'utf8_ucs4' : 'utf_8',
# uu_codec codec
'uu' : 'uu_codec',
# zlib_codec codec
'zip' : 'zlib_codec',
'zlib' : 'zlib_codec',
}
| mit |
neighborhoodhacker/msm-3.4 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Main window drawing scheduler traces into a scrollable, zoomable area.

    Rectangles reported by ``sched_tracer`` are rendered in horizontal
    bands (one band per "rectangle" row).  The keyboard zooms (+/-) and
    scrolls (arrow keys); a left click reports the band and timestamp
    under the cursor back to the tracer.
    """

    # Vertical pixel offset of the first band of rectangles.
    Y_OFFSET = 100
    # Pixel height of each band.
    RECT_HEIGHT = 100
    # Vertical pixel gap between two consecutive bands.
    RECT_SPACE = 50
    # Height of the thin marker strip optionally drawn on top of a band.
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        # Use (almost) the full display for the window.
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        # One scroll unit corresponds to this many virtual pixels.
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        # Bind the same handlers on the container as well so the events
        # are caught wherever the focus/click lands.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary StaticText widget, created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # Map a time offset to pixels at the current zoom level
        # (the fixed 10**3 divisor folds in the time-unit scaling).
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Inverse of us_to_px(): pixels back to a time offset.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Top-left corner of the visible area, in virtual pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Time offset of the left edge of the visible area.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle in band *nr* covering the [start, end] interval.

        If *top_color* is given, a thin marker strip of that color is drawn
        along the top edge of the rectangle first.
        """
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marker strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to (re)emit the rectangles of the visible interval;
        # start/end arrive relative to ts_start, the tracer wants absolutes.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only what is currently visible.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to a band index, or -1 when the pixel falls outside
        # every band (above the first one or inside inter-band spacing).
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the summary text displayed below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward a click to the tracer as (band index, timestamp).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width for the new zoom factor and keep the
        # time offset *x* pinned at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom in/out; arrow keys scroll by one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
antoyo/qutebrowser | tests/unit/browser/webkit/network/test_schemehandler.py | 6 | 1186 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for browser.network.schemehandler."""
import pytest
from qutebrowser.browser.webkit.network import schemehandler
def test_init():
    """The handler must remember the window id it was constructed with."""
    win_id = 0
    handler = schemehandler.SchemeHandler(win_id)
    assert handler._win_id == win_id
def test_create_request():
    """The base handler leaves createRequest abstract."""
    handler = schemehandler.SchemeHandler(0)
    dummy_args = (None, None, None)
    with pytest.raises(NotImplementedError):
        handler.createRequest(*dummy_args)
| gpl-3.0 |
qiaofuhui/zerorpc-python | zerorpc/gevent_zmq.py | 9 | 7333 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on https://github.com/traviscline/gevent-zeromq/
# We want to act like zmq
from zmq import *
# A way to access original zmq
import zmq as _zmq
import gevent.event
import gevent.core
import sys
class Context(_zmq.Context):
    """zmq Context that hands out gevent-cooperative Socket objects."""

    def socket(self, socket_type):
        """Create a gevent-aware :class:`Socket` of *socket_type*.

        Raises ``ZMQError(ENOTSUP)`` when the context is already closed,
        mirroring the behaviour of the native implementation.
        """
        if not self.closed:
            return Socket(self, socket_type)
        raise _zmq.ZMQError(_zmq.ENOTSUP)
class Socket(_zmq.Socket):
    """zmq Socket cooperating with gevent.

    send()/recv() are forced into non-blocking mode; when zmq reports
    EAGAIN the calling greenlet waits on gevent events that are set by a
    watcher on the socket's state-change file descriptor, instead of
    blocking the whole process.
    """

    def __init__(self, context, socket_type):
        super(Socket, self).__init__(context, socket_type)
        # File descriptor that becomes readable when the zmq socket state
        # (EVENTS) changes.
        on_state_changed_fd = self.getsockopt(_zmq.FD)
        # NOTE: pyzmq 13.0.0 messed up with setattr (they turned it into a
        # non-op) and you can't assign attributes normally anymore, hence the
        # tricks with self.__dict__ here
        self.__dict__["_readable"] = gevent.event.Event()
        self.__dict__["_writable"] = gevent.event.Event()
        try:
            # gevent>=1.0
            self.__dict__["_state_event"] = gevent.hub.get_hub().loop.io(
                on_state_changed_fd, gevent.core.READ)
            self._state_event.start(self._on_state_changed)
        except AttributeError:
            # gevent<1.0
            self.__dict__["_state_event"] = \
                gevent.core.read_event(on_state_changed_fd,
                                       self._on_state_changed, persist=True)

    def _on_state_changed(self, event=None, _evtype=None):
        """Refresh the _readable/_writable events from the zmq EVENTS mask."""
        if self.closed:
            # Unblock any waiter so it can notice the socket is gone.
            self._writable.set()
            self._readable.set()
            return

        events = self.getsockopt(_zmq.EVENTS)
        if events & _zmq.POLLOUT:
            self._writable.set()
        if events & _zmq.POLLIN:
            self._readable.set()

    def close(self):
        # Stop the state-FD watcher before closing the underlying socket.
        if not self.closed and getattr(self, '_state_event', None):
            try:
                # gevent>=1.0
                self._state_event.stop()
            except AttributeError:
                # gevent<1.0
                self._state_event.cancel()
        super(Socket, self).close()

    def send(self, data, flags=0, copy=True, track=False):
        """Send *data*, yielding to other greenlets while zmq would block."""
        if flags & _zmq.NOBLOCK:
            # Caller explicitly asked for non-blocking: defer to pyzmq as-is.
            return super(Socket, self).send(data, flags, copy, track)
        flags |= _zmq.NOBLOCK
        while True:
            try:
                msg = super(Socket, self).send(data, flags, copy, track)
                # The following call, force polling the state of the zmq socket
                # (POLLIN and/or POLLOUT). It seems that a POLLIN event is often
                # missed when the socket is used to send at the same time,
                # forcing to poll at this exact moment seems to reduce the
                # latencies when a POLLIN event is missed. The drawback is a
                # reduced throughput (roughly 8.3%) in exchange of a normal
                # concurrency. In other hand, without the following line, you
                # loose 90% of the performances as soon as there is simultaneous
                # send and recv on the socket.
                self._on_state_changed()
                return msg
            except _zmq.ZMQError, e:
                # EAGAIN means "would block": fall through and wait below.
                if e.errno != _zmq.EAGAIN:
                    raise
            self._writable.clear()
            # The following sleep(0) force gevent to switch out to another
            # coroutine and seems to refresh the notion of time that gevent may
            # have. This definitively eliminate the gevent bug that can trigger
            # a timeout too soon under heavy load. In theory it will incur more
            # CPU usage, but in practice it balance even with the extra CPU used
            # when the timeout triggers too soon in the following loop. So for
            # the same CPU load, you get a better throughput (roughly 18.75%).
            gevent.sleep(0)
            while not self._writable.wait(timeout=1):
                # Safety net: if the event was missed but zmq says we are
                # writable, log the anomaly and retry the send anyway.
                if self.getsockopt(_zmq.EVENTS) & _zmq.POLLOUT:
                    print>>sys.stderr, "/!\\ gevent_zeromq BUG /!\\ " \
                        "catching up after missing event (SEND) /!\\"
                    break

    def recv(self, flags=0, copy=True, track=False):
        """Receive a message, yielding to other greenlets while it would block."""
        if flags & _zmq.NOBLOCK:
            # Caller explicitly asked for non-blocking: defer to pyzmq as-is.
            return super(Socket, self).recv(flags, copy, track)
        flags |= _zmq.NOBLOCK
        while True:
            try:
                msg = super(Socket, self).recv(flags, copy, track)
                # The following call, force polling the state of the zmq socket
                # (POLLIN and/or POLLOUT). It seems that a POLLOUT event is
                # often missed when the socket is used to receive at the same
                # time, forcing to poll at this exact moment seems to reduce the
                # latencies when a POLLOUT event is missed. The drawback is a
                # reduced throughput (roughly 8.3%) in exchange of a normal
                # concurrency. In other hand, without the following line, you
                # loose 90% of the performances as soon as there is simultaneous
                # send and recv on the socket.
                self._on_state_changed()
                return msg
            except _zmq.ZMQError, e:
                # EAGAIN means "would block": fall through and wait below.
                if e.errno != _zmq.EAGAIN:
                    raise
            self._readable.clear()
            # The following sleep(0) force gevent to switch out to another
            # coroutine and seems to refresh the notion of time that gevent may
            # have. This definitively eliminate the gevent bug that can trigger
            # a timeout too soon under heavy load. In theory it will incur more
            # CPU usage, but in practice it balance even with the extra CPU used
            # when the timeout triggers too soon in the following loop. So for
            # the same CPU load, you get a better throughput (roughly 18.75%).
            gevent.sleep(0)
            while not self._readable.wait(timeout=1):
                # Safety net: if the event was missed but zmq says data is
                # pending, log the anomaly and retry the recv anyway.
                if self.getsockopt(_zmq.EVENTS) & _zmq.POLLIN:
                    print>>sys.stderr, "/!\\ gevent_zeromq BUG /!\\ " \
                        "catching up after missing event (RECV) /!\\"
                    break
| mit |
srimai/odoo | addons/portal/tests/__init__.py | 261 | 1078 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_portal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jabesq/home-assistant | tests/components/emulated_roku/test_binding.py | 11 | 2547 | """Tests for emulated_roku library bindings."""
from unittest.mock import Mock, patch
from homeassistant.components.emulated_roku.binding import EmulatedRoku, \
EVENT_ROKU_COMMAND, \
ATTR_SOURCE_NAME, ATTR_COMMAND_TYPE, ATTR_KEY, ATTR_APP_ID, \
ROKU_COMMAND_KEYPRESS, ROKU_COMMAND_KEYDOWN, \
ROKU_COMMAND_KEYUP, ROKU_COMMAND_LAUNCH
from tests.common import mock_coro_func
async def test_events_fired_properly(hass):
    """Test that events are fired correctly."""
    binding = EmulatedRoku(hass, 'Test Emulated Roku',
                           '1.2.3.4', 8060,
                           None, None, None)

    events = []
    roku_event_handler = None

    def instantiate(loop, handler,
                    roku_usn, host_ip, listen_port,
                    advertise_ip=None, advertise_port=None,
                    bind_multicast=None):
        # Stand-in for emulated_roku.EmulatedRokuServer: capture the event
        # handler so the test can drive it directly, and return a server
        # mock whose start()/close() are awaitable no-ops.
        nonlocal roku_event_handler
        roku_event_handler = handler

        return Mock(start=mock_coro_func(), close=mock_coro_func())

    def listener(event):
        # Collect every EVENT_ROKU_COMMAND fired on the bus.
        events.append(event)

    with patch('emulated_roku.EmulatedRokuServer', instantiate):
        hass.bus.async_listen(EVENT_ROKU_COMMAND, listener)

        assert await binding.setup() is True

    # setup() must have instantiated the (patched) server, handing us
    # the handler in the process.
    assert roku_event_handler is not None

    # Simulate commands arriving from the emulated Roku server.
    roku_event_handler.on_keydown('Test Emulated Roku', 'A')
    roku_event_handler.on_keyup('Test Emulated Roku', 'A')
    roku_event_handler.on_keypress('Test Emulated Roku', 'C')
    roku_event_handler.launch('Test Emulated Roku', '1')

    await hass.async_block_till_done()

    # One bus event per simulated command, delivered in order:
    # keydown, keyup, keypress, launch.
    assert len(events) == 4

    assert events[0].event_type == EVENT_ROKU_COMMAND
    assert events[0].data[ATTR_COMMAND_TYPE] == ROKU_COMMAND_KEYDOWN
    assert events[0].data[ATTR_SOURCE_NAME] == 'Test Emulated Roku'
    assert events[0].data[ATTR_KEY] == 'A'

    assert events[1].event_type == EVENT_ROKU_COMMAND
    assert events[1].data[ATTR_COMMAND_TYPE] == ROKU_COMMAND_KEYUP
    assert events[1].data[ATTR_SOURCE_NAME] == 'Test Emulated Roku'
    assert events[1].data[ATTR_KEY] == 'A'

    assert events[2].event_type == EVENT_ROKU_COMMAND
    assert events[2].data[ATTR_COMMAND_TYPE] == ROKU_COMMAND_KEYPRESS
    assert events[2].data[ATTR_SOURCE_NAME] == 'Test Emulated Roku'
    assert events[2].data[ATTR_KEY] == 'C'

    assert events[3].event_type == EVENT_ROKU_COMMAND
    assert events[3].data[ATTR_COMMAND_TYPE] == ROKU_COMMAND_LAUNCH
    assert events[3].data[ATTR_SOURCE_NAME] == 'Test Emulated Roku'
    assert events[3].data[ATTR_APP_ID] == '1'
| apache-2.0 |
sperka/shogun | applications/classification/evaluate_multiclass_labels.py | 21 | 2929 | #!/usr/bin/env python
# Copyright (c) The Shogun Machine Learning Toolbox
# Written (w) 2014 Daniel Pyrathon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the Shogun Development Team.
import argparse
import logging
import numpy as np
from modshogun import (LibSVMFile, MulticlassLabels, MulticlassAccuracy)
from utils import get_features_and_labels
LOGGER = logging.getLogger(__file__)
def parse_arguments():
    """Parse the command line of the evaluator.

    Returns:
        argparse.Namespace with two attributes:
        - ``actual``: path to the LibSVM dataset holding the true labels.
        - ``predicted``: path to the predicted-labels file (one per line).

    Exits with an argparse error if either required option is missing.
    """
    # Fixed typo in the user-facing description ("againsy" -> "against").
    parser = argparse.ArgumentParser(description="Evaluate predicted "
                                     "labels against bare truth")
    parser.add_argument('--actual', required=True, type=str,
                        help='Path to LibSVM dataset.')
    parser.add_argument('--predicted', required=True, type=str,
                        help='Path to serialized predicted labels')
    return parser.parse_args()
def main(actual, predicted):
    """Evaluate predicted multiclass labels against the ground truth.

    Loads the true labels from the LibSVM file *actual* and the predicted
    labels (one float per line) from *predicted*, then logs the accuracy
    and prints the confusion matrix.
    """
    LOGGER.info("SVM Multiclass evaluator")

    # Load SVMLight dataset
    feats, labels = get_features_and_labels(LibSVMFile(actual))

    # Load predicted labels
    with open(predicted, 'r') as f:
        predicted_labels_arr = np.array([float(l) for l in f])
    predicted_labels = MulticlassLabels(predicted_labels_arr)

    # Evaluate accuracy
    multiclass_measures = MulticlassAccuracy()
    LOGGER.info("Accuracy = %s" % multiclass_measures.evaluate(
        labels, predicted_labels))
    LOGGER.info("Confusion matrix:")
    res = multiclass_measures.get_confusion_matrix(labels, predicted_labels)
    print res
if __name__ == '__main__':
    # Script entry point: parse the two paths and run the evaluation.
    args = parse_arguments()
    main(args.actual, args.predicted)
| gpl-3.0 |
jizhuoran/ucore | related_info/lab7/semaphore_condition/thr-ex5.py | 48 | 1705 | #coding=utf-8
#!/usr/bin/env python
import threading
import time
condition = threading.Condition()
products = 0
class Producer(threading.Thread):
    """Producer thread: every 2s adds one product while the stock is below 10.

    Shares the module-level ``condition`` and ``products`` counter with
    the Consumer threads.
    """

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        global condition, products
        while True:
            if condition.acquire():
                if products < 10:
                    products += 1;
                    print "Producer(%s):deliver one, now products:%s" %(self.name, products)
                    # Wake one thread waiting on the condition.
                    condition.notify()
                else:
                    print "Producer(%s):already 10, stop deliver, now products:%s" %(self.name, products)
                    # Stock full: block until a consumer takes something.
                    condition.wait();
                condition.release()
            time.sleep(2)
class Consumer(threading.Thread):
    """Consumer thread: every 2s removes one product while more than 1 remains.

    Shares the module-level ``condition`` and ``products`` counter with
    the Producer threads.
    """

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        global condition, products
        while True:
            if condition.acquire():
                if products > 1:
                    products -= 1
                    print "Consumer(%s):consume one, now products:%s" %(self.name, products)
                    # Wake one thread waiting on the condition.
                    condition.notify()
                else:
                    print "Consumer(%s):only 1, stop consume, products:%s" %(self.name, products)
                    # Stock (nearly) empty: block until a producer delivers.
                    condition.wait();
                condition.release()
            time.sleep(2)
if __name__ == "__main__":
for p in range(0, 2):
p = Producer()
p.start()
for c in range(0, 10):
c = Consumer()
c.start()
| gpl-2.0 |
onceuponatimeforever/oh-mainline | vendor/packages/sphinx/sphinx/directives/other.py | 15 | 15215 | # -*- coding: utf-8 -*-
"""
sphinx.directives.other
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.parsers.rst.directives.misc import Class
from docutils.parsers.rst.directives.misc import Include as BaseInclude
from sphinx import addnodes
from sphinx.locale import versionlabels, _
from sphinx.util import url_re, docname_join
from sphinx.util.nodes import explicit_title_re, set_source_info, \
process_index_entry
from sphinx.util.matching import patfilter
def int_or_nothing(argument):
    """Convert a directive option value to int, treating an empty/absent
    value as 999 ("effectively unlimited").

    Used as the converter for the ``:numbered:`` option of ``toctree``,
    where giving the flag without a value means "number to any depth".
    """
    return int(argument) if argument else 999
class TocTree(Directive):
    """
    Directive to notify Sphinx about the hierarchical structure of the docs,
    and to include a table-of-contents like tree in the current document.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'maxdepth': int,
        'glob': directives.flag,
        'hidden': directives.flag,
        'includehidden': directives.flag,
        'numbered': int_or_nothing,
        'titlesonly': directives.flag,
    }

    def run(self):
        """Parse the content lines into a ``toctree`` node.

        Returns the wrapped toctree node plus any warnings produced for
        unresolvable entries.
        """
        env = self.state.document.settings.env
        suffix = env.config.source_suffix
        glob = 'glob' in self.options
        ret = []
        # (title, ref) pairs, where ref may be a document, or an external link,
        # and title may be None if the document's title is to be used
        entries = []
        includefiles = []
        all_docnames = env.found_docs.copy()
        # don't add the currently visited file in catch-all patterns
        all_docnames.remove(env.docname)
        for entry in self.content:
            if not entry:
                continue
            if not glob:
                # look for explicit titles ("Some Title <document>")
                m = explicit_title_re.match(entry)
                if m:
                    ref = m.group(2)
                    title = m.group(1)
                    docname = ref
                else:
                    ref = docname = entry
                    title = None
                # remove suffixes (backwards compatibility)
                if docname.endswith(suffix):
                    docname = docname[:-len(suffix)]
                # absolutize filenames
                docname = docname_join(env.docname, docname)
                if url_re.match(ref) or ref == 'self':
                    # External URL or self-reference: no document to include.
                    entries.append((title, ref))
                elif docname not in env.found_docs:
                    ret.append(self.state.document.reporter.warning(
                        'toctree contains reference to nonexisting '
                        'document %r' % docname, line=self.lineno))
                    env.note_reread()
                else:
                    entries.append((title, docname))
                    includefiles.append(docname)
            else:
                # Glob mode: expand the pattern against all known documents.
                patname = docname_join(env.docname, entry)
                docnames = sorted(patfilter(all_docnames, patname))
                for docname in docnames:
                    all_docnames.remove(docname) # don't include it again
                    entries.append((None, docname))
                    includefiles.append(docname)
                if not docnames:
                    ret.append(self.state.document.reporter.warning(
                        'toctree glob pattern %r didn\'t match any documents'
                        % entry, line=self.lineno))
        subnode = addnodes.toctree()
        subnode['parent'] = env.docname
        # entries contains all entries (self references, external links etc.)
        subnode['entries'] = entries
        # includefiles only entries that are documents
        subnode['includefiles'] = includefiles
        subnode['maxdepth'] = self.options.get('maxdepth', -1)
        subnode['glob'] = glob
        subnode['hidden'] = 'hidden' in self.options
        subnode['includehidden'] = 'includehidden' in self.options
        subnode['numbered'] = self.options.get('numbered', 0)
        subnode['titlesonly'] = 'titlesonly' in self.options
        set_source_info(self, subnode)
        wrappernode = nodes.compound(classes=['toctree-wrapper'])
        wrappernode.append(subnode)
        ret.append(wrappernode)
        return ret
class Author(Directive):
    """
    Directive to give the name of the author of the current document
    or section. Shown in the output only if the show_authors option is on.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        env = self.state.document.settings.env
        if not env.config.show_authors:
            # Author display disabled in the build configuration.
            return []
        para = nodes.paragraph()
        emph = nodes.emphasis()
        para += emph
        # Pick the label matching the concrete directive name this class
        # was registered under.
        if self.name == 'sectionauthor':
            text = _('Section author: ')
        elif self.name == 'moduleauthor':
            text = _('Module author: ')
        elif self.name == 'codeauthor':
            text = _('Code author: ')
        else:
            text = _('Author: ')
        emph += nodes.Text(text, text)
        # The argument may contain inline markup (e.g. an email reference).
        inodes, messages = self.state.inline_text(self.arguments[0],
                                                  self.lineno)
        emph.extend(inodes)
        return [para] + messages
class Index(Directive):
    """
    Directive to add entries to the index.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        # One index entry per line of the argument.
        arguments = self.arguments[0].split('\n')
        env = self.state.document.settings.env
        # Create an anchor target for the entries to point at.
        targetid = 'index-%s' % env.new_serialno('index')
        targetnode = nodes.target('', '', ids=[targetid])
        self.state.document.note_explicit_target(targetnode)
        indexnode = addnodes.index()
        indexnode['entries'] = ne = []
        indexnode['inline'] = False
        set_source_info(self, indexnode)
        for entry in arguments:
            ne.extend(process_index_entry(entry, targetid))
        return [indexnode, targetnode]
class VersionChange(Directive):
    """
    Directive to describe a change/addition/deprecation in a specific version.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        node = addnodes.versionmodified()
        node.document = self.state.document
        set_source_info(self, node)
        node['type'] = self.name
        node['version'] = self.arguments[0]
        # Human-readable label, e.g. "New in version X", looked up by the
        # directive name this class was registered under.
        text = versionlabels[self.name] % self.arguments[0]
        if len(self.arguments) == 2:
            # Optional second argument: a one-line explanation with inline
            # markup allowed.
            inodes, messages = self.state.inline_text(self.arguments[1],
                                                      self.lineno+1)
            para = nodes.paragraph(self.arguments[1], '', *inodes)
            set_source_info(self, para)
            node.append(para)
        else:
            messages = []
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        if len(node):
            # Prefix the version label to the first paragraph; wrap the
            # original text so it stays translatable.
            if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
                content = nodes.inline(node[0].rawsource, translatable=True)
                content.source = node[0].source
                content.line = node[0].line
                content += node[0].children
                node[0].replace_self(nodes.paragraph('', '', content))
            node[0].insert(0, nodes.inline('', '%s: ' % text))
        else:
            # No explanation or body at all: emit just "<label>."
            para = nodes.paragraph('', '', nodes.inline('', '%s.' % text))
            node.append(para)
        env = self.state.document.settings.env
        # XXX should record node.source as well
        env.note_versionchange(node['type'], node['version'], node, node.line)
        return [node] + messages
class SeeAlso(BaseAdmonition):
    """
    An admonition mentioning things to look at as reference.
    """
    # Reuse the generic admonition machinery, only swapping the node type.
    node_class = addnodes.seealso
class TabularColumns(Directive):
    """Directive giving an explicit tabulary column spec to the LaTeX writer."""

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        """Wrap the raw column-spec argument in a ``tabular_col_spec`` node."""
        spec_node = addnodes.tabular_col_spec()
        set_source_info(self, spec_node)
        spec_node['spec'] = self.arguments[0]
        return [spec_node]
class Centered(Directive):
    """Directive producing a centered line of bold text."""

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        """Parse the argument as inline text inside a ``centered`` node."""
        if not self.arguments:
            return []
        parsed, messages = self.state.inline_text(self.arguments[0],
                                                  self.lineno)
        centered_node = addnodes.centered()
        centered_node.extend(parsed)
        return [centered_node] + messages
class Acks(Directive):
    """
    Directive for a list of names.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        node = addnodes.acks()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)
        # The parsed content must be exactly one bullet list of names.
        if len(node.children) != 1 or not isinstance(node.children[0],
                                                     nodes.bullet_list):
            return [self.state.document.reporter.warning(
                '.. acks content is not a list', line=self.lineno)]
        return [node]
class HList(Directive):
    """
    Directive for a list that gets compacted horizontally.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'columns': int,
    }

    def run(self):
        # Number of columns to distribute the items over (default 2).
        ncolumns = self.options.get('columns', 2)
        node = nodes.paragraph()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)
        # The parsed content must be exactly one bullet list.
        if len(node.children) != 1 or not isinstance(node.children[0],
                                                     nodes.bullet_list):
            return [self.state.document.reporter.warning(
                '.. hlist content is not a list', line=self.lineno)]
        fulllist = node.children[0]
        # create a hlist node where the items are distributed
        # (the first ``nmore`` columns receive one extra item each)
        npercol, nmore = divmod(len(fulllist), ncolumns)
        index = 0
        newnode = addnodes.hlist()
        for column in range(ncolumns):
            endindex = index + (column < nmore and (npercol+1) or npercol)
            col = addnodes.hlistcol()
            col += nodes.bullet_list()
            col[0] += fulllist.children[index:endindex]
            index = endindex
            newnode += col
        return [newnode]
class Only(Directive):
    """
    Directive to only include text if the given tag(s) are enabled.

    The body is parsed with titles allowed; sections found inside are
    re-attached ("raised") higher up the doctree at a depth matching the
    surrounding document instead of being nested under the only-node.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    def run(self):
        node = addnodes.only()
        node.document = self.state.document
        set_source_info(self, node)
        # Tag expression (e.g. "html and not latex"); evaluated later.
        node['expr'] = self.arguments[0]
        # Same as util.nested_parse_with_titles but try to handle nested
        # sections which should be raised higher up the doctree.
        surrounding_title_styles = self.state.memo.title_styles
        surrounding_section_level = self.state.memo.section_level
        # Parse the body in a clean title/section context; restored in finally.
        self.state.memo.title_styles = []
        self.state.memo.section_level = 0
        try:
            self.state.nested_parse(self.content, self.content_offset,
                                    node, match_titles=1)
            title_styles = self.state.memo.title_styles
            if (not surrounding_title_styles
                or not title_styles
                or title_styles[0] not in surrounding_title_styles
                or not self.state.parent):
                # No nested sections so no special handling needed.
                return [node]
            # Calculate the depths of the current and nested sections.
            current_depth = 0
            parent = self.state.parent
            while parent:
                current_depth += 1
                parent = parent.parent
            # NOTE(review): -2 presumably discounts the document root levels
            # counted by the walk above — confirm against Sphinx upstream.
            current_depth -= 2
            title_style = title_styles[0]
            nested_depth = len(surrounding_title_styles)
            if title_style in surrounding_title_styles:
                nested_depth = surrounding_title_styles.index(title_style)
            # Use these depths to determine where the nested sections should
            # be placed in the doctree.
            n_sects_to_raise = current_depth - nested_depth + 1
            parent = self.state.parent
            for i in xrange(n_sects_to_raise):
                if parent.parent:
                    parent = parent.parent
            parent.append(node)
            return []
        finally:
            # Always restore the surrounding parsing context.
            self.state.memo.title_styles = surrounding_title_styles
            self.state.memo.section_level = surrounding_section_level
class Include(BaseInclude):
    """
    Variant of the standard "include" directive that resolves absolute
    paths relative to the Sphinx source directory.
    """
    def run(self):
        env = self.state.document.settings.env
        arg = self.arguments[0]
        if arg.startswith('<') and arg.endswith('>'):
            # docutils "standard" include (e.g. <isonum.txt>): no path rewriting.
            return BaseInclude.run(self)
        # Rewrite the argument so absolute names are rooted at the srcdir.
        rel_filename, filename = env.relfn2path(arg)
        self.arguments[0] = filename
        return BaseInclude.run(self)
# Register every directive defined in this module with docutils under its
# public reST name.
directives.register_directive('toctree', TocTree)
directives.register_directive('sectionauthor', Author)
directives.register_directive('moduleauthor', Author)
directives.register_directive('codeauthor', Author)
directives.register_directive('index', Index)
directives.register_directive('deprecated', VersionChange)
directives.register_directive('versionadded', VersionChange)
directives.register_directive('versionchanged', VersionChange)
directives.register_directive('seealso', SeeAlso)
directives.register_directive('tabularcolumns', TabularColumns)
directives.register_directive('centered', Centered)
directives.register_directive('acks', Acks)
directives.register_directive('hlist', HList)
directives.register_directive('only', Only)
directives.register_directive('include', Include)
# register the standard rst class directive under a different name
# only for backwards compatibility now
directives.register_directive('cssclass', Class)
# new standard name when default-domain with "class" is in effect
directives.register_directive('rst-class', Class)
| agpl-3.0 |
kingvuplus/TT-gui | tools/genmetaindex.py | 155 | 1104 | # usage: genmetaindex.py <xml-files> > index.xml
import sys, os
from xml.etree.ElementTree import ElementTree, Element
# Root element aggregating one <package> entry per meta file given on argv.
root = Element("index")
for file in sys.argv[1:]:
	p = ElementTree()
	p.parse(file)
	package = Element("package")
	package.set("details", os.path.basename(file))
	# we need all prerequisites
	package.append(p.find("prerequisites"))
	# Take the first <info> element.  NOTE: compare against None explicitly;
	# an Element with no children is falsy, so the old "if not info" test
	# wrongly replaced an empty first <info> with a later one and the old
	# "assert info" failed on a childless element.
	info = None
	for i in p.findall("info"):
		if info is None:
			info = i
	assert info is not None
	# we need some of the info, but not all; iterate over a copy while removing
	for i in info[:]:
		if i.tag not in ["name", "packagename", "packagetype", "shortdescription"]:
			info.remove(i)
	# Promote the surviving info fields to attributes of <package>.
	for i in info:
		package.set(i.tag, i.text)
	root.append(package)
def indent(elem, level=0):
	"""Recursively add whitespace so the serialized XML is pretty-printed.

	Sets ``text``/``tail`` of *elem* and its descendants (tabs, one level
	per depth) only where they are empty or whitespace-only.
	"""
	pad = "\n" + level * "\t"
	if not len(elem):
		# Leaf node: non-root leaves get the parent-level padding as tail.
		if level and (not elem.tail or not elem.tail.strip()):
			elem.tail = pad
		return
	if not elem.text or not elem.text.strip():
		elem.text = pad + "\t"
	if not elem.tail or not elem.tail.strip():
		elem.tail = pad
	last = None
	for last in elem:
		indent(last, level + 1)
	# The final child closes the parent, so it gets the parent's padding.
	if not last.tail or not last.tail.strip():
		last.tail = pad
# Pretty-print the assembled tree in place, then emit it on stdout.
indent(root)
ElementTree(root).write(sys.stdout)
| gpl-2.0 |
BoGoEngine/bogo-python | doc/conf.py | 3 | 10172 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# bogo documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 21:36:53 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bogo'
copyright = '2014, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bogodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bogo.tex', 'bogo Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bogo', 'bogo Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bogo', 'bogo Documentation',
'Author', 'bogo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'bogo'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'bogo'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| gpl-3.0 |
calamityman/ansible-modules-extras | monitoring/zabbix_hostmacro.py | 29 | 8815 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module documentation.  NOTE: the EXAMPLES block previously contained
# invalid YAML ("macro_name:Example macro" — missing space after the colon);
# fixed below so the example is actually runnable.
DOCUMENTATION = '''
---
module: zabbix_hostmacro
short_description: Zabbix host macro creates/updates/deletes
description:
   - manages Zabbix host macros, it can create, update or delete them.
version_added: "2.0"
author:
    - "(@cave)"
    - Dean Hailin Song
requirements:
    - "python >= 2.6"
    - zabbix-api
options:
    server_url:
        description:
            - URL of Zabbix server, with protocol (http or https).
        required: true
        aliases: [ "url" ]
    login_user:
        description:
            - Zabbix user name.
        required: true
    login_password:
        description:
            - Zabbix user password.
        required: true
    http_login_user:
        description:
            - Basic Auth login
        required: false
        default: None
        version_added: "2.1"
    http_login_password:
        description:
            - Basic Auth password
        required: false
        default: None
        version_added: "2.1"
    host_name:
        description:
            - Name of the host.
        required: true
    macro_name:
        description:
            - Name of the host macro.
        required: true
    macro_value:
        description:
            - Value of the host macro.
        required: true
    state:
        description:
            - State of the macro.
            - On C(present), it will create if macro does not exist or update the macro if the associated data is different.
            - On C(absent) will remove a macro if it exists.
        required: false
        choices: ['present', 'absent']
        default: "present"
    timeout:
        description:
            - The timeout of API request (seconds).
        default: 10
'''

EXAMPLES = '''
- name: Create a new host macro or update an existing macro's value
  local_action:
    module: zabbix_hostmacro
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    host_name: ExampleHost
    macro_name: Example macro
    macro_value: Example value
    state: present
'''
import logging
import copy
# The third-party zabbix-api package may be missing; remember that and let
# main() report a friendly error instead of crashing at import time.
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    HAS_ZABBIX_API = True
except ImportError:
    HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# Since the bundled zabbix-api python module is too old (version 1.0, no
# higher version so far).
class ZabbixAPIExtends(ZabbixAPI):
    # NOTE(review): extra **kwargs are accepted but silently ignored —
    # presumably kept for call-site compatibility with other zabbix modules.
    def __init__(self, server, timeout, user, passwd, **kwargs):
        ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
class HostMacro(object):
    """Helpers around the Zabbix user-macro API for a single module run.

    All methods terminate the module run via exit_json()/fail_json() on
    success or error; only the get_* methods return to the caller.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
    # get host id by host name
    def get_host_id(self, host_name):
        """Return the Zabbix host id for host_name; fail the module if absent."""
        try:
            host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
            if len(host_list) < 1:
                self._module.fail_json(msg="Host not found: %s" % host_name)
            else:
                host_id = host_list[0]['hostid']
                return host_id
        except Exception, e:
            self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))
    # get host macro
    def get_host_macro(self, macro_name, host_id):
        """Return the macro object named {$MACRO_NAME} on host_id, or None."""
        try:
            host_macro_list = self._zapi.usermacro.get(
                {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
            if len(host_macro_list) > 0:
                return host_macro_list[0]
            return None
        except Exception, e:
            self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))
    # create host macro
    def create_host_macro(self, macro_name, macro_value, host_id):
        """Create {$MACRO_NAME}=macro_value on host_id and exit the module."""
        try:
            # In check mode report the would-be change without touching Zabbix.
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
        except Exception, e:
            self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))
    # update host macro
    def update_host_macro(self, host_macro_obj, macro_name, macro_value):
        """Update an existing macro's value; no-op exit when already current."""
        host_macro_id = host_macro_obj['hostmacroid']
        if host_macro_obj['macro'] == '{$'+macro_name+'}' and host_macro_obj['value'] == macro_value:
            self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name)
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
        except Exception, e:
            # NOTE(review): message grammar "Failed to updated" — fix separately.
            self._module.fail_json(msg="Failed to updated host macro %s: %s" % (macro_name, e))
    # delete host macro
    def delete_host_macro(self, host_macro_obj, macro_name):
        """Delete the given macro object and exit the module."""
        host_macro_id = host_macro_obj['hostmacroid']
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.delete([host_macro_id])
            self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
        except Exception, e:
            self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
host_name=dict(type='str', required=True),
macro_name=dict(type='str', required=True),
macro_value=dict(type='str', required=True),
state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
host_name = module.params['host_name']
macro_name = (module.params['macro_name']).upper()
macro_value = module.params['macro_value']
state = module.params['state']
timeout = module.params['timeout']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
except Exception, e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host_macro_class_obj = HostMacro(module, zbx)
changed = False
if host_name:
host_id = host_macro_class_obj.get_host_id(host_name)
host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id)
if state == 'absent':
if not host_macro_obj:
module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name)
else:
# delete a macro
host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name)
else:
if not host_macro_obj:
# create host macro
host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id)
else:
# update host macro
host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
# Standard Ansible boilerplate: pull in AnsibleModule and run the module.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
vityurkiv/Ox | python/TestHarness/testers/RunException.py | 23 | 1785 | from RunApp import RunApp
class RunException(RunApp):
    """Tester for applications that are expected to fail.

    The test passes when the expected error (or, in debug mode, the expected
    assertion) message appears in the output, even if the application
    terminates abnormally.
    """
    @staticmethod
    def validParams():
        # Typo fixes in help strings: "ouput" -> "output",
        # "terminiate" -> "terminate", "Inidicates" -> "Indicates".
        params = RunApp.validParams()
        params.addParam('expect_err', "A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
        params.addParam('expect_assert', "DEBUG MODE ONLY: A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
        params.addParam('should_crash', True, "Indicates that the test is expected to crash or otherwise terminate early")
        # Printing errors in parallel often intertwine when multiple processors receive the same error. We will set max_parallel = 1 by default, but it can be overridden
        params['max_parallel'] = 1
        return params

    def __init__(self, name, params):
        RunApp.__init__(self, name, params)

    def checkRunnable(self, options):
        # Expected-failure tests cannot participate in recover testing.
        if options.enable_recover:
            reason = 'skipped (RunException RECOVER)'
            return (False, reason)
        return RunApp.checkRunnable(self, options)

    def processResults(self, moose_dir, retcode, options, output):
        reason = ''
        specs = self.specs
        # Expected errors and assertions might do a lot of things including crash so we
        # will handle them separately
        if specs.isValid('expect_err'):
            if not self.checkOutputForPattern(output, specs['expect_err']):
                reason = 'NO EXPECTED ERR'
        elif specs.isValid('expect_assert'):
            if options.method == 'dbg':  # Only check asserts in debug mode
                if not self.checkOutputForPattern(output, specs['expect_assert']):
                    reason = 'NO EXPECTED ASSERT'
        # Fall back to the standard RunApp checks when no expectation failed.
        if reason == '':
            (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)
        return (reason, output)
| lgpl-2.1 |
Yong-Lee/django | django/utils/lru_cache.py | 94 | 7648 | try:
from functools import lru_cache
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
    class _HashedSeq(list):
        # Cache key wrapper: computes the (possibly expensive) hash of its
        # contents once and reuses it for every dict lookup.
        __slots__ = 'hashvalue'
        def __init__(self, tup, hash=hash):
            self[:] = tup
            self.hashvalue = hash(tup)
        def __hash__(self):
            return self.hashvalue
    def _make_key(args, kwds, typed,
                 kwd_mark = (object(),),
                 fasttypes = {int, str, frozenset, type(None)},
                 sorted=sorted, tuple=tuple, type=type, len=len):
        'Make a cache key from optionally typed positional and keyword arguments'
        # Default arguments are bound once at definition time so lookups are
        # fast locals; kwd_mark is a unique sentinel separating positional
        # args from keyword items within the flat key.
        key = args
        if kwds:
            sorted_items = sorted(kwds.items())
            key += kwd_mark
            for item in sorted_items:
                key += item
        if typed:
            # Append argument types so f(3) and f(3.0) get distinct keys.
            key += tuple(type(v) for v in args)
            if kwds:
                key += tuple(type(v) for k, v in sorted_items)
        elif len(key) == 1 and type(key[0]) in fasttypes:
            # Single argument of a cheap-to-hash type: use it directly.
            return key[0]
        return _HashedSeq(key)
    def lru_cache(maxsize=100, typed=False):
        """Least-recently-used cache decorator.
        If *maxsize* is set to None, the LRU features are disabled and the cache
        can grow without bound.
        If *typed* is True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.
        Arguments to the cached function must be hashable.
        View the cache statistics named tuple (hits, misses, maxsize, currsize) with
        f.cache_info(). Clear the cache and statistics with f.cache_clear().
        Access the underlying function with f.__wrapped__.
        See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
        """
        # Users should only access the lru_cache through its public API:
        # cache_info, cache_clear, and f.__wrapped__
        # The internals of the lru_cache are encapsulated for thread safety and
        # to allow the implementation to change (including a possible C version).
        def decorating_function(user_function):
            cache = dict()
            stats = [0, 0]  # make statistics updateable non-locally
            HITS, MISSES = 0, 1  # names for the stats fields
            make_key = _make_key
            cache_get = cache.get  # bound method to lookup key or return None
            _len = len  # localize the global len() function
            lock = RLock()  # because linkedlist updates aren't threadsafe
            root = []  # root of the circular doubly linked list
            root[:] = [root, root, None, None]  # initialize by pointing to self
            nonlocal_root = [root]  # make updateable non-locally
            # Each link is a 4-item list: [prev_link, next_link, key, result].
            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields
            if maxsize == 0:
                def wrapper(*args, **kwds):
                    # no caching, just do a statistics update after a successful call
                    result = user_function(*args, **kwds)
                    stats[MISSES] += 1
                    return result
            elif maxsize is None:
                def wrapper(*args, **kwds):
                    # simple caching without ordering or size limit
                    key = make_key(args, kwds, typed)
                    result = cache_get(key, root)  # root used here as a unique not-found sentinel
                    if result is not root:
                        stats[HITS] += 1
                        return result
                    result = user_function(*args, **kwds)
                    cache[key] = result
                    stats[MISSES] += 1
                    return result
            else:
                def wrapper(*args, **kwds):
                    # size limited caching that tracks accesses by recency
                    key = make_key(args, kwds, typed) if kwds or typed else args
                    with lock:
                        link = cache_get(key)
                        if link is not None:
                            # record recent use of the key by moving it to the front of the list
                            root, = nonlocal_root
                            link_prev, link_next, key, result = link
                            link_prev[NEXT] = link_next
                            link_next[PREV] = link_prev
                            last = root[PREV]
                            last[NEXT] = root[PREV] = link
                            link[PREV] = last
                            link[NEXT] = root
                            stats[HITS] += 1
                            return result
                    # Cache miss: call the user function outside the lock.
                    result = user_function(*args, **kwds)
                    with lock:
                        root, = nonlocal_root
                        if key in cache:
                            # getting here means that this same key was added to the
                            # cache while the lock was released. since the link
                            # update is already done, we need only return the
                            # computed result and update the count of misses.
                            pass
                        elif _len(cache) >= maxsize:
                            # use the old root to store the new key and result
                            oldroot = root
                            oldroot[KEY] = key
                            oldroot[RESULT] = result
                            # empty the oldest link and make it the new root
                            root = nonlocal_root[0] = oldroot[NEXT]
                            oldkey = root[KEY]
                            oldvalue = root[RESULT]
                            root[KEY] = root[RESULT] = None
                            # now update the cache dictionary for the new links
                            del cache[oldkey]
                            cache[key] = oldroot
                        else:
                            # put result in a new link at the front of the list
                            last = root[PREV]
                            link = [last, root, key, result]
                            last[NEXT] = root[PREV] = cache[key] = link
                    stats[MISSES] += 1
                    return result
            def cache_info():
                """Report cache statistics"""
                with lock:
                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
            def cache_clear():
                """Clear the cache and cache statistics"""
                with lock:
                    cache.clear()
                    root = nonlocal_root[0]
                    root[:] = [root, root, None, None]
                    stats[:] = [0, 0]
            wrapper.__wrapped__ = user_function
            wrapper.cache_info = cache_info
            wrapper.cache_clear = cache_clear
            return update_wrapper(wrapper, user_function)
        return decorating_function
| bsd-3-clause |
youfoh/webkit-efl | Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py | 43 | 5176 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
    """Raised when the underlying connection terminates unexpectedly."""
class InvalidFrameException(ConnectionTerminatedException):
    """Raised when a received frame is invalid and cannot be parsed."""
class BadOperationException(Exception):
    """Raised for operations that are invalid for the connection state:
    send_message() on a server-terminated connection, or
    receive_message() on a client-terminated connection.
    """
class UnsupportedFrameException(Exception):
    """Raised when a frame arrives with a flag or opcode we cannot handle.

    Handlers may catch and ignore this exception, then call
    receive_message() again to continue with the next frame.
    """
class InvalidUTF8Exception(Exception):
    """Raised when a received text frame contains byte sequences that are
    not valid UTF-8.
    """
class StreamBase(object):
    """Base stream class.

    Wraps a mod_python request and provides low-level primitives for
    reading and writing raw bytes on its connection.
    """

    def __init__(self, request):
        """Construct an instance.

        Args:
            request: mod_python request.
        """
        self._logger = util.get_class_logger(self)
        self._request = request

    def _read(self, length):
        """Reads up to length bytes from the connection in a single call.

        Returns:
            The bytes read; may be shorter than length.

        Raises:
            ConnectionTerminatedException: when read returns empty string,
                which means the peer closed the connection.
        """
        data = self._request.connection.read(length)
        if not data:
            raise ConnectionTerminatedException(
                'Receiving %d byte failed. Peer (%r) closed connection' %
                (length, (self._request.connection.remote_addr,)))
        return data

    def _write(self, data):
        """Writes given bytes to connection. In case we catch any exception,
        prepends remote address to the exception message and raise again.
        """
        try:
            self._request.connection.write(data)
        except Exception as e:  # fixed: "except Exception, e" is Python-2-only
            util.prepend_message_to_exception(
                'Failed to send message to %r: ' %
                (self._request.connection.remote_addr,),
                e)
            raise

    def receive_bytes(self, length):
        """Receives multiple bytes. Retries read when we couldn't receive the
        specified amount.

        Raises:
            ConnectionTerminatedException: when read returns empty string.
        """
        chunks = []
        while length > 0:
            # _read may return fewer bytes than requested; keep asking for
            # the remainder until we have everything.
            chunk = self._read(length)
            chunks.append(chunk)
            length -= len(chunk)
        return ''.join(chunks)

    def _read_until(self, delim_char):
        """Reads bytes until we encounter delim_char. The result will not
        contain delim_char.

        Raises:
            ConnectionTerminatedException: when read returns empty string.
        """
        chunks = []
        while True:
            ch = self._read(1)
            if ch == delim_char:
                break
            chunks.append(ch)
        return ''.join(chunks)
# vi:sts=4 sw=4 et
| lgpl-2.1 |
dalf/searx | searx/metrics/error_recorder.py | 1 | 5795 | import typing
import inspect
from json import JSONDecodeError
from urllib.parse import urlparse
from httpx import HTTPError, HTTPStatusError
from searx.exceptions import (SearxXPathSyntaxException, SearxEngineXPathException, SearxEngineAPIException,
SearxEngineAccessDeniedException)
from searx import logger, searx_parent_dir
errors_per_engines = {}
class ErrorContext:
    """Hashable description of one error location in an engine.

    Two occurrences of the same error (same source location, exception
    class and log message/parameters) compare equal, so they can be
    counted together in ``errors_per_engines``.
    """

    __slots__ = ('filename', 'function', 'line_no', 'code', 'exception_classname',
                 'log_message', 'log_parameters', 'secondary')

    def __init__(self, filename, function, line_no, code, exception_classname, log_message, log_parameters, secondary):
        self.filename = filename
        self.function = function
        self.line_no = line_no
        self.code = code
        self.exception_classname = exception_classname
        self.log_message = log_message
        self.log_parameters = log_parameters
        self.secondary = secondary

    def _identity(self):
        # Single source of truth for equality and hashing; order matches
        # __slots__ and preserves the original hash input order.
        return (self.filename, self.function, self.line_no, self.code,
                self.exception_classname, self.log_message, self.log_parameters,
                self.secondary)

    def __eq__(self, o) -> bool:
        if not isinstance(o, ErrorContext):
            return False
        return self._identity() == o._identity()

    def __hash__(self):
        return hash(self._identity())

    def __repr__(self):
        # Fixed: the original format omitted ``function`` even though it
        # participates in equality and hashing.
        return "ErrorContext({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}) {!r}".format(
            self.filename, self.function, self.line_no, self.code,
            self.exception_classname, self.log_message, self.log_parameters,
            self.secondary)
def add_error_context(engine_name: str, error_context: ErrorContext) -> None:
    """Record one occurrence of *error_context* for the given engine."""
    per_context_counts = errors_per_engines.setdefault(engine_name, {})
    current = per_context_counts.get(error_context, 0)
    per_context_counts[error_context] = current + 1
    logger.debug('%s: %s', engine_name, str(error_context))
def get_trace(traces):
    """Pick the most relevant frame from *traces*.

    Walks the frames from innermost to outermost and returns the first one
    located in searx/engines or searx/search/processors; falls back to the
    innermost frame when none matches.
    """
    for frame in reversed(traces):
        path_parts = frame.filename.split('/')
        parent_two = '/'.join(path_parts[-3:-1])
        parent_three = '/'.join(path_parts[-4:-1])
        if parent_two == 'searx/engines' or parent_three == 'searx/search/processors':
            return frame
    return traces[-1]
def get_hostname(exc: HTTPError) -> typing.Optional[str]:
    """Best-effort hostname for the URL involved in an httpx error.

    Fixed: the return annotation was ``Optional[None]``, which only ever
    means ``None``; the function returns a netloc string.
    """
    url = exc.request.url
    if url is None and exc.response is not None:
        url = exc.response.url
    # NOTE(review): if neither the request nor the response carries a URL
    # this passes None into urlparse and raises — confirm callers always
    # have at least one URL available.
    return urlparse(url).netloc
def get_request_exception_messages(exc: HTTPError)\
        -> typing.Tuple[typing.Optional[str], typing.Optional[str], typing.Optional[str]]:
    """Return ``(status_code, reason, hostname)`` extracted from an httpx error.

    Each element is None when the corresponding piece of information is not
    available on the exception.
    """
    url = None
    status_code = None
    reason = None
    hostname = None
    if hasattr(exc, 'request') and exc.request is not None:
        url = exc.request.url
    # Fixed: was ``exc.respones`` — an AttributeError whenever the request
    # carried no URL but a response was attached.
    if url is None and hasattr(exc, 'response') and exc.response is not None:
        url = exc.response.url
    if url is not None:
        hostname = url.host
    if isinstance(exc, HTTPStatusError):
        status_code = str(exc.response.status_code)
        reason = exc.response.reason_phrase
    return (status_code, reason, hostname)
def get_messages(exc, filename) -> typing.Tuple:
    """Build the tuple of loggable parameters for a caught exception.

    The branch order is significant: JSONDecodeError is a ValueError
    subclass, so it has to be tested before the generic ValueError case.
    """
    if isinstance(exc, JSONDecodeError):
        messages = (exc.msg, )
    elif isinstance(exc, TypeError):
        messages = (str(exc), )
    elif isinstance(exc, ValueError) and 'lxml' in filename:
        messages = (str(exc), )
    elif isinstance(exc, HTTPError):
        messages = get_request_exception_messages(exc)
    elif isinstance(exc, SearxXPathSyntaxException):
        messages = (exc.xpath_str, exc.message)
    elif isinstance(exc, SearxEngineXPathException):
        messages = (exc.xpath_str, exc.message)
    elif isinstance(exc, SearxEngineAPIException):
        messages = (str(exc.args[0]), )
    elif isinstance(exc, SearxEngineAccessDeniedException):
        messages = (exc.message, )
    else:
        messages = ()
    return messages
def get_exception_classname(exc: Exception) -> str:
    """Return the qualified class name of *exc*.

    Builtin exceptions are returned bare (``ValueError``); everything else
    is prefixed with its defining module (``pkg.mod.MyError``).
    """
    klass = type(exc)
    name = klass.__qualname__
    module = klass.__module__
    if module is None or module == str.__class__.__module__:
        return name
    return '{}.{}'.format(module, name)
def get_error_context(framerecords, exception_classname, log_message, log_parameters, secondary) -> ErrorContext:
    """Build an ErrorContext from the most relevant frame in *framerecords*."""
    frame = get_trace(framerecords)
    filename = frame.filename
    if filename.startswith(searx_parent_dir):
        # Strip the checkout prefix plus the following path separator so
        # contexts compare equal across installations.
        filename = filename[len(searx_parent_dir) + 1:]
    function = frame.function
    line_no = frame.lineno
    code = frame.code_context[0].strip()
    # Frame records participate in reference cycles; drop our local alias.
    del framerecords
    return ErrorContext(filename, function, line_no, code,
                        exception_classname, log_message, log_parameters, secondary)
def count_exception(engine_name: str, exc: Exception, secondary: bool = False) -> None:
    """Record a caught exception for *engine_name* in the error counters."""
    framerecords = inspect.trace()
    try:
        classname = get_exception_classname(exc)
        parameters = get_messages(exc, framerecords[-1][1])
        context = get_error_context(framerecords, classname, None, parameters, secondary)
        add_error_context(engine_name, context)
    finally:
        # inspect.trace() frames form reference cycles; release them promptly.
        del framerecords
def count_error(engine_name: str, log_message: str, log_parameters: typing.Optional[typing.Tuple] = None,
                secondary: bool = False) -> None:
    """Record a non-exception error reported by an engine."""
    # Use the caller's stack (outermost first, current frame dropped) so
    # get_trace() can locate the engine frame.
    framerecords = list(reversed(inspect.stack()[1:]))
    try:
        context = get_error_context(framerecords, None, log_message, log_parameters or (), secondary)
        add_error_context(engine_name, context)
    finally:
        # Stack frames form reference cycles; release them promptly.
        del framerecords
| agpl-3.0 |
devaha/loblolly | wp-content/plugins/wp-statistics/vendor/guzzle/guzzle/docs/conf.py | 469 | 3047 | import sys, os
# Sphinx configuration for the Guzzle documentation build.
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer

# Register PHP lexers that highlight snippets without a leading ``<?php``
# tag; line numbers are enabled for both.
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
# Unprefixed reST roles/directives resolve against the PHP domain.
primary_domain = 'php'

# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']

# -- Options for HTML output ---------------------------------------------------

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['localtoc.html', 'leftbar.html', 'searchbox.html']
}

# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'

# -- Guzzle Sphinx theme setup ------------------------------------------------

# NOTE(review): machine-specific path — this only resolves on the original
# author's machine; confirm how the theme is expected to be installed before
# relying on local builds.
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')

import guzzle_sphinx_theme

html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'

# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
    "index_template": "index.html",
    "project_nav_name": "Guzzle",
    "github_user": "guzzle",
    "github_repo": "guzzle",
    "disqus_comments_shortname": "guzzle",
    "google_analytics_account": "UA-22752917-1"
}

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Guzzle.tex', u'Guzzle Documentation',
     u'Michael Dowling', 'manual'),
]

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'guzzle', u'Guzzle Documentation',
     [u'Michael Dowling'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Guzzle', u'Guzzle Documentation',
     u'Michael Dowling', 'Guzzle', 'One line description of project.',
     'Miscellaneous'),
]
abzaloid/maps | django-project/lib/python2.7/site-packages/django/contrib/admin/filters.py | 95 | 17367 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_limit_choices_to_from_path, get_model_from_relation,
prepare_lookup_value, reverse_field_path,
)
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.utils import timezone
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
class ListFilter(object):
    """Abstract base class for admin changelist filters.

    Subclasses must set ``title`` and implement ``has_output``,
    ``choices``, ``queryset`` and ``expected_parameters``.
    """

    # Human-readable title to appear in the right sidebar.
    title = None
    template = 'admin/filter.html'

    def __init__(self, request, params, model, model_admin):
        # Collects the request's query-string parameters actually consumed
        # by this filter.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)

    def expected_parameters(self):
        """Return the query-string parameter names this filter consumes."""
        raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')

    def has_output(self):
        """Return True if some choices would be output for this filter."""
        raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')

    def choices(self, cl):
        """Return choices ready to be output in the template."""
        raise NotImplementedError('subclasses of ListFilter must provide a choices() method')

    def queryset(self, request, queryset):
        """Return the filtered queryset."""
        raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
class SimpleListFilter(ListFilter):
    """List filter driven by a fixed list of lookups.

    Subclasses set ``parameter_name`` and implement ``lookups()`` to
    return ``(value, verbose value)`` pairs.
    """

    # The parameter that should be used in the query string for that filter.
    parameter_name = None

    def __init__(self, request, params, model, model_admin):
        super(SimpleListFilter, self).__init__(
            request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'parameter_name'." % self.__class__.__name__)
        if self.parameter_name in params:
            # Pop (consume) our parameter so the changelist does not try to
            # interpret it as a model-field lookup.
            value = params.pop(self.parameter_name)
            self.used_parameters[self.parameter_name] = value
        lookup_choices = self.lookups(request, model_admin)
        if lookup_choices is None:
            lookup_choices = ()
        self.lookup_choices = list(lookup_choices)

    def has_output(self):
        return len(self.lookup_choices) > 0

    def value(self):
        """
        Returns the value (in string format) provided in the request's
        query string for this filter, if any. If the value wasn't provided then
        returns None.
        """
        return self.used_parameters.get(self.parameter_name, None)

    def lookups(self, request, model_admin):
        """
        Must be overridden to return a list of tuples (value, verbose value)
        """
        raise NotImplementedError(
            'The SimpleListFilter.lookups() method must be overridden to '
            'return a list of tuples (value, verbose value)')

    def expected_parameters(self):
        return [self.parameter_name]

    def choices(self, cl):
        # First choice is always "All" (i.e. the filter removed).
        yield {
            'selected': self.value() is None,
            'query_string': cl.get_query_string({}, [self.parameter_name]),
            'display': _('All'),
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_text(lookup),
                'query_string': cl.get_query_string({
                    self.parameter_name: lookup,
                }, []),
                'display': title,
            }
class FieldListFilter(ListFilter):
    """Base class for filters bound to a model field.

    Maintains a class-level registry of ``(test, filter_class)`` pairs;
    ``create()`` picks the first registered class whose test accepts the
    field.
    """

    # Registry of (test, list_filter_class) pairs, consulted in order.
    _field_list_filters = []
    # Index where the next take_priority registration is inserted, so that
    # prioritized filters keep their relative registration order.
    _take_priority_index = 0

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        self.title = getattr(field, 'verbose_name', field_path)
        super(FieldListFilter, self).__init__(
            request, params, model, model_admin)
        # Consume our own query-string parameters from params.
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)

    def has_output(self):
        return True

    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except ValidationError as e:
            # Surface bad query-string values as an admin-level lookup error.
            raise IncorrectLookupParameters(e)

    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            cls._field_list_filters.insert(
                cls._take_priority_index, (test, list_filter_class))
            cls._take_priority_index += 1
        else:
            cls._field_list_filters.append((test, list_filter_class))

    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # Return an instance of the first registered filter whose test
        # matches the field; returns None if no test matches.
        for test, list_filter_class in cls._field_list_filters:
            if not test(field):
                continue
            return list_filter_class(field, request, params,
                                     model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
    """Filter on a relation (ForeignKey/ManyToMany or reverse relation),
    offering one choice per related object plus an optional "empty" choice.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        if hasattr(field, 'rel'):
            # Forward relation: filter on the related field's attname.
            rel_name = field.rel.get_related_field().name
        else:
            # Reverse relation: fall back to the related model's pk.
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        # Choices must be computed before super().__init__ consumes params.
        self.lookup_choices = self.field_choices(field, request, model_admin)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title

    def has_output(self):
        # A nullable relation contributes one extra ("empty") choice.
        if (isinstance(self.field, ForeignObjectRel) and
                self.field.field.null or hasattr(self.field, 'rel') and
                self.field.null):
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def field_choices(self, field, request, model_admin):
        return field.get_choices(include_blank=False)

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {
            'selected': self.lookup_val is None and not self.lookup_val_isnull,
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_val == smart_text(pk_val),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: pk_val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        # Offer an "empty" choice for nullable or many-to-many relations.
        if (isinstance(self.field, ForeignObjectRel) and
                (self.field.field.null or isinstance(self.field.field, ManyToManyField)) or
                hasattr(self.field, 'rel') and (self.field.null or isinstance(self.field, ManyToManyField))):
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }


# Matches forward relations (fields with a 'rel') and reverse relations.
FieldListFilter.register(lambda f: (
    bool(f.rel) if hasattr(f, 'rel') else
    isinstance(f, ForeignObjectRel)), RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
    """Filter for BooleanField/NullBooleanField: All / Yes / No, plus
    "Unknown" (isnull) for NullBooleanField.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg2 = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(BooleanFieldListFilter, self).__init__(field,
            request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]

    def choices(self, cl):
        for lookup, title in (
                (None, _('All')),
                ('1', _('Yes')),
                ('0', _('No'))):
            yield {
                'selected': self.lookup_val == lookup and not self.lookup_val2,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: lookup,
                }, [self.lookup_kwarg2]),
                'display': title,
            }
        # Only NullBooleanField can actually hold NULL, so only then offer
        # the "Unknown" choice.
        if isinstance(self.field, models.NullBooleanField):
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': cl.get_query_string({
                    self.lookup_kwarg2: 'True',
                }, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }


FieldListFilter.register(lambda f: isinstance(f,
    (models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
    """Filter for fields declared with ``choices``: one option per choice."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        super(ChoicesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg]

    def choices(self, cl):
        yield {
            'selected': self.lookup_val is None,
            'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
            'display': _('All')
        }
        # flatchoices flattens optgroup-style nested choices.
        for lookup, title in self.field.flatchoices:
            yield {
                'selected': smart_text(lookup) == self.lookup_val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: lookup}),
                'display': title,
            }


FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    """Filter for DateField/DateTimeField offering fixed ranges
    (Any date / Today / Past 7 days / This month / This year) expressed as
    ``__gte``/``__lt`` query-string pairs.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = '%s__' % field_path
        # All params targeting this field (any lookup), used to detect
        # which of our predefined links is currently selected.
        self.date_params = {k: v for k, v in params.items()
                            if k.startswith(self.field_generic)}
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if timezone.is_aware(now):
            now = timezone.localtime(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:  # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        # December wraps into January of the next year.
        if today.month == 12:
            next_month = today.replace(year=today.year + 1, month=1, day=1)
        else:
            next_month = today.replace(month=today.month + 1, day=1)
        next_year = today.replace(year=today.year + 1, month=1, day=1)
        self.lookup_kwarg_since = '%s__gte' % field_path
        self.lookup_kwarg_until = '%s__lt' % field_path
        # Ranges are half-open: [since, until).
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {
                self.lookup_kwarg_since: str(today),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_kwarg_since: str(today.replace(day=1)),
                self.lookup_kwarg_until: str(next_month),
            }),
            (_('This year'), {
                self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                self.lookup_kwarg_until: str(next_year),
            }),
        )
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg_since, self.lookup_kwarg_until]

    def choices(self, cl):
        for title, param_dict in self.links:
            yield {
                'selected': self.date_params == param_dict,
                'query_string': cl.get_query_string(
                    param_dict, [self.field_generic]),
                'display': title,
            }


FieldListFilter.register(
    lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    """Fallback filter: offers one choice per distinct value present in
    the database for the field.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
                                                 None)
        parent_model, reverse_path = reverse_field_path(model, field_path)
        # Obey parent ModelAdmin queryset when deciding which options to show
        if model == parent_model:
            queryset = model_admin.get_queryset(request)
        else:
            queryset = parent_model._default_manager.all()
        # optional feature: limit choices base on existing relationships
        # queryset = queryset.complex_filter(
        #    {'%s__isnull' % reverse_path: False})
        limit_choices_to = get_limit_choices_to_from_path(model, field_path)
        queryset = queryset.filter(limit_choices_to)
        # Distinct values only; evaluated lazily by the template iteration.
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {
            'selected': (self.lookup_val is None
                         and self.lookup_val_isnull is None),
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            # NULL values are collected into a single trailing "empty" choice.
            if val is None:
                include_none = True
                continue
            val = smart_text(val)
            yield {
                'selected': self.lookup_val == val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }


FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
    """Variant of RelatedFieldListFilter that only lists related objects
    actually referenced by the admin's queryset.
    """

    def field_choices(self, field, request, model_admin):
        # Restrict the choices to primary keys that occur in the changelist.
        limit_choices_to = {'pk__in': set(model_admin.get_queryset(request).values_list(field.name, flat=True))}
        return field.get_choices(include_blank=False, limit_choices_to=limit_choices_to)
| mit |
eBay/restcommander | play-1.2.4/python/Lib/xml/dom/domreg.py | 85 | 3484 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their
registered = {}
def registerDOMImplementation(name, factory):
    """Register a DOM implementation factory under ``name``.

    ``factory`` is a zero-argument callable returning an object that
    implements the DOMImplementation interface.  It may return the same
    object every time, or a new one per call (e.g. if that implementation
    supports some customization).
    """
    registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
    """getDOMImplementation(name = None, features = ()) -> DOM implementation.

    Return a suitable DOM implementation. The name is either
    well-known, the module name of a DOM implementation, or None. If
    it is not None, imports the corresponding module and returns
    DOMImplementation object if the import succeeds.

    If name is not given, consider the available implementations to
    find one with the required feature set. If no implementation can
    be found, raise an ImportError. The features list must be a sequence
    of (feature, version) pairs which are passed to hasFeature.
    """
    import os
    creator = None
    mod = well_known_implementations.get(name)
    if mod:
        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
        return mod.getDOMImplementation()
    elif name:
        return registered[name]()
    # Fixed: os.environ.has_key() is Python-2-only; the ``in`` test works
    # on both Python 2 and 3.
    elif "PYTHON_DOM" in os.environ:
        return getDOMImplementation(name=os.environ["PYTHON_DOM"])

    # User did not specify a name, try implementations in arbitrary
    # order, returning the one that has the required features
    if isinstance(features, StringTypes):
        features = _parse_feature_string(features)
    for creator in registered.values():
        dom = creator()
        if _good_enough(dom, features):
            return dom

    for creator in well_known_implementations.keys():
        try:
            dom = getDOMImplementation(name=creator)
        # Fixed: StandardError is Python-2-only; Exception covers the
        # typical failures here (ImportError, AttributeError).
        except Exception:
            continue
        if _good_enough(dom, features):
            return dom

    # Fixed: "raise ImportError, msg" is Python-2-only syntax.
    raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError, "bad feature name: %r" % (feature,)
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
| apache-2.0 |
jlowdermilk/kubernetes | translations/extract.py | 377 | 3965 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
""" Simple holder for a regular expression and a function
to run if that regular expression matches a line.
The function should expect (re.match, file, linenumber) as parameters
"""
def __init__(self, regex, replace_fn):
self.regex = re.compile(regex)
self.replace_fn = replace_fn
def short_replace(match, file, line_number):
"""Replace a Short: ... cobra command description with an internationalization
"""
sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
def import_replace(match, file, line_number):
"""Add an extra import for the i18n library.
Doesn't try to be smart and detect if it's already present, assumes a
gofmt round wil fix things.
"""
sys.stdout.write('{}\n"k8s.io/kubernetes/pkg/util/i18n"\n'.format(match.group(1)))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubernetes/pkg/kubectl/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
"""Replace a cmd.Flags().String("...", "", "...") with an internationalization
"""
sys.stdout.write('{}i18n.T("{})"))\n'.format(match.group(1), match.group(2)))
STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def long_string_replace(match, file, line_number):
return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
LONG_DESC_MATCH = MatchHandler('(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
EXAMPLE_MATCH = MatchHandler('(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
def replace(filename, matchers, multiline_matchers):
    """Given a file and a set of matchers, run those matchers
    across the file and replace it with the results.

    ``matchers`` are applied line by line (the file is rewritten in place
    via fileinput); ``multiline_matchers`` are then applied repeatedly to
    the whole file contents until none of them match.
    """
    # Pass 1: line-oriented matchers. fileinput(inplace=True) redirects
    # stdout into the file, so whatever replace_fn writes (or the unmatched
    # original line) becomes the new file content.
    line_number = 0
    for line in fileinput.input(filename, inplace=True):
        line_number += 1
        matched = False
        for matcher in matchers:
            match = matcher.regex.match(line)
            if match:
                matcher.replace_fn(match, filename, line_number)
                matched = True
                break
        if not matched:
            sys.stdout.write(line)
    sys.stdout.flush()
    # Pass 2: whole-file (multiline) matchers, substituted one occurrence
    # at a time so each replacement is re-scanned.
    with open(filename, 'r') as datafile:
        content = datafile.read()
    for matcher in multiline_matchers:
        match = matcher.regex.search(content)
        while match:
            rep = matcher.replace_fn(match, filename, 0)
            # Escape back references in the replacement string
            # (And escape for Python)
            # (And escape for regex)
            rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep)
            content = matcher.regex.sub(rep, content, 1)
            match = matcher.regex.search(content)
    sys.stdout.write(content)
    # Also write the multiline replacements back to the file: previously
    # they were only echoed to stdout and lost on disk, contradicting the
    # "replace it with the results" contract of this function.
    with open(filename, 'w') as datafile:
        datafile.write(content)
    # gofmt the file again
    from subprocess import call
    try:
        call(["goimports", "-w", filename])
    except OSError:
        # goimports isn't installed; leave the file unformatted.
        pass
# CLI entry point: rewrite the Go file named on the command line in place.
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH])
| apache-2.0 |
dmuhlhei/AliPhysics | PWGJE/EMCALJetTasks/__init__.py | 123 | 1064 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
| bsd-3-clause |
peterayeni/libforensics | code/lf/dtypes/composite.py | 13 | 7216 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Data types composed of primitive data types."""
# stdlib imports
from operator import itemgetter
from collections import OrderedDict
from ctypes import (
LittleEndianStructure, BigEndianStructure, c_uint8
)
# local imports
from lf.dtypes.base import Primitive
from lf.dtypes.consts import BIG_ENDIAN, LITTLE_ENDIAN
from lf.dtypes.bits import BitType
__docformat__ = "restructuredtext en"
__all__ = [
"Composite", "Record", "LERecord", "BERecord"
]
class MetaRecord(type):
    """Metaclass to build _fields_ attribute of Record data types.

    __prepare__ keeps field definition order, __new__ collects inherited and
    newly declared fields into _fields_, and __init__ derives the backing
    ctypes structure (_ctype_) and the total size in bytes (_size_).
    """
    @classmethod
    def __prepare__(metacls, name, bases):
        """Makes a Composite's dict an OrderedDict (preserves field order)"""
        return OrderedDict()
    # end class __prepare__
    def __new__(cls, name, bases, clsdict):
        """Builds the _fields_ attribute"""
        new_cls = type.__new__(cls, name, bases, clsdict)
        new_clsdict = new_cls.__dict__
        fields = new_clsdict.get("_fields_")
        mro = new_cls.__mro__
        if fields is None:
            fields = list()
            klsdicts = [klass.__dict__ for klass in mro]
            # Find the most recent _fields_, since it should contain every
            # _fields_ before it.
            for klsdict in klsdicts:
                _fields_ = klsdict.get("_fields_")
                if _fields_:
                    break
                # end if
            else:
                _fields_ = list()
            # end for
            fields.extend(_fields_)
            # Non-underscore class attributes are (by convention) the
            # newly declared fields, in definition order.
            for (key, value) in clsdict.items():
                if not key.startswith("_"):
                    fields.append((key, value))
                # end if
            # end for
            new_cls._fields_ = fields
        # end if
        return new_cls
    # end def __new__
    def __init__(cls, name, bases, clsdict):
        """Makes the _ctype_ and _size_ attributes"""
        # This we *have* to have
        fields = cls._fields_
        byte_order = cls._byte_order_
        ctype = clsdict.get("_ctype_")
        size = clsdict.get("_size_")
        if ctype is None:
            # Materialize as a list: map() returns a one-shot iterator in
            # Python 3, so the duplicate-name membership tests below would
            # silently stop working after the first check exhausted it.
            my_field_names = list(map(itemgetter(0), fields))
            ctypes_fields = list()
            for (field_name, field) in fields:
                if isinstance(field, list):
                    # A list means an array of <count> copies of the element.
                    ctype = field[0]._ctype_ * len(field)
                    ctypes_fields.append((field_name, ctype))
                elif hasattr(field, "_int_type_"):
                    # Bit fields: flatten their sub-fields into this struct.
                    for (bname, bfield) in field._fields_:
                        if bname in my_field_names:
                            raise ValueError(
                                "Duplicate field name {0}".format(bname)
                            )
                        # end if
                        ctypes_fields.append(
                            (bname, field._int_type_, bfield._size_)
                        )
                    # end for
                else:
                    ctypes_fields.append((field_name, field._ctype_))
                # end if
            # end for
            anonymous = clsdict.get("_anonymous_")
            if anonymous is None:
                anonymous = []
            # end if
            pack = clsdict.get("_pack_")
            if pack is None:
                pack = 1
            # end if
            ctype_name = clsdict.get("_ctype_name_")
            if ctype_name is None:
                ctype_name = "".join(["__ctype_", name])
            # end if
            ctypes_dict = {
                "_fields_": ctypes_fields,
                "_pack_": pack,
                "_anonymous_": anonymous
            }
            if byte_order == LITTLE_ENDIAN:
                ctypes_bases = (LittleEndianStructure,)
            else:
                ctypes_bases = (BigEndianStructure,)
            # end if
            cls._ctype_ = type(ctype_name, ctypes_bases, ctypes_dict)
        # end if
        if size is None:
            size = 0
            for (field_name, field) in fields:
                if isinstance(field, list):
                    size += field[0]._size_ * len(field)
                else:
                    size += field._size_
                # end if
            # end for
            cls._size_ = size
        # end if
    # end def __init__
# end class MetaRecord
class Composite(Primitive):
    """Base class for data types that can be composed of data types.
    Since this is a :class:`Primitive` class, subclasses can be used to both
    compose data types, as well as be composed of other data types.
    Fields are implemented as class attributes. For instance:
    >>> from lf.dtypes import LERecord, int8, uint8
    >>> class SomeStruct(LERecord):
    ...     field1 = int8
    ...     field2 = uint8
    ...
    >>>
    Will create a class called SomeStruct, with two fields called field1 and
    field2.
    Composite objects can also inherit from each other, adding the new fields
    to the old ones. Continuing the previous example:
    >>> class AnotherStruct(SomeStruct):
    ...     field3 = uint8
    ...
    >>>
    Will create a class called AnotherStruct, with three fields called field1,
    field2, and field 3.
    .. attribute:: _fields_
        A list of (field name, ctype object) tuples.  If this is None, it is
        created automatically by the metaclass.
    .. attribute:: _byte_order_
        The byte ordering to use (:const:`LITTLE_ENDIAN` or
        :const:`BIG_ENDIAN`)
    .. attribute:: _pack_
        The _pack_ attribute used when creating the :attr:`_ctype_` attribute.
        The default is 1.
    .. attribute:: _anonymous_
        The value of the _anonymous_ attribute used when creating the
        :attr:`_ctype_` attribute.
    .. attribute:: _ctype_name_
        The name to use for the :attr:`_ctype_` attribute.  If this is not
        specified, a name is autogenerated by a metaclass, based on the class
        name.
    """
    # Populated by MetaRecord.__new__ for Record subclasses.
    _fields_ = None
    # Passed through to the generated ctypes Structure.
    _pack_ = 1
    # Default ordering; BERecord overrides this with BIG_ENDIAN.
    _byte_order_ = LITTLE_ENDIAN
# end class Composite
class Record(Composite, metaclass=MetaRecord):
    """Base class for creating record data types.
    The MetaRecord metaclass collects non-underscore class attributes (in
    definition order) into _fields_ and builds the backing ctypes structure.
    """
    pass
# end class Record
class LERecord(Record):
    """Class for creating little endian record data types."""
    # Explicit for clarity; LITTLE_ENDIAN is already the Composite default.
    _byte_order_ = LITTLE_ENDIAN
# end class LERecord
class BERecord(Record):
    """Class for creating big endian record data types."""
    # Overrides the LITTLE_ENDIAN default inherited from Composite.
    _byte_order_ = BIG_ENDIAN
# end class BERecord
| lgpl-3.0 |
linjoahow/W16_test1 | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/base.py | 603 | 4652 | #!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
# Quit callbacks registered via register_quit(); executed LIFO at exit.
_quitfunctions = []
class error(RuntimeError):
    """Base exception type raised by pygame routines."""
    pass
def init():
    '''Autoinitialize all imported pygame modules.
    Initialize all imported pygame modules. Includes pygame modules
    that are not part of the base modules (like font and image).
    It does not raise exceptions, but instead silently counts which
    modules have failed to init. The return argument contains a count
    of the number of modules initialized, and the number of modules
    that failed to initialize.
    You can always initialize the modules you want by hand. The
    modules that need it have an `init` and `quit` routine built in,
    which you can call directly. They also have a `get_init` routine
    which you can use to doublecheck the initialization. Note that
    the manual `init` routines will raise an exception on error. Be
    aware that most platforms require the display module to be
    initialized before others. This `init` will handle that for you,
    but if you initialize by hand, be aware of this constraint.
    As with the manual `init` routines. It is safe to call this
    `init` as often as you like.
    :rtype: int, int
    :return: (count_passed, count_failed)
    '''
    success = 0
    fail = 0
    #SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
    if _video_autoinit():
        success += 1
    else:
        fail += 1
    # Snapshot the module list: a module's __PYGAMEinit__ may import further
    # modules, and mutating sys.modules while iterating it raises
    # RuntimeError ("dictionary changed size") on Python 3.
    for mod in list(sys.modules.values()):
        if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
            try:
                mod.__PYGAMEinit__()
                success += 1
            except Exception:
                # Per the docstring, failures are counted, never raised.
                fail += 1
    return success, fail
def register_quit(func):
    '''Routine to call when pygame quits.
    The given callback routine will be called when pygame is
    quitting. Quit callbacks are served on a 'last in, first out'
    basis.
    :param func: zero-argument callable to run at quit time.
    '''
    _quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
    # SDL is unavailable in this port (see the commented-out "import SDL"
    # at the top of the file), so video init is a stub that reports success.
    return 1
    #if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
    #    SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
    #    SDL.SDL_EnableUNICODE(1)
    #return 1
def _atexit_quit():
    """Run registered quit callbacks (most recent first), then shut down video."""
    while _quitfunctions:
        _quitfunctions.pop()()
    _video_autoquit()
    #SDL.SDL_Quit()
def get_sdl_version():
    '''Get the version of the linked SDL runtime.
    The SDL bindings are stubbed out in this port, so no version
    information is available.
    :rtype: int, int, int
    :return: major, minor, patch
    '''
    #v = SDL.SDL_Linked_Version()
    #return v.major, v.minor, v.patch
    return (None, None, None)
def quit():
    '''Uninitialize all pygame modules.
    Uninitialize all pygame modules that have been initialized. Even
    if you initialized the module by hand, this `quit` will
    uninitialize it for you.
    All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If you
    program plans to keep running after it is done with pygame, then
    would be a good time to make this call.
    '''
    # Reuse the atexit handler so a manual quit and interpreter exit
    # follow exactly the same code path.
    _atexit_quit()
def get_error():
    '''Get current error message.
    SDL maintains an internal current error message, normally surfaced
    through SDL-related exceptions.  The SDL bindings are stubbed out in
    this port, so the message is always empty.
    :rtype: str
    '''
    #return SDL.SDL_GetError()
    return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
# Ensure quit callbacks and video shutdown run at interpreter exit.
atexit.register(_atexit_quit)
| gpl-3.0 |
projectatomic/atomic-reactor | tests/utils/test_koji.py | 1 | 14193 | """
Copyright (c) 2016, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, print_function, unicode_literals
import koji
import atomic_reactor.utils.koji as koji_util
from osbs.repo_utils import ModuleSpec
from atomic_reactor.utils.koji import (koji_login, create_koji_session,
TaskWatcher, tag_koji_build,
get_koji_module_build, KojiUploadLogger)
from atomic_reactor.plugin import BuildCanceledException
from atomic_reactor.constants import (KOJI_MAX_RETRIES, KOJI_RETRY_INTERVAL,
KOJI_OFFLINE_RETRY_INTERVAL)
import flexmock
import pytest
# Expected koji ClientSession retry options shared by the session tests below.
KOJI_RETRY_OPTS = {'anon_retry': True, 'max_retries': KOJI_MAX_RETRIES,
                   'retry_interval': KOJI_RETRY_INTERVAL, 'offline_retry': True,
                   'offline_retry_interval': KOJI_OFFLINE_RETRY_INTERVAL}
class TestKojiLogin(object):
    """Tests for koji_login(): Kerberos keyring, Kerberos keytab and SSL paths."""
    @pytest.mark.parametrize('proxyuser', [None, 'proxy'])
    def test_koji_login_krb_keyring(self, proxyuser):
        # No principal/keytab given -> krb_login() must be called with no
        # credentials (keyring-based login), plus optional proxyuser.
        session = flexmock()
        expectation = session.should_receive('krb_login').once().and_return(True)
        kwargs = {}
        if proxyuser is not None:
            expectation.with_args(proxyuser=proxyuser)
            kwargs['proxyuser'] = proxyuser
        else:
            expectation.with_args()
        koji_login(session, **kwargs)
    @pytest.mark.parametrize('proxyuser', [None, 'proxy'])
    def test_koji_login_krb_keytab(self, proxyuser):
        # krb_principal/krb_keytab arguments must be forwarded to
        # krb_login() as principal/keytab.
        session = flexmock()
        expectation = session.should_receive('krb_login').once().and_return(True)
        principal = 'user'
        keytab = '/keytab'
        call_kwargs = {
            'krb_principal': principal,
            'krb_keytab': keytab,
        }
        exp_kwargs = {
            'principal': principal,
            'keytab': keytab,
        }
        if proxyuser is not None:
            call_kwargs['proxyuser'] = proxyuser
            exp_kwargs['proxyuser'] = proxyuser
        expectation.with_args(**exp_kwargs)
        koji_login(session, **call_kwargs)
    @pytest.mark.parametrize('proxyuser', [None, 'proxy'])
    @pytest.mark.parametrize('serverca', [True, False])
    def test_koji_login_ssl(self, tmpdir, proxyuser, serverca):
        # ssl_certs_dir triggers ssl_login(); 'serverca' is only passed
        # through when the file actually exists in the certs dir.
        session = flexmock()
        expectation = session.should_receive('ssl_login').once().and_return(True)
        call_kwargs = {
            'ssl_certs_dir': str(tmpdir),
        }
        exp_kwargs = {
            'cert': str(tmpdir.join('cert')),
            'ca': None,
        }
        if serverca:
            serverca = tmpdir.join('serverca')
            serverca.write('spam')
            exp_kwargs['serverca'] = str(serverca)
        if proxyuser:
            call_kwargs['proxyuser'] = proxyuser
            exp_kwargs['proxyuser'] = proxyuser
        expectation.with_args(**exp_kwargs)
        koji_login(session, **call_kwargs)
class TestCreateKojiSession(object):
    """Tests for create_koji_session(): anonymous, authenticated and failing logins."""
    def test_create_simple_session(self):
        url = 'https://example.com'
        session = flexmock()
        opts = {'krb_rdns': False, 'use_fast_upload': True}
        opts.update(KOJI_RETRY_OPTS)
        (flexmock(koji_util.koji).should_receive('ClientSession').with_args(
            url, opts=opts).and_return(session))
        assert create_koji_session(url) == session
    # NOTE: the previous decorator was parametrize(('ssl_session'),
    # [(True, False)]), which bound the single parameter to the (truthy)
    # tuple itself, so the Kerberos branch was never exercised.
    @pytest.mark.parametrize('ssl_session', [True, False])
    def test_create_authenticated_session(self, tmpdir, ssl_session):
        url = 'https://example.com'
        args = {}
        session = flexmock()
        if ssl_session:
            args['ssl_certs_dir'] = str(tmpdir)
            session.should_receive('ssl_login').once().and_return(True)
        else:
            session.should_receive('krb_login').once().and_return(True)
        opts = {'krb_rdns': False, 'use_fast_upload': True}
        opts.update(KOJI_RETRY_OPTS)
        (flexmock(koji_util.koji).should_receive('ClientSession').with_args(
            url, opts=opts).and_return(session))
        assert create_koji_session(url, args) == session
    @pytest.mark.parametrize('ssl_session', [True, False])
    def test_fail_authenticated_session(self, tmpdir, ssl_session):
        # A login method returning False must make create_koji_session raise.
        url = 'https://example.com'
        args = {}
        session = flexmock()
        if ssl_session:
            args['ssl_certs_dir'] = str(tmpdir)
            session.should_receive('ssl_login').once().and_return(False)
        else:
            session.should_receive('krb_login').once().and_return(False)
        opts = {'krb_rdns': False, 'use_fast_upload': True}
        opts.update(KOJI_RETRY_OPTS)
        (flexmock(koji_util.koji).should_receive('ClientSession').with_args(
            url, opts=opts).and_return(session))
        with pytest.raises(RuntimeError):
            create_koji_session(url, args)
class TestStreamTaskOutput(object):
    """Tests for koji_util.stream_task_output."""
    def test_output_as_generator(self):
        contents = 'this is the simulated file contents'
        session = flexmock()
        expectation = session.should_receive('downloadTaskOutput')
        # Serve the payload one character per call to exercise chunking.
        for chunk in contents:
            expectation = expectation.and_return(chunk)
        # Empty content to simulate end of stream.
        expectation.and_return('')
        streamer = koji_util.stream_task_output(session, 123, 'file.ext')
        assert ''.join(list(streamer)) == contents
class TestTaskWatcher(object):
    """Tests for TaskWatcher.wait()/failed() polling behavior."""
    @pytest.mark.parametrize(('finished', 'info', 'exp_state', 'exp_failed'), [
        ([False, False, True],
         {'state': koji.TASK_STATES['CANCELED']},
         'CANCELED', True),
        ([False, True],
         {'state': koji.TASK_STATES['FAILED']},
         'FAILED', True),
        ([True],
         {'state': koji.TASK_STATES['CLOSED']},
         'CLOSED', False),
    ])
    def test_wait(self, finished, info, exp_state, exp_failed):
        # 'finished' simulates successive taskFinished() poll responses;
        # getTaskInfo is expected exactly once, after the task finishes.
        session = flexmock()
        task_id = 1234
        task_finished = (session.should_receive('taskFinished')
                         .with_args(task_id))
        for finished_value in finished:
            task_finished = task_finished.and_return(finished_value)
        (session.should_receive('getTaskInfo')
            .with_args(task_id, request=True)
            .once()
            .and_return(info))
        task = TaskWatcher(session, task_id, poll_interval=0)
        assert task.wait() == exp_state
        assert task.failed() == exp_failed
    def test_cancel(self):
        # A cancellation raised mid-poll must propagate and mark failure.
        session = flexmock()
        task_id = 1234
        (session
            .should_receive('taskFinished')
            .with_args(task_id)
            .and_raise(BuildCanceledException))
        task = TaskWatcher(session, task_id, poll_interval=0)
        with pytest.raises(BuildCanceledException):
            task.wait()
        assert task.failed()
class TestTagKojiBuild(object):
    """Tests for tag_koji_build(): success returns the tag, failure raises."""
    @pytest.mark.parametrize(('task_state', 'failure'), (
        ('CLOSED', False),
        ('CANCELED', True),
        ('FAILED', True),
    ))
    def test_tagging(self, task_state, failure):
        session = flexmock()
        task_id = 9876
        build_id = 1234
        target_name = 'target'
        tag_name = 'images-candidate'
        target_info = {'dest_tag_name': tag_name}
        task_info = {'state': koji.TASK_STATES[task_state]}
        # The destination tag is resolved from the build target, the build
        # is tagged, and the resulting tag task is watched to completion.
        (session
            .should_receive('getBuildTarget')
            .with_args(target_name)
            .and_return(target_info))
        (session
            .should_receive('tagBuild')
            .with_args(tag_name, build_id)
            .and_return(task_id))
        (session
            .should_receive('taskFinished')
            .with_args(task_id)
            .and_return(True))
        (session
            .should_receive('getTaskInfo')
            .with_args(task_id, request=True)
            .and_return(task_info))
        if failure:
            with pytest.raises(RuntimeError):
                tag_koji_build(session, build_id, target_name)
        else:
            build_tag = tag_koji_build(session, build_id, target_name)
            assert build_tag == tag_name
class TestGetKojiModuleBuild(object):
    """Tests for get_koji_module_build() with and without a module context."""
    def mock_get_rpms(self, session):
        # Helper: stub the archive/RPM queries made after the build lookup.
        (session
            .should_receive('listArchives')
            .with_args(buildID=1138198)
            .once()
            .and_return(
                [{'btype': 'module',
                  'build_id': 1138198,
                  'filename': 'modulemd.txt',
                  'id': 147879},
                 {'btype': 'module',
                  'build_id': 1138198,
                  'filename': 'modulemd.x86_64.txt',
                  'id': 147880}]))
        (session
            .should_receive('listRPMs')
            .with_args(imageID=147879)
            .once()
            .and_return([
                {'arch': 'src',
                 'epoch': None,
                 'id': 15197182,
                 'name': 'eog',
                 'release': '1.module_2123+73a9ef6f',
                 'version': '3.28.3'},
                {'arch': 'x86_64',
                 'epoch': None,
                 'id': 15197187,
                 'metadata_only': False,
                 'name': 'eog',
                 'release': '1.module_2123+73a9ef6f',
                 'version': '3.28.3'},
                {'arch': 'ppc64le',
                 'epoch': None,
                 'id': 15197188,
                 'metadata_only': False,
                 'name': 'eog',
                 'release': '1.module_2123+73a9ef6f',
                 'version': '3.28.3'},
            ]))
    def test_with_context(self):
        # A full N:S:V:C spec resolves to one NVR via getBuild().
        module = 'eog:my-stream:20180821163756:775baa8e'
        module_koji_nvr = 'eog-my_stream-20180821163756.775baa8e'
        koji_return = {
            'build_id': 1138198,
            'name': 'eog',
            'version': 'my_stream',
            'release': '20180821163756.775baa8e',
            'extra': {
                'typeinfo': {
                    'module': {
                        'modulemd_str': 'document: modulemd\nversion: 2'
                    }
                }
            }
        }
        spec = ModuleSpec.from_str(module)
        session = flexmock()
        (session
            .should_receive('getBuild')
            .with_args(module_koji_nvr)
            .and_return(koji_return))
        self.mock_get_rpms(session)
        get_koji_module_build(session, spec)
    # CLOUDBLD-876
    def test_with_context_without_build(self):
        # getBuild() returning None must surface as a "No build found" error.
        module = 'eog:my-stream:20180821163756:775baa8e'
        module_koji_nvr = 'eog-my_stream-20180821163756.775baa8e'
        koji_return = None
        spec = ModuleSpec.from_str(module)
        session = flexmock()
        (session
            .should_receive('getBuild')
            .with_args(module_koji_nvr)
            .and_return(koji_return))
        with pytest.raises(Exception) as e:
            get_koji_module_build(session, spec)
        assert 'No build found' in str(e.value)
    @pytest.mark.parametrize(('koji_return', 'should_raise'), [
        ([{
            'build_id': 1138198,
            'name': 'eog',
            'version': 'master',
            'release': '20180821163756.775baa8e',
            'extra': {
                'typeinfo': {
                    'module': {
                        'modulemd_str': 'document: modulemd\nversion: 2'
                    }
                }
            }
        }], None),
        ([], "No build found for"),
        ([{
            'build_id': 1138198,
            'name': 'eog',
            'version': 'master',
            'release': '20180821163756.775baa8e',
         },
         {
            'build_id': 1138199,
            'name': 'eog',
            'version': 'master',
            'release': '20180821163756.88888888',
         }],
         "Multiple builds found for"),
    ])
    def test_without_context(self, koji_return, should_raise):
        # Without a context the build is located via listBuilds(); exactly
        # one completed module build must match.
        module = 'eog:master:20180821163756'
        spec = ModuleSpec.from_str(module)
        session = flexmock()
        (session
            .should_receive('getPackageID')
            .with_args('eog')
            .and_return(303))
        (session
            .should_receive('listBuilds')
            .with_args(packageID=303,
                       type='module',
                       state=koji.BUILD_STATES['COMPLETE'])
            .and_return(koji_return))
        if should_raise:
            with pytest.raises(Exception) as e:
                get_koji_module_build(session, spec)
            assert should_raise in str(e.value)
        else:
            self.mock_get_rpms(session)
            get_koji_module_build(session, spec)
class TestKojiUploadLogger(object):
    """Tests for KojiUploadLogger progress-callback throttling."""
    @pytest.mark.parametrize('totalsize', [0, 1024])
    def test_with_zero(self, totalsize):
        # A zero offset always produces exactly one debug line.
        logger = flexmock()
        logger.should_receive('debug').once()
        upload_logger = KojiUploadLogger(logger)
        upload_logger.callback(0, totalsize, 0, 0, 0)
    @pytest.mark.parametrize(('totalsize', 'step', 'expected_times'), [
        (10, 1, 11),
        (12, 1, 7),
        (12, 3, 5),
    ])
    def test_with_defaults(self, totalsize, step, expected_times):
        logger = flexmock()
        logger.should_receive('debug').times(expected_times)
        upload_logger = KojiUploadLogger(logger)
        upload_logger.callback(0, totalsize, 0, 0, 0)
        for offset in range(step, totalsize + step, step):
            upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
    @pytest.mark.parametrize(('totalsize', 'step', 'notable', 'expected_times'), [
        (10, 1, 10, 11),
        (10, 1, 20, 6),
        (10, 1, 25, 5),
        (12, 3, 25, 5),
    ])
    def test_with_notable(self, totalsize, step, notable, expected_times):
        # notable_percent controls how often progress is considered
        # worth logging.
        logger = flexmock()
        logger.should_receive('debug').times(expected_times)
        upload_logger = KojiUploadLogger(logger, notable_percent=notable)
        for offset in range(0, totalsize + step, step):
            upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
| bsd-3-clause |
javierhuerta/unach-photo-server | docs/conf.py | 1 | 8224 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# One import per line (PEP 8).
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the package under documentation importable for autodoc and for the
# version lookup below.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import unach_photo_server
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'unach_photo_server'
copyright = u'2017, Javier Huerta'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = unach_photo_server.__version__
# The full version, including alpha/beta/rc tags.
release = unach_photo_server.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'unach-photo-serverdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'unach-photo-server.tex', u'unach_photo_server Documentation',
u'Javier Huerta', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'unach-photo-server', u'unach_photo_server Documentation',
[u'Javier Huerta'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'unach-photo-server', u'unach_photo_server Documentation',
u'Javier Huerta', 'unach-photo-server', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
popazerty/enigma2cuberevo | lib/python/Components/Converter/ServicePosition.py | 11 | 3466 | from Converter import Converter
from Poll import Poll
from enigma import iPlayableService
from Components.Element import cached, ElementError
class ServicePosition(Poll, Converter, object):
TYPE_LENGTH = 0
TYPE_POSITION = 1
TYPE_REMAINING = 2
TYPE_GAUGE = 3
def __init__(self, type):
Poll.__init__(self)
Converter.__init__(self, type)
args = type.split(',')
type = args.pop(0)
self.negate = 'Negate' in args
self.detailed = 'Detailed' in args
self.showHours = 'ShowHours' in args
self.showNoSeconds = 'ShowNoSeconds' in args
if type == "Length":
self.type = self.TYPE_LENGTH
elif type == "Position":
self.type = self.TYPE_POSITION
elif type == "Remaining":
self.type = self.TYPE_REMAINING
elif type == "Gauge":
self.type = self.TYPE_GAUGE
else:
raise ElementError("type must be {Length|Position|Remaining|Gauge} with optional arguments {Negate|Detailed|ShowHours|ShowNoSeconds} for ServicePosition converter")
if self.detailed:
self.poll_interval = 100
elif self.type == self.TYPE_LENGTH:
self.poll_interval = 2000
else:
self.poll_interval = 500
self.poll_enabled = True
def getSeek(self):
s = self.source.service
return s and s.seek()
@cached
def getPosition(self):
seek = self.getSeek()
if seek is None:
return None
pos = seek.getPlayPosition()
if pos[0]:
return 0
return pos[1]
@cached
def getLength(self):
seek = self.getSeek()
if seek is None:
return None
length = seek.getLength()
if length[0]:
return 0
return length[1]
@cached
def getCutlist(self):
service = self.source.service
cue = service and service.cueSheet()
return cue and cue.getCutList()
@cached
def getText(self):
seek = self.getSeek()
if seek is None:
return ""
else:
if self.type == self.TYPE_LENGTH:
l = self.length
elif self.type == self.TYPE_POSITION:
l = self.position
elif self.type == self.TYPE_REMAINING:
l = self.length - self.position
if not self.detailed:
l /= 90000
if self.negate: l = -l
if l > 0:
sign = ""
else:
l = -l
sign = "-"
if not self.detailed:
if self.showHours:
if self.showNoSeconds:
return sign + "%d:%02d" % (l/3600, l%3600/60)
else:
return sign + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
else:
if self.showNoSeconds:
return sign + "%d" % (l/60)
else:
return sign + "%d:%02d" % (l/60, l%60)
else:
if self.showHours:
return sign + "%d:%02d:%02d:%03d" % ((l/3600/90000), (l/90000)%3600/60, (l/90000)%60, (l%90000)/90)
else:
return sign + "%d:%02d:%03d" % ((l/60/90000), (l/90000)%60, (l%90000)/90)
# range/value are for the Progress renderer
range = 10000

@cached
def getValue(self):
    # Play position scaled into 0..10000 for the Progress renderer.
    # Returns None when either value is unknown or the length is invalid.
    numerator = self.position
    denominator = self.length
    if numerator is None or denominator is None or denominator <= 0:
        return None
    return numerator * 10000 / denominator
# Expose the getters as read-only properties (their results are memoized
# per change-cycle by the @cached decorator on each getter).
position = property(getPosition)
length = property(getLength)
cutlist = property(getCutlist)
text = property(getText)
value = property(getValue)
def changed(self, what):
    # Dispatch a change event to downstream elements.  Non-specific events
    # always invalidate the cut list; specific events do so (and refresh
    # the time display) only for cue-sheet changes.  `what[1]` is only
    # touched for specific events, so poll events may be 1-tuples.
    kind = what[0]
    if kind != self.CHANGED_SPECIFIC:
        cutlist_refresh = True
        time_refresh = kind == self.CHANGED_POLL
    else:
        cue_changed = what[1] in (iPlayableService.evCuesheetChanged,)
        cutlist_refresh = cue_changed
        time_refresh = cue_changed

    if cutlist_refresh and self.type == self.TYPE_GAUGE:
        self.downstream_elements.cutlist_changed()

    if time_refresh:
        self.downstream_elements.changed(what)
| gpl-2.0 |
salamer/django | tests/unmanaged_models/tests.py | 296 | 2174 | from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2
class SimpleTests(TestCase):

    def test_simple(self):
        """
        The main test here is that all the models can be created without
        any database errors. We can also do some more simple insertion and
        lookup tests whilst we're here to show that the second set of models
        do refer to the tables from the first set.
        """
        # Insert some data into one set of models.
        a = A01.objects.create(f_a="foo", f_b=42)
        B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
        c = C01.objects.create(f_a="barney", f_b=1)
        c.mm_a = [a]

        # ... and pull it out via the other set (the unmanaged mirror models
        # share the same underlying tables).
        a2 = A02.objects.all()[0]
        self.assertIsInstance(a2, A02)
        self.assertEqual(a2.f_a, "foo")

        b2 = B02.objects.all()[0]
        self.assertIsInstance(b2, B02)
        self.assertEqual(b2.f_a, "fred")

        # The FK on the mirror model resolves to the mirror target type.
        self.assertIsInstance(b2.fk_a, A02)
        self.assertEqual(b2.fk_a.f_a, "foo")

        self.assertEqual(list(C02.objects.filter(f_a=None)), [])

        # M2M rows written through C01 are visible when filtering via C02.
        resp = list(C02.objects.filter(mm_a=a.id))
        self.assertEqual(len(resp), 1)

        self.assertIsInstance(resp[0], C02)
        self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
    """Checks on creation of the implicit M2M intermediary tables."""

    def test_many_to_many_between_unmanaged(self):
        """
        The intermediary table between two unmanaged models should not be created.
        """
        m2m_table = Unmanaged2._meta.get_field('mm').m2m_db_table()
        existing_tables = connection.introspection.table_names()
        self.assertNotIn(m2m_table, existing_tables,
                         "Table '%s' should not exist, but it does." % m2m_table)

    def test_many_to_many_between_unmanaged_and_managed(self):
        """
        An intermediary table between a managed and an unmanaged model should be created.
        """
        m2m_table = Managed1._meta.get_field('mm').m2m_db_table()
        existing_tables = connection.introspection.table_names()
        self.assertIn(m2m_table, existing_tables,
                      "Table '%s' does not exist." % m2m_table)
| bsd-3-clause |
bac/horizon | openstack_dashboard/dashboards/admin/info/tests.py | 4 | 3642 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:admin:info:index')
class SystemInfoViewTests(test.BaseAdminViewTests):
    # Stub every service-listing API the admin info panel calls so the view
    # can render without real OpenStack endpoints.
    @test.create_stubs({api.base: ('is_service_enabled',),
                        api.nova: ('service_list',),
                        api.neutron: ('agent_list', 'is_extension_supported'),
                        api.cinder: ('service_list',),
                        api.heat: ('service_list',)})
    def _test_base_index(self):
        # mox record phase: the calls below must match what the view makes,
        # in order, before ReplayAll() is invoked.
        api.base.is_service_enabled(IsA(http.HttpRequest), IgnoreArg()) \
            .MultipleTimes().AndReturn(True)
        services = self.services.list()
        api.nova.service_list(IsA(http.HttpRequest)).AndReturn(services)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'agent').AndReturn(True)
        agents = self.agents.list()
        api.neutron.agent_list(IsA(http.HttpRequest)).AndReturn(agents)
        cinder_services = self.cinder_services.list()
        api.cinder.service_list(IsA(http.HttpRequest)).\
            AndReturn(cinder_services)
        heat_services = self.heat_services.list()
        api.heat.service_list(IsA(http.HttpRequest)).\
            AndReturn(heat_services)
        self.mox.ReplayAll()

        # Render the panel and hand the response back so each test can
        # inspect its own tab.
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'admin/info/index.html')
        return res

    def test_index(self):
        # Services tab rows carry region and endpoint information.
        res = self._test_base_index()
        services_tab = res.context['tab_group'].get_tab('services')
        self.assertIn("region", services_tab._tables['services'].data[0])
        self.assertIn("endpoints",
                      services_tab._tables['services'].data[0])
        self.mox.VerifyAll()

    def test_neutron_index(self):
        # Network-agents tab lists every stubbed neutron agent.
        res = self._test_base_index()
        network_agents_tab = res.context['tab_group'].get_tab('network_agents')
        self.assertQuerysetEqual(
            network_agents_tab._tables['network_agents'].data,
            [agent.__repr__() for agent in self.agents.list()]
        )
        self.mox.VerifyAll()

    def test_cinder_index(self):
        # Cinder-services tab lists every stubbed cinder service.
        res = self._test_base_index()
        cinder_services_tab = res.context['tab_group'].\
            get_tab('cinder_services')
        self.assertQuerysetEqual(
            cinder_services_tab._tables['cinder_services'].data,
            [service.__repr__() for service in self.cinder_services.list()]
        )
        self.mox.VerifyAll()

    def test_heat_index(self):
        # Heat-services tab lists every stubbed heat service.
        res = self._test_base_index()
        heat_services_tab = res.context['tab_group'].\
            get_tab('heat_services')
        self.assertQuerysetEqual(
            heat_services_tab._tables['heat_services'].data,
            [service.__repr__() for service in self.heat_services.list()]
        )
        self.mox.VerifyAll()
| apache-2.0 |
3dfxsoftware/cbss-addons | l10n_in_hr_payroll/wizard/hr_yearly_salary_detail.py | 374 | 2376 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class yearly_salary_detail(osv.osv_memory):
    """Transient wizard collecting employees plus a date range, then
    launching the yearly salary detail report."""

    _name = 'yearly.salary.detail'
    _description = 'Hr Salary Employee By Category Report'

    _columns = {
        'employee_ids': fields.many2many('hr.employee', 'payroll_emp_rel', 'payroll_id', 'employee_id', 'Employees', required=True),
        'date_from': fields.date('Start Date', required=True),
        'date_to': fields.date('End Date', required=True),
    }

    _defaults = {
        'date_from': lambda *a: time.strftime('%Y-01-01'),
        'date_to': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Read the wizard values and return the report action.

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: ids of the wizard records being processed
        :param context: standard context dictionary (may be None)
        :return: report action for l10n_in_hr_payroll.report_hryearlysalary
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        records = self.read(cr, uid, ids, context=context)
        first_record = records and records[0] or {}
        datas.update({'form': first_record})
        return self.pool['report'].get_action(cr, uid, ids, 'l10n_in_hr_payroll.report_hryearlysalary', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
Lkhagvadelger/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/service_reflection_test.py | 560 | 5127 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = 'petar@google.com (Petar Petrov)'
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(unittest.TestCase):

    def testService(self):
        # Channel that records the call and invokes the callback synchronously.
        class MockRpcChannel(service.RpcChannel):
            def CallMethod(self, method, controller, request, response, callback):
                self.method = method
                self.controller = controller
                self.request = request
                callback(response)

        # Controller that only remembers the failure message it was given.
        class MockRpcController(service.RpcController):
            def SetFailed(self, msg):
                self.failure_message = msg

        self.callback_response = None

        # Service subclass with NO method implementations: every call must
        # report "not implemented" through the controller.
        class MyService(unittest_pb2.TestService):
            pass

        self.callback_response = None

        def MyCallback(response):
            self.callback_response = response

        rpc_controller = MockRpcController()
        channel = MockRpcChannel()
        srvc = MyService()
        srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
        self.assertEqual('Method Foo not implemented.',
                         rpc_controller.failure_message)
        self.assertEqual(None, self.callback_response)

        rpc_controller.failure_message = None

        # Dispatch by descriptor (methods[1] is Bar) must fail the same way.
        service_descriptor = unittest_pb2.TestService.GetDescriptor()
        srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
                        unittest_pb2.BarRequest(), MyCallback)
        self.assertEqual('Method Bar not implemented.',
                         rpc_controller.failure_message)
        self.assertEqual(None, self.callback_response)

        # Service subclass WITH implementations: calls succeed and set flags.
        class MyServiceImpl(unittest_pb2.TestService):
            def Foo(self, rpc_controller, request, done):
                self.foo_called = True
            def Bar(self, rpc_controller, request, done):
                self.bar_called = True

        srvc = MyServiceImpl()
        rpc_controller.failure_message = None
        srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
        self.assertEqual(None, rpc_controller.failure_message)
        self.assertEqual(True, srvc.foo_called)

        rpc_controller.failure_message = None
        srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
                        unittest_pb2.BarRequest(), MyCallback)
        self.assertEqual(None, rpc_controller.failure_message)
        self.assertEqual(True, srvc.bar_called)

    def testServiceStub(self):
        # Channel that constructs the response type and calls back immediately.
        class MockRpcChannel(service.RpcChannel):
            def CallMethod(self, method, controller, request,
                           response_class, callback):
                self.method = method
                self.controller = controller
                self.request = request
                callback(response_class())

        self.callback_response = None

        def MyCallback(response):
            self.callback_response = response

        channel = MockRpcChannel()
        stub = unittest_pb2.TestService_Stub(channel)
        rpc_controller = 'controller'
        request = 'request'

        # GetDescriptor now static, still works as instance method for compatability
        self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
                         stub.GetDescriptor())

        # Invoke method.
        stub.Foo(rpc_controller, request, MyCallback)

        # The stub must forward args unchanged and hand back a FooResponse.
        self.assertTrue(isinstance(self.callback_response,
                                   unittest_pb2.FooResponse))
        self.assertEqual(request, channel.request)
        self.assertEqual(rpc_controller, channel.controller)
        self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
CSB-IG/fantomine | fant_deep_search/fantom_crawler.py | 1 | 1653 | from consumer import Url_Id_Consumer
from db_control import Db_Controller
import Queue
import time
def main():
#name of threads
threadList = []
#list of consumers threads
threads = []
#max number of threads
MAX_NUM_T = 27
#queue of new TFBS to explore
TFBSQ = Queue.LifoQueue()
#queue of explored TFBS with interactions and weights
EXP_TFBSQ = Queue.Queue()
#exit flag for db_thread
exitFlag = Queue.Queue()
#exit flag for consumers
exitFlag_Consumers = Queue.Queue()
#a feature_id_gene for begin to explore
TFBSQ.queue.append(('SRF','5558263'))
init = time.time()
#enum the consumerthreads
for i in range(MAX_NUM_T):
threadList.append("Thread #"+str(i))
print "all threads enum"
#create Url_Id_Consumer threads
for name in threadList:
thread = Url_Id_Consumer(name, TFBSQ, EXP_TFBSQ, exitFlag_Consumers)
thread.start()
threads.append(thread)
print "Todos los thread en la lista se inicializaron ######"
#create Db_Controller thread
db_thread = Db_Controller("Db_thread", TFBSQ, EXP_TFBSQ, exitFlag)
db_thread.start()
threads.append(db_thread)
print "se creo el tread db_thread"
#while not TFBS.empty() and EXP_TFBS.empty():
while exitFlag.qsize() == 0:
pass
exitFlag_Consumers.put(1)
#Wait for all threads to complete
for t in threads:
t.join()
final = time.time()
print "Exiting Main Thread, DATA MINING COMPLETE jeje"
print "El timpo total fue: {0} seg".format(final-init)
# cave canem (Latin: "beware of the dog" -- original author's aside)
# Script entry point: only crawl when executed directly, not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
deepesch/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
    """Convert a raw name for a data set in a mldata.org filename."""
    # Lower-case, turn every space into a hyphen, then strip the characters
    # mldata.org drops from names: parentheses and dots.
    hyphenated = '-'.join(dataname.lower().split(' '))
    return ''.join(ch for ch in hyphenated if ch not in '().')
def fetch_mldata(dataname, target_name='label', data_name='data',
                 transpose_data=True, data_home=None):
    """Fetch an mldata.org data set

    If the file does not exist yet, it is downloaded from mldata.org .

    mldata.org does not have an enforced convention for storing data or
    naming the columns in a data set. The default behavior of this function
    works well with the most common cases:

      1) data values are stored in the column 'data', and target values in the
         column 'label'
      2) alternatively, the first column stores target values, and the second
         data values
      3) the data array is stored as `n_features x n_samples` , and thus needs
         to be transposed to match the `sklearn` standard

    Keyword arguments allow to adapt these defaults to specific data sets
    (see parameters `target_name`, `data_name`, `transpose_data`, and
    the examples below).

    mldata.org data sets may have multiple columns, which are stored in the
    Bunch object with their original name.

    Parameters
    ----------

    dataname:
        Name of the data set on mldata.org,
        e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL .

    target_name: optional, default: 'label'
        Name or index of the column containing the target values.

    data_name: optional, default: 'data'
        Name or index of the column containing the data.

    transpose_data: optional, default: True
        If True, transpose the downloaded data array.

    data_home: optional, default: None
        Specify another download and cache folder for the data sets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    Returns
    -------

    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'DESCR', the full description of the dataset, and
        'COL_NAMES', the original names of the dataset columns.

    Examples
    --------
    Load the 'iris' dataset from mldata.org:

    >>> from sklearn.datasets.mldata import fetch_mldata
    >>> import tempfile
    >>> test_data_home = tempfile.mkdtemp()

    >>> iris = fetch_mldata('iris', data_home=test_data_home)
    >>> iris.target.shape
    (150,)
    >>> iris.data.shape
    (150, 4)

    Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respects the sklearn axes convention:

    >>> leuk = fetch_mldata('leukemia', transpose_data=True,
    ...                     data_home=test_data_home)
    >>> leuk.data.shape
    (72, 7129)

    Load an alternative 'iris' dataset, which has different names for the
    columns:

    >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
    ...                      data_name=0, data_home=test_data_home)
    >>> iris3 = fetch_mldata('datasets-UCI iris',
    ...                      target_name='class', data_name='double0',
    ...                      data_home=test_data_home)

    >>> import shutil
    >>> shutil.rmtree(test_data_home)
    """
    # normalize dataset name
    dataname = mldata_filename(dataname)

    # check if this data set has been already downloaded
    data_home = get_data_home(data_home=data_home)
    data_home = join(data_home, 'mldata')
    if not exists(data_home):
        os.makedirs(data_home)

    matlab_name = dataname + '.mat'
    filename = join(data_home, matlab_name)
    # if the file does not exist, download it
    if not exists(filename):
        urlname = MLDATA_BASE_URL % quote(dataname)
        try:
            mldata_url = urlopen(urlname)
        except HTTPError as e:
            # Rewrite the generic 404 message into a dataset-specific one.
            if e.code == 404:
                e.msg = "Dataset '%s' not found on mldata.org." % dataname
            raise
        # store Matlab file
        try:
            with open(filename, 'w+b') as matlab_file:
                copyfileobj(mldata_url, matlab_file)
        except:
            # Remove the partial download before re-raising whatever failed.
            os.remove(filename)
            raise
        mldata_url.close()

    # load dataset matlab file
    with open(filename, 'rb') as matlab_file:
        matlab_dict = io.loadmat(matlab_file, struct_as_record=True)

    # -- extract data from matlab_dict
    # flatten column names
    col_names = [str(descr[0])
                 for descr in matlab_dict['mldata_descr_ordering'][0]]

    # if target or data names are indices, transform them into names
    if isinstance(target_name, numbers.Integral):
        target_name = col_names[target_name]
    if isinstance(data_name, numbers.Integral):
        data_name = col_names[data_name]

    # rules for making sense of the mldata.org data format
    # (earlier ones have priority):
    # 1) there is only one array => it is "data"
    # 2) there are multiple arrays
    #    a) copy all columns in the bunch, using their column name
    #    b) if there is a column called `target_name`, set "target" to it,
    #       otherwise set "target" to first column
    #    c) if there is a column called `data_name`, set "data" to it,
    #       otherwise set "data" to second column
    dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
               'COL_NAMES': col_names}

    # 1) there is only one array => it is considered data
    if len(col_names) == 1:
        data_name = col_names[0]
        dataset['data'] = matlab_dict[data_name]
    # 2) there are multiple arrays
    else:
        for name in col_names:
            dataset[name] = matlab_dict[name]

        # NOTE: each chosen column is deleted under its original name and
        # re-inserted under the canonical 'target'/'data' key.
        if target_name in col_names:
            del dataset[target_name]
            dataset['target'] = matlab_dict[target_name]
        else:
            del dataset[col_names[0]]
            dataset['target'] = matlab_dict[col_names[0]]

        if data_name in col_names:
            del dataset[data_name]
            dataset['data'] = matlab_dict[data_name]
        else:
            del dataset[col_names[1]]
            dataset['data'] = matlab_dict[col_names[1]]

    # set axes to sklearn conventions
    if transpose_data:
        dataset['data'] = dataset['data'].T
    if 'target' in dataset:
        # Dense targets get squeezed to 1-D; sparse targets are left alone.
        if not sp.sparse.issparse(dataset['target']):
            dataset['target'] = dataset['target'].squeeze()

    return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
    """Replace urllib access to mldata.org with canned mock datasets so the
    doctests above run without network access."""
    # setup mock urllib2 module to avoid downloading from mldata.org
    from sklearn.utils.testing import install_mldata_mock
    mock_datasets = {
        'iris': {
            'data': np.empty((150, 4)),
            'label': np.empty(150),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
        'leukemia': {
            'data': np.empty((72, 7129)),
        },
    }
    install_mldata_mock(mock_datasets)
def teardown_module(module):
    # Restore real mldata.org access after the doctest fixture is done.
    from sklearn.utils.testing import uninstall_mldata_mock
    uninstall_mldata_mock()
| bsd-3-clause |
mdsol/rwslib | rwslib/tests/test_rwscmd.py | 1 | 19862 | # -*- coding: utf-8 -*-
__author__ = 'anewbigging'
import unittest
import httpretty
from click.testing import CliRunner
from rwslib.extras.rwscmd import rwscmd
class TestRWSCMD(unittest.TestCase):
    """Tests for the ``rws`` click CLI; all HTTP traffic is faked with
    httpretty so no real Rave Web Services instance is contacted."""

    def setUp(self):
        # Fresh Click test runner for every test.
        self.runner = CliRunner()

    @httpretty.activate
    def test_version(self):
        # `rws <url> version` prints the version string returned by RWS.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/version",
            status=200,
            body='1.0.0')

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'version'], input="\n\n")
        self.assertIn('1.0.0', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_data_studies(self):
        # `data` with no study lists the study names from the ODM response.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/studies",
            status=200,
            body="""<ODM FileType="Snapshot" FileOID="" CreationDateTime="" ODMVersion="1.3"
xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" xmlns="http://www.cdisc.org/ns/odm/v1.3">
<Study OID="Lab Test">
<GlobalVariables>
<StudyName>Lab Test</StudyName>
<StudyDescription />
<ProtocolName>Lab Test</ProtocolName>
</GlobalVariables>
</Study>
<Study OID="Mediflex">
<GlobalVariables>
<StudyName>Mediflex</StudyName>
<StudyDescription />
<ProtocolName>Mediflex</ProtocolName>
</GlobalVariables>
</Study>
</ODM>""")

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'data'],
                                    input="defuser\npassword\n")
        self.assertIn('Lab Test\nMediflex', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_data_subjects(self):
        # `data <study> <env>` lists the subject keys of that environment.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/studies/Mediflex(Dev)/subjects",
            status=200,
            body="""<ODM FileType="Snapshot" FileOID="97794848-9e60-4d7c-a8f9-423ea8d08556" CreationDateTime="2016-03-07T20:59:34.175-00:00" ODMVersion="1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.cdisc.org/ns/odm/v1.3">
<ClinicalData StudyOID="Mediflex(Dev)" MetaDataVersionOID="23">
<SubjectData SubjectKey="0004-bbc-003" mdsol:SubjectKeyType="SubjectName">
<SiteRef LocationOID="MDSOL" />
</SubjectData>
</ClinicalData>
<ClinicalData StudyOID="Mediflex(Dev)" MetaDataVersionOID="23">
<SubjectData SubjectKey="001 atn" mdsol:SubjectKeyType="SubjectName">
<SiteRef LocationOID="MDSOL" />
</SubjectData>
</ClinicalData>
</ODM>""")

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'data', 'Mediflex', 'Dev'],
                                    input="defuser\npassword\n")
        self.assertIn('0004-bbc-003\n001 atn', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_data_subject_data(self):
        # `data <study> <env> <subject>` echoes the subject's ODM back.
        odm = """<ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="7b63fdca-6868-4bdf-9b41-66835c881c38" CreationDateTime="2016-03-01T10:52:02.000-00:00">
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="158">
<SubjectData SubjectKey="c01343a3-d3d5-4005-9ea3-93f87b038d62" mdsol:SubjectKeyType="SubjectUUID" mdsol:SubjectName="001">
<SiteRef LocationOID="0100"/>
<StudyEventData StudyEventOID="SCRN">
<FormData FormOID="PTVISIT" FormRepeatKey="1">
<ItemGroupData ItemGroupOID="PTVISIT">
<ItemData ItemOID="PTVISIT.VS" Value=""/>
</ItemGroupData>
</FormData>
</StudyEventData>
</SubjectData>
</ClinicalData>
</ODM>"""

        # httpretty matches on the path only; the query string (StudyOID,
        # SubjectKey, IncludeIDs, IncludeValues) is ignored by the mock.
        # path = "datasets/rwscmd_getdata.odm?StudyOID=Fixitol(Dev)&SubjectKey=001&IncludeIDs=0&IncludeValues=0"
        path = "datasets/rwscmd_getdata.odm"
        httpretty.register_uri(
            httpretty.GET,
            "https://innovate.mdsol.com/RaveWebServices/" + path,
            # "https://innovate.mdsol.com/RaveWebServices",
            status=200,
            body=odm)

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'data', 'Fixitol', 'Dev', '001'],
                                    input="defuser\npassword\n")
        self.assertIn(odm, result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_post_data(self):
        # `post <file>` uploads an ODM file; --raw prints the raw response.
        post_odm = """<ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="7b63fdca-6868-4bdf-9b41-66835c881c38" CreationDateTime="2016-03-01T10:52:02.000-00:00">
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="158">
<SubjectData SubjectKey="c01343a3-d3d5-4005-9ea3-93f87b038d62" mdsol:SubjectKeyType="SubjectUUID" mdsol:SubjectName="001">
<SiteRef LocationOID="0100"/>
<StudyEventData StudyEventOID="SCRN">
<FormData FormOID="PTVISIT" FormRepeatKey="1">
<ItemGroupData ItemGroupOID="PTVISIT">
<ItemData ItemOID="PTVISIT.VS" Value="10 MAR 2016"/>
</ItemGroupData>
</FormData>
</StudyEventData>
</SubjectData>
</ClinicalData>
</ODM>"""

        response_content = """<Response ReferenceNumber="82e942b0-48e8-4cf4-b299-51e2b6a89a1b"
InboundODMFileOID=""
IsTransactionSuccessful="1"
SuccessStatistics="Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=1; LogLines=0" NewRecords=""
SubjectNumberInStudy="1103" SubjectNumberInStudySite="55">
</Response>"""

        httpretty.register_uri(
            httpretty.POST, "https://innovate.mdsol.com/RaveWebServices/webservice.aspx?PostODMClinicalData",
            status=200,
            body=response_content)

        # Write the ODM payload to a real file inside a temp directory so
        # the CLI can read it by name.
        with self.runner.isolated_filesystem():
            with open('odm.xml', 'w') as odm:
                odm.write(post_odm)

            result = self.runner.invoke(rwscmd.rws, ['--raw', 'https://innovate.mdsol.com', 'post', 'odm.xml'],
                                        input="defuser\npassword\n")
        self.assertIn(response_content, result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_direct(self):
        # `direct <path>` issues an arbitrary GET against the RWS base URL.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/request?oid=1",
            status=200,
            body='<xml/>')

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'direct', 'request?oid=1'],
                                    input="defuser\npassword\n")
        self.assertIn('<xml/>', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_metadata(self):
        # `metadata` with no study lists the metadata study names.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/metadata/studies",
            status=200,
            body="""<ODM FileType="Snapshot" FileOID="" CreationDateTime="" ODMVersion="1.3"
xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" xmlns="http://www.cdisc.org/ns/odm/v1.3">
<Study OID="Lab Test">
<GlobalVariables>
<StudyName>Lab Test</StudyName>
<StudyDescription />
<ProtocolName>Lab Test</ProtocolName>
</GlobalVariables>
</Study>
<Study OID="Mediflex">
<GlobalVariables>
<StudyName>Mediflex</StudyName>
<StudyDescription />
<ProtocolName>Mediflex</ProtocolName>
</GlobalVariables>
</Study>
</ODM>""")

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'metadata'],
                                    input="defuser\npassword\n")
        self.assertIn('Lab Test\nMediflex', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_metadata_versions(self):
        # `metadata <study>` lists the study's metadata version OIDs.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/metadata/studies/Fixitol/versions",
            status=200,
            body="""<ODM ODMVersion="1.3" Granularity="Metadata" FileType="Snapshot" FileOID="d26b4d33-376d-4037-9747-684411190179" CreationDateTime=" 2013-04-08T01:29:13 " xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata">
<Study OID="Fixitol">
<GlobalVariables>
<StudyName>Fixitol</StudyName>
<StudyDescription></StudyDescription>
<ProtocolName>Fixitol</ProtocolName>
</GlobalVariables>
<MetaDataVersion OID="1203" Name="Webservice Outbound" />
<MetaDataVersion OID="1195" Name="JC_Demo_Draft1" />
<MetaDataVersion OID="1165" Name="Initial" />
</Study>
</ODM>""")

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'metadata', 'Fixitol'],
                                    input="defuser\npassword\n")
        self.assertIn('1203\n1195\n1165', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_metadata_drafts(self):
        # `metadata --drafts <study>` hits the /drafts endpoint instead.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/metadata/studies/Fixitol/drafts",
            status=200,
            body="""<ODM ODMVersion="1.3" Granularity="Metadata" FileType="Snapshot" FileOID="d26b4d33-376d-4037-9747-684411190179" CreationDateTime=" 2013-04-08T01:29:13 " xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata">
<Study OID="Fixitol">
<GlobalVariables>
<StudyName>Fixitol</StudyName>
<StudyDescription></StudyDescription>
<ProtocolName>Fixitol</ProtocolName>
</GlobalVariables>
<MetaDataVersion OID="1203" Name="Webservice Outbound" />
<MetaDataVersion OID="1195" Name="JC_Demo_Draft1" />
<MetaDataVersion OID="1165" Name="Initial" />
</Study>
</ODM>""")

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'metadata', '--drafts', 'Fixitol'],
                                    input="defuser\npassword\n")
        self.assertIn('1203\n1195\n1165', result.output)
        self.assertEqual(result.exit_code, 0)

    @httpretty.activate
    def test_metadata_version(self):
        # `metadata <study> <version>` prints the full ODM for that version.
        odm = """<ODM FileType="Snapshot" FileOID="767a1f8b-7b72-4d12-adbe-37d4d62ba75e"
CreationDateTime="2013-04-08T10:02:17.781-00:00"
ODMVersion="1.3"
xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns="http://www.cdisc.org/ns/odm/v1.3">
<Study OID="Fixitol">
<GlobalVariables>
<StudyName>Fixitol</StudyName>
<StudyDescription/>
<ProtocolName>Fixitol</ProtocolName>
</GlobalVariables>
</Study>
"""
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/metadata/studies/Fixitol/versions/1165",
            status=200,
            body=odm)

        result = self.runner.invoke(rwscmd.rws, ['https://innovate.mdsol.com', 'metadata', 'Fixitol', '1165'],
                                    input="defuser\npassword\n")
        self.assertIn(odm, result.output)
        self.assertEqual(result.exit_code, 0)
class TestAutofill(unittest.TestCase):
    def setUp(self):
        self.runner = CliRunner()
        # Study metadata fixture: one MetaDataVersion containing date, time,
        # integer and long-text items, plus a coded drop-down item (YN)
        # backed by the YES_NO_UNKNOWN code list.
        self.odm_metadata = """<ODM FileType="Snapshot" Granularity="Metadata" CreationDateTime="2016-02-29T13:47:23.654-00:00" FileOID="d460fc96-4f08-445f-89b1-0182e8e810c1" ODMVersion="1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" xmlns="http://www.cdisc.org/ns/odm/v1.3">
<Study OID="Test">
<GlobalVariables>
<StudyName>Test</StudyName>
<StudyDescription></StudyDescription>
<ProtocolName>Test</ProtocolName>
</GlobalVariables>
<BasicDefinitions/>
<MetaDataVersion OID="1" Name="Metadata version 1">
<ItemDef OID="VSDT" Name="VSDT" DataType="date" mdsol:DateTimeFormat="dd MMM yyyy" mdsol:VariableOID="VSDT" mdsol:Active="Yes" mdsol:ControlType="DateTime" mdsol:SourceDocument="Yes" mdsol:SASLabel="Visit Date" mdsol:QueryFutureDate="Yes" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" mdsol:CanSetStudyEventDate="Yes" />
<ItemDef OID="TIME" Name="TIME" DataType="time" mdsol:DateTimeFormat="HH:nn:ss" mdsol:VariableOID="TIME" mdsol:Active="Yes" mdsol:ControlType="DateTime" mdsol:SourceDocument="Yes" mdsol:SASLabel="Time" mdsol:QueryFutureDate="Yes" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" mdsol:CanSetStudyEventDate="No" />
<ItemDef OID="VSNUM" Name="VSNUM" DataType="integer" mdsol:VariableOID="VSNUM" Length="2" mdsol:Active="Yes" mdsol:ControlType="DropDownList" mdsol:SourceDocument="Yes" mdsol:SASLabel="Follow Up Visit" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" />
<ItemDef OID="SAE" Name="SAE" DataType="text" mdsol:VariableOID="SAE" Length="200" mdsol:Active="Yes" mdsol:ControlType="LongText" mdsol:SourceDocument="Yes" mdsol:SASLabel="eSAE Desription" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" />
<ItemDef OID="YN" Name="YN" DataType="integer" mdsol:VariableOID="YN" Length="1" mdsol:Active="Yes" mdsol:ControlType="DropDownList" mdsol:SourceDocument="Yes" mdsol:SASLabel="Subject Received Dose" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes">
<CodeListRef CodeListOID="YES_NO_UNKNOWN" />
</ItemDef>
<CodeList OID="YES_NO_UNKNOWN" Name="YES_NO_UNKNOWN" DataType="integer">
<CodeListItem CodedValue="0" mdsol:OrderNumber="1" />
<CodeListItem CodedValue="1" mdsol:OrderNumber="2" />
<CodeListItem CodedValue="97" mdsol:OrderNumber="3" />
</CodeList>
</MetaDataVersion>
</Study>
</ODM>
"""
        # Clinical-data fixture: one subject with a single empty item (YN),
        # i.e. a field that the autofill command should generate a value for.
        self.odm_empty = """<ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="c3f15f2d-eb69-42e6-bed4-811bff27ebf9" CreationDateTime="2016-03-02T09:27:14.000-00:00">
<ClinicalData StudyOID="Test(Prod)" MetaDataVersionOID="1">
<SubjectData SubjectKey="9e15f698-327e-4e9c-8ed5-8be9b27b67b0" mdsol:SubjectKeyType="SubjectUUID" mdsol:SubjectName="001">
<SiteRef LocationOID="0100"/>
<StudyEventData StudyEventOID="SCRN">
<FormData FormOID="FORM1" FormRepeatKey="1">
<ItemGroupData ItemGroupOID="FORM1">
<ItemData ItemOID="YN" Value=""/>
</ItemGroupData>
</FormData>
</StudyEventData>
</SubjectData>
</ClinicalData>
</ODM>"""
        # Dataset path used by the data-extract endpoint below.
        self.path = "datasets/rwscmd_getdata.odm"
        # Canned RWS response for the POST of generated clinical data.
        self.response_content = """<Response ReferenceNumber="82e942b0-48e8-4cf4-b299-51e2b6a89a1b"
InboundODMFileOID=""
IsTransactionSuccessful="1"
SuccessStatistics="Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=1; LogLines=0" NewRecords=""
SubjectNumberInStudy="1103" SubjectNumberInStudySite="55">
</Response>"""
        # NOTE: HTTPretty is not supported on Python3, need to migrate this (get weird breakages in Travis)
        httpretty.enable()
        # Stub the three endpoints autofill touches: metadata fetch,
        # clinical-data extract, and clinical-data POST.
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/metadata/studies/Test/versions/1",
            status=200,
            body=self.odm_metadata)
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/" + self.path,
            status=200,
            body=self.odm_empty)
        httpretty.register_uri(
            httpretty.POST, "https://innovate.mdsol.com/RaveWebServices/webservice.aspx?PostODMClinicalData",
            status=200,
            body=self.response_content)
def test_autofill(self):
result = self.runner.invoke(rwscmd.rws,
["--verbose", 'https://innovate.mdsol.com', 'autofill', 'Test', 'Prod', '001'],
input="defuser\npassword\n")
output = result.output
self.assertIn("Step 1\nGetting data list", output)
self.assertIn("Getting metadata version 1", output)
self.assertIn("Step 10\nGetting data list", output)
self.assertIn("Generating data", output)
self.assertEqual(10, output.count("Generating data"))
self.assertNotIn("Step 11", result.output)
self.assertEqual(result.exit_code, 0)
def test_autofill_steps(self):
result = self.runner.invoke(rwscmd.rws,
['--verbose', 'https://innovate.mdsol.com', 'autofill', '--steps', '1',
'Test', 'Prod', '001'],
input="defuser\npassword\n")
self.assertIn("Step 1\nGetting data list", result.output)
self.assertIn("Getting metadata version 1\n", result.output)
self.assertIn("Generating data", result.output)
self.assertNotIn("Step 2", result.output)
self.assertEqual(result.exit_code, 0)
    def test_autofill_no_data(self):
        # Replace the data endpoint with an ODM document containing no
        # subject data: autofill should stop without generating anything.
        odm = """
<ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="c3f15f2d-eb69-42e6-bed4-811bff27ebf9" CreationDateTime="2016-03-02T09:27:14.000-00:00">
</ODM>"""
        httpretty.register_uri(
            httpretty.GET, "https://innovate.mdsol.com/RaveWebServices/" + self.path,
            status=200,
            body=odm)
        result = self.runner.invoke(rwscmd.rws,
                                    ['--verbose', 'https://innovate.mdsol.com', 'autofill', 'Test', 'Prod', '001'],
                                    input="defuser\npassword\n")
        self.assertIn("Step 1\nGetting data list\n", result.output)
        self.assertIn("No data found", result.output)
        # No empty fields means no generation round should run.
        self.assertNotIn("Generating data", result.output)
        self.assertEqual(result.exit_code, 0)
def test_autofill_fixed(self):
with self.runner.isolated_filesystem():
with open('fixed.txt', 'w') as f:
f.write("YN,99")
result = self.runner.invoke(rwscmd.rws,
['--verbose', 'https://innovate.mdsol.com', 'autofill', '--steps', '1',
'--fixed', 'fixed.txt', 'Test', 'Prod', '001'],
input=u"defuser\npassword\n", catch_exceptions=False)
self.assertFalse(result.exception)
self.assertIn("Step 1\nGetting data list", result.output)
self.assertIn("Getting metadata version 1", result.output)
self.assertIn("Generating data", result.output)
self.assertIn('Fixing YN to value: 99', result.output)
self.assertNotIn("Step 2", result.output)
self.assertEqual(result.exit_code, 0)
def test_autofill_metadata(self):
with self.runner.isolated_filesystem():
with open('odm.xml', 'w') as f:
f.write(self.odm_metadata)
result = self.runner.invoke(rwscmd.rws,
['--verbose', 'https://innovate.mdsol.com', 'autofill', '--steps', '1',
'--metadata', 'odm.xml', 'Test', 'Prod', '001'],
input=u"defuser\npassword\n", catch_exceptions=False)
self.assertFalse(result.exception)
self.assertIn("Step 1\nGetting data list", result.output)
self.assertIn("Generating data", result.output)
self.assertNotIn("Step 2", result.output)
self.assertEqual(result.exit_code, 0)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| mit |
waytai/django | django/db/backends/oracle/introspection.py | 517 | 11463 | import cx_Oracle
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Maps cx_Oracle type objects to Django Field types.
    data_types_reverse = {
        cx_Oracle.BLOB: 'BinaryField',
        cx_Oracle.CLOB: 'TextField',
        cx_Oracle.DATETIME: 'DateField',
        cx_Oracle.FIXED_CHAR: 'CharField',
        cx_Oracle.NCLOB: 'TextField',
        cx_Oracle.NUMBER: 'DecimalField',
        cx_Oracle.STRING: 'CharField',
        cx_Oracle.TIMESTAMP: 'DateTimeField',
    }

    # These type constants only exist in some cx_Oracle versions, so add
    # them to the mapping opportunistically.
    try:
        data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
    except AttributeError:
        pass

    try:
        data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
    except AttributeError:
        pass

    # Counter embedded into the query built by get_table_description() so
    # that each call issues textually distinct SQL.
    cache_bust_counter = 1
    def get_field_type(self, data_type, description):
        # If it's a NUMBER with scale == 0, consider it an IntegerField.
        # `description` follows the DB-API cursor.description layout, so
        # indexes 4 and 5 hold precision and scale.
        if data_type == cx_Oracle.NUMBER:
            precision, scale = description[4:6]
            if scale == 0:
                if precision > 11:
                    return 'BigIntegerField'
                elif precision == 1:
                    # A one-digit integer column is presumably a stored
                    # boolean (Oracle has no native boolean column type).
                    return 'BooleanField'
                else:
                    return 'IntegerField'
            elif scale == -127:
                # Oracle reports binary FLOAT columns as NUMBER, scale -127.
                return 'FloatField'
        return super(DatabaseIntrospection, self).get_field_type(data_type, description)
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
"SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # The always-true "<counter> > 0" predicate changes on every call,
        # so each query has distinct SQL text; ROWNUM < 2 fetches at most
        # one row because only cursor.description is needed.
        self.cache_bust_counter += 1
        cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
            self.connection.ops.quote_name(table_name),
            self.cache_bust_counter))
        description = []
        for desc in cursor.description:
            name = force_text(desc[0])  # cx_Oracle always returns a 'str' on both Python 2 and 3
            name = name % {}  # cx_Oracle, for some reason, doubles percent signs.
            description.append(FieldInfo(*(name.lower(),) + desc[1:]))
        return description
def table_name_converter(self, name):
"Table name comparison is case insensitive under Oracle"
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return {d[0]: i for i, d in enumerate(self.get_table_description(cursor, table_name))}
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # Oracle stores identifiers upper-cased in its data dictionary.
        table_name = table_name.upper()
        cursor.execute("""
    SELECT ta.column_name, tb.table_name, tb.column_name
    FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
         user_tab_cols ta, user_tab_cols tb
    WHERE user_constraints.table_name = %s AND
          ta.table_name = user_constraints.table_name AND
          ta.column_name = ca.column_name AND
          ca.table_name = ta.table_name AND
          user_constraints.constraint_name = ca.constraint_name AND
          user_constraints.r_constraint_name = cb.constraint_name AND
          cb.table_name = tb.table_name AND
          cb.column_name = tb.column_name AND
          ca.position = cb.position""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            # Lower-case identifiers on the way back out for Django.
            relations[row[0].lower()] = (row[2].lower(), row[1].lower())
        return relations
    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table, referenced_column)
        tuples, one per foreign-key ('R') constraint on the given table,
        with all identifiers lower-cased.
        """
        cursor.execute("""
            SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
            FROM user_constraints c
            JOIN user_cons_columns ccol
              ON ccol.constraint_name = c.constraint_name
            JOIN user_cons_columns rcol
              ON rcol.constraint_name = c.r_constraint_name
            WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
        return [tuple(cell.lower() for cell in row)
                for row in cursor.fetchall()]
    def get_indexes(self, cursor, table_name):
        """
        Returns {column_name: {'primary_key': bool, 'unique': bool}} for
        single-column indexes on the given table.
        """
        # The "(+)" outer joins keep index rows with no matching primary-key
        # constraint / unique index; the NOT EXISTS clause excludes any
        # index that also covers a second column position.
        sql = """
    SELECT LOWER(uic1.column_name) AS column_name,
           CASE user_constraints.constraint_type
               WHEN 'P' THEN 1 ELSE 0
           END AS is_primary_key,
           CASE user_indexes.uniqueness
               WHEN 'UNIQUE' THEN 1 ELSE 0
           END AS is_unique
    FROM   user_constraints, user_indexes, user_ind_columns uic1
    WHERE  user_constraints.constraint_type (+) = 'P'
      AND  user_constraints.index_name (+) = uic1.index_name
      AND  user_indexes.uniqueness (+) = 'UNIQUE'
      AND  user_indexes.index_name (+) = uic1.index_name
      AND  uic1.table_name = UPPER(%s)
      AND  uic1.column_position = 1
      AND  NOT EXISTS (
             SELECT 1
             FROM   user_ind_columns uic2
             WHERE  uic2.index_name = uic1.index_name
               AND  uic2.column_position = 2
           )
        """
        cursor.execute(sql, [table_name])
        indexes = {}
        for row in cursor.fetchall():
            indexes[row[0]] = {'primary_key': bool(row[1]),
                               'unique': bool(row[2])}
        return indexes
    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.

        Returns {constraint_name: {'columns': [..], 'primary_key': bool,
        'unique': bool, 'foreign_key': (table, column) or None,
        'check': bool, 'index': bool}}.
        """
        constraints = {}
        # Loop over the constraints, getting PKs and uniques
        cursor.execute("""
            SELECT
                user_constraints.constraint_name,
                LOWER(cols.column_name) AS column_name,
                CASE user_constraints.constraint_type
                    WHEN 'P' THEN 1
                    ELSE 0
                END AS is_primary_key,
                CASE user_indexes.uniqueness
                    WHEN 'UNIQUE' THEN 1
                    ELSE 0
                END AS is_unique,
                CASE user_constraints.constraint_type
                    WHEN 'C' THEN 1
                    ELSE 0
                END AS is_check_constraint
            FROM
                user_constraints
            INNER JOIN
                user_indexes ON user_indexes.index_name = user_constraints.index_name
            LEFT OUTER JOIN
                user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
            WHERE
                (
                    user_constraints.constraint_type = 'P' OR
                    user_constraints.constraint_type = 'U'
                )
                AND user_constraints.table_name = UPPER(%s)
            ORDER BY cols.position
        """, [table_name])
        for constraint, column, pk, unique, check in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": pk,
                    "unique": unique,
                    "foreign_key": None,
                    "check": check,
                    "index": True,  # All P and U come with index, see inner join above
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Check constraints
        cursor.execute("""
            SELECT
                cons.constraint_name,
                LOWER(cols.column_name) AS column_name
            FROM
                user_constraints cons
            LEFT OUTER JOIN
                user_cons_columns cols ON cons.constraint_name = cols.constraint_name
            WHERE
                cons.constraint_type = 'C' AND
                cons.table_name = UPPER(%s)
            ORDER BY cols.position
        """, [table_name])
        for constraint, column in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": False,
                    "unique": False,
                    "foreign_key": None,
                    "check": True,
                    "index": False,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Foreign key constraints
        cursor.execute("""
            SELECT
                cons.constraint_name,
                LOWER(cols.column_name) AS column_name,
                LOWER(rcons.table_name),
                LOWER(rcols.column_name)
            FROM
                user_constraints cons
            INNER JOIN
                user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
            INNER JOIN
                user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
            LEFT OUTER JOIN
                user_cons_columns cols ON cons.constraint_name = cols.constraint_name
            WHERE
                cons.constraint_type = 'R' AND
                cons.table_name = UPPER(%s)
            ORDER BY cols.position
        """, [table_name])
        for constraint, column, other_table, other_column in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": False,
                    "unique": False,
                    "foreign_key": (other_table, other_column),
                    "check": False,
                    "index": False,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Now get indexes -- only those not already backing a constraint,
        # which were covered by the queries above.
        cursor.execute("""
            SELECT
                index_name,
                LOWER(column_name)
            FROM
                user_ind_columns cols
            WHERE
                table_name = UPPER(%s) AND
                NOT EXISTS (
                    SELECT 1
                    FROM user_constraints cons
                    WHERE cols.index_name = cons.index_name
                )
            ORDER BY cols.column_position
        """, [table_name])
        for constraint, column in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": False,
                    "unique": False,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        return constraints
| bsd-3-clause |
burzillibus/RobHome | venv/lib/python2.7/site-packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Abstract base for charset-detection probers.

    Concrete probers consume byte buffers via ``feed`` and expose a
    detection state, a charset name and a confidence score.  The
    ``filter_*`` helpers pre-process buffers before analysis.
    """

    def __init__(self):
        pass

    def reset(self):
        """Put the prober back into its initial detecting state."""
        self._mState = constants.eDetecting

    def get_charset_name(self):
        """Return the detected charset name (always None for the base class)."""
        return None

    def feed(self, aBuf):
        """Consume a chunk of input bytes (no-op in the base class)."""
        pass

    def get_state(self):
        """Return the detection state last set by ``reset``."""
        return self._mState

    def get_confidence(self):
        """Return the detection confidence (always 0.0 for the base class)."""
        return 0.0

    def filter_high_bit_only(self, aBuf):
        """Collapse every run of 7-bit ASCII bytes into a single space."""
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        """Collapse every run of ASCII letters into a single space."""
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO: not implemented; the buffer is returned unchanged.
        return aBuf
| mit |
gibiansky/tensorflow | tensorflow/python/ops/io_ops.py | 3 | 15423 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""## Placeholders
TensorFlow provides a placeholder operation that must be fed with data
on execution. For more info, see the section on [Feeding
data](../../how_tos/reading_data/index.md#feeding).
@@placeholder
@@placeholder_with_default
For feeding `SparseTensor`s which are composite type,
there is a convenience function:
@@sparse_placeholder
## Readers
TensorFlow provides a set of Reader classes for reading data formats.
For more information on inputs and readers, see [Reading
data](../../how_tos/reading_data/index.md).
@@ReaderBase
@@TextLineReader
@@WholeFileReader
@@IdentityReader
@@TFRecordReader
@@FixedLengthRecordReader
## Converting
TensorFlow provides several operations that you can use to convert various data
formats into tensors.
@@decode_csv
@@decode_raw
- - -
### Example protocol buffer
TensorFlow's [recommended format for training
examples](../../how_tos/reading_data/index.md#standard-tensorflow-format)
is serialized `Example` protocol buffers, [described
here](https://www.tensorflow.org/code/tensorflow/core/example/example.proto).
They contain `Features`, [described
here](https://www.tensorflow.org/code/tensorflow/core/example/feature.proto).
@@VarLenFeature
@@FixedLenFeature
@@FixedLenSequenceFeature
@@SparseFeature
@@parse_example
@@parse_single_example
@@parse_tensor
@@decode_json_example
## Queues
TensorFlow provides several implementations of 'Queues', which are
structures within the TensorFlow computation graph to stage pipelines
of tensors together. The following describe the basic Queue interface
and some implementations. To see an example use, see [Threading and
Queues](../../how_tos/threading_and_queues/index.md).
@@QueueBase
@@FIFOQueue
@@PaddingFIFOQueue
@@RandomShuffleQueue
@@PriorityQueue
## Conditional Accumulators
@@ConditionalAccumulatorBase
@@ConditionalAccumulator
@@SparseConditionalAccumulator
## Dealing with the filesystem
@@matching_files
@@read_file
@@write_file
## Input pipeline
TensorFlow functions for setting up an input-prefetching pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
### Beginning of an input pipeline
The "producer" functions add a queue to the graph and a corresponding
`QueueRunner` for running the subgraph that fills that queue.
@@match_filenames_once
@@limit_epochs
@@input_producer
@@range_input_producer
@@slice_input_producer
@@string_input_producer
### Batching at the end of an input pipeline
These functions add a queue to the graph to assemble a batch of
examples, with possible shuffling. They also add a `QueueRunner` for
running the subgraph that fills that queue.
Use [`batch`](#batch) or [`batch_join`](#batch_join) for batching
examples that have already been well shuffled. Use
[`shuffle_batch`](#shuffle_batch) or
[`shuffle_batch_join`](#shuffle_batch_join) for examples that would
benefit from additional shuffling.
Use [`batch`](#batch) or [`shuffle_batch`](#shuffle_batch) if you want a
single thread producing examples to batch, or if you have a
single subgraph producing examples but you want to run it in *N* threads
(where you increase *N* until it can keep the queue full). Use
[`batch_join`](#batch_join) or [`shuffle_batch_join`](#shuffle_batch_join)
if you have *N* different subgraphs producing examples to batch and you
want them run by *N* threads. Use `maybe_*` to enqueue conditionally.
@@batch
@@maybe_batch
@@batch_join
@@maybe_batch_join
@@shuffle_batch
@@maybe_shuffle_batch
@@shuffle_batch_join
@@maybe_shuffle_batch_join
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import gen_io_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_io_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access
def _save(filename, tensor_names, tensors, tensor_slices=None, name="save"):
  """Save the given tensors to `filename` under the given names.

  Without slice info every tensor is written as a full slice:

      Save("/foo/bar", ["w", "b"], [w, b])

  With slice specs each tensor is recorded as part of a larger virtual
  tensor:

      Save("/foo/bar", ["w", "w"], [slice0, slice1],
           tensor_slices=["4 10 0,2:-", "4 10 2,2:-"])

  Args:
    filename: the file name of the sstable.
    tensor_names: a list of strings, one name per tensor.
    tensors: the list of tensors to be saved.
    tensor_slices: Optional list of shape-and-slice specs describing the
      larger virtual tensor each entry belongs to; when omitted, each
      tensor is saved as a full slice.
    name: string. Optional name for the op.

  Requires:
    The length of tensors should match the size of tensor_names and of
    tensor_slices.

  Returns:
    An Operation that saves the tensors.
  """
  if tensor_slices is None:
    return gen_io_ops._save(filename, tensor_names, tensors, name=name)
  return gen_io_ops._save_slices(filename, tensor_names, tensor_slices,
                                 tensors, name=name)
def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type,
                   name="restore_slice", preferred_shard=-1):
  """Restore a tensor slice from checkpoint files matching `file_pattern`.

  Example:

      RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT)

  Args:
    file_pattern: the file pattern used to match a set of checkpoint files.
    tensor_name: the name of the tensor to restore.
    shape_and_slice: the shape-and-slice spec of the slice.
    tensor_type: the type of the tensor to restore.
    name: string. Optional name for the op.
    preferred_shard: Int. Optional shard to open first in the checkpoint file.

  Returns:
    A tensor of type "tensor_type".
  """
  return gen_io_ops._restore_slice(
      file_pattern, tensor_name, shape_and_slice,
      dtypes.as_dtype(tensor_type).base_dtype, preferred_shard, name=name)
class ReaderBase(object):
  """Base class for different Reader types, that produce a record every step.

  Conceptually, Readers convert string 'work units' into records (key,
  value pairs). Typically the 'work units' are filenames and the
  records are extracted from the contents of those files. We want a
  single record produced per step, but a work unit can correspond to
  many records.

  Therefore we introduce some decoupling using a queue. The queue
  contains the work units and the Reader dequeues from the queue when
  it is asked to produce a record (via Read()) but it has finished the
  last work unit.
  """

  def __init__(self, reader_ref, supports_serialize=False):
    """Creates a new ReaderBase.

    Args:
      reader_ref: The operation that implements the reader.
      supports_serialize: True if the reader implementation can
        serialize its state.
    """
    self._reader_ref = reader_ref
    self._supports_serialize = supports_serialize

  @property
  def reader_ref(self):
    """Op that implements the reader."""
    return self._reader_ref

  def read(self, queue, name=None):
    """Returns the next record (key, value pair) produced by a reader.

    Will dequeue a work unit from queue if necessary (e.g. when the
    Reader needs to start reading from a new file since it has
    finished with the previous file).

    Args:
      queue: A Queue or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      name: A name for the operation (optional).

    Returns:
      A tuple of Tensors (key, value).
      key: A string scalar Tensor.
      value: A string scalar Tensor.
    """
    # `queue` may be passed either as a queue object or directly as a
    # tensor handle to one; resolve to the handle in both cases.
    if isinstance(queue, ops.Tensor):
      queue_ref = queue
    else:
      queue_ref = queue.queue_ref
    return gen_io_ops._reader_read(self._reader_ref, queue_ref, name=name)

  def read_up_to(self, queue, num_records,  # pylint: disable=invalid-name
                 name=None):
    """Returns up to num_records (key, value pairs) produced by a reader.

    Will dequeue a work unit from queue if necessary (e.g., when the
    Reader needs to start reading from a new file since it has
    finished with the previous file).
    It may return less than num_records even before the last batch.

    Args:
      queue: A Queue or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      num_records: Number of records to read.
      name: A name for the operation (optional).

    Returns:
      A tuple of Tensors (keys, values).
      keys: A 1-D string Tensor.
      values: A 1-D string Tensor.
    """
    # Same queue-or-handle convention as read().
    if isinstance(queue, ops.Tensor):
      queue_ref = queue
    else:
      queue_ref = queue.queue_ref
    return gen_io_ops._reader_read_up_to(self._reader_ref,
                                         queue_ref,
                                         num_records,
                                         name=name)

  def num_records_produced(self, name=None):
    """Returns the number of records this reader has produced.

    This is the same as the number of Read executions that have
    succeeded.

    Args:
      name: A name for the operation (optional).

    Returns:
      An int64 Tensor.
    """
    return gen_io_ops._reader_num_records_produced(self._reader_ref, name=name)

  def num_work_units_completed(self, name=None):
    """Returns the number of work units this reader has finished processing.

    Args:
      name: A name for the operation (optional).

    Returns:
      An int64 Tensor.
    """
    return gen_io_ops._reader_num_work_units_completed(self._reader_ref,
                                                       name=name)

  def serialize_state(self, name=None):
    """Produce a string tensor that encodes the state of a reader.

    Not all Readers support being serialized, so this can produce an
    Unimplemented error.

    Args:
      name: A name for the operation (optional).

    Returns:
      A string Tensor.
    """
    return gen_io_ops._reader_serialize_state(self._reader_ref, name=name)

  def restore_state(self, state, name=None):
    """Restore a reader to a previously saved state.

    Not all Readers support being restored, so this can produce an
    Unimplemented error.

    Args:
      state: A string Tensor.
        Result of a SerializeState of a Reader with matching type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    return gen_io_ops._reader_restore_state(self._reader_ref, state, name=name)

  @property
  def supports_serialize(self):
    """Whether the Reader implementation can serialize its state."""
    return self._supports_serialize

  def reset(self, name=None):
    """Restore a reader to its initial clean state.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    return gen_io_ops._reader_reset(self._reader_ref, name=name)
ops.NotDifferentiable("ReaderRead")
ops.NotDifferentiable("ReaderReadUpTo")
ops.NotDifferentiable("ReaderNumRecordsProduced")
ops.NotDifferentiable("ReaderNumWorkUnitsCompleted")
ops.NotDifferentiable("ReaderSerializeState")
ops.NotDifferentiable("ReaderRestoreState")
ops.NotDifferentiable("ReaderReset")
class WholeFileReader(ReaderBase):
  """A Reader that emits each whole file's contents as a single value.

  Enqueue filenames in a Queue; each Read produces the filename as the
  key and that file's entire contents as the value.

  See ReaderBase for supported methods.
  """

  def __init__(self, name=None):
    """Create a WholeFileReader.

    Args:
      name: A name for the operation (optional).
    """
    super(WholeFileReader, self).__init__(
        gen_io_ops._whole_file_reader(name=name), supports_serialize=True)
ops.NotDifferentiable("WholeFileReader")
class TextLineReader(ReaderBase):
  """A Reader that emits one newline-delimited line of a file per record.

  The trailing newline is stripped from each value.

  See ReaderBase for supported methods.
  """
  # TODO(josh11b): Support serializing and restoring state.

  def __init__(self, skip_header_lines=None, name=None):
    """Create a TextLineReader.

    Args:
      skip_header_lines: An optional int. Defaults to 0. Number of lines
        to skip from the beginning of every file.
      name: A name for the operation (optional).
    """
    super(TextLineReader, self).__init__(
        gen_io_ops._text_line_reader(skip_header_lines=skip_header_lines,
                                     name=name))
ops.NotDifferentiable("TextLineReader")
class FixedLengthRecordReader(ReaderBase):
  """A Reader that emits fixed-length byte records from a file.

  See ReaderBase for supported methods.
  """
  # TODO(josh11b): Support serializing and restoring state.

  def __init__(self, record_bytes, header_bytes=None, footer_bytes=None,
               name=None):
    """Create a FixedLengthRecordReader.

    Args:
      record_bytes: An int. Size in bytes of each record.
      header_bytes: An optional int. Defaults to 0.
      footer_bytes: An optional int. Defaults to 0.
      name: A name for the operation (optional).
    """
    super(FixedLengthRecordReader, self).__init__(
        gen_io_ops._fixed_length_record_reader(
            record_bytes=record_bytes, header_bytes=header_bytes,
            footer_bytes=footer_bytes, name=name))
ops.NotDifferentiable("FixedLengthRecordReader")
class TFRecordReader(ReaderBase):
  """A Reader that emits the records stored in a TFRecords file.

  See ReaderBase for supported methods.
  """
  # TODO(josh11b): Support serializing and restoring state.

  def __init__(self, name=None, options=None):
    """Create a TFRecordReader.

    Args:
      name: A name for the operation (optional).
      options: A TFRecordOptions object (optional); its compression type
        is forwarded to the underlying reader op.
    """
    compression = python_io.TFRecordOptions.get_compression_type_string(
        options)
    super(TFRecordReader, self).__init__(
        gen_io_ops._tf_record_reader(name=name, compression_type=compression))
ops.NotDifferentiable("TFRecordReader")
class IdentityReader(ReaderBase):
  """A Reader that echoes each queued work string as both key and value.

  Enqueue strings in a Queue; Read takes the front work string and
  outputs (work, work).

  See ReaderBase for supported methods.
  """

  def __init__(self, name=None):
    """Create a IdentityReader.

    Args:
      name: A name for the operation (optional).
    """
    super(IdentityReader, self).__init__(
        gen_io_ops._identity_reader(name=name), supports_serialize=True)
ops.NotDifferentiable("IdentityReader")
| apache-2.0 |
nagyistoce/odoo-dev-odoo | addons/l10n_cl/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
silentfuzzle/calibre | src/calibre/ebooks/tweak.py | 14 | 5485 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, shlex, subprocess, shutil, unicodedata
from calibre import prints, as_unicode, walk
from calibre.constants import iswindows, __appname__
from calibre.ptempfile import TemporaryDirectory, TemporaryFile
from calibre.libunzip import extract as zipextract
from calibre.utils.zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
from calibre.utils.ipc.simple_worker import WorkerError
class Error(ValueError):
    """Raised for user-visible tweak failures (bad or unsupported book files)."""
    pass
def ask_cli_question(msg):
    """Print *msg* and read a single keypress; return True iff it is b'y'.

    Uses msvcrt.getch() on Windows, otherwise switches the tty to raw mode
    so the user does not have to press Enter.
    """
    prints(msg, end=' [y/N]: ')
    sys.stdout.flush()

    if iswindows:
        import msvcrt
        ans = msvcrt.getch()
    else:
        import tty, termios
        # Save the terminal attributes so they can be restored even if the
        # read is interrupted -- leaving the tty in raw mode would break
        # the user's shell.
        old_settings = termios.tcgetattr(sys.stdin.fileno())
        try:
            tty.setraw(sys.stdin.fileno())
            try:
                ans = sys.stdin.read(1)
            except KeyboardInterrupt:
                ans = b''
        finally:
            termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
    print()
    return ans == b'y'
def mobi_exploder(path, tdir, question=lambda x:True):
    """Unpack a MOBI-family book at *path* into *tdir*.

    Wraps calibre's explode(), translating its BadFormat exception into the
    local Error type so callers only have to catch one exception class.
    """
    from calibre.ebooks.mobi.tweak import explode, BadFormat
    try:
        opf_path = explode(path, tdir, question=question)
    except BadFormat as e:
        raise Error(as_unicode(e))
    return opf_path
def zip_exploder(path, tdir, question=lambda x:True):
    """Extract a ZIP-based book (EPUB/HTMLZ) into *tdir* and return the
    path of its OPF file; raise Error when no OPF is present."""
    zipextract(path, tdir)
    opf = next((f for f in walk(tdir) if f.lower().endswith('.opf')), None)
    if opf is None:
        raise Error('Invalid book: Could not find .opf')
    return opf
def zip_rebuilder(tdir, path):
    """Repackage the exploded book in *tdir* into the ZIP container *path*.

    A top-level ``mimetype`` member is written first and stored uncompressed,
    as the EPUB container spec requires; OS junk files are skipped.
    """
    skip_names = {'.DS_Store', 'mimetype', 'iTunesMetadata.plist'}
    with ZipFile(path, 'w', compression=ZIP_DEFLATED) as zf:
        # Write mimetype first, uncompressed
        mimetype_path = os.path.join(tdir, 'mimetype')
        if os.path.exists(mimetype_path):
            zf.write(mimetype_path, 'mimetype', compress_type=ZIP_STORED)
        # Write everything else
        for root, dirs, files in os.walk(tdir):
            for name in files:
                if name in skip_names:
                    continue
                full = os.path.join(root, name)
                arcname = os.path.relpath(full, tdir).replace(os.sep, '/')
                # Normalize to NFC so archive member names are stable
                # across platforms (HFS+ stores NFD).
                zf.write(full, unicodedata.normalize('NFC', arcname))
def docx_exploder(path, tdir, question=lambda x:True):
    """Extract a DOCX at *path* into *tdir*, pretty-print its XML, and
    return the path to document.xml; raise Error when it is missing."""
    zipextract(path, tdir)
    from calibre.ebooks.docx.dump import pretty_all_xml_in_dir
    pretty_all_xml_in_dir(tdir)
    doc = next((f for f in walk(tdir)
                if os.path.basename(f) == 'document.xml'), None)
    if doc is None:
        raise Error('Invalid book: Could not find document.xml')
    return doc
def get_tools(fmt):
    """Return an ``(exploder, rebuilder)`` pair of callables for *fmt*
    (case-insensitive), or ``(None, None)`` for unsupported formats."""
    fmt = fmt.lower()
    if fmt in {'mobi', 'azw', 'azw3'}:
        # Imported lazily: only MOBI handling needs the rebuild machinery.
        from calibre.ebooks.mobi.tweak import rebuild
        return mobi_exploder, rebuild
    if fmt in {'epub', 'htmlz'}:
        return zip_exploder, zip_rebuilder
    if fmt == 'docx':
        return docx_exploder, zip_rebuilder
    return None, None
def tweak(ebook_file):
    '''Command line interface to the Tweak Book tool.

    Explodes *ebook_file* into a temporary directory, lets the user edit it
    (interactively, or via $EDITOR when it is a vim variant), then rebuilds
    the book in place.  Exits with status 1 on any failure.
    '''
    fmt = ebook_file.rpartition('.')[-1].lower()
    exploder, rebuilder = get_tools(fmt)
    if exploder is None:
        # Keep this list in sync with get_tools(): the previous message
        # omitted AZW and DOCX even though both are supported.
        prints('Cannot tweak %s files. Supported formats are: EPUB, HTMLZ, AZW, AZW3, MOBI, DOCX' % fmt.upper()
                , file=sys.stderr)
        raise SystemExit(1)

    with TemporaryDirectory('_tweak_'+
            os.path.basename(ebook_file).rpartition('.')[0]) as tdir:
        try:
            opf = exploder(ebook_file, tdir, question=ask_cli_question)
        except WorkerError as e:
            prints('Failed to unpack', ebook_file)
            prints(e.orig_tb)
            raise SystemExit(1)
        except Error as e:
            prints(as_unicode(e), file=sys.stderr)
            raise SystemExit(1)

        if opf is None:
            # The question was answered with No
            return

        ed = os.environ.get('EDITOR', 'dummy')
        cmd = shlex.split(ed)
        isvim = bool([x for x in cmd[0].split('/') if x.endswith('vim')])

        proceed = False
        prints('Book extracted to', tdir)

        if not isvim:
            prints('Make your tweaks and once you are done,', __appname__,
                    'will rebuild', ebook_file, 'from', tdir)
            print()
            proceed = ask_cli_question('Rebuild ' + ebook_file + '?')
        else:
            # vim can edit a zip archive in place: hand the exploded book to
            # it as a zip, then re-extract whatever comes back.
            base = os.path.basename(ebook_file)
            with TemporaryFile(base+'.zip') as zipf:
                with ZipFile(zipf, 'w') as zf:
                    zf.add_dir(tdir)
                try:
                    subprocess.check_call(cmd + [zipf])
                except Exception:
                    # Narrowed from a bare except so Ctrl-C still aborts
                    # normally instead of being reported as editor failure.
                    prints(ed, 'failed, aborting...')
                    raise SystemExit(1)
                with ZipFile(zipf, 'r') as zf:
                    shutil.rmtree(tdir)
                    os.mkdir(tdir)
                    zf.extractall(path=tdir)
            proceed = True

        if proceed:
            prints('Rebuilding', ebook_file, 'please wait ...')
            try:
                rebuilder(tdir, ebook_file)
            except WorkerError as e:
                prints('Failed to rebuild', ebook_file)
                prints(e.orig_tb)
                raise SystemExit(1)
            prints(ebook_file, 'successfully tweaked')
| gpl-3.0 |
wwright2/dcim3-angstrom1 | sources/bitbake/lib/bb/ui/puccho.py | 15 | 16787 | #
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2008 Intel Corporation
#
# Authored by Rob Bradford <rob@linux.intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gtk
import gobject
import gtk.glade
import threading
import urllib2
import os
import contextlib
from bb.ui.crumbs.buildmanager import BuildManager, BuildConfiguration
from bb.ui.crumbs.buildmanager import BuildManagerTreeView
from bb.ui.crumbs.runningbuild import RunningBuild, RunningBuildTreeView
# The metadata loader is used by the BuildSetupDialog to download the
# available options to populate the dialog
class MetaDataLoader(gobject.GObject):
    """This class provides the mechanism for loading the metadata (the
    fetching and parsing) from a given URL.  The metadata encompasses
    details on what machines are available, the distributions and images
    available for each machine, and the URIs to use for building it."""

    # "success" fires once the metadata has been parsed into a
    # BuildConfiguration; "error" carries a human-readable failure remark.
    __gsignals__ = {
        'success' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     ()),
        'error' : (gobject.SIGNAL_RUN_LAST,
                   gobject.TYPE_NONE,
                   (gobject.TYPE_STRING,))
        }

    # We use these little helper functions to ensure that we take the gdk lock
    # when emitting the signal. These functions are called as idles (so that
    # they happen in the gtk / main thread's main loop.
    def emit_error_signal (self, remark):
        gtk.gdk.threads_enter()
        self.emit ("error", remark)
        gtk.gdk.threads_leave()

    def emit_success_signal (self):
        gtk.gdk.threads_enter()
        self.emit ("success")
        gtk.gdk.threads_leave()

    def __init__ (self):
        gobject.GObject.__init__ (self)

    class LoaderThread(threading.Thread):
        """This class provides an asynchronous loader for the metadata (by
        using threads and signals). This is useful since the metadata may
        be at a remote URL."""

        class LoaderImportException (Exception):
            # Raised internally when a metadata line has too few fields.
            pass

        def __init__(self, loader, url):
            threading.Thread.__init__ (self)
            self.url = url
            self.loader = loader  # owning MetaDataLoader; target of signals

        def run (self):
            result = {}
            try:
                with contextlib.closing (urllib2.urlopen (self.url)) as f:
                    # Parse the metadata format. The format is....
                    # <machine>;<default distro>|<distro>...;<default image>|<image>...;<type##url>|...
                    for line in f:
                        components = line.split(";")
                        if (len (components) < 4):
                            raise MetaDataLoader.LoaderThread.LoaderImportException
                        machine = components[0]
                        distros = components[1].split("|")
                        images = components[2].split("|")
                        urls = components[3].split("|")

                        result[machine] = (distros, images, urls)

                # Create an object representing this *potential*
                # configuration. It can become concrete if the machine, distro
                # and image are all chosen in the UI
                configuration = BuildConfiguration()
                configuration.metadata_url = self.url
                configuration.machine_options = result
                self.loader.configuration = configuration

                # Emit that we've actually got a configuration.  idle_add
                # defers the emission to the gtk main thread.
                gobject.idle_add (MetaDataLoader.emit_success_signal,
                    self.loader)

            except MetaDataLoader.LoaderThread.LoaderImportException as e:
                gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
                    "Repository metadata corrupt")
            except Exception as e:
                gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
                    "Unable to download repository metadata")
                print(e)

    def try_fetch_from_url (self, url):
        # Try and download the metadata on a worker thread; a signal is
        # fired (via the thread) on success or failure.
        thread = MetaDataLoader.LoaderThread(self, url)
        thread.start()
class BuildSetupDialog (gtk.Dialog):
    """Dialog for configuring a build: repository location plus machine,
    distribution and image combos populated from downloaded metadata.
    Responds with RESPONSE_BUILD when the user presses "Build"."""

    RESPONSE_BUILD = 1

    # A little helper method that just sets the states on the widgets based on
    # whether we've got good metadata or not.
    def set_configurable (self, configurable):
        if (self.configurable == configurable):
            return

        self.configurable = configurable
        for widget in self.conf_widgets:
            widget.set_sensitive (configurable)

        # Reset the combo selections when the metadata becomes invalid.
        if not configurable:
            self.machine_combo.set_active (-1)
            self.distribution_combo.set_active (-1)
            self.image_combo.set_active (-1)

    # GTK widget callbacks
    def refresh_button_clicked (self, button):
        # Refresh button clicked: (re)fetch metadata from the entered URL.
        url = self.location_entry.get_chars (0, -1)
        self.loader.try_fetch_from_url(url)

    def repository_entry_editable_changed (self, entry):
        # Enable the refresh button only when the entry is non-empty.
        if (len (entry.get_chars (0, -1)) > 0):
            self.refresh_button.set_sensitive (True)
        else:
            self.refresh_button.set_sensitive (False)
            self.clear_status_message()

        # If we were previously configurable we are no longer since the
        # location entry has been changed
        self.set_configurable (False)

    def machine_combo_changed (self, combobox):
        # When the machine changes, repopulate the distro and image combos
        # from the loaded configuration.
        active_iter = combobox.get_active_iter()

        if not active_iter:
            return

        model = combobox.get_model()

        if model:
            chosen_machine = model.get (active_iter, 0)[0]

            (distros_model, images_model) = \
                self.loader.configuration.get_distro_and_images_models (chosen_machine)

            self.distribution_combo.set_model (distros_model)
            self.image_combo.set_model (images_model)

    # Callbacks from the loader
    def loader_success_cb (self, loader):
        self.status_image.set_from_icon_name ("info",
            gtk.ICON_SIZE_BUTTON)
        self.status_image.show()
        self.status_label.set_label ("Repository metadata successfully downloaded")

        # Set the models on the combo boxes based on the models generated from
        # the configuration that the loader has created

        # We just need to set the machine here, that then determines the
        # distro and image options. Cunning huh? :-)

        self.configuration = self.loader.configuration
        model = self.configuration.get_machines_model ()
        self.machine_combo.set_model (model)

        self.set_configurable (True)

    def loader_error_cb (self, loader, message):
        self.status_image.set_from_icon_name ("error",
            gtk.ICON_SIZE_BUTTON)
        self.status_image.show()
        self.status_label.set_text ("Error downloading repository metadata")
        for widget in self.conf_widgets:
            widget.set_sensitive (False)

    def clear_status_message (self):
        self.status_image.hide()
        self.status_label.set_label (
            """<i>Enter the repository location and press _Refresh</i>""")

    def __init__ (self):
        gtk.Dialog.__init__ (self)

        # Cancel
        self.add_button (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)

        # Build
        button = gtk.Button ("_Build", None, True)
        image = gtk.Image ()
        image.set_from_stock (gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON)
        button.set_image (image)
        self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD)
        button.show_all ()

        # Pull in *just* the table from the Glade XML data.
        gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
            root = "build_table")
        table = gxml.get_widget ("build_table")
        self.vbox.pack_start (table, True, False, 0)

        # Grab all the widgets that we need to turn on/off when we refresh...
        self.conf_widgets = []
        self.conf_widgets += [gxml.get_widget ("machine_label")]
        self.conf_widgets += [gxml.get_widget ("distribution_label")]
        self.conf_widgets += [gxml.get_widget ("image_label")]
        self.conf_widgets += [gxml.get_widget ("machine_combo")]
        self.conf_widgets += [gxml.get_widget ("distribution_combo")]
        self.conf_widgets += [gxml.get_widget ("image_combo")]

        # Grab the status widgets
        self.status_image = gxml.get_widget ("status_image")
        self.status_label = gxml.get_widget ("status_label")

        # Grab the refresh button and connect to the clicked signal
        self.refresh_button = gxml.get_widget ("refresh_button")
        self.refresh_button.connect ("clicked", self.refresh_button_clicked)

        # Grab the location entry and connect to editable::changed
        self.location_entry = gxml.get_widget ("location_entry")
        self.location_entry.connect ("changed",
            self.repository_entry_editable_changed)

        # Grab the machine combo and hook onto the changed signal. This then
        # allows us to populate the distro and image combos
        self.machine_combo = gxml.get_widget ("machine_combo")
        self.machine_combo.connect ("changed", self.machine_combo_changed)

        # Setup the combo
        cell = gtk.CellRendererText()
        self.machine_combo.pack_start(cell, True)
        self.machine_combo.add_attribute(cell, 'text', 0)

        # Grab the distro and image combos. We need these to populate with
        # models once the machine is chosen
        self.distribution_combo = gxml.get_widget ("distribution_combo")
        cell = gtk.CellRendererText()
        self.distribution_combo.pack_start(cell, True)
        self.distribution_combo.add_attribute(cell, 'text', 0)

        self.image_combo = gxml.get_widget ("image_combo")
        cell = gtk.CellRendererText()
        self.image_combo.pack_start(cell, True)
        self.image_combo.add_attribute(cell, 'text', 0)

        # Put the default descriptive text in the status box
        self.clear_status_message()

        # Mark as non-configurable, this is just greys out the widgets the
        # user can't yet use
        self.configurable = False
        self.set_configurable(False)

        # Show the table
        table.show_all ()

        # The loader and some signals connected to it to update the status
        # area
        self.loader = MetaDataLoader()
        self.loader.connect ("success", self.loader_success_cb)
        self.loader.connect ("error", self.loader_error_cb)

    def update_configuration (self):
        """ A poorly named function but it updates the internal configuration
        from the widgets. This can make that configuration concrete and can
        thus be used for building """
        # Extract the chosen machine from the combo
        model = self.machine_combo.get_model()
        active_iter = self.machine_combo.get_active_iter()
        if (active_iter):
            self.configuration.machine = model.get(active_iter, 0)[0]

        # Extract the chosen distro from the combo
        model = self.distribution_combo.get_model()
        active_iter = self.distribution_combo.get_active_iter()
        if (active_iter):
            self.configuration.distro = model.get(active_iter, 0)[0]

        # Extract the chosen image from the combo
        model = self.image_combo.get_model()
        active_iter = self.image_combo.get_active_iter()
        if (active_iter):
            self.configuration.image = model.get(active_iter, 0)[0]
# This function operates to pull events out from the event queue and then push
# them into the RunningBuild (which then drives the RunningBuild which then
# pushes through and updates the progress tree view.)
#
# TODO: Should be a method on the RunningBuild class
def event_handle_timeout (eventHandler, build):
    """Drain all pending events from *eventHandler* into *build*.

    Installed as a GLib timeout; returning True keeps the timeout alive so
    the queue is polled again on the next tick.
    """
    while True:
        event = eventHandler.getEvent()
        if not event:
            break
        build.handle_event (event)
    return True
class MainWindow (gtk.Window):
    """Top-level Puccho window: a build-manager list, a running-build
    progress view, and a toolbar Build button that opens BuildSetupDialog.

    NOTE(review): ``self.manager`` is assigned externally in main() --
    the dialog response callback relies on it being set.
    """

    # Callback that gets fired when the user hits a button in the
    # BuildSetupDialog.
    def build_dialog_box_response_cb (self, dialog, response_id):
        conf = None
        if (response_id == BuildSetupDialog.RESPONSE_BUILD):
            dialog.update_configuration()
            print(dialog.configuration.machine, dialog.configuration.distro, \
                dialog.configuration.image)
            conf = dialog.configuration
        dialog.destroy()
        # Kick off the build only after the dialog has been torn down.
        if conf:
            self.manager.do_build (conf)

    def build_button_clicked_cb (self, button):
        dialog = BuildSetupDialog ()

        # For some unknown reason Dialog.run causes nice little deadlocks ... :-(
        dialog.connect ("response", self.build_dialog_box_response_cb)
        dialog.show()

    def __init__ (self):
        gtk.Window.__init__ (self)

        # Pull in *just* the main vbox from the Glade XML data and then pack
        # that inside the window
        gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
            root = "main_window_vbox")
        vbox = gxml.get_widget ("main_window_vbox")
        self.add (vbox)

        # Create the tree views for the build manager view and the progress view
        self.build_manager_view = BuildManagerTreeView()
        self.running_build_view = RunningBuildTreeView()

        # Grab the scrolled windows that we put the tree views into
        self.results_scrolledwindow = gxml.get_widget ("results_scrolledwindow")
        self.progress_scrolledwindow = gxml.get_widget ("progress_scrolledwindow")

        # Put the tree views inside ...
        self.results_scrolledwindow.add (self.build_manager_view)
        self.progress_scrolledwindow.add (self.running_build_view)

        # Hook up the build button...
        self.build_button = gxml.get_widget ("main_toolbutton_build")
        self.build_button.connect ("clicked", self.build_button_clicked_cb)
# I'm not very happy about the current ownership of the RunningBuild. I have
# my suspicions that this object should be held by the BuildManager since we
# care about the signals in the manager
def running_build_succeeded_cb (running_build, manager):
    """Signal handler for RunningBuild "build-succeeded".

    Hands the success over to the BuildManager so the row representing the
    in-progress build can be turned into a completed-build row -- only one
    build runs at a time, so the manager knows which row that is.

    FIXME: Refactor so the RunningBuild is owned by the BuildManager, which
    could then hook onto these signals directly.
    """
    manager.notify_build_succeeded ()
    print("build succeeded")
def running_build_failed_cb (running_build, manager):
    """Signal handler for RunningBuild "build-failed"; the failure
    counterpart of running_build_succeeded_cb."""
    print("build failed")
    manager.notify_build_failed ()
def main (server, eventHandler):
    """Entry point for the Puccho GTK UI.

    Wires the main window, build manager and running-build model together,
    installs a timeout that pumps *eventHandler* into the running build,
    then enters the gtk main loop (blocks until the UI exits).
    """
    # Initialise threading...
    gobject.threads_init()
    gtk.gdk.threads_init()

    main_window = MainWindow ()
    main_window.show_all ()

    # Set up the build manager stuff in general
    builds_dir = os.path.join (os.getcwd(),  "results")
    manager = BuildManager (server, builds_dir)
    main_window.build_manager_view.set_model (manager.model)

    # Do the running build setup
    running_build = RunningBuild ()
    main_window.running_build_view.set_model (running_build.model)
    running_build.connect ("build-succeeded", running_build_succeeded_cb,
        manager)
    running_build.connect ("build-failed", running_build_failed_cb, manager)

    # We need to save the manager into the MainWindow so that the toolbar
    # button can use it.
    # FIXME: Refactor ?
    main_window.manager = manager

    # Use a timeout function for probing the event queue to find out if we
    # have a message waiting for us.
    gobject.timeout_add (200,
        event_handle_timeout,
        eventHandler,
        running_build)

    gtk.main()
| mit |
sjlangley/csgc-ndb | api.py | 1 | 22340 | """
"""
from datetime import *
import json
import logging
import math
import urllib
import webapp2
from models import *
from operator import itemgetter
# Names of the per-match prize/result fields.  These strings are used both
# as HTML form field names and as attribute names on the Match entity.
RESULT_NAMES = [
    'winner', 'runner_up', 'third_place', 'fourth_place', 'closest_pin_4th',
    'drive_chip_5th', 'drive_chip_6th', 'closest_pin_9th', 'closest_pin_10th',
    'closest_pin_16th', 'closest_pin_17th', 'longest_drive_0_18',
    'longest_drive_19plus', 'longest_drive_60over',
]
# Club Management
class AddClub(webapp2.RequestHandler):
    """Add a new club, with its courses and tees, to the datastore."""

    def post(self):
        """Everything comes in via the post: parse, validate, store, redirect."""
        club_data = self._parse_form()
        if not club_data['name']:
            # Fixed typo in the user-visible message ("Speficied").
            self._fail("Club Name Not Specified")
        elif not club_data['courses']:
            self._fail("No courses specified")
        else:
            club = self._store_club(club_data)
            self.redirect('/list-club-general?club=%s&added' % club.key.urlsafe())

    def _parse_form(self):
        """Return ``{'name': ..., 'courses': [...]}`` from the posted form.

        The form carries up to two courses with up to four tees each; a tee
        row is kept only when both its name and par fields are filled in.
        """
        courses = []
        for i in xrange(2):
            course_name = self.request.get('courseNameInput_' + str(i))
            tees = []
            for j in xrange(4):
                suffix = '_%d_%d' % (i, j)
                tee_name = self.request.get('teeNameInput' + suffix)
                par = self.request.get('teeParInput' + suffix)
                if not (tee_name and par):
                    continue
                tees.append({
                    'name': tee_name,
                    'amcr': self.request.get('teeAmcrInput' + suffix),
                    'slope': self.request.get('teeSlopeInput' + suffix),
                    'distance': int(self.request.get('teeDistanceInput' + suffix)),
                    'par': int(par),
                })
            if tees:
                courses.append({
                    'name': course_name,
                    'tees': tees,
                })
        return {
            'name': self.request.get('clubNameInput'),
            'courses': courses,
        }

    def _store_club(self, club_data):
        """Create and persist the Club/Course/Tee entities; return the Club."""
        club = Club(name=club_data['name'])
        club.put()
        for course_data in club_data['courses']:
            course = Course(name=course_data['name'], club=club.key)
            course.put()
            for tee_data in course_data['tees']:
                tee = Tee(name=tee_data['name'],
                          course=course.key,
                          par=tee_data['par'])
                # Optional numeric fields: only set when the form supplied
                # a truthy value (int('') would raise).
                if tee_data['amcr']:
                    tee.amcr = int(tee_data['amcr'])
                if tee_data['slope']:
                    tee.slope = int(tee_data['slope'])
                if tee_data['distance']:
                    tee.distance = int(tee_data['distance'])
                tee.put()
        return club

    def _fail(self, message):
        """Redirect to the failure page carrying *message*."""
        query = urllib.urlencode({
            'heading': "Could Not Add New Club",
            'message': message,
        })
        self.redirect('/failure?' + query)
class ListClub(webapp2.RequestHandler):
    """Return club details (with nested courses and tees) as JSON.

    With a ``club_key`` query parameter a single club is returned;
    otherwise every club is listed, ordered by name.
    """

    def get(self):
        requested_key = self.request.get('club_key', None)
        if requested_key:
            self._get_club_by_key(ndb.Key(urlsafe=requested_key))
        else:
            self._get_all_clubs()

    def _get_club_by_key(self, club_key):
        """Write the JSON description of the club stored under *club_key*."""
        club = club_key.get_async().get_result()
        payload = [{
            'name': club.name,
            'key': club.key.urlsafe(),
            'courses': self._get_courses_for_club(club.key),
        }]
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(payload))

    def _get_all_clubs(self):
        """Write the JSON description of every club, ordered by name."""
        payload = [{
            'name': club.name,
            'key': club.key.urlsafe(),
            'courses': self._get_courses_for_club(club.key),
        } for club in Club.query().order(Club.name)]
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(payload))

    def _get_courses_for_club(self, club_key):
        """Return a list of course dicts belonging to *club_key*."""
        return [{
            'name': course.name,
            'tees': self._get_tees_for_course(course.key),
        } for course in Course.query(Course.club == club_key)]

    def _get_tees_for_course(self, course_key):
        """Return the list of tee dicts for *course_key*."""
        return [{
            'name': tee.name,
            'amcr': tee.amcr,
            'slope': tee.slope,
            'distance': tee.distance,
            'par': tee.par,
            'key': tee.key.urlsafe(),
        } for tee in Tee.query(Tee.course == course_key)]
# Member Management
class AddMember(webapp2.RequestHandler):
    """Create Member entities from the add-member form; respond with keys."""

    def post(self):
        """Add members to the database.

        The form carries up to ``form_size`` member rows; a row is stored
        only when both first and last name are present.
        """
        form_size = int(self.request.get('form_size', default_value=10))
        members = []
        for idx in xrange(form_size):
            suffix = str(idx)
            first_name = self.request.get('first_name' + suffix)
            last_name = self.request.get('last_name' + suffix)
            if not (first_name and last_name):
                continue
            member = Member(
                first_name=first_name,
                last_name=last_name,
                email=self.request.get('email' + suffix, default_value=None),
                phone_number1=self.request.get('phone_number1' + suffix,
                                               default_value=None),
                phone_number2=self.request.get('phone_number2' + suffix,
                                               default_value=None),
                member_no=self.request.get('member_no' + suffix, default_value=0),
            )
            handicap = self.request.get('hc_' + suffix, default_value=None)
            if handicap:
                member.initial_handicap = float(handicap)
            members.append(member)

        member_keys = ndb.put_multi(members)
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps({
            'member_keys': [key.urlsafe() for key in member_keys],
        }))
class ListMembers(webapp2.RequestHandler):
    """Return every member as JSON, with per-member statistics.

    Query parameters:
      with_scores: when present, embed each member's full score list.
      show_inactive: "True"/"1"/"yes" accepted.
        NOTE(review): parsed below but never used in the query -- confirm
        whether inactive filtering was ever implemented.
    """

    def get(self):
        with_scores = self.request.get('with_scores', default_value=None)
        show_inactive_str = self.request.get('show_inactive', default_value="True")
        show_inactive = show_inactive_str in ['True', '1', 'yes']

        # Tasklet so query.map() can run the per-member sub-lookups
        # concurrently via NDB's event loop.
        @ndb.tasklet
        def callback(member):
            member_data = {
                'key': member.key.urlsafe(),
                'first_name': member.first_name,
                'last_name' : member.last_name,
                'email': member.email,
                'member_no': member.member_no,
                'phone_numbers': [
                    member.phone_number1,
                    member.phone_number2,
                ],
                'last_match': _get_last_match_for_member(member.key),
                'match_wins': _get_total_wins_for_member(member.key),
                'handicap': _get_handicap_for_member(member.key),
            }
            # Scores are expensive, so only include them on request.
            if with_scores:
                member_data['scores'] = _get_scores_for_member(member.key)
            raise ndb.Return(member_data)

        query = Member.query()
        result = query.map(callback)
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(result))
class ListMembersWithoutStatstics(webapp2.RequestHandler):
    """List members without per-member statistics -- faster when only the
    basic contact details are needed.  (Class name kept as-is: it is the
    handler's public identifier.)"""

    def get(self):
        @ndb.tasklet
        def to_dict(member):
            raise ndb.Return({
                'key': member.key.urlsafe(),
                'first_name': member.first_name,
                'last_name' : member.last_name,
                'email': member.email,
                'member_no': member.member_no,
                'phone_numbers': [
                    member.phone_number1,
                    member.phone_number2,
                ],
            })

        payload = Member.query().map(to_dict)
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(payload))
class GetMember(webapp2.RequestHandler):
    """Return one member's details as JSON (``member_key`` parameter)."""

    def get(self):
        key = ndb.Key(urlsafe=self.request.get('member_key', default_value=None))
        member = key.get_async().get_result()
        fields = ('first_name', 'last_name', 'nick_name', 'email', 'member_no',
                  'phone_number1', 'phone_number2', 'initial_handicap')
        payload = dict((name, getattr(member, name)) for name in fields)
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(payload))
# Score Management
class GetScores(webapp2.RequestHandler):
    """Retrieves all of the scores for a single member, plus current
    handicap and total win count, as JSON."""

    def get(self):
        key = ndb.Key(urlsafe=self.request.get('member_key', default_value=None))
        member = key.get_async().get_result()
        payload = {
            'scores': _get_scores_for_member(member.key),
            'handicap': _get_handicap_for_member(member.key),
            'wins': _get_total_wins_for_member(member.key),
        }
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(payload))
class DeleteScore(webapp2.RequestHandler):
    """Placeholder for deleting a single score.

    NOTE(review): unimplemented stub -- GET currently returns an empty 200.
    """
    def get(self):
        pass
class UpdateScoreDates(webapp2.RequestHandler):
    """Bulk-update the date on a set of scores.

    NOTE(review): the datastore writes below are commented out, so this
    handler currently only parses the form and logs the score keys --
    confirm whether the update should be re-enabled before relying on it.
    """
    def post(self):
        date = self.request.get('date')
        date_object = datetime.strptime(date, '%Y-%m-%d')
        match_date = date_object.date()
        score_key_values = self.request.get_all('score_keys')
        logging.debug('Score keys: %s', ','.join(score_key_values))
        # score_keys = [ndb.Key(urlsafe=key) for key in score_key_values]
        # scores = ndb.get_multi(score_keys)
        # for score in scores:
        #     score.date = match_date
        # ndb.put_multi(scores)
# Match Management
class AddMatch(webapp2.RequestHandler):
    """Create a Match (and its Score entities) from the submitted form."""

    def post(self):
        """Parse the form, persist scores and match, then redirect."""
        match_result = self._extract_match_results()
        tee = ndb.Key(urlsafe=match_result['tee_key'])
        match_date = datetime.strptime(match_result['date'], '%Y-%m-%d').date()

        scores = [
            Score(member=ndb.Key(urlsafe=score['player_key']),
                  tee=tee,
                  date=match_date,
                  scratch=score['score'],
                  handicap=score['hc'],
                  nett=score['nett'],
                  points=score['pts'])
            for score in match_result['scores']
        ]
        score_keys = ndb.put_multi(scores)

        match = Match(date=match_date, tee=tee, scores=score_keys)
        # The prize attributes on Match share their names with the form
        # fields (see RESULT_NAMES), so assign them in one data-driven pass
        # instead of fifteen copy-pasted if-statements.
        for name in RESULT_NAMES:
            winner_key = match_result.get(name)
            if winner_key and winner_key != 'none':
                setattr(match, name, ndb.Key(urlsafe=winner_key))
        match.put()

        self.redirect('/show-match-result?match_key=%s' % match.key.urlsafe())

    def _extract_match_results(self):
        """Return a dict of the posted match form: tee, date, prize fields
        (one per RESULT_NAMES entry) and up to 30 player score rows."""
        result = {
            'tee_key': self.request.get('teeRadio'),
            'date': self.request.get('game_date'),
        }
        for name in RESULT_NAMES:
            result[name] = self.request.get(name)

        scores = []
        for i in xrange(30):
            player_key = self.request.get('score_' + str(i), default_value='none')
            if player_key == 'none':
                # Unused form row.
                continue
            scores.append({
                'player_key': player_key,
                'score': int(self.request.get('score_value_' + str(i))),
                'hc': int(self.request.get('hc_value_' + str(i))),
                'nett': int(self.request.get('nett_value_' + str(i))),
                'pts': int(self.request.get('pts_value_' + str(i))),
            })
        result['scores'] = scores
        return result
class GetMatch(webapp2.RequestHandler):
    """Return match data as JSON: one detailed match (``match_key``) or a
    summary list of all matches."""

    def get(self):
        match_key = self.request.get('match_key')
        if match_key:
            result = self._return_detailed_match(match_key)
        else:
            result = self._return_match_summaries()
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(result))

    def _return_detailed_match(self, match_key):
        """Return the full score sheet and prize list for one match."""
        key = ndb.Key(urlsafe=match_key)
        match = key.get()
        tee = _get_tee_by_key(match.tee)
        scores = ndb.get_multi(match.scores)
        result = {
            'date': match.date.strftime('%Y-%m-%d'),
            'score_count': len(match.scores),
            'tee': {
                'name': tee['name'],
                'par': tee['par'],
            },
        }
        result['scores'] = [{
            'member': _get_member_by_key(score.member),
            'scratch': score.scratch,
            'nett': score.nett,
            'points': score.points,
            'handicap': score.handicap,
            'date': score.date.strftime('%Y-%m-%d'),
            'key': score.key.urlsafe(),
        } for score in scores]
        # Match prize attributes share their names with RESULT_NAMES.
        result['prizes'] = [{
            'name': name,
            'winner': _get_member_by_key(getattr(match, name)),
        } for name in RESULT_NAMES]
        return result

    def _return_match_summaries(self):
        """Return one summary dict per match, most recent first."""
        @ndb.tasklet
        def callback(match):
            tee = _get_tee_by_key(match.tee)
            # Fetch the winner once and reuse it; the previous version
            # fetched it, discarded the result, then fetched it again.
            winner = _get_member_by_key(match.winner)
            runner_up = _get_member_by_key(match.runner_up)
            raise ndb.Return({
                'match_key': match.key.urlsafe(),
                'date': match.date.strftime('%Y-%m-%d'),
                'tee' : {
                    'name': tee['name'],
                    'slope': tee['slope'],
                    'amcr': tee['amcr'],
                    'par': tee['par'],
                },
                'winner': winner,
                'runner_up': runner_up,
            })

        matches = Match.query().order(-Match.date)
        return matches.map(callback)
class DeleteMatch(webapp2.RequestHandler):
    """JSON endpoint: delete a match together with all of its scores."""

    def get(self):
        """Handle /api/delete-match?match_key=<urlsafe key>.

        Responds with {'error': False} on success (or when no key was
        supplied at all) and {'error': True} when the delete failed.
        """
        match_key = self.request.get('match_key')
        result = {'error': False}
        if match_key:
            try:
                self._do_match_delete(match_key)
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt and hid the cause entirely. Catch only
                # Exception and log the traceback so failures can be
                # diagnosed, while keeping the same JSON error response.
                logging.exception('Failed to delete match %s', match_key)
                result = {'error': True}
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(result))

    def _do_match_delete(self, match_key):
        """Collect the match key plus all score keys and delete them."""
        key = ndb.Key(urlsafe=match_key)
        match = key.get()
        scores = ndb.get_multi(match.scores)
        keys = [s.key for s in scores]
        keys.append(key)
        self._tx_match_delete(keys)

    @ndb.transactional(xg=True)
    def _tx_match_delete(self, keys_list):
        # Cross-group transaction: the match and its scores may live in
        # different entity groups.
        ndb.delete_multi(keys_list)
# Misc Functions
def _get_scores_for_member(member_key):
    """Return every score for a member, oldest first, as plain dicts."""
    @ndb.tasklet
    def callback(score):
        # Resolving the tee is itself a datastore lookup; running inside a
        # tasklet lets ndb batch these across scores.
        raise ndb.Return({
            'date': score.date.strftime('%Y-%m-%d'),
            'handicap': score.handicap,
            'scratch': score.scratch,
            'nett': score.nett,
            'tee': _get_tee_by_key(score.tee),
        })
    score_query = Score.query(Score.member == member_key).order(Score.date)
    return score_query.map(callback)
@ndb.synctasklet
def _get_tee_by_key(tee_key):
    """Fetch a tee and flatten its course/club ancestry into one dict."""
    # Three chained async gets: tee -> course -> club.
    tee = yield tee_key.get_async()
    course = yield tee.course.get_async()
    club = yield course.club.get_async()
    raise ndb.Return({
        'name': tee.name,
        'course': {
            'name': course.name,
            'club': {
                'name': club.name,
            },
        },
        'slope': tee.slope,
        'amcr': tee.amcr,
        'par': tee.par,
    })
@ndb.synctasklet
def _get_member_by_key(member_key):
    """Resolve a member key to {'first_name', 'last_name'}.

    Tolerates both a None/empty key and a dangling reference: either case
    logs a debug message and yields empty-string names instead of raising.
    """
    member = None
    if member_key:
        member = yield member_key.get_async()
    if not member:
        logging.debug('Member was missing from database.')
        if member_key:
            logging.debug('Missing member key is %s.', member_key.urlsafe())
    raise ndb.Return({
        'first_name': member.first_name if member else '',
        'last_name': member.last_name if member else '',
    })
@ndb.synctasklet
def _get_last_match_for_member(member_key):
    """Return the member's most recent score, as a list of at most one dict."""
    @ndb.tasklet
    def callback(score):
        raise ndb.Return({
            'date': score.date.strftime('%Y-%m-%d'),
            'handicap': score.handicap,
            'scratch': score.scratch,
            'nett': score.nett,
            'points': score.points,
        })
    # Newest first; limit=1 keeps only the latest score.
    score = Score.query(Score.member == member_key).order(-Score.date)
    r = yield score.map_async(callback, limit=1)
    raise ndb.Return(r)
@ndb.synctasklet
def _get_total_wins_for_member(member_key):
    """How many times a member is marked as a 'winner'.

    Returns the count of Match entities whose winner is this member.
    """
    count = yield Match.query(Match.winner == member_key).count_async()
    raise ndb.Return(count)
def _get_handicap_for_member(member_key):
    """Get the handicap for a member.

    Returns a dict with the computed handicap, the member's initial
    handicap, every score annotated with its differential (and whether it
    was used), the average of the best differentials, and how many scores
    that average was taken over.
    """
    @ndb.tasklet
    def callback(score):
        # Fetch the tee (for par/slope/AMCR) and count whether this round
        # coincided with a match win, concurrently via ndb tasklets.
        tee = yield score.tee.get_async()
        win = yield Match.query(ndb.AND(Match.winner == member_key,
                                        Match.date == score.date)).count_async()
        raise ndb.Return({
            'date': score.date.strftime('%Y-%m-%d'),
            'scratch': score.scratch,
            'nett': score.nett,
            'points': score.points,
            'par': tee.par,
            'slope': tee.slope,
            'amcr': tee.amcr,
            'win': win,
            'used_for_handicap': False,
        })
    # Kick off both fetches before blocking on either result.
    member_future = member_key.get_async()
    scores = Score.query(Score.member == member_key)
    adj_scores_future = scores.map_async(callback)
    member = member_future.get_result()
    adj_scores = adj_scores_future.get_result()
    handicap = member.initial_handicap
    average = 0.0
    if adj_scores:
        # Of the 20 most recent rounds, average the best `count`
        # differentials (count depends on how many rounds exist).
        adj_scores = sorted(_calculate_differetntial(adj_scores),
                            key=itemgetter('date'),
                            reverse=True)
        count = _get_scores_for_handicap(len(adj_scores))
        first_twenty = adj_scores[:20]
        the_rest = adj_scores[20:]
        first_twenty = sorted(first_twenty, key=itemgetter('differential'))
        total = 0.0
        for i in xrange(count):
            total += first_twenty[i]['differential']
            first_twenty[i]['used_for_handicap'] = True
        # Truncate (not round) the average to one decimal place.
        average = math.floor((total / count) * 10) / 10
        # Multiplier needs to be applied to daily handicap, not GA handicap.
        handicap = round(average * 0.93, 1)
        # Re-assemble and restore chronological order for the response.
        adj_scores = first_twenty + the_rest
        adj_scores = sorted(adj_scores, key=itemgetter('date'))
    return {
        'handicap': handicap,
        'initial_handicap': member.initial_handicap,
        'scores': adj_scores,
        'average': average,
        'total_scores_used': _get_scores_for_handicap(len(adj_scores)),
    }
def _calculate_differetntial(scores):
"""Calculate the differential for a list of scores."""
result = []
for score in scores:
if not score['slope']:
score['slope'] = 113
# ESC adjustment is the maximum score that's possible for the round, either
# - 2x18 = 36 if double bogey is the max.
# - 3x18 = 54 if triple bogey is he max.
# In 2020 Australian golf introduced max mens handicap of 54, so we will
# use triple bogey as the max.
# ESC is how many shots above or below par the player was for the round.
esc_adjustment = min(54, score['scratch'] - score['amcr'])
score['esc_adjustment'] = esc_adjustment
# The differential takes into account how hard the course was. Here we
# need to clip to the max handicap, which is now 54.0 since 2020.
differential = min((esc_adjustment * 113) / score['slope'], 54.0)
score['differential'] = round(differential, 2)
result.append(score)
return result
def _get_scores_for_handicap(score_count):
""" From http://www.golf.org.au/howtocalculateahandicap """
if score_count < 7:
return 1
if score_count < 9:
return 2
if score_count < 11:
return 3
if score_count < 13:
return 4
if score_count < 15:
return 5
if score_count < 17:
return 6
if score_count < 19:
return 7
return 8
# Route table: one RequestHandler class per JSON API endpoint.
# NOTE(review): 'ListMembersWithoutStatstics' preserves the misspelled class
# name defined elsewhere in this file -- a rename must change both places.
app = webapp2.WSGIApplication([
    ('/api/add-club', AddClub),
    ('/api/list-clubs', ListClub),
    ('/api/add-members', AddMember),
    ('/api/list-members', ListMembers),
    ('/api/list-members-without-statistics', ListMembersWithoutStatstics),
    ('/api/get-member', GetMember),
    ('/api/get-scores', GetScores),
    ('/api/delete-score', DeleteScore),
    ('/api/update-score-dates', UpdateScoreDates),
    ('/api/add-match', AddMatch),
    ('/api/get-match', GetMatch),
    ('/api/delete-match', DeleteMatch),
], debug=True)
| apache-2.0 |
ngosang/adarkroom | tools/po2js.py | 36 | 1480 | #!/usr/bin/python
"""convert .po to .js file."""
import json
import optparse
import os
import polib
import re
import sys
parser = optparse.OptionParser(usage="usage: %prog [options] pofile...")
parser.add_option("--callback", default="_.setTranslation", dest="callback",
                  help="callback function to call with data")
parser.add_option("--quiet", action="store_false", default=True,
                  dest="verbose", help="don't print status messages to stdout")

(options, args) = parser.parse_args()
# optparse always returns a list (never None) for positional args, so a
# simple truthiness test covers the "no input files" case.
if not args:
    print("ERROR: you must specify at least one po file to translate")
    sys.exit(1)

for srcfile in args:
    destfile = os.path.splitext(srcfile)[0] + ".js"
    if options.verbose:
        print("INFO: converting %s to %s" % (srcfile, destfile))

    # Collect only real translations: skip obsolete entries, empty
    # translations, and identity translations.
    xlate_map = {}
    po = polib.pofile(srcfile, autodetect_encoding=False,
                      encoding="utf-8", wrapwidth=-1)
    for entry in po:
        if entry.obsolete or entry.msgstr == '' or entry.msgstr == entry.msgid:
            continue
        xlate_map[entry.msgid] = entry.msgstr

    # Emit "<callback>({...});". Values that are JS function literals must
    # appear unquoted, so stream the JSON and strip the quotes from any
    # string chunk that starts with "function(".
    # `with` guarantees the file is closed even if encoding fails partway
    # (the original leaked the handle on any exception).
    with open(destfile, "w") as dest:
        dest.write(options.callback)
        dest.write("(")
        encoder = json.JSONEncoder()
        for part in encoder.iterencode(xlate_map):
            if part.startswith('"function('):
                dest.write(part[1:-1])
            else:
                dest.write(part)
        dest.write(");\n")
| mpl-2.0 |
gauribhoite/personfinder | app/unidecode/x068.py | 252 | 4674 | data = (
'Zhi ', # 0x00
'Liu ', # 0x01
'Mei ', # 0x02
'Hoy ', # 0x03
'Rong ', # 0x04
'Zha ', # 0x05
'[?] ', # 0x06
'Biao ', # 0x07
'Zhan ', # 0x08
'Jie ', # 0x09
'Long ', # 0x0a
'Dong ', # 0x0b
'Lu ', # 0x0c
'Sayng ', # 0x0d
'Li ', # 0x0e
'Lan ', # 0x0f
'Yong ', # 0x10
'Shu ', # 0x11
'Xun ', # 0x12
'Shuan ', # 0x13
'Qi ', # 0x14
'Zhen ', # 0x15
'Qi ', # 0x16
'Li ', # 0x17
'Yi ', # 0x18
'Xiang ', # 0x19
'Zhen ', # 0x1a
'Li ', # 0x1b
'Su ', # 0x1c
'Gua ', # 0x1d
'Kan ', # 0x1e
'Bing ', # 0x1f
'Ren ', # 0x20
'Xiao ', # 0x21
'Bo ', # 0x22
'Ren ', # 0x23
'Bing ', # 0x24
'Zi ', # 0x25
'Chou ', # 0x26
'Yi ', # 0x27
'Jie ', # 0x28
'Xu ', # 0x29
'Zhu ', # 0x2a
'Jian ', # 0x2b
'Zui ', # 0x2c
'Er ', # 0x2d
'Er ', # 0x2e
'You ', # 0x2f
'Fa ', # 0x30
'Gong ', # 0x31
'Kao ', # 0x32
'Lao ', # 0x33
'Zhan ', # 0x34
'Li ', # 0x35
'Yin ', # 0x36
'Yang ', # 0x37
'He ', # 0x38
'Gen ', # 0x39
'Zhi ', # 0x3a
'Chi ', # 0x3b
'Ge ', # 0x3c
'Zai ', # 0x3d
'Luan ', # 0x3e
'Fu ', # 0x3f
'Jie ', # 0x40
'Hang ', # 0x41
'Gui ', # 0x42
'Tao ', # 0x43
'Guang ', # 0x44
'Wei ', # 0x45
'Kuang ', # 0x46
'Ru ', # 0x47
'An ', # 0x48
'An ', # 0x49
'Juan ', # 0x4a
'Yi ', # 0x4b
'Zhuo ', # 0x4c
'Ku ', # 0x4d
'Zhi ', # 0x4e
'Qiong ', # 0x4f
'Tong ', # 0x50
'Sang ', # 0x51
'Sang ', # 0x52
'Huan ', # 0x53
'Jie ', # 0x54
'Jiu ', # 0x55
'Xue ', # 0x56
'Duo ', # 0x57
'Zhui ', # 0x58
'Yu ', # 0x59
'Zan ', # 0x5a
'Kasei ', # 0x5b
'Ying ', # 0x5c
'Masu ', # 0x5d
'[?] ', # 0x5e
'Zhan ', # 0x5f
'Ya ', # 0x60
'Nao ', # 0x61
'Zhen ', # 0x62
'Dang ', # 0x63
'Qi ', # 0x64
'Qiao ', # 0x65
'Hua ', # 0x66
'Kuai ', # 0x67
'Jiang ', # 0x68
'Zhuang ', # 0x69
'Xun ', # 0x6a
'Suo ', # 0x6b
'Sha ', # 0x6c
'Zhen ', # 0x6d
'Bei ', # 0x6e
'Ting ', # 0x6f
'Gua ', # 0x70
'Jing ', # 0x71
'Bo ', # 0x72
'Ben ', # 0x73
'Fu ', # 0x74
'Rui ', # 0x75
'Tong ', # 0x76
'Jue ', # 0x77
'Xi ', # 0x78
'Lang ', # 0x79
'Liu ', # 0x7a
'Feng ', # 0x7b
'Qi ', # 0x7c
'Wen ', # 0x7d
'Jun ', # 0x7e
'Gan ', # 0x7f
'Cu ', # 0x80
'Liang ', # 0x81
'Qiu ', # 0x82
'Ting ', # 0x83
'You ', # 0x84
'Mei ', # 0x85
'Bang ', # 0x86
'Long ', # 0x87
'Peng ', # 0x88
'Zhuang ', # 0x89
'Di ', # 0x8a
'Xuan ', # 0x8b
'Tu ', # 0x8c
'Zao ', # 0x8d
'Ao ', # 0x8e
'Gu ', # 0x8f
'Bi ', # 0x90
'Di ', # 0x91
'Han ', # 0x92
'Zi ', # 0x93
'Zhi ', # 0x94
'Ren ', # 0x95
'Bei ', # 0x96
'Geng ', # 0x97
'Jian ', # 0x98
'Huan ', # 0x99
'Wan ', # 0x9a
'Nuo ', # 0x9b
'Jia ', # 0x9c
'Tiao ', # 0x9d
'Ji ', # 0x9e
'Xiao ', # 0x9f
'Lu ', # 0xa0
'Huan ', # 0xa1
'Shao ', # 0xa2
'Cen ', # 0xa3
'Fen ', # 0xa4
'Song ', # 0xa5
'Meng ', # 0xa6
'Wu ', # 0xa7
'Li ', # 0xa8
'Li ', # 0xa9
'Dou ', # 0xaa
'Cen ', # 0xab
'Ying ', # 0xac
'Suo ', # 0xad
'Ju ', # 0xae
'Ti ', # 0xaf
'Jie ', # 0xb0
'Kun ', # 0xb1
'Zhuo ', # 0xb2
'Shu ', # 0xb3
'Chan ', # 0xb4
'Fan ', # 0xb5
'Wei ', # 0xb6
'Jing ', # 0xb7
'Li ', # 0xb8
'Bing ', # 0xb9
'Fumoto ', # 0xba
'Shikimi ', # 0xbb
'Tao ', # 0xbc
'Zhi ', # 0xbd
'Lai ', # 0xbe
'Lian ', # 0xbf
'Jian ', # 0xc0
'Zhuo ', # 0xc1
'Ling ', # 0xc2
'Li ', # 0xc3
'Qi ', # 0xc4
'Bing ', # 0xc5
'Zhun ', # 0xc6
'Cong ', # 0xc7
'Qian ', # 0xc8
'Mian ', # 0xc9
'Qi ', # 0xca
'Qi ', # 0xcb
'Cai ', # 0xcc
'Gun ', # 0xcd
'Chan ', # 0xce
'Te ', # 0xcf
'Fei ', # 0xd0
'Pai ', # 0xd1
'Bang ', # 0xd2
'Pou ', # 0xd3
'Hun ', # 0xd4
'Zong ', # 0xd5
'Cheng ', # 0xd6
'Zao ', # 0xd7
'Ji ', # 0xd8
'Li ', # 0xd9
'Peng ', # 0xda
'Yu ', # 0xdb
'Yu ', # 0xdc
'Gu ', # 0xdd
'Hun ', # 0xde
'Dong ', # 0xdf
'Tang ', # 0xe0
'Gang ', # 0xe1
'Wang ', # 0xe2
'Di ', # 0xe3
'Xi ', # 0xe4
'Fan ', # 0xe5
'Cheng ', # 0xe6
'Zhan ', # 0xe7
'Qi ', # 0xe8
'Yuan ', # 0xe9
'Yan ', # 0xea
'Yu ', # 0xeb
'Quan ', # 0xec
'Yi ', # 0xed
'Sen ', # 0xee
'Ren ', # 0xef
'Chui ', # 0xf0
'Leng ', # 0xf1
'Qi ', # 0xf2
'Zhuo ', # 0xf3
'Fu ', # 0xf4
'Ke ', # 0xf5
'Lai ', # 0xf6
'Zou ', # 0xf7
'Zou ', # 0xf8
'Zhuo ', # 0xf9
'Guan ', # 0xfa
'Fen ', # 0xfb
'Fen ', # 0xfc
'Chen ', # 0xfd
'Qiong ', # 0xfe
'Nie ', # 0xff
)
| apache-2.0 |
yejingxin/kaggle-ndsb | configurations/featharalick_pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66.py | 6 | 2932 | import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
# Handcrafted feature sets fed alongside the blended predictions; only
# haralick texture features are enabled in this configuration.
features = [
    # "hu",
    # "tutorial",
    "haralick",
    # "aaronmoments",
    # "lbp",
    # "pftas",
    # "zernike_moments",
    # "image_size",
]

# Training hyperparameters.
batch_size = 128
chunk_size = 32768
num_chunks_train = 240

momentum = 0.9
# Step schedule: chunk index -> learning rate.
learning_rate_schedule = {
    0: 0.001,
    100: 0.0001,
    200: 0.00001,
}

validate_every = 40
save_every = 40

# Pre-computed prediction files of the base model being blended.
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+"train--pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66--pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66-paard-20150313-180958--avg-probs.npy"
valid_pred_file = sdir+"valid--pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66--pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66-paard-20150313-180958--avg-probs.npy"
test_pred_file = sdir+"test--pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66--pl_blend5_convroll4_doublescale_fs5_no_dropout_33_66-paard-20150313-180958--avg-probs.npy"

data_loader = load.PredictionsWithFeaturesDataLoader(
    features = features,
    train_pred_file=train_pred_file,
    valid_pred_file=valid_pred_file,
    test_pred_file=test_pred_file,
    num_chunks_train=num_chunks_train,
    chunk_size=chunk_size)

# Generator factories used by the training/evaluation loop.
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
    """Build the blending network.

    Takes the base model's class probabilities plus 52 handcrafted
    features, passes the features through a small 2-hidden-layer MLP, adds
    the MLP output to the log-probabilities, and re-normalises with a
    softmax. Returns ([input_layers], output_layer).
    """
    l0 = nn.layers.InputLayer((batch_size, data.num_classes))
    l0_size = nn.layers.InputLayer((batch_size, 52))
    l1_size = nn.layers.DenseLayer(l0_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l2_size = nn.layers.DenseLayer(l1_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    # Linear output layer: produces additive corrections in log-space.
    l3_size = nn.layers.DenseLayer(l2_size, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)

    l1 = nn_plankton.NonlinLayer(l0, T.log)

    ltot = nn.layers.ElemwiseSumLayer([l1, l3_size])
    # norm_by_sum = lambda x: x / x.sum(1).dimshuffle(0, "x")
    lout = nn_plankton.NonlinLayer(ltot, nonlinearity=T.nnet.softmax)

    return [l0, l0_size], lout
def build_objective(l_ins, l_out):
    """Log-loss objective with an elastic-net penalty on non-bias params."""
    reg_param = 0.0002
    alpha = 0.  # mixing weight: 0 -> pure L2, 1 -> pure L1
    print "regu", reg_param, alpha
    # lambda_reg = 0.005
    params = nn.layers.get_all_non_bias_params(l_out)
    # reg_term = sum(T.sum(p**2) for p in params)
    L2 = sum(T.sum(p**2) for p in params)
    L1 = sum(T.sum(T.abs_(p)) for p in params)
    def loss(y, t):
        return nn_plankton.log_loss(y, t) + reg_param*(alpha * L1 + (1-alpha) * L2)
    return nn.objectives.Objective(l_out, loss_function=loss)
gnu3ra/SCC15HPCRepast | INSTALLATION/boost_1_54_0/libs/geometry/doc/index/make_qbk.py | 7 | 1807 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright (c) 2011-2012 Barend Gehrels, Amsterdam, the Netherlands.
# Copyright (c) 2011-2013 Adam Wulkiewicz, Lodz, Poland.
#
# Use, modification and distribution is subject to the Boost Software License,
# Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)9
# ============================================================================
import os, sys
# Shell command template: convert one Doxygen XML page into QuickBook.
cmd = (
    "doxygen_xml2qbk"
    " --xml xml/%s.xml"
    " --start_include boost/"
    " --output_style alt"
    " > generated/%s.qbk"
)

# Regenerate the Doxygen XML, then convert each page in order.
os.system("doxygen Doxyfile")

pages = [
    ("classboost_1_1geometry_1_1index_1_1rtree", "rtree"),
    ("group__rtree__functions", "rtree_functions"),
    ("structboost_1_1geometry_1_1index_1_1linear", "rtree_linear"),
    ("structboost_1_1geometry_1_1index_1_1quadratic", "rtree_quadratic"),
    ("structboost_1_1geometry_1_1index_1_1rstar", "rtree_rstar"),
    ("classboost_1_1geometry_1_1index_1_1dynamic__linear", "rtree_dynamic_linear"),
    ("classboost_1_1geometry_1_1index_1_1dynamic__quadratic", "rtree_dynamic_quadratic"),
    ("classboost_1_1geometry_1_1index_1_1dynamic__rstar", "rtree_dynamic_rstar"),
    ("structboost_1_1geometry_1_1index_1_1indexable", "indexable"),
    ("structboost_1_1geometry_1_1index_1_1equal__to", "equal_to"),
    ("group__predicates", "predicates"),
    # ("group__nearest__relations", "nearest_relations"),
    ("group__adaptors", "adaptors"),
    ("group__inserters", "inserters"),
]
for xml_name, qbk_name in pages:
    os.system(cmd % (xml_name, qbk_name))

# os.system("b2")
| bsd-3-clause |
leiferikb/bitpop | src/third_party/trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/_stream_base.py | 43 | 5176 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
    """Raised when a connection is terminated unexpectedly.

    Also the base class of InvalidFrameException.
    """
    pass
class InvalidFrameException(ConnectionTerminatedException):
    """Raised when a received frame is invalid and cannot be parsed."""
    pass
class BadOperationException(Exception):
    """Raised on an operation invalid for the connection's current state:
    send_message() on a server-terminated connection, or receive_message()
    on a client-terminated connection.
    """
    pass
class UnsupportedFrameException(Exception):
    """Raised when a frame carries a flag or opcode we cannot handle.

    Handlers can catch and ignore this exception and call
    receive_message() again to continue processing the next frame.
    """
    pass
class InvalidUTF8Exception(Exception):
    """Raised when a received text frame contains invalid UTF-8 data."""
    pass
class StreamBase(object):
    """Base stream class wrapping raw reads/writes on the request connection."""

    def __init__(self, request):
        """Construct an instance.

        Args:
            request: mod_python request.
        """
        self._logger = util.get_class_logger(self)

        self._request = request

    def _read(self, length):
        """Reads at most `length` bytes from the connection.

        Raises:
            ConnectionTerminatedException: when read returns empty string,
                meaning the peer closed the connection.
        """
        bytes = self._request.connection.read(length)
        if not bytes:
            raise ConnectionTerminatedException(
                'Receiving %d byte failed. Peer (%r) closed connection' %
                (length, (self._request.connection.remote_addr,)))
        return bytes

    def _write(self, bytes):
        """Writes given bytes to the connection.

        On any exception, prepends the remote address to the exception
        message and re-raises, to make failures attributable to a peer.
        """
        try:
            self._request.connection.write(bytes)
        except Exception, e:
            util.prepend_message_to_exception(
                'Failed to send message to %r: ' %
                (self._request.connection.remote_addr,),
                e)
            raise

    def receive_bytes(self, length):
        """Receives exactly `length` bytes.

        Retries the read because a single read may return fewer bytes than
        requested.

        Raises:
            ConnectionTerminatedException: when read returns empty string.
        """
        bytes = []
        while length > 0:
            new_bytes = self._read(length)
            bytes.append(new_bytes)
            length -= len(new_bytes)
        return ''.join(bytes)

    def _read_until(self, delim_char):
        """Reads bytes until delim_char is encountered.

        The result does not contain delim_char (it is consumed but dropped).

        Raises:
            ConnectionTerminatedException: when read returns empty string.
        """
        bytes = []
        while True:
            ch = self._read(1)
            if ch == delim_char:
                break
            bytes.append(ch)
        return ''.join(bytes)
# vi:sts=4 sw=4 et
| gpl-3.0 |
arjen75/icecold-kernel | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Globals populated while generating a fabric module skeleton.
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""        # "lport" (FC) or "tport" (SAS/iSCSI)
fabric_mod_init_port = ""   # "nport" (FC) or "iport" (SAS/iSCSI)
def tcm_mod_err(msg):
    """Print an error message and abort the generator with exit code 1."""
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory.

    Returns 1 if the directory already exists; otherwise creates it.
    """
    if os.path.isdir(fabric_mod_dir_var) == True:
        return 1

    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    ret = os.mkdir(fabric_mod_dir_var)
    # NOTE(review): os.mkdir() returns None on success and raises on
    # failure, so this error branch appears to be dead code.
    if ret:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h for an FC fabric (lport/nport naming).

    Also sets the fabric_mod_port/fabric_mod_init_port globals used by the
    other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Build the whole header in memory, then write it in one call.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"

    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h for a SAS fabric (tport/iport naming).

    Also sets the fabric_mod_port/fabric_mod_init_port globals used by the
    other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Build the whole header in memory, then write it in one call.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h for an iSCSI fabric (tport/iport naming).

    Also sets the fabric_mod_port/fabric_mod_init_port globals used by the
    other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Build the whole header in memory, then write it in one call.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific *_base.h generator.

    Exits the program via sys.exit(1) on an unknown proto_ident.
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        """Generate <fabric_mod_name>_configfs.c in fabric_mod_dir_var.

        Emits the configfs fabric glue for the new TCM fabric module:
        make/drop callbacks for nodeacl, tpg and wwn objects, a read-only
        version attribute, the target_core_fabric_ops table wired to the
        <fabric_mod_name>_* stubs produced by tcm_mod_dump_fabric_ops(),
        and the module init/exit registration boilerplate.

        Relies on the module-level globals fabric_mod_port and
        fabric_mod_init_port set by the protocol-specific *_include()
        generator, so tcm_mod_build_base_includes() must run first.
        """
        buf = ""
        f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
        print "Writing file: " + f
        # NOTE(review): open() raises IOError on failure rather than
        # returning a false value, so this error path is dead code.
        p = open(f, 'w');
        if not p:
                tcm_mod_err("Unable to open file: " + f)
        # Include boilerplate for the generated source file.
        buf = "#include <linux/module.h>\n"
        buf += "#include <linux/moduleparam.h>\n"
        buf += "#include <linux/version.h>\n"
        buf += "#include <generated/utsrelease.h>\n"
        buf += "#include <linux/utsname.h>\n"
        buf += "#include <linux/init.h>\n"
        buf += "#include <linux/slab.h>\n"
        buf += "#include <linux/kthread.h>\n"
        buf += "#include <linux/types.h>\n"
        buf += "#include <linux/string.h>\n"
        buf += "#include <linux/configfs.h>\n"
        buf += "#include <linux/ctype.h>\n"
        buf += "#include <asm/unaligned.h>\n\n"
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/target_core_transport.h>\n"
        buf += "#include <target/target_core_fabric_ops.h>\n"
        buf += "#include <target/target_core_fabric_configfs.h>\n"
        buf += "#include <target/target_core_fabric_lib.h>\n"
        buf += "#include <target/target_core_device.h>\n"
        buf += "#include <target/target_core_tpg.h>\n"
        buf += "#include <target/target_core_configfs.h>\n"
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/configfs_macros.h>\n\n"
        buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
        buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
        buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
        buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
        # Generated <mod>_make_nodeacl(): WWPN parsing only for FC/SAS.
        buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
        buf += " struct se_portal_group *se_tpg,\n"
        buf += " struct config_group *group,\n"
        buf += " const char *name)\n"
        buf += "{\n"
        buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
        buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
        if proto_ident == "FC" or proto_ident == "SAS":
                buf += " u64 wwpn = 0;\n"
        buf += " u32 nexus_depth;\n\n"
        buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
        buf += " return ERR_PTR(-EINVAL); */\n"
        buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
        buf += " if (!(se_nacl_new))\n"
        buf += " return ERR_PTR(-ENOMEM);\n"
        buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
        buf += " nexus_depth = 1;\n"
        buf += " /*\n"
        buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
        buf += " * when converting a NodeACL from demo mode -> explict\n"
        buf += " */\n"
        buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
        buf += " name, nexus_depth);\n"
        buf += " if (IS_ERR(se_nacl)) {\n"
        buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
        buf += " return se_nacl;\n"
        buf += " }\n"
        buf += " /*\n"
        buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
        buf += " */\n"
        buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
        if proto_ident == "FC" or proto_ident == "SAS":
                buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
                buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
        buf += " return se_nacl;\n"
        buf += "}\n\n"
        buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
        buf += "{\n"
        buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
        buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
        buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
        buf += " kfree(nacl);\n"
        buf += "}\n\n"
        # Generated <mod>_make_tpg()/<mod>_drop_tpg().
        # NOTE(review): the next line emits "<type>*<var>" with no space
        # before '*' (still valid C, but inconsistent with other lines).
        buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
        buf += " struct se_wwn *wwn,\n"
        buf += " struct config_group *group,\n"
        buf += " const char *name)\n"
        buf += "{\n"
        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
        buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
        buf += " unsigned long tpgt;\n"
        buf += " int ret;\n\n"
        buf += " if (strstr(name, \"tpgt_\") != name)\n"
        buf += " return ERR_PTR(-EINVAL);\n"
        buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
        buf += " return ERR_PTR(-EINVAL);\n\n"
        buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
        buf += " if (!(tpg)) {\n"
        buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
        buf += " return ERR_PTR(-ENOMEM);\n"
        buf += " }\n"
        buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
        buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
        buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
        buf += " &tpg->se_tpg, (void *)tpg,\n"
        buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
        buf += " if (ret < 0) {\n"
        buf += " kfree(tpg);\n"
        # NOTE(review): generated code returns NULL here rather than
        # ERR_PTR(ret); callers expecting an ERR_PTR may mishandle this.
        buf += " return NULL;\n"
        buf += " }\n"
        buf += " return &tpg->se_tpg;\n"
        buf += "}\n\n"
        buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
        buf += "{\n"
        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
        buf += " core_tpg_deregister(se_tpg);\n"
        buf += " kfree(tpg);\n"
        buf += "}\n\n"
        # Generated <mod>_make_<port>()/<mod>_drop_<port>() wwn callbacks.
        buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
        buf += " struct target_fabric_configfs *tf,\n"
        buf += " struct config_group *group,\n"
        buf += " const char *name)\n"
        buf += "{\n"
        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
        if proto_ident == "FC" or proto_ident == "SAS":
                buf += " u64 wwpn = 0;\n\n"
        buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
        buf += " return ERR_PTR(-EINVAL); */\n\n"
        buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
        buf += " if (!(" + fabric_mod_port + ")) {\n"
        buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
        buf += " return ERR_PTR(-ENOMEM);\n"
        buf += " }\n"
        if proto_ident == "FC" or proto_ident == "SAS":
                buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
                # NOTE(review): "__NAMELEN" below has a double underscore;
                # every other site emits "_NAMELEN" (harmless here only
                # because the generated line is inside a C comment).
                buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
        buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
        buf += "}\n\n"
        buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
        buf += "{\n"
        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
        buf += " kfree(" + fabric_mod_port + ");\n"
        buf += "}\n\n"
        # Version attribute shown under the fabric's configfs wwn dir.
        buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
        buf += " struct target_fabric_configfs *tf,\n"
        buf += " char *page)\n"
        buf += "{\n"
        buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
        buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
        buf += " utsname()->machine);\n"
        buf += "}\n\n"
        buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
        buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
        buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
        buf += " NULL,\n"
        buf += "};\n\n"
        # Fabric ops table: every entry points at a stub emitted by
        # tcm_mod_dump_fabric_ops() into <mod>_fabric.c.
        buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
        buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
        buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
        buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
        buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
        buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
        buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
        buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
        buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
        buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
        buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
        buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
        buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
        buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
        buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
        buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
        buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
        buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
        buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
        buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
        buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
        buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
        buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
        buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
        buf += " .sess_get_initiator_sid = NULL,\n"
        buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
        buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
        buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
        buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
        buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
        buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
        buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
        buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
        buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
        buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
        buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
        buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
        buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
        buf += " /*\n"
        buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
        buf += " */\n"
        buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
        buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
        buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
        buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
        buf += " .fabric_post_link = NULL,\n"
        buf += " .fabric_pre_unlink = NULL,\n"
        buf += " .fabric_make_np = NULL,\n"
        buf += " .fabric_drop_np = NULL,\n"
        buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
        buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
        buf += "};\n\n"
        # Module registration/deregistration with TCM's configfs core.
        buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
        buf += "{\n"
        buf += " struct target_fabric_configfs *fabric;\n"
        buf += " int ret;\n\n"
        buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
        buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
        buf += " utsname()->machine);\n"
        buf += " /*\n"
        buf += " * Register the top level struct config_item_type with TCM core\n"
        buf += " */\n"
        buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
        buf += " if (!(fabric)) {\n"
        buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
        buf += " return -ENOMEM;\n"
        buf += " }\n"
        buf += " /*\n"
        buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
        buf += " */\n"
        buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
        buf += " /*\n"
        buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
        buf += " */\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
        buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
        buf += " /*\n"
        buf += " * Register the fabric for use within TCM\n"
        buf += " */\n"
        buf += " ret = target_fabric_configfs_register(fabric);\n"
        buf += " if (ret < 0) {\n"
        buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
        buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
        buf += " return ret;\n"
        buf += " }\n"
        buf += " /*\n"
        buf += " * Setup our local pointer to *fabric\n"
        buf += " */\n"
        buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
        buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
        buf += " return 0;\n"
        buf += "};\n\n"
        buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
        buf += "{\n"
        buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
        buf += " return;\n\n"
        buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
        buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
        buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
        buf += "};\n\n"
        buf += "static int __init " + fabric_mod_name + "_init(void)\n"
        buf += "{\n"
        buf += " int ret;\n\n"
        buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
        buf += " if (ret < 0)\n"
        buf += " return ret;\n\n"
        buf += " return 0;\n"
        buf += "};\n\n"
        buf += "static void " + fabric_mod_name + "_exit(void)\n"
        buf += "{\n"
        buf += " " + fabric_mod_name + "_deregister_configfs();\n"
        buf += "};\n\n"
        buf += "#ifdef MODULE\n"
        buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
        buf += "MODULE_LICENSE(\"GPL\");\n"
        buf += "module_init(" + fabric_mod_name + "_init);\n"
        buf += "module_exit(" + fabric_mod_name + "_exit);\n"
        buf += "#endif\n"
        ret = p.write(buf)
        # NOTE(review): in Python 2, file.write() returns None, so this
        # check never fires (and under Python 3 it would misfire on
        # success, since write() returns the character count there).
        if ret:
                tcm_mod_err("Unable to write f: " + f)
        p.close()
        return
def tcm_mod_scan_fabric_ops(tcm_dir):
        """Scrape struct target_core_fabric_ops member lines into the
        module-level fabric_ops list.

        Reads include/target/target_core_fabric_ops.h under tcm_dir and
        appends every line containing a function-pointer declaration
        ("(*") to fabric_ops; tcm_mod_dump_fabric_ops() later pattern
        matches those lines to decide which stub functions to emit.
        fabric_ops is only mutated via append(), so no global statement
        is needed.
        """
        fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
        print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
        # 0 until scanning has "started"; flips to 1 on the first line
        # that is not the struct opener.
        process_fo = 0;
        p = open(fabric_ops_api, 'r')
        line = p.readline()
        while line:
                # Skip the struct header line itself.
                if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
                        line = p.readline()
                        continue
                if process_fo == 0:
                        # NOTE(review): this arm runs on the first line that
                        # is NOT the struct opener, so in practice every
                        # "(*" line in the whole header is collected, not
                        # only those inside the struct body.
                        process_fo = 1;
                        line = p.readline()
                        # Search for function pointer
                        if not re.search('\(\*', line):
                                continue
                        fabric_ops.append(line.rstrip())
                        continue
                # Steady state: read, filter for function pointers, append.
                line = p.readline()
                # Search for function pointer
                if not re.search('\(\*', line):
                        continue
                fabric_ops.append(line.rstrip())
        p.close()
        return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        """Generate <fabric_mod_name>_fabric.c and _fabric.h stubs.

        For every function-pointer line scraped into the global
        fabric_ops list by tcm_mod_scan_fabric_ops(), emits a matching
        <fabric_mod_name>_* stub implementation into buf (the .c file)
        and its prototype into bufi (the .h file).  proto_ident selects
        the FC/SAS/iSCSI helper calls inside the protocol-dependent
        stubs.
        """
        buf = ""
        bufi = ""
        f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
        print "Writing file: " + f
        # NOTE(review): open() raises IOError on failure; these "if not"
        # checks (here and below for pi) are dead code.
        p = open(f, 'w')
        if not p:
                tcm_mod_err("Unable to open file: " + f)
        fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
        print "Writing file: " + fi
        pi = open(fi, 'w')
        if not pi:
                tcm_mod_err("Unable to open file: " + fi)
        # Include boilerplate for the generated .c file.
        buf = "#include <linux/slab.h>\n"
        buf += "#include <linux/kthread.h>\n"
        buf += "#include <linux/types.h>\n"
        buf += "#include <linux/list.h>\n"
        buf += "#include <linux/types.h>\n"
        buf += "#include <linux/string.h>\n"
        buf += "#include <linux/ctype.h>\n"
        buf += "#include <asm/unaligned.h>\n"
        buf += "#include <scsi/scsi.h>\n"
        buf += "#include <scsi/scsi_host.h>\n"
        buf += "#include <scsi/scsi_device.h>\n"
        buf += "#include <scsi/scsi_cmnd.h>\n"
        buf += "#include <scsi/libfc.h>\n\n"
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/target_core_transport.h>\n"
        buf += "#include <target/target_core_fabric_ops.h>\n"
        buf += "#include <target/target_core_fabric_lib.h>\n"
        buf += "#include <target/target_core_device.h>\n"
        buf += "#include <target/target_core_tpg.h>\n"
        buf += "#include <target/target_core_configfs.h>\n\n"
        buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
        buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
        # check_true/check_false helpers are emitted unconditionally.
        buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
        buf += "{\n"
        buf += " return 1;\n"
        buf += "}\n\n"
        bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
        buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
        buf += "{\n"
        buf += " return 0;\n"
        buf += "}\n\n"
        bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
        total_fabric_ops = len(fabric_ops)
        i = 0
        # One regex-dispatched stub per scraped fabric_ops line.
        # NOTE(review): only the get_fabric_name branch uses "continue";
        # the later branches fall through to the subsequent "if" tests and
        # rely on the regexes being mutually exclusive.
        while i < total_fabric_ops:
                fo = fabric_ops[i]
                i += 1
                # print "fabric_ops: " + fo
                if re.search('get_fabric_name', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
                        buf += "{\n"
                        buf += " return \"" + fabric_mod_name[4:] + "\";\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
                        continue
                if re.search('get_fabric_proto_ident', fo):
                        buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
                        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
                        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
                        buf += " u8 proto_id;\n\n"
                        buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
                        if proto_ident == "FC":
                                buf += " case SCSI_PROTOCOL_FCP:\n"
                                buf += " default:\n"
                                buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                                buf += " break;\n"
                        elif proto_ident == "SAS":
                                buf += " case SCSI_PROTOCOL_SAS:\n"
                                buf += " default:\n"
                                buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                                buf += " break;\n"
                        elif proto_ident == "iSCSI":
                                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                                buf += " default:\n"
                                buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                                buf += " break;\n"
                        buf += " }\n\n"
                        buf += " return proto_id;\n"
                        buf += "}\n\n"
                        bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
                # NOTE(review): 'get_wwn' (no "\)\(" anchor) is broader than
                # the other patterns; it matches the tpg_get_wwn member.
                if re.search('get_wwn', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
                        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
                        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
                        buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
                if re.search('get_tag', fo):
                        buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
                        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
                        buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
                        buf += "}\n\n"
                        bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
                if re.search('get_default_depth', fo):
                        buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
                        buf += " return 1;\n"
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
                if re.search('get_pr_transport_id\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
                        buf += " struct se_portal_group *se_tpg,\n"
                        buf += " struct se_node_acl *se_nacl,\n"
                        buf += " struct t10_pr_registration *pr_reg,\n"
                        buf += " int *format_code,\n"
                        buf += " unsigned char *buf)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
                        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
                        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
                        buf += " int ret = 0;\n\n"
                        buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
                        if proto_ident == "FC":
                                buf += " case SCSI_PROTOCOL_FCP:\n"
                                buf += " default:\n"
                                buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                                buf += " format_code, buf);\n"
                                buf += " break;\n"
                        elif proto_ident == "SAS":
                                buf += " case SCSI_PROTOCOL_SAS:\n"
                                buf += " default:\n"
                                buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                                buf += " format_code, buf);\n"
                                buf += " break;\n"
                        elif proto_ident == "iSCSI":
                                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                                buf += " default:\n"
                                buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                                buf += " format_code, buf);\n"
                                buf += " break;\n"
                        buf += " }\n\n"
                        buf += " return ret;\n"
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
                        bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
                        bufi += " int *, unsigned char *);\n"
                if re.search('get_pr_transport_id_len\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
                        buf += " struct se_portal_group *se_tpg,\n"
                        buf += " struct se_node_acl *se_nacl,\n"
                        buf += " struct t10_pr_registration *pr_reg,\n"
                        buf += " int *format_code)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
                        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
                        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
                        buf += " int ret = 0;\n\n"
                        buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
                        if proto_ident == "FC":
                                buf += " case SCSI_PROTOCOL_FCP:\n"
                                buf += " default:\n"
                                buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                                buf += " format_code);\n"
                                buf += " break;\n"
                        elif proto_ident == "SAS":
                                buf += " case SCSI_PROTOCOL_SAS:\n"
                                buf += " default:\n"
                                buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                                buf += " format_code);\n"
                                buf += " break;\n"
                        elif proto_ident == "iSCSI":
                                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                                buf += " default:\n"
                                buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                                buf += " format_code);\n"
                                buf += " break;\n"
                        buf += " }\n\n"
                        buf += " return ret;\n"
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
                        bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
                        bufi += " int *);\n"
                if re.search('parse_pr_out_transport_id\)\(', fo):
                        # NOTE(review): unlike the two cases above, the emitted
                        # switch cases here carry no "break;" lines.
                        buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
                        buf += " struct se_portal_group *se_tpg,\n"
                        buf += " const char *buf,\n"
                        buf += " u32 *out_tid_len,\n"
                        buf += " char **port_nexus_ptr)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
                        buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
                        buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
                        buf += " char *tid = NULL;\n\n"
                        buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
                        if proto_ident == "FC":
                                buf += " case SCSI_PROTOCOL_FCP:\n"
                                buf += " default:\n"
                                buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                                buf += " port_nexus_ptr);\n"
                        elif proto_ident == "SAS":
                                buf += " case SCSI_PROTOCOL_SAS:\n"
                                buf += " default:\n"
                                buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                                buf += " port_nexus_ptr);\n"
                        elif proto_ident == "iSCSI":
                                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                                buf += " default:\n"
                                buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                                buf += " port_nexus_ptr);\n"
                        buf += " }\n\n"
                        buf += " return tid;\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
                        bufi += " const char *, u32 *, char **);\n"
                if re.search('alloc_fabric_acl\)\(', fo):
                        buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
                        buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
                        buf += " if (!(nacl)) {\n"
                        buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
                        buf += " return NULL;\n"
                        buf += " }\n\n"
                        buf += " return &nacl->se_node_acl;\n"
                        buf += "}\n\n"
                        bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
                if re.search('release_fabric_acl\)\(', fo):
                        buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
                        buf += " struct se_portal_group *se_tpg,\n"
                        buf += " struct se_node_acl *se_nacl)\n"
                        buf += "{\n"
                        buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
                        buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
                        buf += " kfree(nacl);\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
                        bufi += " struct se_node_acl *);\n"
                if re.search('tpg_get_inst_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
                        buf += " return 1;\n"
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
                if re.search('release_cmd_to_pool', fo):
                        buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return;\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
                if re.search('shutdown_session\)\(', fo):
                        buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
                if re.search('close_session\)\(', fo):
                        buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
                        buf += "{\n"
                        buf += " return;\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
                if re.search('stop_session\)\(', fo):
                        buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
                        buf += "{\n"
                        buf += " return;\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
                if re.search('fall_back_to_erl0\)\(', fo):
                        buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
                        buf += "{\n"
                        buf += " return;\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
                if re.search('sess_logged_in\)\(', fo):
                        buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
                if re.search('sess_get_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
                if re.search('write_pending\)\(', fo):
                        buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
                if re.search('write_pending_status\)\(', fo):
                        buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
                if re.search('set_default_node_attributes\)\(', fo):
                        buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
                        buf += "{\n"
                        buf += " return;\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
                if re.search('get_task_tag\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
                if re.search('get_cmd_state\)\(', fo):
                        buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
                if re.search('new_cmd_failure\)\(', fo):
                        buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return;\n"
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
                if re.search('queue_data_in\)\(', fo):
                        buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
                if re.search('queue_status\)\(', fo):
                        buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
                if re.search('queue_tm_rsp\)\(', fo):
                        buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
                if re.search('get_fabric_sense_len\)\(', fo):
                        buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
                if re.search('set_fabric_sense_len\)\(', fo):
                        buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
                if re.search('is_state_remove\)\(', fo):
                        buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += " return 0;\n"
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
                if re.search('pack_lun\)\(', fo):
                        buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
                        buf += "{\n"
                        buf += " WARN_ON(lun >= 256);\n"
                        buf += " /* Caller wants this byte-swapped */\n"
                        buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
                        buf += "}\n\n"
                        bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
        ret = p.write(buf)
        # NOTE(review): file.write() returns None in Python 2, so these
        # two error checks can never fire (and would misfire on success
        # under Python 3, where write() returns the character count).
        if ret:
                tcm_mod_err("Unable to write f: " + f)
        p.close()
        ret = pi.write(bufi)
        if ret:
                tcm_mod_err("Unable to write fi: " + fi)
        pi.close()
        return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
        """Generate the Kbuild Makefile for the new fabric module.

        Writes <fabric_mod_dir_var>/Makefile linking <mod>_fabric.o and
        <mod>_configfs.o into <mod>.o, built when CONFIG_<MOD> is set.

        Fixes vs. original: open() raises IOError instead of returning a
        false value, and file.write() returns None in Python 2, so the
        original "if not p:" and "if ret:" checks were dead code.
        """
        f = fabric_mod_dir_var + "/Makefile"
        print("Writing file: " + f)
        buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
        buf += " " + fabric_mod_name + "_configfs.o\n"
        buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
        try:
                p = open(f, 'w')
        except IOError:
                tcm_mod_err("Unable to open file: " + f)
        try:
                p.write(buf)
        except IOError:
                tcm_mod_err("Unable to write f: " + f)
        p.close()
        return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
        """Generate the Kconfig entry for the new fabric module.

        Writes <fabric_mod_dir_var>/Kconfig defining a tristate
        CONFIG_<MOD> option depending on TARGET_CORE and CONFIGFS_FS.

        Fixes vs. original: open() raises IOError instead of returning a
        false value, and file.write() returns None in Python 2, so the
        original "if not p:" and "if ret:" checks were dead code.
        """
        f = fabric_mod_dir_var + "/Kconfig"
        print("Writing file: " + f)
        buf = "config " + fabric_mod_name.upper() + "\n"
        buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
        buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
        buf += " default n\n"
        buf += " ---help---\n"
        buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
        try:
                p = open(f, 'w')
        except IOError:
                tcm_mod_err("Unable to open file: " + f)
        try:
                p.write(buf)
        except IOError:
                tcm_mod_err("Unable to write f: " + f)
        p.close()
        return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new fabric module's subdirectory rule to the in-tree
    drivers/target/Makefile so kbuild descends into it."""
    rule = "obj-$(CONFIG_%s) += %s/\n" % (fabric_mod_name.upper(),
                                          fabric_mod_name.lower())
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    # Open in append mode: the rule is added after any existing entries.
    out = open(makefile_path, 'a')
    out.write(rule)
    out.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a 'source' line for the new fabric module's Kconfig to the
    in-tree drivers/target/Kconfig."""
    source_line = "source \"drivers/target/%s/Kconfig\"\n" % fabric_mod_name.lower()
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    # Append mode: keeps all existing Kconfig entries intact.
    out = open(kconfig_path, 'a')
    out.write(source_line)
    out.close()
    return
def main(modname, proto_ident):
    """Drive the full fabric-module skeleton generation.

    Creates the module directory under drivers/target/, emits the base
    includes, fabric ops, configfs glue, Makefile and Kconfig, then
    optionally wires the module into the tree-level Makefile/Kconfig.
    Only proto_ident values "FC", "SAS" and "iSCSI" are accepted.
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"
    # Script is expected to run from drivers/target/ (two levels below the
    # kernel tree root), hence the relative climb.
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident
    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)
    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)
    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
    # NOTE(review): the prompt string is missing a space before "to", and
    # `input` shadows the Python builtin of the same name.
    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line handling: both options are mandatory; optparse has no
# built-in 'required' flag, so presence is checked by hand below.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
        action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
        action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
    # opts.__dict__ lookup: treat a missing/None/empty option as absent.
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)
if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
holtzermann17/key-mon | src/keymon/shaped_window.py | 15 | 3172 | #!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a shaped window to show mouse events.
Thanks to mathias.gumz for the original code.
"""
import gobject
import gtk
import lazy_pixbuf_creator
class ShapedWindow(gtk.Window):
    """A borderless, always-on-top GTK window shaped like the image `fname`,
    used as an on-screen mouse-click indicator."""

    def __init__(self, fname, scale=1.0, timeout=0.2):
        """Load `fname` (scaled by `scale`) and build the shaped window.

        `timeout` is the delay in seconds before fade_away() hides the
        window again.
        """
        gtk.Window.__init__(self)
        self.connect('size-allocate', self._on_size_allocate)
        self.set_decorated(False)
        self.set_keep_above(True)
        self.set_accept_focus(False)
        self.scale = scale
        self.shown = False            # whether the indicator is currently visible
        self.timeout = timeout
        self.timeout_timer = None     # gobject timer id for a pending hide
        self.name_fnames = {
            'mouse' : [fname],
        }
        self.pixbufs = lazy_pixbuf_creator.LazyPixbufCreator(self.name_fnames,
                                                             self.scale)
        self.pixbuf = self.pixbufs.get('mouse')
        self.resize(self.pixbuf.get_width(), self.pixbuf.get_height())
        # a pixmap widget to contain the pixmap
        self.image = gtk.Image()
        bitmap, self.mask = self.pixbuf.render_pixmap_and_mask()
        self.image.set_from_pixmap(bitmap, self.mask)
        self.image.show()
        self.add(self.image)

    def _on_size_allocate(self, win, unused_allocation):
        """Called when first allocated."""
        # Set the window shape
        win.shape_combine_mask(self.mask, 0, 0)
        win.set_property('skip-taskbar-hint', True)
        if not win.is_composited():
            print 'Unable to fade the window'
        else:
            win.set_opacity(0.5)

    def center_on_cursor(self, x=None, y=None):
        """Move the window so it is centered on (x, y), defaulting to the
        current pointer position, then show it."""
        if x is None or y is None:
            root = gtk.gdk.screen_get_default().get_root_window()
            x, y, _ = root.get_pointer()
        w, h = self.get_size()
        new_x, new_y = x - w/2, y - h/2
        pos = self.get_position()
        # Only move when the position actually changed, to avoid flicker.
        if pos[0] != new_x or pos[1] != new_y:
            self.move(new_x, new_y)
        self.show()

    def show(self):
        """Show this mouse indicator and ignore awaiting fade away request."""
        if self.timeout_timer and self.shown:
            # There is a fade away request, ignore it
            gobject.source_remove(self.timeout_timer)
            self.timeout_timer = None
            # This method only is called when mouse is pressed, so there will be a
            # release and fade_away call, no need to set up another timer.
        super(ShapedWindow, self).show()

    def maybe_show(self):
        """Show the window only if a fade-away is pending and it is hidden."""
        if self.shown or not self.timeout_timer:
            return
        self.shown = True
        self.show()

    def fade_away(self):
        """Make the window fade in a little bit."""
        # TODO this isn't doing any fading out
        # Schedules self.hide after `timeout` seconds rather than fading.
        self.shown = False
        self.timeout_timer = gobject.timeout_add(int(self.timeout * 1000), self.hide)
| apache-2.0 |
adoosii/edx-platform | common/lib/xmodule/xmodule/tests/xml/test_policy.py | 248 | 1262 | """
Tests that policy json files import correctly when loading XML
"""
from nose.tools import assert_equals, assert_raises # pylint: disable=no-name-in-module
from xmodule.tests.xml.factories import CourseFactory
from xmodule.tests.xml import XModuleXmlImportTest
class TestPolicy(XModuleXmlImportTest):
    """
    Verify that values from policy JSON files are applied verbatim when
    loading a course from XML.
    """

    def test_no_attribute_mapping(self):
        # Policy files are json, so their values skip 'deserialize_field';
        # the literal string 'null' therefore reaches the Float field
        # unconverted and must raise ValueError on access.
        with assert_raises(ValueError):
            imported = self.process_xml(
                CourseFactory.build(policy={'days_early_for_beta': 'null'}))
            # Trigger the exception by looking at the imported data
            imported.days_early_for_beta  # pylint: disable=pointless-statement

    def test_course_policy(self):
        # Both a JSON null and a plain number should round-trip unchanged.
        for raw_value in (None, 9):
            imported = self.process_xml(
                CourseFactory.build(policy={'days_early_for_beta': raw_value}))
            assert_equals(raw_value, imported.days_early_for_beta)
| agpl-3.0 |
paulmcquad/projecteuler | 500-600/problem587.py | 1 | 5271 | #
# Solution to Project Euler problem 587
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import itertools, math
# Start by defining the coordinate system in a convenient way. The position and scale of the diagram don't
# matter because we only care about the ratio of areas, not the absolute areas. So, let the bottom left
# of the diagram be the origin (x = 0, y = 0), and let each circle to have a radius of 1.
#
# The leftmost circle is centered at (1, 1), and its equation is (x - 1)^2 + (y - 1)^2 = 1.
# The diagonal line has slope = s = 1 / n (for any positive n), and the line's equation is y = s * x.
# From basic geometry, the area of the blue L-section is 1 - pi / 4.
#
# Let's find the x-coordinate where the diagonal line intersects the first circle.
# Take the equation of the circle and substitute y = s * x for the line:
#
# (x - 1)^2 + (s*x - 1)^2 = 1.
# (x^2 - 2x + 1) + (s^2 x^2 - 2s*x + 1) = 1.
# (1 + s^2)x^2 + (-2 - 2s)x + 1 = 0.
#
# We can apply the quadratic formula with a = 1 + s^2, b = -2 - 2s, c = 1. There are two solutions for x,
# and we only want the smaller value. Thus, let X = (-b - sqrt(b^2 - 4ac)) / (2a). Or equivalently
# with more numerical stability (using the Citardauq formula), X = (2c) / (-b + sqrt(b^2 - 4ac)).
#
# The orange concave triangle can be divided into two parts by a vertical line:
#
# - The left part is a proper triangle, whose area is easily seen as x * y / 2 = X^2 * s / 2.
#
# - The right part is the region between the circle and the baseline. Let's re-express
# the circle's equation in terms of y, and only keep the lower semicircle:
#
# (x - 1)^2 + (y - 1)^2 = 1.
# (y - 1)^2 = 1 - (x - 1)^2.
# y - 1 = -sqrt(1 - (x - 1)^2).
# y = 1 - sqrt(1 - (x - 1)^2).
# y = 1 - sqrt(1 - (x^2 - 2x + 1)).
# y = 1 - sqrt(2x - x^2).
#
# Now, the indefinite integral of f(x) = 1 - sqrt(2x - x^2) with respect to x
# is F(x) = (x - 1) - [sqrt(2x - x^2) * (x - 1) + asin(x - 1)] / 2.
# Finding this integral is not obvious, but verifying it is a fairly straightforward
# mechanical procedure involving differentiation and simplification.
#
# The area of the right part is the integral of f(x) for x from X to 1, because the start is
# the x-coordinate where line meets the circle, and the end is where the circle meets the baseline.
# Hence the area is equal to F(1) - F(X).
#
# All in all, for any given n, the area of the orange concave triangle is X^2 * s / 2 + F(1) - F(X).
# The rest of the algorithm is a brute-force search with n = 1, 2, 3, ... until the ratio condition is met.
#
# Additional notes:
# - Intuitively, as n increases and the slope gets smaller, the area of the orange concave triangle should strictly
# decrease. This statement is in fact true, but proving it involves a big pile of differentiation and algebra.
# 0. We need to show that X (which is the x-coordinate of the line-circle intersection) increases with n.
# We'd differentiate X with respect to n, and get an expression that is always positive for any positive n.
# 1. Because X increases with n, the area of the right part, with its always-positive integrand, must decrease.
# 2. As for the left part, we'd differentiate X^2 * s / 2 with respect to n, and get a huge messy formula.
# It turns out this formula is negative for all n > 1. Hence the area of this triangle also decreases with n.
# After we prove that increasing n leads to decreasing orange area, we could use
# binary search to find the minimum value of n needed to meet the ratio requirement.
# - The use of floating-point arithmetic, for basic arithmetic operations (+ - * /) and irrational functions (sqrt,
# asin) alike, is inherently difficult or impossible to prove the correctness of. Furthermore, the algorithms
# for irrational functions are hard to understand and beyond the scope of this problem, and the error bounds for
# all operations are difficult to reason about.
# It should be possible to solve this particular problem using only integer arithmetic in a provably correct way.
# The basic idea would be to round the result of each operation both down and up to an integer fraction,
# keep track of pessimistic intervals that are guaranteed to contain the true value, accept a comparison only
# if the intervals don't overlap, and recompute everything at a higher precision if a comparison is inconclusive.
# Note: Because it doesn't seem easy to compute pi and asin(), it might be better to
# approximate integrals directly using the Darboux definition of lower and upper sums.
def compute():
    """Search n = 1, 2, 3, ... for the first slope 1/n at which the orange
    concave triangle occupies less than 0.1% of the blue L-section, and
    return that n as a string (see the derivation in the comments above)."""

    def area_integral(x):
        # Antiderivative of f(x) = 1 - sqrt(2x - x^2).
        shifted = x - 1.0
        return shifted - (math.sqrt(x * (2.0 - x)) * shifted + math.asin(shifted)) / 2.0

    # Area of the L-shaped region between the unit square and quarter circle.
    l_section_area = 1.0 - math.pi / 4.0
    n = 1
    while True:
        slope = 1.0 / n
        # Quadratic (1 + s^2)x^2 + (-2 - 2s)x + 1 = 0 for the line/circle
        # intersection; Citardauq form of the smaller root for stability.
        quad_a = slope**2 + 1.0
        quad_b = -2.0 * (slope + 1.0)
        quad_c = 1.0
        x_meet = (2.0 * quad_c) / (-quad_b + math.sqrt(quad_b * quad_b - 4 * quad_a * quad_c))
        # Left proper triangle plus the region between circle and baseline.
        concave_area = (x_meet**2 * slope / 2) + (area_integral(1.0) - area_integral(x_meet))
        if concave_area / l_section_area < 0.001:
            return str(n)
        n += 1
if __name__ == "__main__":
print(compute())
| gpl-3.0 |
kyunghyuncho/GroundHog | experiments/nmt/sample.py | 10 | 11073 | #!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import experiments.nmt
from experiments.nmt import\
RNNEncoderDecoder,\
prototype_state,\
parse_input
from experiments.nmt.numpy_compat import argpartition
logger = logging.getLogger(__name__)
class Timer(object):
    """Accumulates wall-clock seconds across repeated start()/finish() pairs."""

    def __init__(self):
        # Running sum of elapsed time over all completed intervals.
        self.total = 0

    def start(self):
        """Mark the beginning of a timed interval."""
        self.start_time = time.time()

    def finish(self):
        """Close the current interval and add its duration to the total."""
        elapsed = time.time() - self.start_time
        self.total += elapsed
class BeamSearch(object):
    """Beam-search decoder over a compiled RNN encoder-decoder.

    NOTE(review): this code relies on Python 2 semantics — `map` returning
    a list (indexed below) and `/` performing integer division on the
    int64 index arrays.
    """

    def __init__(self, enc_dec):
        self.enc_dec = enc_dec
        state = self.enc_dec.state
        # Special target-vocabulary symbols read from the model state.
        self.eos_id = state['null_sym_target']
        self.unk_id = state['unk_sym_target']

    def compile(self):
        """Compile the four Theano functions used during search."""
        self.comp_repr = self.enc_dec.create_representation_computer()
        self.comp_init_states = self.enc_dec.create_initializers()
        self.comp_next_probs = self.enc_dec.create_next_probs_computer()
        self.comp_next_states = self.enc_dec.create_next_states_computer()

    def search(self, seq, n_samples, ignore_unk=False, minlen=1):
        """Beam-search translations of source sequence `seq`.

        Returns (fin_trans, fin_costs): finished hypotheses sorted by
        ascending cost, as a numpy object array and a cost array.
        The beam shrinks as hypotheses finish (emit the EOS symbol);
        decoding is capped at 3 * len(seq) steps.
        """
        c = self.comp_repr(seq)[0]
        # Add a leading beam axis to each initial decoder state.
        states = map(lambda x : x[None, :], self.comp_init_states(c))
        dim = states[0].shape[1]
        num_levels = len(states)
        fin_trans = []
        fin_costs = []
        trans = [[]]
        costs = [0.0]
        for k in range(3 * len(seq)):
            if n_samples == 0:
                break
            # Compute probabilities of the next words for
            # all the elements of the beam.
            beam_size = len(trans)
            last_words = (numpy.array(map(lambda t : t[-1], trans))
                if k > 0
                else numpy.zeros(beam_size, dtype="int64"))
            log_probs = numpy.log(self.comp_next_probs(c, k, last_words, *states)[0])
            # Adjust log probs according to search restrictions
            if ignore_unk:
                log_probs[:,self.unk_id] = -numpy.inf
            # TODO: report me in the paper!!!
            if k < minlen:
                log_probs[:,self.eos_id] = -numpy.inf
            # Find the best options by calling argpartition of flatten array
            next_costs = numpy.array(costs)[:, None] - log_probs
            flat_next_costs = next_costs.flatten()
            best_costs_indices = argpartition(
                flat_next_costs.flatten(),
                n_samples)[:n_samples]
            # Decypher flatten indices
            # NOTE(review): integer division here depends on Python 2 `/`.
            voc_size = log_probs.shape[1]
            trans_indices = best_costs_indices / voc_size
            word_indices = best_costs_indices % voc_size
            costs = flat_next_costs[best_costs_indices]
            # Form a beam for the next iteration
            new_trans = [[]] * n_samples
            new_costs = numpy.zeros(n_samples)
            new_states = [numpy.zeros((n_samples, dim), dtype="float32") for level
                in range(num_levels)]
            inputs = numpy.zeros(n_samples, dtype="int64")
            for i, (orig_idx, next_word, next_cost) in enumerate(
                    zip(trans_indices, word_indices, costs)):
                new_trans[i] = trans[orig_idx] + [next_word]
                new_costs[i] = next_cost
                for level in range(num_levels):
                    new_states[level][i] = states[level][orig_idx]
                inputs[i] = next_word
            new_states = self.comp_next_states(c, k, inputs, *new_states)
            # Filter the sequences that end with end-of-sequence character
            trans = []
            costs = []
            indices = []
            for i in range(n_samples):
                if new_trans[i][-1] != self.enc_dec.state['null_sym_target']:
                    trans.append(new_trans[i])
                    costs.append(new_costs[i])
                    indices.append(i)
                else:
                    # Hypothesis finished: move it out and shrink the beam.
                    n_samples -= 1
                    fin_trans.append(new_trans[i])
                    fin_costs.append(new_costs[i])
            states = map(lambda x : x[indices], new_states)
        # Dirty tricks to obtain any translation
        if not len(fin_trans):
            if ignore_unk:
                logger.warning("Did not manage without UNK")
                return self.search(seq, n_samples, False, minlen)
            elif n_samples < 500:
                logger.warning("Still no translations: try beam size {}".format(n_samples * 2))
                return self.search(seq, n_samples * 2, False, minlen)
            else:
                logger.error("Translation failed")
        fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
        fin_costs = numpy.array(sorted(fin_costs))
        return fin_trans, fin_costs
def indices_to_words(i2w, seq):
    """Map a sequence of word indices to word strings, stopping at '<eol>'.

    :param i2w: mapping from integer word index to word string
    :param seq: iterable of word indices
    :return: list of words up to (but excluding) the first '<eol>' marker

    Fix: the original used Python-2-only ``xrange`` with positional
    indexing, which raises NameError under Python 3; direct iteration is
    behaviorally identical and portable.
    """
    sen = []
    for idx in seq:
        word = i2w[idx]
        if word == '<eol>':
            break
        sen.append(word)
    return sen
def sample(lm_model, seq, n_samples,
        sampler=None, beam_search=None,
        ignore_unk=False, normalize=False,
        alpha=1, verbose=False):
    """Produce translations of `seq` via beam search or ancestral sampling.

    Exactly one of `sampler` / `beam_search` must be provided.
    Returns (sentences, costs, raw) where `raw` is the index sequences for
    beam search and None for sampling. When `normalize` is set, costs are
    divided by hypothesis length.
    """
    if beam_search:
        sentences = []
        # Minimum target length is half the source length (Py2 int division).
        trans, costs = beam_search.search(seq, n_samples,
                ignore_unk=ignore_unk, minlen=len(seq) / 2)
        if normalize:
            counts = [len(s) for s in trans]
            costs = [co / cn for co, cn in zip(costs, counts)]
        for i in range(len(trans)):
            sen = indices_to_words(lm_model.word_indxs, trans[i])
            sentences.append(" ".join(sen))
        for i in range(len(costs)):
            if verbose:
                print "{}: {}".format(costs[i], sentences[i])
        return sentences, costs, trans
    elif sampler:
        sentences = []
        all_probs = []
        costs = []
        # Draw n_samples sequences capped at 3 * (len(seq) - 1) steps.
        values, cond_probs = sampler(n_samples, 3 * (len(seq) - 1), alpha, seq)
        for sidx in xrange(n_samples):
            sen = []
            for k in xrange(values.shape[0]):
                if lm_model.word_indxs[values[k, sidx]] == '<eol>':
                    break
                sen.append(lm_model.word_indxs[values[k, sidx]])
            sentences.append(" ".join(sen))
            # NOTE(review): the first assignment to `probs` is immediately
            # overwritten by the truncated slice on the next line.
            probs = cond_probs[:, sidx]
            probs = numpy.array(cond_probs[:len(sen) + 1, sidx])
            all_probs.append(numpy.exp(-probs))
            costs.append(-numpy.sum(probs))
        if normalize:
            counts = [len(s.strip().split(" ")) for s in sentences]
            costs = [co / cn for co, cn in zip(costs, counts)]
        sprobs = numpy.argsort(costs)
        if verbose:
            for pidx in sprobs:
                print "{}: {} {} {}".format(pidx, -costs[pidx], all_probs[pidx], sentences[pidx])
            print
        return sentences, costs, None
    else:
        raise Exception("I don't know what to do")
def parse_args():
    """Build and parse the command line for the translation sampler.

    Positional: model path plus optional state-override expression.
    Flags control beam search, UNK handling, I/O files and verbosity.
    """
    arg_parser = argparse.ArgumentParser(
        "Sample (of find with beam-serch) translations from a translation model")
    arg_parser.add_argument("--state",
            required=True, help="State to use")
    arg_parser.add_argument("--beam-search",
            action="store_true", help="Beam size, turns on beam-search")
    arg_parser.add_argument("--beam-size",
            type=int, help="Beam size")
    arg_parser.add_argument("--ignore-unk",
            default=False, action="store_true",
            help="Ignore unknown words")
    arg_parser.add_argument("--source",
            help="File of source sentences")
    arg_parser.add_argument("--trans",
            help="File to save translations in")
    arg_parser.add_argument("--normalize",
            action="store_true", default=False,
            help="Normalize log-prob with the word count")
    arg_parser.add_argument("--verbose",
            action="store_true", default=False,
            help="Be verbose")
    arg_parser.add_argument("model_path",
            help="Path to the model")
    arg_parser.add_argument("changes",
            nargs="?", default="",
            help="Changes to state")
    return arg_parser.parse_args()
def main():
    """Entry point: load state and model, then either translate a file
    (beam search only) or run an interactive prompt loop."""
    args = parse_args()
    state = prototype_state()
    with open(args.state) as src:
        state.update(cPickle.load(src))
    # NOTE(review): eval of a user-supplied expression — only safe because
    # this is a local research tool run by its own user.
    state.update(eval("dict({})".format(args.changes)))
    logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
    rng = numpy.random.RandomState(state['seed'])
    # skip_init=True: weights come from the saved model, not random init.
    enc_dec = RNNEncoderDecoder(state, rng, skip_init=True)
    enc_dec.build()
    lm_model = enc_dec.create_lm_model()
    lm_model.load(args.model_path)
    indx_word = cPickle.load(open(state['word_indx'],'rb'))
    sampler = None
    beam_search = None
    if args.beam_search:
        beam_search = BeamSearch(enc_dec)
        beam_search.compile()
    else:
        sampler = enc_dec.create_sampler(many_samples=True)
    idict_src = cPickle.load(open(state['indx_word'],'r'))
    if args.source and args.trans:
        # Actually only beam search is currently supported here
        assert beam_search
        assert args.beam_size
        fsrc = open(args.source, 'r')
        ftrans = open(args.trans, 'w')
        start_time = time.time()
        n_samples = args.beam_size
        total_cost = 0.0
        logging.debug("Beam size: {}".format(n_samples))
        for i, line in enumerate(fsrc):
            seqin = line.strip()
            seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
            if args.verbose:
                print "Parsed Input:", parsed_in
            trans, costs, _ = sample(lm_model, seq, n_samples, sampler=sampler,
                    beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)
            # Lowest cost = most probable translation.
            best = numpy.argmin(costs)
            print >>ftrans, trans[best]
            if args.verbose:
                print "Translation:", trans[best]
            total_cost += costs[best]
            # Periodically flush output and report throughput.
            if (i + 1) % 100 == 0:
                ftrans.flush()
                logger.debug("Current speed is {} per sentence".
                        format((time.time() - start_time) / (i + 1)))
        print "Total cost of the translations: {}".format(total_cost)
        fsrc.close()
        ftrans.close()
    else:
        # Interactive loop: read a sentence and sample settings from stdin.
        while True:
            try:
                seqin = raw_input('Input Sequence: ')
                n_samples = int(raw_input('How many samples? '))
                alpha = None
                if not args.beam_search:
                    alpha = float(raw_input('Inverse Temperature? '))
                seq,parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
                print "Parsed Input:", parsed_in
            except Exception:
                print "Exception while parsing your input:"
                traceback.print_exc()
                continue
            sample(lm_model, seq, n_samples, sampler=sampler,
                    beam_search=beam_search,
                    ignore_unk=args.ignore_unk, normalize=args.normalize,
                    alpha=alpha, verbose=True)
if __name__ == "__main__":
main()
| bsd-3-clause |
vmahuli/tempest | tempest/api/compute/v3/servers/test_server_metadata_negative.py | 4 | 6645 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ServerMetadataV3NegativeTest(base.BaseV3ComputeTest):
    """Negative tests for the Nova v3 server-metadata API: oversized or
    blank keys, non-existent servers, and per-tenant quota limits."""

    @classmethod
    def setUpClass(cls):
        super(ServerMetadataV3NegativeTest, cls).setUpClass()
        cls.client = cls.servers_client
        cls.quotas = cls.quotas_client
        cls.tenant_id = cls.client.tenant_id
        # One ACTIVE server with empty metadata shared by all tests.
        resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
        cls.server_id = server['id']

    @test.skip_because(bug="1273948")
    @test.attr(type=['gate', 'negative'])
    def test_server_create_metadata_key_too_long(self):
        # Attempt to start a server with a meta-data key that is > 255
        # characters
        # Tryset_server_metadata_item a few values
        for sz in [256, 257, 511, 1023]:
            key = "k" * sz
            meta = {key: 'data1'}
            self.assertRaises(exceptions.BadRequest,
                              self.create_test_server,
                              meta=meta)
        # no teardown - all creates should fail

    @test.attr(type=['negative', 'gate'])
    def test_create_server_metadata_blank_key(self):
        # Blank key should trigger an error.
        meta = {'': 'data1'}
        self.assertRaises(exceptions.BadRequest,
                          self.create_test_server,
                          meta=meta)

    @test.attr(type=['negative', 'gate'])
    def test_server_metadata_non_existent_server(self):
        # GET on a non-existent server should not succeed
        non_existent_server_id = data_utils.rand_uuid()
        self.assertRaises(exceptions.NotFound,
                          self.client.get_server_metadata_item,
                          non_existent_server_id,
                          'test2')

    @test.attr(type=['negative', 'gate'])
    def test_list_server_metadata_non_existent_server(self):
        # List metadata on a non-existent server should not succeed
        non_existent_server_id = data_utils.rand_uuid()
        self.assertRaises(exceptions.NotFound,
                          self.client.list_server_metadata,
                          non_existent_server_id)

    @test.attr(type=['negative', 'gate'])
    def test_wrong_key_passed_in_body(self):
        # Raise BadRequest if key in uri does not match
        # the key passed in body.
        meta = {'testkey': 'testvalue'}
        self.assertRaises(exceptions.BadRequest,
                          self.client.set_server_metadata_item,
                          self.server_id, 'key', meta)

    @test.attr(type=['negative', 'gate'])
    def test_set_metadata_non_existent_server(self):
        # Set metadata on a non-existent server should not succeed
        non_existent_server_id = data_utils.rand_uuid()
        meta = {'meta1': 'data1'}
        self.assertRaises(exceptions.NotFound,
                          self.client.set_server_metadata,
                          non_existent_server_id,
                          meta)

    @test.attr(type=['negative', 'gate'])
    def test_update_metadata_non_existent_server(self):
        # An update should not happen for a non-existent server
        non_existent_server_id = data_utils.rand_uuid()
        meta = {'key1': 'value1', 'key2': 'value2'}
        self.assertRaises(exceptions.NotFound,
                          self.client.update_server_metadata,
                          non_existent_server_id,
                          meta)

    @test.attr(type=['negative', 'gate'])
    def test_update_metadata_with_blank_key(self):
        # Blank key should trigger an error
        meta = {'': 'data1'}
        self.assertRaises(exceptions.BadRequest,
                          self.client.update_server_metadata,
                          self.server_id, meta=meta)

    @test.attr(type=['negative', 'gate'])
    def test_delete_metadata_non_existent_server(self):
        # Should not be able to delete metadata item from a non-existent server
        non_existent_server_id = data_utils.rand_uuid()
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_server_metadata_item,
                          non_existent_server_id,
                          'd')

    @test.attr(type=['negative', 'gate'])
    def test_metadata_items_limit(self):
        # Raise a 413 OverLimit exception while exceeding metadata items limit
        # for tenant.
        _, quota_set = self.quotas.get_quota_set(self.tenant_id)
        quota_metadata = quota_set['metadata_items']
        req_metadata = {}
        # Build one more item than the quota allows.
        for num in range(1, quota_metadata + 2):
            req_metadata['key' + str(num)] = 'val' + str(num)
        self.assertRaises(exceptions.OverLimit,
                          self.client.set_server_metadata,
                          self.server_id, req_metadata)

        # Raise a 413 OverLimit exception while exceeding metadata items limit
        # for tenant (update).
        self.assertRaises(exceptions.OverLimit,
                          self.client.update_server_metadata,
                          self.server_id, req_metadata)

    @test.attr(type=['negative', 'gate'])
    def test_set_server_metadata_blank_key(self):
        # Raise a bad request error for blank key.
        # set_server_metadata will replace all metadata with new value
        meta = {'': 'data1'}
        self.assertRaises(exceptions.BadRequest,
                          self.client.set_server_metadata,
                          self.server_id, meta=meta)

    @test.attr(type=['negative', 'gate'])
    def test_set_server_metadata_missing_metadata(self):
        # Raise a bad request error for a missing metadata field
        # set_server_metadata will replace all metadata with new value
        meta = {'meta1': 'data1'}
        self.assertRaises(exceptions.BadRequest,
                          self.client.set_server_metadata,
                          self.server_id, meta=meta, no_metadata_field=True)
| apache-2.0 |
sparkslabs/kamaelia_ | Sketches/MPS/Experiments/360/Compose/GUI/ArgumentsPanel.py | 6 | 5781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Kamaelia.Support.Tk.Scrolling import ScrollingMenu
from Axon.Ipc import producerFinished, shutdownMicroprocess
import Tkinter
import pprint
class ArgumentsPanel(Tkinter.Frame):
    """Tk panel that shows a component class's docs and one entry field per
    constructor argument, and can render the resulting instantiation."""

    def __init__(self, parent, theclass):
        """Build the widget grid from `theclass`, a dict with keys
        'class', 'module', 'classdoc', 'initdoc', 'args' and 'theclass'.
        'args' maps 'std' to (name[, default]) pairs and '*'/'**' to flags.
        """
        Tkinter.Frame.__init__(self, parent)

        self.theclass = theclass
        # pprint.pprint(theclass)
        # build widgets
        row=0
        if self.theclass['classdoc']:
            self.classdoclabel = Tkinter.Label(self, text = self.theclass['classdoc'], justify="left")
            # Strip font-size/style variants; keep family and size only.
            self.classdoclabel['font'] = " ".join(self.classdoclabel['font'].split(" ")[0:2])
            self.classdoclabel.grid(row=row, column=0,columnspan=2,
                                    sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
            row+=1

        if self.theclass['initdoc']:
            self.initdoclabel = Tkinter.Label(self, text = self.theclass['initdoc'], justify="left")
            self.initdoclabel['font'] = " ".join(self.initdoclabel['font'].split(" ")[0:2])
            self.initdoclabel.grid(row=row, column=0, columnspan=2,
                                   sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
            row+=1

        self.label = Tkinter.Label(self, text="ARGUMENTS:")
        self.label.grid(row=row, column=0, columnspan=2,sticky=Tkinter.W+Tkinter.S, padx=4, pady=4)
        row+=1

        # enumerate std args
        # self.args collects (argname, StringVar, default) for later rendering.
        self.args = []
        for arg in self.theclass['args']['std']:
            arglabel = Tkinter.Label(self, text=arg[0])
            arglabel.grid(row=row,column=0, sticky=Tkinter.E)

            svar = Tkinter.StringVar()
            argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
            default=""
            if len(arg)>=2:
                default = arg[1]
            svar.set(default)
            argfield.grid(row=row,column=1, sticky=Tkinter.W)

            self.args.append( (arg[0], svar, default) )
            row+=1

        # now do * and ** args
        for argname in ["*","**"]:
            if self.theclass['args'][argname]:
                arglabel = Tkinter.Label(self, text=argname)
                arglabel.grid(row=row,column=0, sticky=Tkinter.E)
                arglabel = None
                svar = Tkinter.StringVar()
                argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
                argfield.grid(row=row,column=1, sticky=Tkinter.W)

                self.args.append( (argname, svar, "") )
                row+=1
#        self.rowconfigure(row, weight=1)
#        self.grid()

    def getDef(self):
        """Return a summary dict for this class and the user's argument values."""
        return { "name"          : self.theclass['class'],
                 "module"        : self.theclass['module'],
                 "instantiation" : self.getInstantiation(),
                 "configuration" : self.getConfiguration()
               }

    def getConfiguration(self):
        """Return the instantiation string"""
        # Structured form of the user's entries: positional args plus any
        # *args / **kwargs text the user typed.
        argstr = ""
        prefix = ""
        SEQUENTIALARGS = []
        TUPLEARGS = None
        DICTARGS = None
        for (argname, svar, default) in self.args:
            unspecified = False
            value = None
            text = svar.get().strip()
            default = default.strip()
            if argname != "*" and argname != "**":
                # Only record a value when it differs from the default.
                if default=="" or text != default:
                    if not text:
                        unspecified = True
                    value = text
                SEQUENTIALARGS.append( [argname, unspecified,value, default ] )
            else:
                if text:
                    if argname == "*":
                        TUPLEARGS = text
                    if argname == "**":
                        DICTARGS = text
        return { "args" : SEQUENTIALARGS,
                 "tupleargs" : TUPLEARGS ,
                 "dictargs" : DICTARGS,
                 "theclass" : self.theclass["theclass"], # FIXME: Is this a mistake, should we pass everything out?
               }

    def getInstantiation(self):
        """Return the instantiation string"""
        # Renders "name = value, ..." for changed args; unfilled required
        # args render as "<<unspecified>>".
        argstr = ""
        prefix = ""
        for (argname, svar, default) in self.args:
            text = svar.get().strip()
            default = default.strip()
            if argname != "*" and argname != "**":
                if default=="" or text != default:
                    if not text:
                        text = "<<unspecified>>"
                    argstr = argstr + prefix + argname + " = " + text
                    prefix=", "
            else:
                if text:
                    argstr = argstr + prefix + text
                    prefix=", "
        return argstr
| apache-2.0 |
superDross/pdVCF | pdVCF/compare.py | 1 | 1289 | ''' A collection of functions used to compare Vcf objects.'''
import re
def common_variants(vcf1, vcf2):
    ''' Find common variants between two VCF objects
        and return common variants in a list.

    Args:
        vcf1: first VCF object
        vcf2: second VCF object

    Notes:
        this could be replaced with a magic method
        in the Vcf Class
    '''
    first_uids = set(vcf1.vcf.index.values)
    second_uids = set(vcf2.vcf.index.values)
    return list(first_uids & second_uids)
# relic function that could be potentially useful in the near future
def multi2bi(df):
''' Convert multi-allelic UIDs, deriving from a
pdVCF, in a list to bi-allelic UIDs.
Args:
variants: a pdVCF dataframe
Returns:
list of UIDs from pdVCF with multi-allelic
ones converted to bi-allelic e.g.
['2:1234-G/C,T'] -> ['2:1234-G/C', '2:1234-G/T']
'''
variants = df.index.tolist()
result = variants[:]
for variant in variants:
if ',' in variant:
multi = re.split('[,/]', variant)
bi = ["/".join((multi[0], x)) for x in multi[1:]]
result.pop(result.index(variant)) # Removes multi-allelic variant from list
result = result + bi
return result
| gpl-3.0 |
davehouse/image_diff | ssim_diff.py | 1 | 2303 | '''
Compare images for similarity using the scikit-image structural similarity test
'''
import os
import sys
from skssim.ssim import structural_similarity as ssim
from skssim.dtype import img_as_float
import numpy as np
import Image, ImageMath
import ImageChops
import math
# Input file names: defaults, overridable by the first two CLI arguments.
fname1 = "imageA.jpeg"
fname2 = "imageB.jpeg"
if len(sys.argv) > 2:
    fname1 = sys.argv[1]
    fname2 = sys.argv[2]

# PIL image difference
# abs( image2 - image1 )
# does not account for perceptual similarity
img = Image.open(fname1)
img2 = Image.open(fname2)
diff = ImageChops.difference(img2, img)
# getbbox() returns the bounding box of non-zero difference pixels,
# or None when the images are identical.
box = diff.getbbox()
if box:
    box_size = (box[2] - box[0]) * (box[3] - box[1])
else:
    box_size = 0
print "PIL difference bounding box is %d pixels." % box_size
def image_entropy(img):
    """Return the Shannon entropy (in bits) of an image's histogram.

    Args:
        img: a PIL image exposing histogram() (a list of per-bin counts).

    Returns:
        float entropy; 0.0 for a uniform single-bin histogram.

    References kept from the original:
    http://brainacle.com/calculating-image-entropy-with-python-how-and-why.html
    same thing using numpy was slower running locally:
    http://stackoverflow.com/questions/5524179/how-to-detect-motion-between-two-pil-images-wxpython-webcam-integration-exampl
    """
    # Fix: the original did `w, h = img.size` and never used the values;
    # worse, in Python 2 the comprehension variable `h` leaked and clobbered
    # the height. The size is simply not needed here.
    histogram = img.histogram()
    histogram_length = sum(histogram)
    # Normalise bin counts into probabilities.
    samples_probability = [float(count) / histogram_length for count in histogram]
    # Skip empty bins: p*log(p) -> 0 as p -> 0, and log(0) is undefined.
    return -sum(p * math.log(p, 2) for p in samples_probability if p != 0)
# Entropy of the difference image: higher means more widespread change.
i_pdiff = image_entropy(diff)
print "PIL image difference %f" % i_pdiff
# SSIM comparison
# Structural Similarity Index
# http://scikit-image.org/docs/dev/api/skimage.measure.html#structural-similarity
# http://scikit-image.org/docs/dev/auto_examples/plot_ssim.html#example-plot-ssim-py
img = img_as_float(img)
img2 = img_as_float(img2)
# Force both arrays to the same shape so ssim/mse can compare them.
# NOTE(review): np.resize repeats/truncates the raw data buffer — it does
# NOT rescale the image content; confirm this is the intended behaviour.
if img.size > img2.size:
    img2 = np.resize(img2, (img.shape[0], img.shape[1]))
    img = np.resize(img, (img.shape[0], img.shape[1]))
elif img.size < img2.size:
    img = np.resize(img, (img2.shape[0], img2.shape[1]))
    img2 = np.resize(img2, (img2.shape[0], img2.shape[1]))
else:
    # Equal sizes: both resize calls are no-ops shape-wise (kept as-is).
    img2 = np.resize(img2, (img.shape[0], img.shape[1]))
    img = np.resize(img, (img2.shape[0], img2.shape[1]))
def mse(x, y):
    """Return the mean squared error between arrays x and y.

    x is cast to float first so integer inputs don't wrap or truncate.
    """
    diff = x.astype(float) - y
    return np.mean(diff * diff)
# Mean squared error: 0.0 means pixel-identical (after the resize above).
i_mse = mse(img, img2)
print "MSE %f" % i_mse
# SSIM in [-1, 1]; 1.0 means structurally identical images.
i_ssim = ssim(img, img2) # , dynamic_range=img2.max() - img2.min())
print "SSIM Structural Similarity %f" % i_ssim
| mit |
Yenthe666/Odoo_Samples | web_widget_color/__openerp__.py | 4 | 1595 | # -*- encoding: utf-8 -*-
############################################################################
#
# Odoo, Open Source Web Widget Color
# Copyright (C) 2012 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
# Copyright (C) 2014 Anybox <http://anybox.fr>
# Copyright (C) 2015 Taktik SA <http://taktik.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# @author Étienne Beaudry Auger <etienne.b.auger@savoirfairelinux.com>
# @author Adil Houmadi <ah@taktik.be>
#
##############################################################################
# Odoo addon manifest dict (legacy __openerp__.py format): declares the
# module's metadata, dependencies, and data files to the addon loader.
{
    'name': "Web Widget Color",
    'category': "web",
    'version': "1.0",
    "author": "Savoir-faire Linux, "
              "Anybox, "
              "Taktik SA, "
              "Odoo Community Association (OCA)",
    # Backend web client plus base are required for the widget assets.
    'depends': ['base', 'web'],
    'data': [
        'view/web_widget_color_view.xml'
    ],
    # QWeb templates bundled with the widget's JavaScript.
    'qweb': [
        'static/src/xml/widget.xml',
    ],
    'auto_install': False,
    'installable': True,
    'web_preload': True,
}
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.