repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
ypwalter/fxos-certsuite
|
mcts/webapi_tests/runner.py
|
6
|
5731
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import importlib
import inspect
import os
import sys
import json
from fnmatch import fnmatch
from mozdevice import DeviceManagerADB
from mozlog.structured import commandline
from mcts.webapi_tests import semiauto
from mcts.webapi_tests.semiauto import environment
# Test groups applicable when running in 'stingray' mode; iter_tests()
# skips every other group when mode == 'stingray'.
stingray_test = ['apps', 'device_storage', 'geolocation',
                 'moztime', 'notification', 'tcp_socket']
def iter_tests(start_dir, pattern="test_*.py", mode='phone'):
    """List available Web API tests and yield (group, tests) tuples,
    where tests is a list of test method names.

    Walks ``start_dir`` recursively; every directory is a group (its
    path relative to ``start_dir``) and every file matching ``pattern``
    is imported so its TestCase methods can be enumerated.

    Arguments:
        start_dir: directory to scan for test modules.
        pattern: fnmatch pattern selecting test files.
        mode: 'phone' lists every group; 'stingray' restricts the
            listing to the groups in ``stingray_test``.

    Raises:
        ImportError: if a symlink cycle is detected while walking.
    """
    start_dir = os.path.abspath(start_dir)
    visited = set()
    for root, dirs, files in os.walk(start_dir, followlinks=True):
        if root in visited:
            raise ImportError("Recursive symlink: %r" % root)
        visited.add(root)
        group = os.path.relpath(root, start_dir)
        if mode == 'stingray' and group not in stingray_test:
            continue
        tests = []
        for filename in files:
            path = os.path.abspath(os.path.join(root, filename))
            if not fnmatch(filename, pattern) or not os.path.exists(path):
                continue
            relpath = os.path.relpath(path, start_dir)
            if relpath.endswith(".py"):
                relpath = relpath[:-3]
            name = "mcts.webapi_tests.%s" % relpath.replace(os.path.sep, ".")
            try:
                module = importlib.import_module(name)
            except ImportError:
                # Modules with import problems shouldn't abort the listing.
                continue
            # Collect test_* method names from TestCase subclasses that are
            # *defined* in this module (skip classes merely imported into it).
            # BUG FIX: the original used ``zip(*members)[1]``, which is
            # Python-2-only (zip returns an iterator on Python 3).
            for _, member in inspect.getmembers(module):
                if not isinstance(member, type):
                    continue
                if not issubclass(member, semiauto.testcase.TestCase):
                    continue
                if getattr(member, "__module__", None) != name:
                    continue
                tests.extend(attr for attr, _ in inspect.getmembers(member)
                             if attr.startswith("test_"))
        if tests:
            yield group, tests
def main():
    """Command-line entry point for the guided Web API test runner.

    Parses arguments, optionally lists test groups or tests, then loads
    and runs the selected semiauto tests.  Returns a process exit
    status: 0 on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(
        description="Runner for guided Web API tests.")
    parser.add_argument("-l", "--list-test-groups", action="store_true",
                        help="List all logical test groups")
    parser.add_argument("-a", "--list-all-tests", action="store_true",
                        help="List all tests")
    parser.add_argument("-i", "--include", metavar="GROUP", action="append", default=[],
                        help="Only include specified group(s) in run, include several "
                             "groups by repeating flag")
    parser.add_argument("-n", "--no-browser", action="store_true",
                        help="Don't start a browser but wait for manual connection")
    parser.add_argument("--version", action="store", dest="version",
                        help="B2G version")
    parser.add_argument('-H', '--host',
                        help='Hostname or ip for target device',
                        action='store', default='localhost')
    parser.add_argument('-P', '--port',
                        help='Port for target device',
                        action='store', default=2828)
    parser.add_argument('-m', '--mode',
                        help='Test mode (stingray, phone) default (phone)',
                        action='store', default='phone')
    parser.add_argument('-p', "--device-profile", action="store", type=os.path.abspath,
                        help="specify the device profile file path which could include skipped test case information")
    parser.add_argument(
        "-v", dest="verbose", action="store_true", help="Verbose output")
    commandline.add_logging_group(parser)
    args = parser.parse_args(sys.argv[1:])

    logger = commandline.setup_logging(
        "webapi", vars(args), {"raw": sys.stdout})

    if args.list_test_groups and len(args.include) > 0:
        # BUG FIX: the original ``print >> sys.stderr(...)`` *called*
        # sys.stderr, raising TypeError instead of printing the error.
        sys.stderr.write("%s: error: cannot list and include test "
                         "groups at the same time\n" % sys.argv[0])
        parser.print_usage()
        sys.exit(1)

    testgen = iter_tests(os.path.dirname(__file__), mode=args.mode)

    if args.list_test_groups:
        for group, _ in testgen:
            print(group)
        return 0
    elif args.list_all_tests:
        for group, tests in testgen:
            for test in tests:
                print("%s.%s" % (group, test))
        return 0

    # Tell the test-case infrastructure where the target device listens.
    semiauto.testcase._host = args.host
    semiauto.testcase._port = int(args.port)
    env = environment.get(environment.InProcessTestEnvironment)

    # Optional device profile: lists test cases to skip for this device.
    environment.env.device_profile = None
    if args.device_profile:
        with open(args.device_profile, 'r') as device_profile_file:
            environment.env.device_profile = json.load(device_profile_file)['result']

    test_loader = semiauto.TestLoader(version=args.version)
    # Run the explicitly included groups, or every discovered group.
    selected = args.include or [g for g, _ in testgen]
    tests = test_loader.loadTestsFromNames(
        ["mcts.webapi_tests.%s" % t for t in selected], None)
    results = semiauto.run(tests,
                           logger=logger,
                           spawn_browser=not args.no_browser,
                           verbosity=2 if args.verbose else 1)
    return 0 if results.wasSuccessful() else 1
# Allow running this module directly as a script; exit status reflects
# the test run result.
if __name__ == "__main__":
    sys.exit(main())
|
mpl-2.0
|
papouso/odoo
|
addons/account_anglo_saxon/purchase.py
|
427
|
2043
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order(osv.osv):
    _name = "purchase.order"
    _inherit = "purchase.order"
    _description = "Purchase Order"

    def _choose_account_from_po_line(self, cr, uid, order_line, context=None):
        """Anglo-saxon override: for non-service products, prefer the
        product's stock input account (falling back to the category's),
        mapped through the order's fiscal position."""
        account_id = super(purchase_order, self)._choose_account_from_po_line(
            cr, uid, order_line, context=context)
        product = order_line.product_id
        if product and product.type != 'service':
            stock_input = product.property_stock_account_input
            acc_id = stock_input and stock_input.id
            if not acc_id:
                categ_input = product.categ_id.property_stock_account_input_categ
                acc_id = categ_input and categ_input.id
            if acc_id:
                fpos = order_line.order_id.fiscal_position or False
                fpos_obj = self.pool.get('account.fiscal.position')
                account_id = fpos_obj.map_account(cr, uid, fpos, acc_id)
        return account_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
vFense/vFenseAgent-nix
|
agent/deps/mac/Python-2.7.5/lib/python2.7/encodings/latin_1.py
|
853
|
1264
|
""" Python 'latin-1' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless Latin-1 (ISO 8859-1) codec backed by the C
    implementations in the ``codecs`` module."""

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.latin_1_encode
    decode = codecs.latin_1_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental Latin-1 encoder; stateless, so ``final`` is ignored."""

    def encode(self, input, final=False):
        # latin_1_encode returns (bytes, length_consumed); callers of the
        # incremental API only want the bytes.
        encoded, _ = codecs.latin_1_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental Latin-1 decoder; stateless, so ``final`` is ignored."""

    def decode(self, input, final=False):
        # latin_1_decode returns (text, length_consumed); drop the length.
        decoded, _ = codecs.latin_1_decode(input, self.errors)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Latin-1 stream writer; all behavior comes from the base classes."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Latin-1 stream reader; all behavior comes from the base classes."""
    pass
class StreamConverter(StreamWriter,StreamReader):
    """Stream converter with the transform directions reversed.

    The swap relative to Codec is deliberate: a converter *decodes* on
    the encode path and *encodes* on the decode path.
    """

    encode = codecs.latin_1_decode
    decode = codecs.latin_1_encode
### encodings module API
def getregentry():
    """Return the CodecInfo record the encodings package uses to
    register this codec.

    The codec registers under 'iso8859-1'; the 'latin-1' spelling is
    presumably mapped to this module by the encodings aliases table
    (not visible in this file).
    """
    return codecs.CodecInfo(
        name='iso8859-1',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
lgpl-3.0
|
krasin/distcc
|
include_server/statistics.py
|
26
|
5225
|
#! /usr/bin/python2.4
#
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
"""Statistics gathering for the distcc-pump include server."""
__author__ = "Nils Klarlund"
import time
# --- Module-level counters and timers ---------------------------------
# Mutated by the include server as it resolves includes and reported by
# PrintStatistics().
resolve_expr_counter = 0      # number of computed includes
master_hit_counter = 0        # summary node hits
master_miss_counter = 0       # summary node misses
resolve_counter = 0           # calls of Resolve method
search_counter = 0            # number of probes in directory lists
build_stat_counter = 0        # number of stats in build_stat_cache
sys_stat_counter = 0          # number of calls to OS stat
translation_unit_counter = 0  # number of translation units
# Timing state maintained by StartTiming()/EndTiming().
start_time = None
translation_unit_time = None
min_time = float('Inf')
max_time = 0.0
total_time = 0.0
parse_file_total_time = 0.0
parse_file_counter = 0        # number of files parsed
parse_file_counter_last = 0   # the number of files parsed after previous
                              # translation unit
quote_path_total = 0          # total length of quote directory lists
angle_path_total = 0          # total length of angle directory lists
len_calculated_closure = 0    # number of all included files
len_calculated_closure_nonsys = 0  # same, but excluding system files
                                   # known to compiler
len_exact_closure = 0         # number of all files in CPP-calculated closure
len_surplus_nonsys = 0        # the difference between
                              # len_calculated_closure and number of files
                              # in exact closure that are not known to compiler
find_node_counter = 0         # number of times FindNode is called
def StartTiming():
  """Mark the start of a request to find an include closure."""
  # BUG FIX: the docstring previously followed the ``global`` statement,
  # so it was a no-op expression and never bound to __doc__; a docstring
  # must be the function's first statement.
  global start_time, translation_unit_counter
  translation_unit_counter += 1
  # NOTE(review): time.clock() was removed in Python 3.8; this module is
  # Python 2 (see the print statements in PrintStatistics).
  start_time = time.clock()
def EndTiming():
  """Mark the end of an include closure calculation."""
  global translation_unit_time, min_time, max_time, total_time
  # Record the elapsed wall time and fold it into the running extrema.
  elapsed = time.clock() - start_time
  translation_unit_time = elapsed
  if elapsed < min_time:
    min_time = elapsed
  if elapsed > max_time:
    max_time = elapsed
  total_time += elapsed
def PrintStatistics(include_analyzer):
  """Print counters, cache sizes, and timing summaries to stdout.

  Argument:
    include_analyzer: the analyzer whose caches and directory lists are
      measured; optional attributes are only reported when present.
  """
  # Avoid division by zero in non-interesting case.
  if translation_unit_counter == 0: return
  print "TRANSLATION_UNIT: %s" % include_analyzer.translation_unit
  # Timing summary across all translation units processed so far.
  print (("TIME: last %-2.3fs, min %-2.3fs, "
          "max %-2.3fs, average %-2.3fs, #: %5d, total: %5.1fs") %
         (translation_unit_time, min_time, max_time,
          total_time/translation_unit_counter,
          translation_unit_counter, total_time))
  print ("PARSING: total %-5.3fs, total count: %4d, new files: %-5d" %
         (parse_file_total_time, parse_file_counter,
          parse_file_counter - parse_file_counter_last))
  print "COUNTER: resolve_expr_counter: %8d" % resolve_expr_counter
  print "COUNTER: master_hit_counter: %8d" % master_hit_counter
  print "COUNTER: master_miss_counter: %8d" % master_miss_counter
  print "SIZE: master_cache %8d" % (
      len(include_analyzer.master_cache))
  print "COUNTER: sys_stat_counter: %10d" % sys_stat_counter
  print "COUNTER: build_stat_counter: %10d" % build_stat_counter
  if resolve_counter != 0:
    print "COUNTER: search_counter (average): %4.1f" % (
        float(search_counter)/resolve_counter)
  print "SIZE: include_dir_pairs: %8d" % (
      len(include_analyzer.include_dir_pairs))
  # quote_dirs/angle_dirs may not exist yet on the analyzer, hence the
  # attribute-presence checks before reporting them.
  if 'quote_dirs' in include_analyzer.__dict__:
    print "SIZE: quote_path %8d" % (
        len(include_analyzer.quote_dirs))
  if 'angle_dirs' in include_analyzer.__dict__:
    print "SIZE: angle_path %8d" % (
        len(include_analyzer.angle_dirs))
  print "SIZE: quote_path (average) %4.1f" % (
      float(quote_path_total)/translation_unit_counter)
  print "SIZE: angle_path (average) %4.1f" % (
      float(angle_path_total)/translation_unit_counter)
  print "SIZE: quote_dirs_set %8d" % (
      len(include_analyzer.quote_dirs_set))
  print "SIZE: angle_dirs_set: %8d" % (
      len(include_analyzer.angle_dirs_set))
  print
  print "SIZE: calculated_closure: %8d" % len_calculated_closure
  print "SIZE: calculated_closure_nonsys: %8d" % (
      len_calculated_closure_nonsys)
  print "SIZE: exact_closure %8d" % len_exact_closure
  print "SIZE: surplus_nonsys %8d" % len_surplus_nonsys
  print
|
gpl-2.0
|
EDUlib/edx-platform
|
lms/djangoapps/course_api/api.py
|
1
|
9596
|
"""
Course API
"""
import logging
import search
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User # lint-amnesty, pylint: disable=imported-auth-user
from django.urls import reverse
from edx_django_utils.monitoring import function_trace
from edx_when.api import get_dates_for_course
from opaque_keys.edx.django.models import CourseKeyField
from rest_framework.exceptions import PermissionDenied
from common.djangoapps.student.models import CourseAccessRole
from common.djangoapps.student.roles import GlobalStaff
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.courses import (
get_course_overview_with_access,
get_courses,
get_permission_for_course_about
)
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.api.view_utils import LazySequence
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .permissions import can_view_courses_for_username
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
UNKNOWN_BLOCK_DISPLAY_NAME = 'UNKNOWN'
def get_effective_user(requesting_user, target_username):
    """
    Return the user on whose behalf information should be viewed.

    An empty ``target_username`` means the anonymous user; any other
    username different from the requester's requires permission to view
    courses for that user.

    Raises:
        PermissionDenied: if the requester may not act for
            ``target_username``.
    """
    if target_username == requesting_user.username:
        return requesting_user
    if target_username == '':
        return AnonymousUser()
    if can_view_courses_for_username(requesting_user, target_username):
        return User.objects.get(username=target_username)
    raise PermissionDenied()
def course_detail(request, username, course_key):
    """
    Return a single course identified by `course_key`.

    The course must be visible to the user identified by `username`, and
    the logged-in user must have permission to view courses available to
    that user.

    Arguments:
        request (HTTPRequest): identifies the logged-in user and is used
            to instantiate the course module for the about description.
        username (string): the user the requester would like to be
            identified as.
        course_key (CourseKey): identifies the course of interest.

    Return value:
        `CourseOverview` object representing the requested course.
    """
    effective_user = get_effective_user(request.user, username)
    permission = get_permission_for_course_about()
    overview = get_course_overview_with_access(effective_user, permission, course_key)
    # Record who the overview was fetched for; downstream serializers read this.
    overview.effective_user = effective_user
    return overview
def _filter_by_search(course_queryset, search_term):
    """
    Restrict *course_queryset* to courses whose ids appear in the
    courseware-search results for *search_term*.

    Returns the queryset unchanged when search is disabled or no term
    was given.
    """
    if not settings.FEATURES['ENABLE_COURSEWARE_SEARCH'] or not search_term:
        return course_queryset

    # Return all the results, 10K is the maximum allowed value for ElasticSearch.
    # We should use 0 after upgrading to 1.1+:
    # - https://github.com/elastic/elasticsearch/commit/8b0a863d427b4ebcbcfb1dcd69c996c52e7ae05e
    max_results = 10000
    discovered = search.api.course_discovery_search(search_term, size=max_results)
    matching_ids = {hit['data']['id'] for hit in discovered['results']}
    matching_courses = (
        course for course in course_queryset if str(course.id) in matching_ids
    )
    return LazySequence(matching_courses, est_len=len(course_queryset))
def list_courses(request, username, org=None, filter_=None, search_term=None):
    """
    Return all courses visible to the effective user.

    Arguments:
        request (HTTPRequest): identifies the logged-in user.
        username (string): the user the requester would like to be
            identified as.

    Keyword Arguments:
        org (string): if given, restrict to `CourseOverview` objects
            belonging to the organization with this org code
            (e.g., "HarvardX"); case-insensitive.
        filter_ (dict): if given, filter visible `CourseOverview`
            objects by these key-value pairs.
        search_term (string): if given, filter courses via courseware
            search (ElasticSearch).

    Return value:
        Iterable of `CourseOverview` objects.
    """
    effective_user = get_effective_user(request.user, username)
    visible_courses = get_courses(effective_user, org=org, filter_=filter_)
    return _filter_by_search(visible_courses, search_term)
@function_trace('list_course_keys')
def list_course_keys(request, username, role):
    """
    Yield all available CourseKeys for the user having the given role.

    The courses returned include those for which the user identified by
    `username` has the given role. Additionally, the logged in user
    should have permission to view courses available to that user.

    Note: This function does not use branding to determine courses.

    Arguments:
        request (HTTPRequest):
            Used to identify the logged-in user and to instantiate the course
            module to retrieve the course about description
        username (string):
            The name of the user the logged-in user would like to be
            identified as

    Keyword Arguments:
        role (string):
            Course keys are filtered such that only those for which the
            user has the specified role are returned.

    Return value:
        Yield `CourseKey` objects representing the collection of courses.
    """
    user = get_effective_user(request.user, username)
    all_course_keys = CourseOverview.get_all_course_keys()
    # Global staff have access to all courses. Filter courses for non-global staff.
    if GlobalStaff().has_user(user):
        return all_course_keys
    if role == 'staff':
        # This short-circuit implementation bypasses has_access() which we think is too slow for some users when
        # evaluating staff-level course access for Insights. Various tickets have context on this issue: CR-2487,
        # TNL-7448, DESUPPORT-416, and probably more.
        #
        # This is a simplified implementation that does not consider org-level access grants (e.g. when course_id is
        # empty).
        filtered_course_keys = (
            CourseAccessRole.objects.filter(
                user=user,
                # Having the instructor role implies staff access.
                role__in=['staff', 'instructor'],
            )
            # We need to check against CourseOverview so that we don't return any Libraries.
            # NOTE(review): raw SQL join via .extra(); relies on the
            # course_overviews_courseoverview table name staying stable.
            .extra(tables=['course_overviews_courseoverview'], where=['course_id = course_overviews_courseoverview.id'])
            # For good measure, make sure we don't return empty course IDs.
            .exclude(course_id=CourseKeyField.Empty)
            .order_by('course_id')
            .values_list('course_id', flat=True)
            .distinct()
        )
    else:
        # This is the original implementation which still covers the case where role = "instructor":
        filtered_course_keys = LazySequence(
            (
                course_key for course_key in all_course_keys
                if has_access(user, role, course_key)
            ),
            est_len=len(all_course_keys)
        )
    return filtered_course_keys
def get_due_dates(request, course_key, user):
    """
    Get due date information for a user for blocks in a course.

    Arguments:
        request: the request object
        course_key (CourseKey): the CourseKey for the course
        user: the user object for which we want due date information

    Returns:
        due_dates (list): list of dicts with keys ``name`` (block
        display name), ``url`` (absolute deep link to the block) and
        ``date`` (the due date).
    """
    course_dates = get_dates_for_course(course_key, user)
    store = modulestore()

    due_dates = []
    for (block_key, date_type), date in course_dates.items():
        if date_type != 'due':
            continue
        try:
            display_name = store.get_item(block_key).display_name
        except ItemNotFoundError:
            logger.exception(f'Failed to get block for due date item with key: {block_key}')
            display_name = UNKNOWN_BLOCK_DISPLAY_NAME
        # Build an absolute deep link to the block inside the course.
        deep_link = request.build_absolute_uri(
            reverse('jump_to', args=[course_key, block_key]))
        due_dates.append({
            'name': display_name,
            'url': deep_link,
            'date': date,
        })
    return due_dates
def get_course_run_url(request, course_id):
    """
    Return the absolute URL of the course-home page for a course run.

    Arguments:
        request: the request object
        course_id (string): the course id of the course

    Returns:
        (string): the URL to the course run associated with course_id
    """
    relative_url = reverse('openedx.course_experience.course_home', args=[course_id])
    return request.build_absolute_uri(relative_url)
|
agpl-3.0
|
kxliugang/edx-platform
|
lms/djangoapps/shoppingcart/admin.py
|
102
|
5401
|
"""Django admin interface for the shopping cart models. """
from ratelimitbackend import admin
from shoppingcart.models import (
PaidCourseRegistrationAnnotation,
Coupon,
DonationConfiguration,
Invoice,
CourseRegistrationCodeInvoiceItem,
InvoiceTransaction
)
class SoftDeleteCouponAdmin(admin.ModelAdmin):
    """
    Admin for the Coupon table.

    Coupons are soft-deleted: "deleting" only clears ``is_active`` so
    the row is hidden from the changelist but kept in the database.
    """
    fields = ('code', 'description', 'course_id', 'percentage_discount', 'created_by', 'created_at', 'is_active')
    raw_id_fields = ("created_by",)
    readonly_fields = ('created_at',)
    actions = ['really_delete_selected']

    def queryset(self, request):
        """Return the QuerySet of instances editable by the admin site
        (used by changelist_view); only active coupons are listed."""
        # Queryset with all the coupons including the soft-deleted ones:
        # self.model._default_manager.get_query_set()
        return self.model._default_manager.get_active_coupons_query_set()  # pylint: disable=protected-access

    def get_actions(self, request):
        """Remove the built-in hard-delete bulk action."""
        actions = super(SoftDeleteCouponAdmin, self).get_actions(request)
        del actions['delete_selected']
        return actions

    def really_delete_selected(self, request, queryset):
        """Soft-delete every selected coupon (bulk action override)."""
        for obj in queryset:
            obj.is_active = False
            obj.save()

        if queryset.count() == 1:
            message_bit = "1 coupon entry was"
        else:
            message_bit = "%s coupon entries were" % queryset.count()
        self.message_user(request, "%s successfully deleted." % message_bit)

    def delete_model(self, request, obj):
        """Soft-delete a single coupon (instance delete override)."""
        obj.is_active = False
        obj.save()

    # BUG FIX: the label previously read "Delete s selected entries".
    really_delete_selected.short_description = "Delete selected entries"
class CourseRegistrationCodeInvoiceItemInline(admin.StackedInline):
    """Admin for course registration code invoice items.

    Displayed inline within the invoice admin UI.
    """
    model = CourseRegistrationCodeInvoiceItem
    # No blank extra forms: items are created by the sales flow, not here.
    extra = 0
    can_delete = False
    readonly_fields = (
        'qty',
        'unit_price',
        'currency',
        'course_id',
    )

    def has_add_permission(self, request):
        # Invoice items are never added through the admin.
        return False
class InvoiceTransactionInline(admin.StackedInline):
    """Admin for invoice transactions.

    Displayed inline within the invoice admin UI.
    """
    model = InvoiceTransaction
    extra = 0
    # Audit columns are filled in programmatically, not by the editor.
    readonly_fields = (
        'created',
        'modified',
        'created_by',
        'last_modified_by'
    )
class InvoiceAdmin(admin.ModelAdmin):
    """Admin for invoices.

    This is intended for the internal finance team
    to be able to view and update invoice information,
    including payments and refunds.
    """
    date_hierarchy = 'created'
    # NOTE(review): ``can_delete`` is an InlineModelAdmin option and is
    # inert on a ModelAdmin; deletion is actually blocked by
    # has_delete_permission below. Confirm before removing.
    can_delete = False
    search_fields = (
        'internal_reference',
        'customer_reference_number',
        'company_name',
    )
    fieldsets = (
        (
            None, {
                'fields': (
                    'internal_reference',
                    'customer_reference_number',
                    'created',
                    'modified',
                )
            }
        ),
        (
            'Billing Information', {
                'fields': (
                    'company_name',
                    'company_contact_name',
                    'company_contact_email',
                    'recipient_name',
                    'recipient_email',
                    'address_line_1',
                    'address_line_2',
                    'address_line_3',
                    'city',
                    'state',
                    'zip',
                    'country'
                )
            }
        )
    )
    # BUG FIX: readonly_fields was assigned twice; the first, smaller
    # assignment (('created', 'modified')) was dead code and is removed.
    readonly_fields = (
        'internal_reference',
        'customer_reference_number',
        'created',
        'modified',
        'company_name',
        'company_contact_name',
        'company_contact_email',
        'recipient_name',
        'recipient_email',
        'address_line_1',
        'address_line_2',
        'address_line_3',
        'city',
        'state',
        'zip',
        'country'
    )
    inlines = [
        CourseRegistrationCodeInvoiceItemInline,
        InvoiceTransactionInline
    ]

    def save_formset(self, request, form, formset, change):
        """Save the user who created and modified invoice transactions. """
        instances = formset.save(commit=False)
        for instance in instances:
            if isinstance(instance, InvoiceTransaction):
                # Set created_by only on first save; always refresh the
                # last modifier.
                if not hasattr(instance, 'created_by'):
                    instance.created_by = request.user
                instance.last_modified_by = request.user
                instance.save()

    def has_add_permission(self, request):
        # Invoices are created by the sales process, not through the admin.
        return False

    def has_delete_permission(self, request, obj=None):
        # Invoices are a financial record; never deletable here.
        return False
# Register the shopping-cart models with the (rate-limited) admin site.
admin.site.register(PaidCourseRegistrationAnnotation)
admin.site.register(Coupon, SoftDeleteCouponAdmin)
admin.site.register(DonationConfiguration)
admin.site.register(Invoice, InvoiceAdmin)
|
agpl-3.0
|
Gagaro/django
|
tests/raw_query/tests.py
|
119
|
12624
|
from __future__ import unicode_literals
from datetime import date
from django.db.models.query_utils import InvalidQuery
from django.test import TestCase, skipUnlessDBFeature
from .models import Author, Book, BookFkAsPk, Coffee, FriendlyAuthor, Reviewer
class RawQueryTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: four authors, four books (three by a1, one by
        # a3), two coffees and two reviewers; r1 reviews b2, b3 and b4.
        cls.a1 = Author.objects.create(first_name='Joe', last_name='Smith', dob=date(1950, 9, 20))
        cls.a2 = Author.objects.create(first_name='Jill', last_name='Doe', dob=date(1920, 4, 2))
        cls.a3 = Author.objects.create(first_name='Bob', last_name='Smith', dob=date(1986, 1, 25))
        cls.a4 = Author.objects.create(first_name='Bill', last_name='Jones', dob=date(1932, 5, 10))
        cls.b1 = Book.objects.create(
            title='The awesome book', author=cls.a1, paperback=False,
            opening_line='It was a bright cold day in April and the clocks were striking thirteen.',
        )
        cls.b2 = Book.objects.create(
            title='The horrible book', author=cls.a1, paperback=True,
            opening_line=(
                'On an evening in the latter part of May a middle-aged man '
                'was walking homeward from Shaston to the village of Marlott, '
                'in the adjoining Vale of Blakemore, or Blackmoor.'
            ),
        )
        cls.b3 = Book.objects.create(
            title='Another awesome book', author=cls.a1, paperback=False,
            opening_line='A squat grey building of only thirty-four stories.',
        )
        cls.b4 = Book.objects.create(
            title='Some other book', author=cls.a3, paperback=True,
            opening_line='It was the day my grandmother exploded.',
        )
        cls.c1 = Coffee.objects.create(brand='dunkin doughnuts')
        cls.c2 = Coffee.objects.create(brand='starbucks')
        cls.r1 = Reviewer.objects.create()
        cls.r2 = Reviewer.objects.create()
        cls.r1.reviewed.add(cls.b2, cls.b3, cls.b4)
def assertSuccessfulRawQuery(self, model, query, expected_results,
expected_annotations=(), params=[], translations=None):
"""
Execute the passed query against the passed model and check the output
"""
results = list(model.objects.raw(query, params=params, translations=translations))
self.assertProcessed(model, results, expected_results, expected_annotations)
self.assertAnnotations(results, expected_annotations)
def assertProcessed(self, model, results, orig, expected_annotations=()):
"""
Compare the results of a raw query against expected results
"""
self.assertEqual(len(results), len(orig))
for index, item in enumerate(results):
orig_item = orig[index]
for annotation in expected_annotations:
setattr(orig_item, *annotation)
for field in model._meta.fields:
# Check that all values on the model are equal
self.assertEqual(
getattr(item, field.attname),
getattr(orig_item, field.attname)
)
# This includes checking that they are the same type
self.assertEqual(
type(getattr(item, field.attname)),
type(getattr(orig_item, field.attname))
)
def assertNoAnnotations(self, results):
"""
Check that the results of a raw query contain no annotations
"""
self.assertAnnotations(results, ())
def assertAnnotations(self, results, expected_annotations):
"""
Check that the passed raw query results contain the expected
annotations
"""
if expected_annotations:
for index, result in enumerate(results):
annotation, value = expected_annotations[index]
self.assertTrue(hasattr(result, annotation))
self.assertEqual(getattr(result, annotation), value)
def test_simple_raw_query(self):
"""
Basic test of raw query with a simple database query
"""
query = "SELECT * FROM raw_query_author"
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
    def test_raw_query_lazy(self):
        """
        Raw queries are lazy: they aren't actually executed until they're
        iterated over.
        """
        q = Author.objects.raw('SELECT * FROM raw_query_author')
        # The cursor is only created once results are actually consumed.
        self.assertIsNone(q.query.cursor)
        list(q)
        self.assertIsNotNone(q.query.cursor)
def test_FK_raw_query(self):
"""
Test of a simple raw query against a model containing a foreign key
"""
query = "SELECT * FROM raw_query_book"
books = Book.objects.all()
self.assertSuccessfulRawQuery(Book, query, books)
def test_db_column_handler(self):
"""
Test of a simple raw query against a model containing a field with
db_column defined.
"""
query = "SELECT * FROM raw_query_coffee"
coffees = Coffee.objects.all()
self.assertSuccessfulRawQuery(Coffee, query, coffees)
    def test_order_handler(self):
        """
        Test of raw query's tolerance for columns being returned in any
        order
        """
        # Three permutations of the same column list; all must map
        # correctly onto the model's fields.
        selects = (
            ('dob, last_name, first_name, id'),
            ('last_name, dob, first_name, id'),
            ('first_name, last_name, dob, id'),
        )

        for select in selects:
            query = "SELECT %s FROM raw_query_author" % select
            authors = Author.objects.all()
            self.assertSuccessfulRawQuery(Author, query, authors)
def test_translations(self):
"""
Test of raw query's optional ability to translate unexpected result
column names to specific model fields
"""
query = "SELECT first_name AS first, last_name AS last, dob, id FROM raw_query_author"
translations = {'first': 'first_name', 'last': 'last_name'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
    def test_params(self):
        """
        Test passing optional query parameters
        """
        query = "SELECT * FROM raw_query_author WHERE first_name = %s"
        # Use an arbitrary existing author (the third) as the parameter.
        author = Author.objects.all()[2]
        params = [author.first_name]
        qset = Author.objects.raw(query, params=params)
        results = list(qset)
        self.assertProcessed(Author, results, [author])
        self.assertNoAnnotations(results)
        self.assertEqual(len(results), 1)
        # repr() of a parameterized RawQuerySet must not raise.
        self.assertIsInstance(repr(qset), str)
    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_pyformat_params(self):
        """
        Test passing optional query parameters in pyformat
        (``%(name)s``) style.
        """
        query = "SELECT * FROM raw_query_author WHERE first_name = %(first)s"
        author = Author.objects.all()[2]
        params = {'first': author.first_name}
        qset = Author.objects.raw(query, params=params)
        results = list(qset)
        self.assertProcessed(Author, results, [author])
        self.assertNoAnnotations(results)
        self.assertEqual(len(results), 1)
        self.assertIsInstance(repr(qset), str)
    def test_query_representation(self):
        """
        Test representation of raw query with parameters
        """
        query = "SELECT * FROM raw_query_author WHERE last_name = %(last)s"
        qset = Author.objects.raw(query, {'last': 'foo'})
        self.assertEqual(repr(qset), "<RawQuerySet: SELECT * FROM raw_query_author WHERE last_name = foo>")
        self.assertEqual(repr(qset.query), "<RawQuery: SELECT * FROM raw_query_author WHERE last_name = foo>")

        query = "SELECT * FROM raw_query_author WHERE last_name = %s"
        # NOTE(review): {'foo'} is a *set*, i.e. positional params passed
        # as a non-list iterable — presumably deliberate; confirm.
        qset = Author.objects.raw(query, {'foo'})
        self.assertEqual(repr(qset), "<RawQuerySet: SELECT * FROM raw_query_author WHERE last_name = foo>")
        self.assertEqual(repr(qset.query), "<RawQuery: SELECT * FROM raw_query_author WHERE last_name = foo>")
def test_many_to_many(self):
"""
Test of a simple raw query against a model containing a m2m field
"""
query = "SELECT * FROM raw_query_reviewer"
reviewers = Reviewer.objects.all()
self.assertSuccessfulRawQuery(Reviewer, query, reviewers)
def test_extra_conversions(self):
"""
Test to insure that extra translations are ignored.
"""
query = "SELECT * FROM raw_query_author"
translations = {'something': 'else'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
def test_missing_fields(self):
query = "SELECT id, first_name, dob FROM raw_query_author"
for author in Author.objects.raw(query):
self.assertNotEqual(author.first_name, None)
# last_name isn't given, but it will be retrieved on demand
self.assertNotEqual(author.last_name, None)
def test_missing_fields_without_PK(self):
query = "SELECT first_name, dob FROM raw_query_author"
try:
list(Author.objects.raw(query))
self.fail('Query without primary key should fail')
except InvalidQuery:
pass
def test_annotations(self):
query = (
"SELECT a.*, count(b.id) as book_count "
"FROM raw_query_author a "
"LEFT JOIN raw_query_book b ON a.id = b.author_id "
"GROUP BY a.id, a.first_name, a.last_name, a.dob ORDER BY a.id"
)
expected_annotations = (
('book_count', 3),
('book_count', 0),
('book_count', 1),
('book_count', 0),
)
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, expected_annotations)
def test_white_space_query(self):
query = " SELECT * FROM raw_query_author"
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def test_multiple_iterations(self):
query = "SELECT * FROM raw_query_author"
normal_authors = Author.objects.all()
raw_authors = Author.objects.raw(query)
# First Iteration
first_iterations = 0
for index, raw_author in enumerate(raw_authors):
self.assertEqual(normal_authors[index], raw_author)
first_iterations += 1
# Second Iteration
second_iterations = 0
for index, raw_author in enumerate(raw_authors):
self.assertEqual(normal_authors[index], raw_author)
second_iterations += 1
self.assertEqual(first_iterations, second_iterations)
def test_get_item(self):
# Indexing on RawQuerySets
query = "SELECT * FROM raw_query_author ORDER BY id ASC"
third_author = Author.objects.raw(query)[2]
self.assertEqual(third_author.first_name, 'Bob')
first_two = Author.objects.raw(query)[0:2]
self.assertEqual(len(first_two), 2)
self.assertRaises(TypeError, lambda: Author.objects.raw(query)['test'])
def test_inheritance(self):
# date is the end of the Cuban Missile Crisis, I have no idea when
# Wesley was born
f = FriendlyAuthor.objects.create(first_name="Wesley", last_name="Chun",
dob=date(1962, 10, 28))
query = "SELECT * FROM raw_query_friendlyauthor"
self.assertEqual(
[o.pk for o in FriendlyAuthor.objects.raw(query)], [f.pk]
)
def test_query_count(self):
self.assertNumQueries(1, list, Author.objects.raw("SELECT * FROM raw_query_author"))
def test_subquery_in_raw_sql(self):
try:
list(Book.objects.raw('SELECT id FROM (SELECT * FROM raw_query_book WHERE paperback IS NOT NULL) sq'))
except InvalidQuery:
self.fail("Using a subquery in a RawQuerySet raised InvalidQuery")
def test_db_column_name_is_used_in_raw_query(self):
"""
Regression test that ensures the `column` attribute on the field is
used to generate the list of fields included in the query, as opposed
to the `attname`. This is important when the primary key is a
ForeignKey field because `attname` and `column` are not necessarily the
same.
"""
b = BookFkAsPk.objects.create(book=self.b1)
self.assertEqual(list(BookFkAsPk.objects.raw('SELECT not_the_default FROM raw_query_bookfkaspk')), [b])
|
bsd-3-clause
|
Vitallium/qtwebkit
|
Tools/QueueStatusServer/model/queuepropertymixin.py
|
143
|
1888
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class QueuePropertyMixin(object):
    """Expose a ``queue`` object property backed by the ``queue_name`` field."""

    def _queue_getter(self):
        # Imported lazily to break a circular dependency between modules.
        from model.queues import Queue
        return Queue.queue_with_name(self.queue_name)

    def _queue_setter(self, queue):
        if queue:
            self.queue_name = queue.name()
        else:
            self.queue_name = None

    queue = property(_queue_getter, _queue_setter)
|
gpl-2.0
|
mmardini/django
|
django/utils/tzinfo.py
|
97
|
3923
|
"Implementation of tzinfo classes for use with datetime.datetime."
from __future__ import unicode_literals
from datetime import timedelta, tzinfo
import time
import warnings
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text, DEFAULT_LOCALE_ENCODING
# Importing this module warns immediately so callers migrate to
# django.utils.timezone before the module is removed in 1.9.
warnings.warn(
    "django.utils.tzinfo will be removed in Django 1.9. "
    "Use django.utils.timezone instead.",
    RemovedInDjango19Warning, stacklevel=2)
# Python's doc say: "A tzinfo subclass must have an __init__() method that can
# be called with no arguments". FixedOffset and LocalTimezone don't honor this
# requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as
# well as pickling/unpickling.
class FixedOffset(tzinfo):
    "Fixed offset in minutes east from UTC."

    def __init__(self, offset):
        warnings.warn(
            "django.utils.tzinfo.FixedOffset will be removed in Django 1.9. "
            "Use django.utils.timezone.get_fixed_timezone instead.",
            RemovedInDjango19Warning)
        # Accept either a timedelta or a number of minutes east of UTC.
        if isinstance(offset, timedelta):
            self.__offset = offset
            minutes = self.__offset.seconds // 60
        else:
            self.__offset = timedelta(minutes=offset)
            minutes = offset
        sign = '+' if minutes >= 0 else '-'
        # e.g. "+0530" / "-0100".
        self.__name = "%s%02d%02d" % (sign, abs(minutes) / 60., abs(minutes) % 60)

    def __repr__(self):
        return self.__name

    def __getinitargs__(self):
        # Enables copy/deepcopy/pickle despite the required __init__ argument.
        return self.__offset,

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # A fixed-offset zone never observes DST.
        return timedelta(0)
# This implementation is used for display purposes. It uses an approximation
# for DST computations on dates >= 2038.
# A similar implementation exists in django.utils.timezone. It's used for
# timezone support (when USE_TZ = True) and focuses on correctness.
class LocalTimezone(tzinfo):
    "Proxy timezone information from time module."
    def __init__(self, dt):
        warnings.warn(
            "django.utils.tzinfo.LocalTimezone will be removed in Django 1.9. "
            "Use django.utils.timezone.get_default_timezone instead.",
            RemovedInDjango19Warning)
        tzinfo.__init__(self)
        self.__dt = dt  # retained only for __getinitargs__ (copy/pickle)
        self._tzname = self.tzname(dt)
    def __repr__(self):
        return force_str(self._tzname)
    def __getinitargs__(self):
        # Enables copy/deepcopy/pickle despite the required __init__ argument.
        return self.__dt,
    def utcoffset(self, dt):
        # time.altzone/timezone are seconds *west* of UTC, hence the negation.
        if self._isdst(dt):
            return timedelta(seconds=-time.altzone)
        else:
            return timedelta(seconds=-time.timezone)
    def dst(self, dt):
        # DST adjustment = DST offset minus standard offset.
        if self._isdst(dt):
            return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
        else:
            return timedelta(0)
    def tzname(self, dt):
        is_dst = False if dt is None else self._isdst(dt)
        try:
            return force_text(time.tzname[is_dst], DEFAULT_LOCALE_ENCODING)
        except UnicodeDecodeError:
            # The platform's zone name may not decode in the locale encoding.
            return None
    def _isdst(self, dt):
        # Ask the C library whether local time observes DST at this instant.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        try:
            stamp = time.mktime(tt)
        except (OverflowError, ValueError):
            # 32 bit systems can't handle dates after Jan 2038, and certain
            # systems can't handle dates before ~1901-12-01:
            #
            # >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
            # OverflowError: mktime argument out of range
            # >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
            # ValueError: year out of range
            #
            # In this case, we fake the date, because we only care about the
            # DST flag.
            tt = (2037,) + tt[1:]
            stamp = time.mktime(tt)
        tt = time.localtime(stamp)
        return tt.tm_isdst > 0
|
bsd-3-clause
|
bobbyluig/Eclipse
|
src/agility/maestro.py
|
1
|
14308
|
import logging
import os
import re
import time
import serial
from serial import SerialTimeoutException
import struct
from threading import Lock
from serial.tools import list_ports
logger = logging.getLogger('universe')
class Maestro:
"""
Implementation of the controller for Pololu Mini Maestro Controller.
This class communicates with only one maestro on the virtual command port.
This implementation is thread-safe.
However, a servo can only belong to at most one thread.
Having a servo in multiple threads will cause errors.
"""
    def __init__(self, port=None, baud=115200):
        """
        Open the Maestro's virtual "Command" serial port.

        :param port: The virtual port number. Autodetected when None.
        :param baud: Baud rate. Should use maximum.
        :raises ConnectionError: If the port cannot be found or opened.
        """
        if port is not None:
            self.port = port
        else:
            # Determine the operating system and port strings.
            # Command port is used for USB Dual Port mode.
            # Can automatically determine from a scan.
            # 1ffb:008b is the Pololu controller's USB vendor:product id.
            ports = list(list_ports.grep(r'(?i)1ffb:008b'))
            if os.name == 'nt':
                # On Windows the port description names the Command port.
                if len(ports) == 2:
                    if 'Command' in ports[0][1]:
                        self.port = ports[0][0]
                    else:
                        self.port = ports[1][0]
                else:
                    raise ConnectionError('Unable to determine the Command port automatically. Please specify.')
            else:
                if len(ports) == 2:
                    # Assuming nothing was messed with, the command port is the lower port.
                    if int(re.search(r'(\d+)$', ports[1][0]).group(0)) > int(re.search(r'(\d+)$', ports[0][0]).group(0)):
                        self.port = ports[0][0]
                    else:
                        self.port = ports[1][0]
                else:
                    raise ConnectionError('Unable to determine the Command port automatically. Please specify.')
        # Start a connection using pyserial.
        try:
            # write_timeout=0 makes writes non-blocking.
            self.usb = serial.Serial(self.port, baudrate=baud, write_timeout=0)
            logger.debug('Using command port "{}".'.format(self.usb.port))
        except:
            raise ConnectionError('Unable to connect to servo controller at {}.'.format(self.port))
        # Struct objects are faster.
        # '<H' = little-endian unsigned 16-bit, the Maestro's reply format.
        self.struct = struct.Struct('<H')
        # Locks.
        # Serializes write+read transactions on the shared port.
        self.read_lock = Lock()
def write(self, buffer):
"""
Send data to the Maestro.
:param buffer: The data to send.
"""
self.usb.write(buffer)
def close(self):
"""
Close the USB port.
"""
self.usb.close()
##########################################
# Begin implementation of static methods.
##########################################
@staticmethod
def endianize(value):
"""
Endian formatting for Pololu commands.
:param value: Integer value.
:return: (lsb, msb)
"""
return value & 0x7F, (value >> 7) & 0x7F
############################################
# Begin implementation of digital protocol.
############################################
def rotate(self, stepper, degrees, t):
"""
Send some pulses in a given time with equal spacing per pulse.
Blocking until completion.
:param stepper: Stepper object.
:param degrees: The number of degrees to turn.
:param t: The total time.
"""
steps = stepper.deg_to_steps(degrees)
if steps == 0:
return
if steps > 0:
direction = 1
self.usb.write((0x84, stepper.c1, 64, 62))
else:
direction = -1
self.usb.write((0x84, stepper.c1, 104, 7))
steps = abs(steps)
x = t / (2 * steps) / 1000
low_pulse = (0x84, stepper.c2, 104, 7)
high_pulse = (0x84, stepper.c2, 64, 62)
for i in range(steps):
self.usb.write(high_pulse)
time.sleep(x)
self.usb.write(low_pulse)
time.sleep(x)
stepper.step_one(direction)
##########################################################
# Begin implementation of buffer-capable compact protocol.
##########################################################
def set_target(self, servo, send=True):
"""
Move a servo to its target.
:param servo: A servo object.
:param send: Whether or not to send instruction immediately.
:return: The instruction tuple.
"""
# Use endian format suitable for Maestro.
lsb, msb = self.endianize(servo.target)
# Compose and send or return.
ins = (0x84, servo.channel, lsb, msb)
if send:
self.usb.write(ins)
return ins
def set_speed(self, servo, speed, send=True):
"""
Set the servo speed.
:param servo: A servo object.
:param speed: The speed in 0.25 us / 10 ms.
:param send: Whether or not to send instruction immediately.
:return: The instruction tuple.
"""
# Use endian format suitable for Maestro.
lsb, msb = self.endianize(speed)
# Update object. However, this will not be accurate until send.
servo.vel = speed
# Compose and send or return.
ins = (0x87, servo.channel, lsb, msb)
if send:
self.usb.write(ins)
return ins
def set_acceleration(self, servo, accel, send=True):
"""
Set the servo acceleration.
:param servo: A servo object.
:param accel: The acceleration in 0.25 us / 10 ms / 80 ms. See documentation for different PWM.
:param send: Whether or not to send instruction immediately.
:return: The instruction tuple.
"""
# Use endian format suitable for Maestro.
lsb, msb = self.endianize(accel)
# Update object. However, this will not be accurate until flush.
servo.accel = accel
# Compose and add to buffer.
ins = (0x89, servo.channel, lsb, msb)
if send:
self.usb.write(ins)
return ins
##########################################
# Begin implementation of bulk operations.
##########################################
def get_multiple_positions(self, servos):
"""
Get multiple positions.
:param servos: Servo objects.
"""
data = bytearray()
count = len(servos)
size = 2 * count
for servo in servos:
data.extend((0x90, servo.channel))
with self.read_lock:
self.usb.write(data)
reply = self.usb.read(size=size)
for i in range(count):
data = reply[2 * i: 2 * i + 2]
servos[i].pwm = self.struct.unpack(data)[0]
def set_multiple_targets(self, servos):
"""
Set multiple targets with one command. Faster than multiple set_target().
Only use for contiguous blocks!
:param servos: Servo objects.
"""
# Count the number of targets. Required by controller.
count = len(servos)
# Sort.
servos = sorted(servos, key=lambda s: s.channel)
# Start channel
start = servos[0].channel
# Data header.
data = bytearray((0x9F, count, start))
# Iterate through all servos, appending to data as needed.
for servo in servos:
target = servo.deg_to_maestro(servo.target)
# Check contiguity.
if servo.channel != start:
raise Exception('Channels not contiguous!')
else:
start += 1
lsb, msb = self.endianize(target)
data.extend((lsb, msb))
# Update object.
servo.pwm = target
# Write.
self.usb.write(data)
##########################################
# Begin implementation of read operations.
##########################################
def get_position(self, servo):
"""
Get the position of one servo.
:param servo: A servo object.
"""
with self.read_lock:
# Send command and get reply.
self.usb.write((0x90, servo.channel))
reply = self.usb.read(size=2)
# Unpack data.
pwm = self.struct.unpack(reply)[0]
# Set servo data.
servo.pwm = pwm
def get_moving_state(self):
"""
Checks if any servos are moving.
:return: Returns True if one or more servos are moving, else False.
"""
with self.read_lock:
# Send command and receive.
self.usb.write((0x93,))
reply = self.usb.read()
# Check and return.
if reply == b'\x00':
return False
else:
return True
def get_errors(self):
"""
Gets errors.
:return: Returns an integer reprenstation of an error or None if there are no errors.
"""
with self.read_lock:
# Send command and receive.
self.usb.write((0xA1,))
reply = self.usb.read(size=2)
if reply:
return self.struct.unpack(reply)[0]
else:
return None
###############################################
# Begin implementation of accessory operations.
###############################################
def set_pwm(self, time, period):
"""
Set the PWM.
:param time: The time parameter as specified by the documentation.
:param period: THe period parameter as specified by the documentation.
"""
# Use endian format suitable for Maestro.
lsb1, msb1 = self.endianize(time)
lsb2, msb2 = self.endianize(period)
# Compose.
data = (0x8A, lsb1, msb1, lsb2, msb2)
# Write.
self.usb.write(data)
def go_home(self):
"""
Return all servos to their home positions.
"""
# Send command.
self.usb.write((0xA2,))
############################################
# Begin implementation of script operations.
############################################
def stop_script(self):
"""
Stops the running script.
"""
# Send command.
self.usb.write((0xA4,))
# Restart script.
def restart(self, subroutine, parameter=None):
"""
Starts or restarts a script.
:param subroutine: The subroutine number.
:param parameter: An integer parameter to put on the stack for consumption.
"""
# Construct command depending on parameter.
if parameter is None:
data = (0xA7, subroutine)
else:
lsb, msb = self.endianize(parameter)
data = (0xA8, lsb, msb)
# Send data.
self.usb.write(data)
def get_script_status(self):
"""
Get a script status.
:return: Returns True if script is running and False if it is not.
"""
with self.read_lock:
# Send command and receive.
self.usb.write((0xAE,))
reply = self.usb.read()
# Check and return.
if reply == b'\x00':
return False
else:
return True
###################################################
# Begin implementation of complex helper functions.
###################################################
    def end_in(self, servo, t=0, update=False):
        """
        Move one servo to its target in t time.
        :param servo: A servo object.
        :param t: The time in ms for the operation. Set to 0 for max speed.
        :param update: Whether or not to update servo's position.
        """
        # Update servo positions as needed.
        if update:
            self.get_position(servo)
        # Max speed.
        # t == 0 means "as fast as allowed": derive the minimum time from
        # the distance to travel and the servo's maximum velocity.
        if t == 0:
            t = abs(servo.target - servo.pwm) / servo.max_vel * 10
        # Already at target.
        if t == 0:
            return
        # Faster send.
        # Batch acceleration + speed into one serial write.
        buffer = bytearray()
        # Set acceleration to zero.
        ins = self.set_acceleration(servo, 0, send=False)
        buffer.extend(ins)
        # Compute velocity as a change in 0.25us PWM / 10ms.
        delta = abs(servo.target - servo.pwm)
        vel = int(round(delta / t * 10))
        # Set velocity.
        ins = self.set_speed(servo, vel, send=False)
        buffer.extend(ins)
        # Send data.
        self.write(buffer)
        # Synchronize instruction.
        self.set_target(servo)
    def end_together(self, servos, t=0, update=False):
        """
        Move all servos to their respective targets such that they arrive together.
        This will reset all accelerations to 0 and flush buffer.
        :param servos: Servo objects.
        :param t: The time in ms for the operation. Set to 0 for max speed.
        :param update: Whether or not to update servo positions.
        """
        # Update servo positions as needed.
        if update:
            self.get_multiple_positions(servos)
        # Max speed.
        # t == 0 means "as fast as the slowest servo allows": the longest
        # individual travel time bounds the group move.
        if t == 0:
            t = max([abs(servo.target - servo.pwm) / servo.max_vel * 10 for servo in servos])
        # Already at target.
        if t == 0:
            return
        # Faster send.
        # Batch all acceleration + speed instructions into one serial write.
        buffer = bytearray()
        # Compute and set the velocity for every servo.
        for servo in servos:
            # Set acceleration to zero.
            ins = self.set_acceleration(servo, 0, send=False)
            buffer.extend(ins)
            # Compute velocity as a change in 0.25us PWM / 10ms.
            delta = abs(servo.target - servo.pwm)
            vel = int(round(delta / t * 10))
            # Set velocity.
            ins = self.set_speed(servo, vel, send=False)
            buffer.extend(ins)
        # Send data.
        self.write(buffer)
        # Synchronize instructions.
        # A second batched write starts all moves as close together as the
        # serial link allows.
        buffer = bytearray()
        # Move all servos to their respective targets.
        for servo in servos:
            ins = self.set_target(servo, send=False)
            buffer.extend(ins)
        # Send to execute move.
        self.write(buffer)
|
mit
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/databrowse/plugins/fieldchoices.py
|
252
|
3856
|
from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.text import capfirst
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
import urllib
class FieldChoicePlugin(DatabrowsePlugin):
def __init__(self, field_filter=None):
# If field_filter is given, it should be a callable that takes a
# Django database Field instance and returns True if that field should
# be included. If field_filter is None, that all fields will be used.
self.field_filter = field_filter
def field_dict(self, model):
"""
Helper function that returns a dictionary of all fields in the given
model. If self.field_filter is set, it only includes the fields that
match the filter.
"""
if self.field_filter:
return dict([(f.name, f) for f in model._meta.fields if self.field_filter(f)])
else:
return dict([(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, (models.AutoField, models.TextField))])
def model_index_html(self, request, model, site):
fields = self.field_dict(model)
if not fields:
return u''
return mark_safe(u'<p class="filter"><strong>View by:</strong> %s</p>' % \
u', '.join(['<a href="fields/%s/">%s</a>' % (f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values()]))
    def urls(self, plugin_name, easy_instance_field):
        # Only fields this plugin manages get a browse URL; for any other
        # field the method falls through and (implicitly) returns None.
        if easy_instance_field.field in self.field_dict(easy_instance_field.model.model).values():
            field_value = smart_str(easy_instance_field.raw_value)
            # URL shape: <model-url><plugin>/<field-name>/<quoted-value>/
            return [mark_safe(u'%s%s/%s/%s/' % (
                easy_instance_field.model.url(),
                plugin_name, easy_instance_field.field.name,
                urllib.quote(field_value, safe='')))]
def model_view(self, request, model_databrowse, url):
self.model, self.site = model_databrowse.model, model_databrowse.site
self.fields = self.field_dict(self.model)
# If the model has no fields with choices, there's no point in going
# further.
if not self.fields:
raise http.Http404('The requested model has no fields.')
if url is None:
return self.homepage_view(request)
url_bits = url.split('/', 1)
if self.fields.has_key(url_bits[0]):
return self.field_view(request, self.fields[url_bits[0]], *url_bits[1:])
raise http.Http404('The requested page does not exist.')
def homepage_view(self, request):
easy_model = EasyModel(self.site, self.model)
field_list = self.fields.values()
field_list.sort(key=lambda k: k.verbose_name)
return render_to_response('databrowse/fieldchoice_homepage.html', {'root_url': self.site.root_url, 'model': easy_model, 'field_list': field_list})
def field_view(self, request, field, value=None):
easy_model = EasyModel(self.site, self.model)
easy_field = easy_model.field(field.name)
if value is not None:
obj_list = easy_model.objects(**{field.name: value})
return render_to_response('databrowse/fieldchoice_detail.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'value': value, 'object_list': obj_list})
obj_list = [v[field.name] for v in self.model._default_manager.distinct().order_by(field.name).values(field.name)]
return render_to_response('databrowse/fieldchoice_list.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'object_list': obj_list})
|
bsd-3-clause
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/VisualizationAlgorithms/Python/officeTubes.py
|
1
|
12926
|
#!/usr/bin/env python
# This example demonstrates the use of streamlines generated from seeds,
# combined with a tube filter to create several streamtubes.
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.colors import *
VTK_DATA_ROOT = vtkGetDataRoot()
# We read a data file the is a CFD analysis of airflow in an office
# (with ventilation and a burning cigarette). We force an update so
# that we can query the output for its length, i.e., the length of the
# diagonal of the bounding box. This is useful for normalizing the
# data.
reader = vtk.vtkStructuredGridReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/office.binary.vtk")
reader.Update()
length = reader.GetOutput().GetLength()
maxVelocity =reader.GetOutput().GetPointData().GetVectors().GetMaxNorm()
# Upper bound on integration time derived from the data scale.
# NOTE(review): maxTime is not referenced in this portion of the script;
# confirm whether the remainder of the file uses it.
maxTime = 35.0*length/maxVelocity
# Now we will generate multiple streamlines in the data. We create a
# random cloud of points and then use those as integration seeds. We
# select the integration order to use (RungeKutta order 4) and
# associate it with the streamer. The start position is the position
# in world space where we want to begin streamline integration; and we
# integrate in both directions. The step length is the length of the
# line segments that make up the streamline (i.e., related to
# display). The IntegrationStepLength specifies the integration step
# length as a fraction of the cell size that the streamline is in.
# Create source for streamtubes
seeds = vtk.vtkPointSource()
seeds.SetRadius(0.15)
seeds.SetCenter(0.1, 2.1, 0.5)
seeds.SetNumberOfPoints(6)
integ = vtk.vtkRungeKutta4()
streamer = vtk.vtkStreamTracer()
streamer.SetInputConnection(reader.GetOutputPort())
streamer.SetSourceConnection(seeds.GetOutputPort())
streamer.SetMaximumPropagation(500)
streamer.SetInitialIntegrationStep(0.05)
streamer.SetIntegrationDirectionToBoth()
streamer.SetIntegrator(integ)
# The tube is wrapped around the generated streamline. By varying the
# radius by the inverse of vector magnitude, we are creating a tube
# whose radius is proportional to mass flux (in incompressible flow).
streamTube = vtk.vtkTubeFilter()
streamTube.SetInputConnection(streamer.GetOutputPort())
streamTube.SetInputArrayToProcess(1, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, "vectors")
streamTube.SetRadius(0.02)
streamTube.SetNumberOfSides(12)
streamTube.SetVaryRadiusToVaryRadiusByVector()
mapStreamTube = vtk.vtkPolyDataMapper()
mapStreamTube.SetInputConnection(streamTube.GetOutputPort())
mapStreamTube.SetScalarRange(reader.GetOutput().GetPointData().GetScalars().GetRange())
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(mapStreamTube)
streamTubeActor.GetProperty().BackfaceCullingOn()
# From here on we generate a whole bunch of planes which correspond to
# the geometry in the analysis; tables, bookshelves and so on. The
# original code repeated the same filter/mapper/actor boilerplate 16
# times; a single helper builds each piece while preserving every
# module-level variable name for the rest of the script.
def makeFurniture(extent, color):
    """Build a (geometry-filter, mapper, actor) triple for one flat piece
    of office geometry, extracted from the structured grid at `extent`
    (imin, imax, jmin, jmax, kmin, kmax) and painted `color` (r, g, b)."""
    geometry = vtk.vtkStructuredGridGeometryFilter()
    geometry.SetInputConnection(reader.GetOutputPort())
    geometry.SetExtent(*extent)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(geometry.GetOutputPort())
    mapper.ScalarVisibilityOff()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(*color)
    return geometry, mapper, actor

# Shared colours.
deskColor = (.59, .427, .392)
woodColor = (.8, .8, .6)

table1, mapTable1, table1Actor = makeFurniture((11, 15, 7, 9, 8, 8), deskColor)
table2, mapTable2, table2Actor = makeFurniture((11, 15, 10, 12, 8, 8), deskColor)
FilingCabinet1, mapFilingCabinet1, FilingCabinet1Actor = makeFurniture((15, 15, 7, 9, 0, 8), woodColor)
FilingCabinet2, mapFilingCabinet2, FilingCabinet2Actor = makeFurniture((15, 15, 10, 12, 0, 8), woodColor)
bookshelf1Top, mapBookshelf1Top, bookshelf1TopActor = makeFurniture((13, 13, 0, 4, 0, 11), woodColor)
bookshelf1Bottom, mapBookshelf1Bottom, bookshelf1BottomActor = makeFurniture((20, 20, 0, 4, 0, 11), woodColor)
bookshelf1Front, mapBookshelf1Front, bookshelf1FrontActor = makeFurniture((13, 20, 0, 0, 0, 11), woodColor)
bookshelf1Back, mapBookshelf1Back, bookshelf1BackActor = makeFurniture((13, 20, 4, 4, 0, 11), woodColor)
bookshelf1LHS, mapBookshelf1LHS, bookshelf1LHSActor = makeFurniture((13, 20, 0, 4, 0, 0), woodColor)
bookshelf1RHS, mapBookshelf1RHS, bookshelf1RHSActor = makeFurniture((13, 20, 0, 4, 11, 11), woodColor)
bookshelf2Top, mapBookshelf2Top, bookshelf2TopActor = makeFurniture((13, 13, 15, 19, 0, 11), woodColor)
bookshelf2Bottom, mapBookshelf2Bottom, bookshelf2BottomActor = makeFurniture((20, 20, 15, 19, 0, 11), woodColor)
bookshelf2Front, mapBookshelf2Front, bookshelf2FrontActor = makeFurniture((13, 20, 15, 15, 0, 11), woodColor)
bookshelf2Back, mapBookshelf2Back, bookshelf2BackActor = makeFurniture((13, 20, 19, 19, 0, 11), woodColor)
bookshelf2LHS, mapBookshelf2LHS, bookshelf2LHSActor = makeFurniture((13, 20, 15, 19, 0, 0), woodColor)
bookshelf2RHS, mapBookshelf2RHS, bookshelf2RHSActor = makeFurniture((13, 20, 15, 19, 11, 11), woodColor)
window, mapWindow, windowActor = makeFurniture((20, 20, 6, 13, 10, 13), (.3, .3, .5))
outlet, mapOutlet, outletActor = makeFurniture((0, 0, 9, 10, 14, 16), (0, 0, 0))
inlet = vtk.vtkStructuredGridGeometryFilter()
inlet.SetInputConnection(reader.GetOutputPort())
inlet.SetExtent(0, 0, 9, 10, 0, 6)
mapInlet = vtk.vtkPolyDataMapper()
mapInlet.SetInputConnection(inlet.GetOutputPort())
mapInlet.ScalarVisibilityOff()
inletActor = vtk.vtkActor()
inletActor.SetMapper(mapInlet)
inletActor.GetProperty().SetColor(0, 0, 0)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(mapOutline)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Now create the usual graphics stuff.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(table1Actor)
ren.AddActor(table2Actor)
ren.AddActor(FilingCabinet1Actor)
ren.AddActor(FilingCabinet2Actor)
ren.AddActor(bookshelf1TopActor)
ren.AddActor(bookshelf1BottomActor)
ren.AddActor(bookshelf1FrontActor)
ren.AddActor(bookshelf1BackActor)
ren.AddActor(bookshelf1LHSActor)
ren.AddActor(bookshelf1RHSActor)
ren.AddActor(bookshelf2TopActor)
ren.AddActor(bookshelf2BottomActor)
ren.AddActor(bookshelf2FrontActor)
ren.AddActor(bookshelf2BackActor)
ren.AddActor(bookshelf2LHSActor)
ren.AddActor(bookshelf2RHSActor)
ren.AddActor(windowActor)
ren.AddActor(outletActor)
ren.AddActor(inletActor)
ren.AddActor(outlineActor)
ren.AddActor(streamTubeActor)
ren.SetBackground(slate_grey)
# Here we specify a particular view.
aCamera = vtk.vtkCamera()
aCamera.SetClippingRange(0.726079, 36.3039)
aCamera.SetFocalPoint(2.43584, 2.15046, 1.11104)
aCamera.SetPosition(-4.76183, -10.4426, 3.17203)
aCamera.SetViewUp(0.0511273, 0.132773, 0.989827)
aCamera.SetViewAngle(18.604)
aCamera.Zoom(1.2)
ren.SetActiveCamera(aCamera)
renWin.SetSize(500, 300)
iren.Initialize()
renWin.Render()
iren.Start()
|
bsd-3-clause
|
tangfeixiong/nova
|
nova/api/metadata/password.py
|
66
|
2255
|
# Copyright 2012 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import range
from webob import exc
from nova import context
from nova.i18n import _
from nova import objects
from nova import utils
# Passwords are split across CHUNKS system_metadata entries of at most
# CHUNK_LENGTH characters each, bounding a stored password at MAX_SIZE.
CHUNKS = 4
CHUNK_LENGTH = 255
MAX_SIZE = CHUNKS * CHUNK_LENGTH
def extract_password(instance):
    """Reassemble the password stored in the instance's system_metadata.

    Chunks live under 'password_N' keys; concatenating them in sorted key
    order restores the original value. Returns None when no chunks exist.
    """
    sys_meta = utils.instance_sys_meta(instance)
    chunks = [sys_meta[key] for key in sorted(sys_meta.keys())
              if key.startswith('password_')]
    return ''.join(chunks) or None
def convert_password(context, password):
    """Stores password as system_metadata items.

    Password is stored with the keys 'password_0' -> 'password_3'.
    """
    text = password or ''
    meta = {}
    # Slice at fixed offsets; out-of-range slices yield '' so short
    # passwords still produce all CHUNKS keys.
    for index in range(CHUNKS):
        offset = index * CHUNK_LENGTH
        meta['password_%d' % index] = text[offset:offset + CHUNK_LENGTH]
    return meta
def handle_password(req, meta_data):
    """Serve GET/POST of the instance password via the metadata service.

    GET returns the stored password; POST stores it once (conflict if one
    is already set, bad request if oversized or the method is unsupported).
    """
    ctxt = context.get_admin_context()
    if req.method == 'GET':
        return meta_data.password
    if req.method != 'POST':
        raise exc.HTTPBadRequest()
    # NOTE(vish): The conflict will only happen once the metadata cache
    #             updates, but it isn't a huge issue if it can be set for
    #             a short window.
    if meta_data.password:
        raise exc.HTTPConflict()
    if req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE:
        msg = _("Request is too large.")
        raise exc.HTTPBadRequest(explanation=msg)
    instance = objects.Instance.get_by_uuid(ctxt, meta_data.uuid)
    instance.system_metadata.update(convert_password(ctxt, req.body))
    instance.save()
|
apache-2.0
|
pplatek/odoo
|
openerp/addons/base/res/res_lang.py
|
196
|
12383
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
from locale import localeconv
import logging
import re
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class lang(osv.osv):
    """res.lang: installed languages plus locale-aware number formatting."""
    _name = "res.lang"
    _description = "Languages"

    # strftime directives rejected by _check_format; '%y' is taken back out
    # of the disallowed set because it is valid, just discouraged.
    _disallowed_datetime_patterns = tools.DATETIME_FORMATS_MAP.keys()
    _disallowed_datetime_patterns.remove('%y') # this one is in fact allowed, just not good practice

    def install_lang(self, cr, uid, **args):
        """
        This method is called from openerp/addons/base/base_data.xml to load
        some language and set it as the default for every partners. The
        language is set via tools.config by the RPC 'create' method on the
        'db' object. This is a fragile solution and something else should be
        found.
        """
        lang = tools.config.get('lang')
        if not lang:
            return False
        lang_ids = self.search(cr, uid, [('code','=', lang)])
        if not lang_ids:
            # First request for this language: create it from locale data.
            self.load_lang(cr, uid, lang)
        ir_values_obj = self.pool.get('ir.values')
        default_value = ir_values_obj.get(cr, uid, 'default', False, ['res.partner'])
        if not default_value:
            # Make this language the default for newly created partners.
            ir_values_obj.set(cr, uid, 'default', False, 'lang', ['res.partner'], lang)
        return True

    def load_lang(self, cr, uid, lang, lang_name=None):
        """Create the res.lang record for *lang* using the system locale's
        date/time formats and number separators; return the new record id."""
        # create the language with locale information
        fail = True
        iso_lang = tools.get_iso_codes(lang)
        for ln in tools.get_locales(lang):
            try:
                locale.setlocale(locale.LC_ALL, str(ln))
                fail = False
                break
            except locale.Error:
                continue
        if fail:
            # No matching locale could be set: fall back to the default one.
            lc = locale.getdefaultlocale()[0]
            msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
            _logger.warning(msg, lang, lc)
        if not lang_name:
            lang_name = tools.ALL_LANGUAGES.get(lang, lang)

        def fix_xa0(s):
            """Fix badly-encoded non-breaking space Unicode character from locale.localeconv(),
            coercing to utf-8, as some platform seem to output localeconv() in their system
            encoding, e.g. Windows-1252"""
            if s == '\xa0':
                return '\xc2\xa0'
            return s

        def fix_datetime_format(format):
            """Python's strftime supports only the format directives
            that are available on the platform's libc, so in order to
            be 100% cross-platform we map to the directives required by
            the C standard (1989 version), always available on platforms
            with a C standard implementation."""
            # For some locales, nl_langinfo returns a D_FMT/T_FMT that contains
            # unsupported '%-' patterns, e.g. for cs_CZ
            format = format.replace('%-', '%')
            for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
                format = format.replace(pattern, replacement)
            return str(format)

        lang_info = {
            'code': lang,
            'iso_code': iso_lang,
            'name': lang_name,
            'translatable': 1,
            'date_format' : fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
            'time_format' : fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
            'decimal_point' : fix_xa0(str(locale.localeconv()['decimal_point'])),
            'thousands_sep' : fix_xa0(str(locale.localeconv()['thousands_sep'])),
        }
        lang_id = False
        try:
            lang_id = self.create(cr, uid, lang_info)
        finally:
            # Always restore the process-wide locale, even if create() failed.
            tools.resetlocale()
        return lang_id

    def _check_format(self, cr, uid, ids, context=None):
        """Constraint: date/time formats must not use disallowed directives."""
        for lang in self.browse(cr, uid, ids, context=context):
            for pattern in self._disallowed_datetime_patterns:
                if (lang.time_format and pattern in lang.time_format)\
                        or (lang.date_format and pattern in lang.date_format):
                    return False
        return True

    def _check_grouping(self, cr, uid, ids, context=None):
        """Constraint: 'grouping' must eval to a list of ints, e.g. [3, 0]."""
        for lang in self.browse(cr, uid, ids, context=context):
            try:
                if not all(isinstance(x, int) for x in eval(lang.grouping)):
                    return False
            except Exception:
                return False
        return True

    def _get_default_date_format(self, cursor, user, context=None):
        return '%m/%d/%Y'

    def _get_default_time_format(self, cursor, user, context=None):
        return '%H:%M:%S'

    _columns = {
        'name': fields.char('Name', required=True),
        'code': fields.char('Locale Code', size=16, required=True, help='This field is used to set/get locales for user'),
        'iso_code': fields.char('ISO code', size=16, required=False, help='This ISO code is the name of po files to use for translations'),
        'translatable': fields.boolean('Translatable'),
        'active': fields.boolean('Active'),
        'direction': fields.selection([('ltr', 'Left-to-Right'), ('rtl', 'Right-to-Left')], 'Direction', required=True),
        'date_format':fields.char('Date Format', required=True),
        'time_format':fields.char('Time Format', required=True),
        'grouping':fields.char('Separator Format', required=True,help="The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case."),
        'decimal_point':fields.char('Decimal Separator', required=True),
        'thousands_sep':fields.char('Thousands Separator'),
    }
    _defaults = {
        'active': 1,
        'translatable': 0,
        'direction': 'ltr',
        'date_format':_get_default_date_format,
        'time_format':_get_default_time_format,
        'grouping': '[]',
        'decimal_point': '.',
        'thousands_sep': ',',
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)', 'The name of the language must be unique !'),
        ('code_uniq', 'unique (code)', 'The code of the language must be unique !'),
    ]
    _constraints = [
        (_check_format, 'Invalid date/time format directive specified. Please refer to the list of allowed directives, displayed when you edit a language.', ['time_format', 'date_format']),
        (_check_grouping, "The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case.", ['grouping'])
    ]

    @tools.ormcache(skiparg=3)
    def _lang_data_get(self, cr, uid, lang, monetary=False):
        """Return (grouping, thousands_sep, decimal_point) for *lang*.

        *lang* may be a code or a record id; unknown codes fall back to
        en_US. Results are ormcache'd; write() clears the cache.
        """
        if type(lang) in (str, unicode):
            lang = self.search(cr, uid, [('code', '=', lang)]) or \
                self.search(cr, uid, [('code', '=', 'en_US')])
            lang = lang[0]
        conv = localeconv()
        lang_obj = self.browse(cr, uid, lang)
        thousands_sep = lang_obj.thousands_sep or conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
        decimal_point = lang_obj.decimal_point
        grouping = lang_obj.grouping
        return grouping, thousands_sep, decimal_point

    def write(self, cr, uid, ids, vals, context=None):
        # Formatting data may change: drop the _lang_data_get ormcache.
        # NOTE(review): the cache is cleared once per id although one clear
        # would suffice — harmless, kept as-is.
        for lang_id in ids :
            self._lang_data_get.clear_cache(self)
        return super(lang, self).write(cr, uid, ids, vals, context)

    def unlink(self, cr, uid, ids, context=None):
        """Delete languages, refusing en_US, the session language and any
        active language; also removes the languages' ir.translation rows."""
        if context is None:
            context = {}
        languages = self.read(cr, uid, ids, ['code','active'], context=context)
        for language in languages:
            ctx_lang = context.get('lang')
            if language['code']=='en_US':
                raise osv.except_osv(_('User Error'), _("Base Language 'en_US' can not be deleted!"))
            if ctx_lang and (language['code']==ctx_lang):
                raise osv.except_osv(_('User Error'), _("You cannot delete the language which is User's Preferred Language!"))
            if language['active']:
                raise osv.except_osv(_('User Error'), _("You cannot delete the language which is Active!\nPlease de-activate the language first."))
            trans_obj = self.pool.get('ir.translation')
            trans_ids = trans_obj.search(cr, uid, [('lang','=',language['code'])], context=context)
            trans_obj.unlink(cr, uid, trans_ids, context=context)
        return super(lang, self).unlink(cr, uid, ids, context=context)

    #
    # IDS: can be a list of IDS or a list of XML_IDS
    #
    def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
        """ Format() will return the language-specific output for float values"""
        if percent[0] != '%':
            raise ValueError("format() must be given exactly one %char format specifier")
        formatted = percent % value
        # floats and decimal ints need special action!
        if grouping:
            lang_grouping, thousands_sep, decimal_point = \
                self._lang_data_get(cr, uid, ids[0], monetary)
            eval_lang_grouping = eval(lang_grouping)
            if percent[-1] in 'eEfFgG':
                # Group only the integer part; keep the fractional part intact.
                parts = formatted.split('.')
                parts[0], _ = intersperse(parts[0], eval_lang_grouping, thousands_sep)
                formatted = decimal_point.join(parts)
            elif percent[-1] in 'diu':
                formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
        return formatted
# import re, operator
# _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
#                          r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')

# Instantiate once to register the model with the ORM (old-style OpenERP API).
lang()
def split(l, counts):
    """
    >>> split("hello world", [])
    ['hello world']
    >>> split("hello world", [1])
    ['h', 'ello world']
    >>> split("hello world", [2])
    ['he', 'llo world']
    >>> split("hello world", [2,3])
    ['he', 'llo', ' world']
    >>> split("hello world", [2,3,0])
    ['he', 'llo', ' wo', 'rld']
    >>> split("hello world", [2,-1,3])
    ['he', 'llo world']
    """
    pieces = []
    remaining = l
    # width to reuse when a zero count is encountered; starts as full length
    last_width = len(l)
    for width in counts:
        if not remaining or width == -1:
            # -1 (or exhausted input) stops the splitting early
            break
        if width == 0:
            # zero means: keep chopping with the previous width until done
            while remaining:
                pieces.append(remaining[:last_width])
                remaining = remaining[last_width:]
            break
        pieces.append(remaining[:width])
        remaining = remaining[width:]
        last_width = width
    if remaining:
        pieces.append(remaining)
    return pieces
# leading non-digits / digit-run-to-first-space / remainder
intersperse_pat = re.compile('([^0-9]*)([^ ]*)(.*)')

def intersperse(string, counts, separator=''):
    """Insert *separator* into the digit run of *string* per *counts*.

    Returns (formatted_string, number_of_separators_inserted).
    """
    left, rest, right = intersperse_pat.match(string).groups()
    # Group from the units digit: reverse, split, then un-reverse each chunk.
    chunks = split(rest[::-1], counts)
    grouped = separator.join(chunk[::-1] for chunk in chunks[::-1])
    return left + grouped + right, max(len(chunks) - 1, 0)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
retorquere/zotero-better-bibtex
|
util/rebalance.py
|
1
|
4472
|
#!/usr/bin/env python3
import os
import sys
import json
from munch import Munch
import re
from ortools.algorithms import pywrapknapsack_solver
# CLI: rebalance.py <git-ref> <output-json>; only branch refs are processed.
ref, output = sys.argv[1:]
if not ref.startswith('refs/heads/'):
    print(ref, 'is not a branch')
    sys.exit(0)
branch = ref.split('/')[-1]
print('rebalance', branch, '=>', output)
# Both per-shard behave logs must exist before attempting a rebalance;
# exit quietly (status 0) otherwise.
for job in [1, 2]:
    job = f'logs/behave-zotero-{job}-{branch}.json'
    if not os.path.exists(job):
        print('not found:', job)
        sys.exit(0)
class RunningAverage():
    """Incrementally maintained arithmetic mean of a stream of samples."""

    def __init__(self, average=None, n=0):
        # average: current mean (None until the first sample); n: sample count
        self.average = average
        self.n = n

    def add(self, new_value):
        """Fold one new sample into the running mean."""
        self.n += 1
        if self.n == 1:
            self.average = new_value
        else:
            # https://math.stackexchange.com/questions/106700/incremental-averageing
            self.average = self.average + ((new_value - self.average) / self.n)

    def __float__(self):
        # BUG FIX: __float__ must return an actual float; after a single
        # integer sample self.average is an int, and returning it made
        # float(obj) raise TypeError. Coerce explicitly.
        return float(self.average)

    def __repr__(self):
        return "average: " + str(self.average)
class NoTestError(Exception):
    """Raised when a behave log contains no test results."""
    pass
class FailedError(Exception):
    """Raised when any test in a behave log is marked failed."""
    pass
class Log:
    """Accumulates per-test timing/status entries from behave JSON logs."""

    def __init__(self):
        # list of Munch(name=..., msecs=..., status=...)
        self.tests = []

    def load(self, timings):
        """Fold one behave JSON log (a list of features) into self.tests.

        Raises NoTestError when the log holds no tests, FailedError when
        any test's final status is 'failed'.
        """
        tests = {}
        for feature in timings:
            if not 'elements' in feature: continue
            for test in feature.elements:
                if test.type == 'background': continue
                if test.status == 'failed':
                    status = test.status
                elif not 'use.with_slow=true' in test.tags and not 'slow' in test.tags:
                    status = 'fast'
                else:
                    status = 'slow'
                # for retries, the last successful iteration (if any) will
                # overwrite the failed iterations
                tests[re.sub(r' -- @[0-9]+\.[0-9]+ ', '', test.name)] = Munch(
                    # convert to msecs here or too much gets rounded down to 0
                    msecs=sum([step.result.duration * 1000 for step in test.steps
                               if 'result' in step and 'duration' in step.result]),
                    status=status
                )
        if len(tests) == 0: raise NoTestError()
        if any(1 for test in tests.values() if test.status == 'failed'): raise FailedError()
        for name, test in tests.items():
            # BUG FIX: previously this used the stale loop variable `status`
            # (whatever test happened to be classified last), mislabeling
            # every entry; each test must carry its own recorded status.
            self.tests.append(Munch(name=name, msecs=test.msecs, status=test.status))
log = Log()
try:
    # Merge both shards' logs into one list of timed tests.
    for job in [1, 2]:
        with open(f'logs/behave-zotero-{job}-{branch}.json') as f:
            log.load(json.load(f, object_hook=Munch.fromDict))
    print(len(log.tests), 'tests')
    # Previous balance file: per-test duration history plus a run counter.
    with open(output) as f:
        history = json.load(f, object_hook=Munch.fromDict)
    # Upgrade legacy plain-number entries, then de-relativize run counts
    # (they are stored relative to the file-level 'runs' counter).
    for name, h in list(history.duration.items()):
        if type(h) in (float, int):
            history.duration[name] = Munch(msecs=h, runs=0)
        history.duration[name].runs += history.runs
    balance = Munch.fromDict({
        'duration': {},
        'runs': history.runs + 1,
    })
    # Fold the new measurements into the running averages, rounded to 10ms.
    for test in log.tests:
        if h := history.duration.get(test.name):
            avg = RunningAverage(h.msecs, h.runs)
        else:
            avg = RunningAverage()
        avg.add(test.msecs)
        balance.duration[test.name] = Munch(msecs=round(float(avg) / 10) * 10, runs=avg.n)
    # Partition tests into two bins of roughly equal total duration, once for
    # the full ('slow') set and once for the fast-only set, via a knapsack.
    for status in ['slow', 'fast']:
        tests = [test for test in log.tests if status in [ 'slow', test.status] ]
        durations = [balance.duration[test.name].msecs for test in tests]
        #if status == 'slow':
        #    solver = pywrapknapsack_solver.KnapsackSolver.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER
        #else:
        #    solver = pywrapknapsack_solver.KnapsackSolver.KNAPSACK_MULTIDIMENSION_CBC_MIP_SOLVER
        solver = pywrapknapsack_solver.KnapsackSolver.KNAPSACK_MULTIDIMENSION_CBC_MIP_SOLVER
        solver = pywrapknapsack_solver.KnapsackSolver(solver, 'TestBalancer')
        # value 1 per test, weight = duration, capacity = half the total
        solver.Init([1 for n in durations], [durations], [int(sum(durations)/2)])
        solver.Solve()
        balance[status] = {}
        for _bin in [1, 2]:
            balance[status][_bin] = [ test.name for i, test in enumerate(tests) if solver.BestSolutionContains(i) == (_bin == 1) ]
        print(status, len(tests), 'tests,', { k: len(t) for k, t in balance[status].items()})
    # simplify for cleaner diffs
    for name, duration in list(balance.duration.items()):
        # re-relativize run counts; entries seen every run collapse to a number
        balance.duration[name].runs -= balance.runs
        if balance.duration[name].runs == 0:
            balance.duration[name] = balance.duration[name].msecs
except FileNotFoundError:
    print('logs incomplete')
    sys.exit()
except NoTestError:
    print('missing tests')
    sys.exit()
except FailedError:
    print('some tests failed')
    sys.exit()
print('writing', output)
with open(output, 'w') as f:
    json.dump(balance, f, indent=' ', sort_keys=True)
print(f"::set-output name=balance::{output}")
|
mit
|
cysnake4713/odoo
|
addons/l10n_ve/__openerp__.py
|
119
|
3056
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
##############################################################################
# Module programed and financed by:
# Vauxoo, C.A. (<http://vauxoo.com>).
# Our Community team mantain this module:
# https://launchpad.net/~openerp-venezuela
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Venezuela - Accounting',
'version': '1.0',
'author': ['OpenERP SA', 'Vauxoo'],
'category': 'Localization/Account Charts',
'description':
"""
Chart of Account for Venezuela.
===============================
Venezuela doesn't have any chart of account by law, but the default
proposed in OpenERP should comply with some Accepted best practices in Venezuela,
this plan comply with this practices.
This module has been tested as base for more of 1000 companies, because
it is based in a mixtures of most common software in the Venezuelan
market what will allow for sure to accountants feel them first steps with
OpenERP more confortable.
This module doesn't pretend be the total localization for Venezuela,
but it will help you to start really quickly with OpenERP in this country.
This module give you.
---------------------
- Basic taxes for Venezuela.
- Have basic data to run tests with community localization.
- Start a company from 0 if your needs are basic from an accounting PoV.
We recomend install account_anglo_saxon if you want valued your
stocks as Venezuela does with out invoices.
If you install this module, and select Custom chart a basic chart will be proposed,
but you will need set manually account defaults for taxes.
""",
'depends': ['account',
'base_vat',
'account_chart'
],
'demo': [],
'data': ['data/account_tax_code.xml',
'data/account_user_types.xml',
'data/account_chart.xml',
'data/account_tax.xml',
'data/l10n_chart_ve_wizard.xml'
],
'auto_install': False,
'installable': True,
'images': ['images/config_chart_l10n_ve.jpeg',
'images/l10n_ve_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
andela-angene/coursebuilder-core
|
coursebuilder/modules/gen_sample_data/gen_sample_data.py
|
3
|
14095
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates sample data for a course and its users."""
__author__ = ['Timothy Johnson (tujohnson@google.com)']
import os
import random
from common import safe_dom
from common import users
from common import utils as common_utils
from controllers import utils
from models import analytics
from models import courses
from models import custom_modules
from models import event_transforms
from models import models
from models import transforms
from models.models import EventEntity
from models.models import Student
from modules.dashboard import dashboard
from google.appengine.ext.testbed import datastore_stub_util
# Registration identifiers for this module within Course Builder.
MODULE_NAME = 'gen_sample_data'
MODULE_TITLE = 'Generate Sample Data'
def register_analytic():
    """Register the sample-data generator as an analytics sub-tab.

    This isn't exactly an analytic, but registering it this way places it
    alongside the other analytics sub-tabs on the Dashboard.
    """
    tab_name = 'sample_data'
    tab_title = 'Generate Sample Data'
    template_path = os.path.join(
        'modules', 'gen_sample_data', 'templates', 'sample_data.html')
    visualization = analytics.Visualization(tab_name, tab_title, template_path)
    dashboard.DashboardHandler.add_sub_nav_mapping(
        'analytics', tab_name, tab_title, action='analytics_sample_data',
        contents=analytics.TabRenderer([visualization]))
# Generates a random string from an ID between 1 and 10**16, inclusive
def _generate_id_num():
return random.randint(1, 10**16)
class GenerateSampleQuizHandler(utils.BaseHandler):
    """Generates a new assessment for the currently active course."""

    NUM_QUESTIONS = 10   # questions per generated assessment
    NUM_ANSWERS = 10     # answers are random ints in [1, NUM_ANSWERS]
    SAMPLE_QUIZ_PATH = 'generate-quiz'
    QUESTION_PREFIX = 'gen_sample: '  # marks auto-generated questions

    # TODO(tujohnson): Re-use existing questions sometimes (future CL)
    # TODO(tujohnson): Also generate multiple-choice questions (future CL)
    def post(self):
        """Create NUM_QUESTIONS questions plus one assessment, then redirect."""
        questions = [self._generate_question_data(i + 1)
                     for i in xrange(self.NUM_QUESTIONS)]
        self._create_assessment(questions)
        self.redirect(self.request.referer)

    def _create_assessment(self, questions):
        """Store each question entity and wire them into a new assessment."""
        question_id_list = []
        for question in questions:
            question_id_list.append(self._add_question(question))
        # Build one <question> element per stored question id.
        questions_data_list = []
        for i in xrange(len(questions)):
            questions_data_list.append(
                str(safe_dom.Element(
                    'question',
                    quid=str(question_id_list[i]),
                    instanceid=common_utils.generate_instance_id())))
        questions_data = '\n'.join(questions_data_list)
        course = self.get_course()
        self._add_assessment(course, 'Next Assessment', questions_data)

    def _generate_question_data(self, question_num):
        """Return the serialized data for one auto-generated question."""
        question_name = '%sQuestion: %s' % (self.QUESTION_PREFIX, question_num)
        return self._generate_question_data_internal(question_num,
                                                     question_name)

    def _generate_question_data_internal(self, question_num, question_name):
        """Build a short-answer question dict and serialize it."""
        answer = str(random.randint(1, self.NUM_ANSWERS))
        question_data = {}
        # If a question is supposed to look automatically generated, we
        # tag the beginning of it with the prefix defined by the class.
        question_data['question'] = question_name
        question_data['rows'] = 1
        question_data['columns'] = 100
        question_data['defaultFeedback'] = ''
        question_data['graders'] = [{
            'matcher': 'case_insensitive',
            'feedback': '',
            'score': '1.0',
            'response': answer,
        }]
        question_data['type'] = 1
        question_data['description'] = 'Question ' + str(question_num)
        question_data['version'] = '1.5'
        question_data['hint'] = ''
        question_data_string = transforms.dumps(question_data)
        return question_data_string

    def _add_assessment(self, course, title, questions_data):
        """Append an available assessment holding *questions_data* to course."""
        assessment = course.add_assessment()
        assessment.title = title
        assessment.availability = courses.AVAILABILITY_AVAILABLE
        assessment.html_content = questions_data
        course.save()

    def _add_question(self, question_data):
        """Persist one question entity; return its datastore id as a string."""
        # Let the datastore choose the ID for entities that we create
        to_store = models.QuestionEntity(data=question_data)
        question_id = to_store.put().id()
        return str(question_id)
class GenerateSampleStudentsHandler(utils.BaseHandler):
    """Generates a new set of students for the currently active course"""

    NUM_STUDENTS = 10
    SAMPLE_STUDENTS_PATH = 'generate-students'
    EMAIL_PREFIX = 'gen_sample_student_'  # marks auto-generated students

    def post(self):
        """Create NUM_STUDENTS synthetic enrolled students, then redirect."""
        student_emails = self._generate_emails(self.EMAIL_PREFIX)
        self._generate_students(student_emails)
        # Redirect back to original page
        self.redirect(self.request.referer)

    def _generate_emails(self, prefix):
        # Random id numbers make the addresses effectively unique per run.
        return ['%s%s@example.com' % (prefix, _generate_id_num())
                for i in xrange(self.NUM_STUDENTS)]

    def _generate_students(self, student_emails):
        """Create and enroll a Student per email, with page-visit events."""
        course = self.get_course()
        for email in student_emails:
            user_id = datastore_stub_util.SynthesizeUserId(email)
            student = Student(name='Student%s' % user_id, key_name=user_id,
                              email=email, user_id=user_id, is_enrolled=True)
            Student.put(student)
            # Record our new student visiting the home page for our course,
            # then registering
            user = users.User(email=email, _user_id=user_id)
            host = os.environ['HTTP_HOST']
            self.visit_page(user, 'http://%s/%s' %
                            (host, course.title))
            self.visit_page(user, 'http://%s/%s'
                            '/course#registration_confirmation' %
                            (host, course.title))

    def visit_page(self, user, pageURL):
        """Record an 'enter-page' EventEntity as if *user* loaded *pageURL*."""
        source = 'enter-page'
        data = {}
        # Fixed synthetic browser fingerprint for the generated event.
        data['user_agent'] = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit'
                              '/537.36 (KHTML, like Gecko) Chrome'
                              '/51.0.2704.106 Safari/537.36')
        data['loc'] = {'page_locale': 'en_US', 'locale': 'en_US',
                       'region':'null', 'language': 'en-US,en;q=0.8',
                       'country': 'ZZ', 'city': 'null'}
        data['location'] = pageURL
        data_str = transforms.dumps(data)
        EventEntity.record(source, user, data_str)
class GenerateSampleScoresHandler(utils.BaseHandler):
"""Generates answers for automatically generated students.
Students are determined to be automatically generated if their email begins
with gen_sample_student_. For these students, we generate one new answer
that receives full credit with probability CORRECT_PROB. Otherwise, we
generate the answer -, which we assume to be incorrect.
"""
# TODO(tujohnson): We may want to have varying probabilities for different
# students.
CORRECT_PROB = 0.5
SAMPLE_SCORES_PATH = 'generate-scores'
def post(self):
# Sort questions into a dictionary based on their unit number
questions_by_usage_id = event_transforms.get_questions_by_usage_id(
self.app_context)
sorted_questions_by_unit = self._rearrange_dict_by_field(
questions_by_usage_id, 'unit')
# Only use Students we generated.
students = common_utils.iter_all(models.Student.all().filter(
'email >', 'gen_sample_student_').filter(
'email <', 'gen_sample_student`'))
source = 'submit-assessment'
for student in students:
user = users.User(email=student.email, _user_id=student.user_id)
assessment_data = self._generate_answers(student,
sorted_questions_by_unit)
for data in assessment_data:
EventEntity.record(source, user, transforms.dumps({
'values': data, 'location': 'AnswerHandler'}))
self.redirect(self.request.referer)
# Returns a list of answers for all assessments, in the required data format
def _generate_answers(self, student, sorted_questions_by_unit):
course = self.get_course()
answers = []
for unit in course.get_units():
if (unit.is_assessment() and
unit.unit_id in sorted_questions_by_unit):
answers.append(self._generate_answers_one_assessment(
student, unit, sorted_questions_by_unit[unit.unit_id]))
return answers
def _generate_answers_one_assessment(self, student, assessment, questions):
answersEntity = assessment.workflow.get_grader()
answer = {}
# Generate the correct answer with the defined constant probability
# Otherwise leave the answer blank so that it will be marked incorrect
for question_id in questions:
rand_val = random.random()
if rand_val < self.CORRECT_PROB:
answer[question_id] = {'response':
questions[question_id]['graders'][0]['response']}
else:
answer[question_id] = {'response': '-'}
answer['answers'] = {}
for question_id in questions:
answer['answers'][question_id] = answer[question_id]['response']
answer['quids'] = {}
for question_id in questions:
answer['quids'][question_id] = questions[question_id]['id']
answer['totalWeight'] = sum([questions[question_id]['weight']
for question_id in questions])
answer['containedTypes'] = {}
for question_id in questions:
answer['containedTypes'][question_id] = 'SaQuestion'
answer['individualScores'] = {}
for question_id in questions:
if questions[question_id]['graders'][0]['response'] == \
answer[question_id]['response']:
answer['individualScores'][question_id] = 1
else:
answer['individualScores'][question_id] = 0
answer['rawScore'] = sum([answer['individualScores'][question_id]
for question_id in questions])
answer['percentScore'] = answer['rawScore'] / answer['totalWeight']
answer['percentScore'] = int(answer['percentScore'] * 100)
answer['version'] = '1.5'
return answer
    def _rearrange_dict_by_field(self, old_dict, sorted_field):
        """Rearranges and filters a dictionary of questions.

        Takes a dictionary of entries of the form
        {id1 : { 'val1': _, 'val2': _ }, id2 : { 'val1': _, 'val2': _ }, ...}
        and rearranges it so that items that match for the chosen field are
        placed together.

        When we arrange by unit number, the output will be:
        { <unit_num_1> : <dictionary of questions from unit_num_1>,
          <unit_num_2> : <dictionary of questions from unit_num_2>, ...}

        We also include only the questions whose text begins with the correct
        prefix marking it as an automatically generated question.

        NOTE(review): the entry dicts in the returned buckets are the same
        objects held by old_dict, and a 'graders' key is assigned into them
        below — so this call mutates old_dict's entries in place.
        """
        # First we need to get the set of ID's for automatically generated
        # questions, and their graders.
        question_entities = common_utils.iter_all(models.QuestionEntity.all())
        grader_dict = {}
        auto_generated_ids = set()
        for question_entity in question_entities:
            question_data = transforms.loads(question_entity.data)
            question_id = str(question_entity.key().id())
            text = question_data['question']
            # Only questions created by GenerateSampleQuizHandler carry the
            # marker prefix; all other questions are ignored.
            if text.startswith(GenerateSampleQuizHandler.QUESTION_PREFIX):
                auto_generated_ids.add(question_id)
                grader_dict[question_id] = question_data['graders']
        sorted_dict = {}
        for instance_id in old_dict:
            old_entry = old_dict[instance_id]
            question_id = old_entry['id']
            if question_id in auto_generated_ids:
                sort_val = old_entry[sorted_field]
                # Group entries by the chosen field's value, creating the
                # bucket the first time a value is seen.
                if sort_val in sorted_dict:
                    sorted_dict[sort_val][instance_id] = old_dict[instance_id]
                else:
                    sorted_dict[sort_val] = {instance_id:
                                             old_dict[instance_id]}
                grader = grader_dict[question_id]
                sorted_dict[sort_val][instance_id]['graders'] = grader
        return sorted_dict
# Module-level handle to the registered Module; set by register_module().
custom_module = None
def register_module():
    """Registers this module in the registry.

    Wires up the sample-quiz, sample-students and sample-scores handlers
    under their namespaced routes and registers the analytic when the
    module is enabled.
    """
    def on_module_enabled():
        # Deferred hook: the analytic is only registered once the module
        # is actually enabled.
        register_analytic()
    global_routes = []
    namespaced_routes = [
        ('/' + GenerateSampleQuizHandler.SAMPLE_QUIZ_PATH,
            GenerateSampleQuizHandler),
        ('/' + GenerateSampleStudentsHandler.SAMPLE_STUDENTS_PATH,
            GenerateSampleStudentsHandler),
        ('/' + GenerateSampleScoresHandler.SAMPLE_SCORES_PATH,
            GenerateSampleScoresHandler)]
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        MODULE_TITLE,
        'Generate sample data',
        global_routes,
        namespaced_routes,
        notify_module_enabled=on_module_enabled)
    return custom_module
|
apache-2.0
|
dennis-sheil/commandergenius
|
project/jni/python/src/Lib/unittest.py
|
55
|
31107
|
#!/usr/bin/env python
'''
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEquals((1 + 2), 3)
self.assertEquals(0 + 1, 1)
def testMultiply(self):
self.assertEquals((0 * 10), 0)
self.assertEquals((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/lib/module-unittest.html
Copyright (c) 1999-2003 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
'''
__author__ = "Steve Purcell"
__email__ = "stephen_purcell at yahoo dot com"
__version__ = "#Revision: 1.63 $"[11:-2]
import time
import sys
import traceback
import os
import types
##############################################################################
# Exported classes and functions
##############################################################################
__all__ = ['TestResult', 'TestCase', 'TestSuite', 'TextTestRunner',
'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
##############################################################################
# Backward compatibility
##############################################################################
# Compatibility shim: Python versions before 2.2 lack tuple/list classinfo
# support in the builtin isinstance(), so shadow it with an equivalent.
if sys.version_info[:2] < (2, 2):
    def isinstance(obj, clsinfo):
        import __builtin__
        if type(clsinfo) in (tuple, list):
            for cls in clsinfo:
                # 'type' stands in for old-style classes on ancient Pythons.
                if cls is type: cls = types.ClassType
                if __builtin__.isinstance(obj, cls):
                    return 1
            return 0
        else: return __builtin__.isinstance(obj, clsinfo)
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
##############################################################################
# Test framework core
##############################################################################
# All classes defined herein are 'new-style' classes, allowing use of 'super()'
__metaclass__ = type
def _strclass(cls):
    """Return the fully qualified '<module>.<name>' string for *cls*."""
    return "%s.%s" % (cls.__module__, cls.__name__)
# Marker checked via tb_frame.f_globals so TestResult can recognise (and
# hide) traceback frames that belong to this module rather than user code.
__unittest = 1
class TestResult:
    """Holder for test result information.

    Test results are managed automatically by TestCase and TestSuite;
    test authors normally never touch them directly.  Each instance
    tracks the number of tests run plus the failures and errors that
    occurred, stored as (testcase, formatted_traceback) pairs.
    """
    def __init__(self):
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.shouldStop = False
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
    def stopTest(self, test):
        "Called when the given test has been run"
    def addError(self, test, err):
        """Record an error; 'err' is a sys.exc_info()-style triple."""
        self.errors.append((test, self._exc_info_to_string(err, test)))
    def addFailure(self, test, err):
        """Record a failure; 'err' is a sys.exc_info()-style triple."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
    def addSuccess(self, test):
        "Called when a test has completed successfully"
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return not (self.failures or self.errors)
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Drop leading frames that belong to the test runner itself.
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # For assertion failures, also trim trailing assert*() frames.
            limit = self._count_relevant_tb_levels(tb)
            lines = traceback.format_exception(exctype, value, tb, limit)
        else:
            lines = traceback.format_exception(exctype, value, tb)
        return ''.join(lines)
    def _is_relevant_tb_level(self, tb):
        # Frames from this module are marked by the __unittest global.
        return '__unittest' in tb.tb_frame.f_globals
    def _count_relevant_tb_levels(self, tb):
        depth = 0
        while tb and not self._is_relevant_tb_level(tb):
            depth += 1
            tb = tb.tb_next
        return depth
    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % (
            _strclass(self.__class__), self.testsRun, len(self.errors),
            len(self.failures))
class TestCase:
    """A class whose instances are single test cases.
    By default, the test code itself should be placed in a method named
    'runTest'.
    If the fixture may be used for many test cases, create as
    many test methods as are needed. When instantiating such a TestCase
    subclass, specify in the constructor arguments the name of the test method
    that the instance is to execute.
    Test authors should subclass TestCase for their own tests. Construction
    and deconstruction of the test's environment ('fixture') can be
    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
    If it is necessary to override the __init__ method, the base class
    __init__ method must always be called. It is important that subclasses
    should not change the signature of their __init__ method, since instances
    of the classes are instantiated automatically by parts of the framework
    in order to be run.
    """
    # This attribute determines which exception will be raised when
    # the instance's assertion methods fail; test methods raising this
    # exception will be deemed to have 'failed' rather than 'errored'
    failureException = AssertionError
    def __init__(self, methodName='runTest'):
        """Create an instance of the class that will use the named test
        method when executed. Raises a ValueError if the instance does
        not have a method with the specified name.
        """
        try:
            self._testMethodName = methodName
            testMethod = getattr(self, methodName)
            self._testMethodDoc = testMethod.__doc__
        except AttributeError:
            # Surface a clearer ValueError instead of the bare
            # AttributeError when the named method does not exist.
            raise ValueError, "no such test method in %s: %s" % \
                  (self.__class__, methodName)
    def setUp(self):
        "Hook method for setting up the test fixture before exercising it."
        pass
    def tearDown(self):
        "Hook method for deconstructing the test fixture after testing it."
        pass
    def countTestCases(self):
        # A single TestCase always counts as exactly one test.
        return 1
    def defaultTestResult(self):
        # Used by run() when the caller supplies no result object.
        return TestResult()
    def shortDescription(self):
        """Returns a one-line description of the test, or None if no
        description has been provided.
        The default implementation of this method returns the first line of
        the specified test method's docstring.
        """
        doc = self._testMethodDoc
        return doc and doc.split("\n")[0].strip() or None
    def id(self):
        return "%s.%s" % (_strclass(self.__class__), self._testMethodName)
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self._testMethodName == other._testMethodName
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Hash consistently with __eq__: same class plus same method name.
        return hash((type(self), self._testMethodName))
    def __str__(self):
        return "%s (%s)" % (self._testMethodName, _strclass(self.__class__))
    def __repr__(self):
        return "<%s testMethod=%s>" % \
               (_strclass(self.__class__), self._testMethodName)
    def run(self, result=None):
        # Orchestrates one test: setUp -> test method -> tearDown, reporting
        # each outcome to 'result'.  KeyboardInterrupt is always re-raised
        # so the user can abort a whole run.
        if result is None: result = self.defaultTestResult()
        result.startTest(self)
        testMethod = getattr(self, self._testMethodName)
        try:
            try:
                self.setUp()
            except KeyboardInterrupt:
                raise
            except:
                # A broken fixture is an error; the test itself never ran.
                result.addError(self, self._exc_info())
                return
            ok = False
            try:
                testMethod()
                ok = True
            except self.failureException:
                result.addFailure(self, self._exc_info())
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self._exc_info())
            # tearDown runs even when the test itself failed or errored.
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self._exc_info())
                ok = False
            if ok: result.addSuccess(self)
        finally:
            result.stopTest(self)
    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)
    def debug(self):
        """Run the test without collecting errors in a TestResult"""
        self.setUp()
        getattr(self, self._testMethodName)()
        self.tearDown()
    def _exc_info(self):
        """Return a version of sys.exc_info() with the traceback frame
        minimised; usually the top level of the traceback frame is not
        needed.
        """
        return sys.exc_info()
    def fail(self, msg=None):
        """Fail immediately, with the given message."""
        raise self.failureException, msg
    def failIf(self, expr, msg=None):
        "Fail the test if the expression is true."
        if expr: raise self.failureException, msg
    def failUnless(self, expr, msg=None):
        """Fail the test unless the expression is true."""
        if not expr: raise self.failureException, msg
    def failUnlessRaises(self, excClass, callableObj, *args, **kwargs):
        """Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.
        """
        try:
            callableObj(*args, **kwargs)
        except excClass:
            return
        else:
            if hasattr(excClass,'__name__'): excName = excClass.__name__
            else: excName = str(excClass)
            raise self.failureException, "%s not raised" % excName
    def failUnlessEqual(self, first, second, msg=None):
        """Fail if the two objects are unequal as determined by the '=='
        operator.
        """
        if not first == second:
            raise self.failureException, \
                  (msg or '%r != %r' % (first, second))
    def failIfEqual(self, first, second, msg=None):
        """Fail if the two objects are equal as determined by the '=='
        operator.
        """
        if first == second:
            raise self.failureException, \
                  (msg or '%r == %r' % (first, second))
    def failUnlessAlmostEqual(self, first, second, places=7, msg=None):
        """Fail if the two objects are unequal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).
        """
        if round(abs(second-first), places) != 0:
            raise self.failureException, \
                  (msg or '%r != %r within %r places' % (first, second, places))
    def failIfAlmostEqual(self, first, second, places=7, msg=None):
        """Fail if the two objects are equal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).
        """
        if round(abs(second-first), places) == 0:
            raise self.failureException, \
                  (msg or '%r == %r within %r places' % (first, second, places))
    # Synonyms for assertion methods
    assertEqual = assertEquals = failUnlessEqual
    assertNotEqual = assertNotEquals = failIfEqual
    assertAlmostEqual = assertAlmostEquals = failUnlessAlmostEqual
    assertNotAlmostEqual = assertNotAlmostEquals = failIfAlmostEqual
    assertRaises = failUnlessRaises
    assert_ = assertTrue = failUnless
    assertFalse = failIf
class TestSuite:
    """A test suite is a composite test consisting of a number of TestCases.
    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """
    def __init__(self, tests=()):
        self._tests = []
        self.addTests(tests)
    def __repr__(self):
        return "<%s tests=%s>" % (_strclass(self.__class__), self._tests)
    __str__ = __repr__
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self._tests == other._tests
    def __ne__(self, other):
        return not self == other
    # Can't guarantee hash invariant, so flag as unhashable
    __hash__ = None
    def __iter__(self):
        return iter(self._tests)
    def countTestCases(self):
        # Sum over children so nested suites are counted recursively.
        cases = 0
        for test in self._tests:
            cases += test.countTestCases()
        return cases
    def addTest(self, test):
        # sanity checks
        if not hasattr(test, '__call__'):
            raise TypeError("the test to add must be callable")
        # types.ClassType covers Python 2 old-style classes as well.
        if (isinstance(test, (type, types.ClassType)) and
            issubclass(test, (TestCase, TestSuite))):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)
    def addTests(self, tests):
        # basestring is the common Python 2 base of str and unicode; a bare
        # string would otherwise be iterated character by character.
        if isinstance(tests, basestring):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)
    def run(self, result):
        for test in self._tests:
            # Honour stop() requests made on the shared result object.
            if result.shouldStop:
                break
            test(result)
        return result
    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)
    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self._tests: test.debug()
class FunctionTestCase(TestCase):
    """A test case that wraps a test function.
    This is useful for slipping pre-existing test functions into the
    unittest framework. Optionally, set-up and tidy-up functions can be
    supplied. As with TestCase, the tidy-up ('tearDown') function will
    always be called if the set-up ('setUp') function ran successfully.
    """
    def __init__(self, testFunc, setUp=None, tearDown=None,
                 description=None):
        # Double-underscore attributes are name-mangled per class, so they
        # cannot collide with attributes used by TestCase itself.
        TestCase.__init__(self)
        self.__setUpFunc = setUp
        self.__tearDownFunc = tearDown
        self.__testFunc = testFunc
        self.__description = description
    def setUp(self):
        if self.__setUpFunc is not None:
            self.__setUpFunc()
    def tearDown(self):
        if self.__tearDownFunc is not None:
            self.__tearDownFunc()
    def runTest(self):
        self.__testFunc()
    def id(self):
        return self.__testFunc.__name__
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.__setUpFunc == other.__setUpFunc and \
               self.__tearDownFunc == other.__tearDownFunc and \
               self.__testFunc == other.__testFunc and \
               self.__description == other.__description
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        return hash((type(self), self.__setUpFunc, self.__tearDownFunc,
                     self.__testFunc, self.__description))
    def __str__(self):
        return "%s (%s)" % (_strclass(self.__class__), self.__testFunc.__name__)
    def __repr__(self):
        return "<%s testFunc=%s>" % (_strclass(self.__class__), self.__testFunc)
    def shortDescription(self):
        # Prefer the explicit description, then the first docstring line.
        if self.__description is not None: return self.__description
        doc = self.__testFunc.__doc__
        return doc and doc.split("\n")[0].strip() or None
##############################################################################
# Locating and loading tests
##############################################################################
class TestLoader:
    """This class is responsible for loading tests according to various
    criteria and returning them wrapped in a TestSuite
    """
    testMethodPrefix = 'test'
    # 'cmp' is the Python 2 builtin three-way comparator; it is adapted to
    # a key= function via _CmpToKey in getTestCaseNames below.
    sortTestMethodsUsing = cmp
    suiteClass = TestSuite
    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        # Fall back to the conventional single 'runTest' method.
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']
        return self.suiteClass(map(testCaseClass, testCaseNames))
    def loadTestsFromModule(self, module):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if (isinstance(obj, (type, types.ClassType)) and
                issubclass(obj, TestCase)):
                tests.append(self.loadTestsFromTestCase(obj))
        return self.suiteClass(tests)
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.
        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.
        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            # Peel trailing components off the dotted name until an
            # importable module prefix is found.
            parts_copy = parts[:]
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy: raise
            parts = parts[1:]
        # Walk the remaining attribute path down from the module.
        obj = module
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        if type(obj) == types.ModuleType:
            return self.loadTestsFromModule(obj)
        elif (isinstance(obj, (type, types.ClassType)) and
              issubclass(obj, TestCase)):
            return self.loadTestsFromTestCase(obj)
        elif (type(obj) == types.UnboundMethodType and
              isinstance(parent, (type, types.ClassType)) and
              issubclass(parent, TestCase)):
            return TestSuite([parent(obj.__name__)])
        elif isinstance(obj, TestSuite):
            return obj
        elif hasattr(obj, '__call__'):
            test = obj()
            if isinstance(test, TestSuite):
                return test
            elif isinstance(test, TestCase):
                return TestSuite([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)
    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
            return attrname.startswith(prefix) and hasattr(getattr(testCaseClass, attrname), '__call__')
        # On Python 2 filter() returns a list, so .sort() below is valid.
        testFnNames = filter(isTestMethod, dir(testCaseClass))
        if self.sortTestMethodsUsing:
            testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
        return testFnNames
# Shared default loader used by the module-level convenience functions.
defaultTestLoader = TestLoader()
##############################################################################
# Patches for old functions: these functions should be considered obsolete
##############################################################################
def _makeLoader(prefix, sortUsing, suiteClass=None):
    # Build a TestLoader configured for the legacy module-level helpers.
    loader = TestLoader()
    loader.sortTestMethodsUsing = sortUsing
    loader.testMethodPrefix = prefix
    if suiteClass: loader.suiteClass = suiteClass
    return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
    # Obsolete: kept only for backward compatibility with pre-TestLoader code.
    return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
    # Obsolete: kept only for backward compatibility with pre-TestLoader code.
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
    # Obsolete: kept only for backward compatibility with pre-TestLoader code.
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
##############################################################################
# Text UI
##############################################################################
class _WritelnDecorator:
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg: self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class _TextTestResult(TestResult):
    """A test result class that can print formatted text results to a stream.
    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70
    def __init__(self, stream, descriptions, verbosity):
        TestResult.__init__(self)
        self.stream = stream
        # verbosity > 1: one line per test; verbosity == 1: one dot per test.
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions
    def getDescription(self, test):
        if self.descriptions:
            return test.shortDescription() or str(test)
        else:
            return str(test)
    def startTest(self, test):
        TestResult.startTest(self, test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()
    def addSuccess(self, test):
        TestResult.addSuccess(self, test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()
    def addError(self, test, err):
        TestResult.addError(self, test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()
    def addFailure(self, test, err):
        TestResult.addFailure(self, test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()
    def printErrors(self):
        # Terminate the dot/verbose progress line before the error details.
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
class TextTestRunner:
    """A test runner class that displays results in textual form.
    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
    def _makeResult(self):
        return _TextTestResult(self.stream, self.descriptions, self.verbosity)
    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time.time()
        test(result)
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        # Pluralise "test" and report wall-clock duration.
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        if not result.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                self.stream.write("failures=%d" % failed)
            if errored:
                if failed: self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")
        return result
##############################################################################
# Facilities for running tests from the command line
##############################################################################
class TestProgram:
    """A command-line program that runs a set of tests; this is primarily
       for making test modules conveniently executable.
    """
    USAGE = """\
Usage: %(progName)s [options] [test] [...]
Options:
  -h, --help       Show this message
  -v, --verbose    Verbose output
  -q, --quiet      Minimal output
Examples:
  %(progName)s                               - run default set of tests
  %(progName)s MyTestSuite                   - run suite 'MyTestSuite'
  %(progName)s MyTestCase.testSomething      - run MyTestCase.testSomething
  %(progName)s MyTestCase                    - run all 'test*' test methods
                                               in MyTestCase
"""
    def __init__(self, module='__main__', defaultTest=None,
                 argv=None, testRunner=TextTestRunner,
                 testLoader=defaultTestLoader):
        # 'module' may be a dotted name (walked attribute by attribute) or
        # an already-imported module object.
        if type(module) == type(''):
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv
        self.verbosity = 1
        self.defaultTest = defaultTest
        self.testRunner = testRunner
        self.testLoader = testLoader
        self.progName = os.path.basename(argv[0])
        # Parsing and running happen in the constructor; runTests() exits
        # the process via sys.exit.
        self.parseArgs(argv)
        self.runTests()
    def usageExit(self, msg=None):
        if msg: print msg
        print self.USAGE % self.__dict__
        sys.exit(2)
    def parseArgs(self, argv):
        import getopt
        try:
            options, args = getopt.getopt(argv[1:], 'hHvq',
                                          ['help','verbose','quiet'])
            for opt, value in options:
                if opt in ('-h','-H','--help'):
                    self.usageExit()
                if opt in ('-q','--quiet'):
                    self.verbosity = 0
                if opt in ('-v','--verbose'):
                    self.verbosity = 2
            # With no names and no default, run everything in the module.
            if len(args) == 0 and self.defaultTest is None:
                self.test = self.testLoader.loadTestsFromModule(self.module)
                return
            if len(args) > 0:
                self.testNames = args
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error, msg:
            self.usageExit(msg)
    def createTests(self):
        self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                       self.module)
    def runTests(self):
        if isinstance(self.testRunner, (type, types.ClassType)):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity)
            except TypeError:
                # didn't accept the verbosity argument
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        result = testRunner.run(self.test)
        # Process exit status 0 on success, 1 on any failure/error.
        sys.exit(not result.wasSuccessful())
# Alias preserving the conventional 'unittest.main()' entry point.
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
    main(module=None)
|
lgpl-2.1
|
upfluence/thrift
|
test/py/TestEof.py
|
99
|
4327
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Command-line/bootstrap setup: locate the generated gen-py code and the
# built thrift library, and put both on sys.path BEFORE importing from them.
import sys, glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:] # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
import unittest
import time
class TestEof(unittest.TestCase):
  def make_data(self, pfactory=None):
    # Serialize two Xtruct records into an in-memory transport and return
    # the raw bytes, using the given protocol factory (binary by default).
    trans = TTransport.TMemoryBuffer()
    if pfactory:
      prot = pfactory.getProtocol(trans)
    else:
      prot = TBinaryProtocol.TBinaryProtocol(trans)
    x = Xtruct()
    x.string_thing = "Zero"
    x.byte_thing = 0
    x.write(prot)
    x = Xtruct()
    x.string_thing = "One"
    x.byte_thing = 1
    x.write(prot)
    return trans.getvalue()
  def testTransportReadAll(self):
    """Test that readAll on any type of transport throws an EOFError"""
    trans = TTransport.TMemoryBuffer(self.make_data())
    trans.readAll(1)
    try:
      trans.readAll(10000)
    except EOFError:
      return
    self.fail("Should have gotten EOFError")
  def eofTestHelper(self, pfactory):
    # Read back both serialized records, then verify that a third read past
    # the end of the buffer raises EOFError.
    trans = TTransport.TMemoryBuffer(self.make_data(pfactory))
    prot = pfactory.getProtocol(trans)
    x = Xtruct()
    x.read(prot)
    self.assertEqual(x.string_thing, "Zero")
    self.assertEqual(x.byte_thing, 0)
    x = Xtruct()
    x.read(prot)
    self.assertEqual(x.string_thing, "One")
    self.assertEqual(x.byte_thing, 1)
    try:
      x = Xtruct()
      x.read(prot)
    except EOFError:
      return
    self.fail("Should have gotten EOFError")
  def eofTestHelperStress(self, pfactory):
    """Test the ability of TBinaryProtocol to deal with the removal of every byte in the file"""
    # TODO: we should make sure this covers more of the code paths
    # Truncate the serialized data at every possible length and expect an
    # EOFError each time (xrange: this is Python 2 code).
    data = self.make_data(pfactory)
    for i in xrange(0, len(data) + 1):
      trans = TTransport.TMemoryBuffer(data[0:i])
      prot = pfactory.getProtocol(trans)
      try:
        x = Xtruct()
        x.read(prot)
        x.read(prot)
        x.read(prot)
      except EOFError:
        continue
      self.fail("Should have gotten an EOFError")
  def testBinaryProtocolEof(self):
    """Test that TBinaryProtocol throws an EOFError when it reaches the end of the stream"""
    self.eofTestHelper(TBinaryProtocol.TBinaryProtocolFactory())
    self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolFactory())
  def testBinaryProtocolAcceleratedEof(self):
    """Test that TBinaryProtocolAccelerated throws an EOFError when it reaches the end of the stream"""
    self.eofTestHelper(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())
    self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())
  def testCompactProtocolEof(self):
    """Test that TCompactProtocol throws an EOFError when it reaches the end of the stream"""
    self.eofTestHelper(TCompactProtocol.TCompactProtocolFactory())
    self.eofTestHelperStress(TCompactProtocol.TCompactProtocolFactory())
def suite():
  """Bundle the TestEof cases into a suite for unittest.main."""
  bundle = unittest.TestSuite()
  bundle.addTest(unittest.TestLoader().loadTestsFromTestCase(TestEof))
  return bundle
if __name__ == "__main__":
  unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
|
apache-2.0
|
cloudbase/maas
|
src/maas/tests/test_maas.py
|
1
|
3335
|
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test the maas package."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
from importlib import import_module
import new
import os.path
import sys
from textwrap import dedent
from unittest import skipIf
from fixtures import PythonPathEntry
from maas import (
find_settings,
import_local_settings,
import_settings,
)
from maastesting.djangotestcase import DjangoTestCase
from maastesting.factory import factory
class TestSettingsHelpers(DjangoTestCase):
    """Test Django settings helper functions."""

    def test_find_settings(self):
        # find_settings() returns a dict of settings from a Django-like
        # settings file. It excludes settings beginning with underscores.
        module = new.module(b"example")
        module.SETTING = factory.getRandomString()
        module._NOT_A_SETTING = factory.getRandomString()
        expected = {"SETTING": module.SETTING}
        observed = find_settings(module)
        self.assertEqual(expected, observed)

    def test_import_settings(self):
        # import_settings() copies settings from another module into the
        # caller's global scope.
        source = new.module(b"source")
        source.SETTING = factory.getRandomString()
        target = new.module(b"target")
        target._source = source
        target._import_settings = import_settings
        # Evaluate inside the target module's namespace so the helper
        # sees `target` as its caller — presumably import_settings
        # inspects the caller's globals; TODO confirm against maas.
        eval("_import_settings(_source)", vars(target))
        expected = {"SETTING": source.SETTING}
        observed = find_settings(target)
        self.assertEqual(expected, observed)

    # Name of the optional local-settings module exercised by the two
    # test_import_local_settings_* variants below.
    local_settings_module = b"maas_local_settings"

    def _test_import_local_settings(self):
        # import_local_settings() copies settings from the local settings
        # module into the caller's global scope.
        target = new.module(b"target")
        target._import_local_settings = import_local_settings
        eval("_import_local_settings()", vars(target))
        source = import_module(self.local_settings_module)
        expected = find_settings(source)
        observed = find_settings(target)
        self.assertEqual(expected, observed)

    @skipIf(
        local_settings_module in sys.modules,
        "%s already imported." % local_settings_module)
    def test_import_local_settings_1(self):
        # The local settings module has not yet been imported, so fake one.
        config = dedent("""
SETTING = %r
_NOT_A_SETTING = %r
""" % (factory.getRandomString(), factory.getRandomString()))
        module = self.make_file(
            name=b"%s.py" % self.local_settings_module, contents=config)
        # NOTE(review): module_file is unused; only the directory is
        # needed for the PythonPathEntry fixture.
        module_dir, module_file = os.path.split(module)
        self.addCleanup(sys.modules.pop, self.local_settings_module, None)
        self.useFixture(PythonPathEntry(module_dir))
        self._test_import_local_settings()

    @skipIf(
        local_settings_module not in sys.modules,
        "%s not yet imported." % local_settings_module)
    def test_import_local_settings_2(self):
        # The local settings module has been imported, so test with that.
        self._test_import_local_settings()
|
agpl-3.0
|
awslabs/chalice
|
chalice/app.py
|
1
|
57505
|
"""Chalice app and routing code."""
# pylint: disable=too-many-lines,ungrouped-imports
import re
import sys
import os
import logging
import json
import traceback
import decimal
import base64
import copy
from collections import defaultdict
__version__ = '1.15.1'
_PARAMS = re.compile(r'{\w+}')
# Implementation note: This file is intended to be a standalone file
# that gets copied into the lambda deployment package. It has no dependencies
# on other parts of chalice so it can stay small and lightweight, with minimal
# startup overhead. This also means we need to handle py2/py3 compat issues
# directly in this file instead of copying over compat.py
#
# Try the Python 3 import locations first; fall back to the Python 2
# equivalents on ImportError.
try:
    from urllib.parse import unquote_plus
    from collections.abc import Mapping
    from collections.abc import MutableMapping

    unquote_str = unquote_plus

    # In python 3 string and bytes are different so we explicitly check
    # for both.
    _ANY_STRING = (str, bytes)
except ImportError:
    from urllib import unquote_plus
    from collections import Mapping
    from collections import MutableMapping

    # This is borrowed from botocore/compat.py
    def unquote_str(value, encoding='utf-8'):
        # In python2, unquote() gives us a string back that has the urldecoded
        # bits, but not the unicode parts. We need to decode this manually.
        # unquote has special logic in which if it receives a unicode object it
        # will decode it to latin1. This is hard coded. To avoid this, we'll
        # encode the string with the passed in encoding before trying to
        # unquote it.
        byte_string = value.encode(encoding)
        return unquote_plus(byte_string).decode(encoding)

    # In python 2 there is a base class for the string types that we can check
    # for. It was removed in python 3 so it will cause a name error.
    _ANY_STRING = (basestring, bytes)  # noqa pylint: disable=E0602
def handle_extra_types(obj):
    """``default=`` hook for json.dumps when serializing response bodies."""
    # Lambda will automatically serialize decimals, so mirror that here.
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    # Backwards compatibility: flatten a MultiDict to a plain dict,
    # keeping only the last value for every key as it used to.
    if isinstance(obj, MultiDict):
        return dict(obj)
    type_name = obj.__class__.__name__
    raise TypeError('Object of type %s is not JSON serializable' % type_name)
def error_response(message, error_code, http_status_code, headers=None):
    """Build the serialized API Gateway response dict for an error."""
    return Response(
        body={'Code': error_code, 'Message': message},
        status_code=http_status_code,
        headers=headers,
    ).to_dict()
def _matches_content_type(content_type, valid_content_types):
# If '*/*' is in the Accept header or the valid types,
# then all content_types match. Otherwise see of there are any common types
content_type = content_type.lower()
valid_content_types = [x.lower() for x in valid_content_types]
return '*/*' in content_type or \
'*/*' in valid_content_types or \
_content_type_header_contains(content_type, valid_content_types)
def _content_type_header_contains(content_type_header, valid_content_types):
content_type_header_parts = [
p.strip() for p in
re.split('[,;]', content_type_header)
]
valid_parts = set(valid_content_types).intersection(
content_type_header_parts
)
return len(valid_parts) > 0
class ChaliceError(Exception):
    """Base class for all chalice-specific exceptions."""
    pass
class WebsocketDisconnectedError(ChaliceError):
    """Raised when operating on a websocket connection that has gone away."""
    def __init__(self, connection_id):
        # Id of the connection the API Gateway management API reported
        # as gone (GoneException).
        self.connection_id = connection_id
class ChaliceViewError(ChaliceError):
    """Base class for view errors that map to an HTTP status code."""
    STATUS_CODE = 500

    def __init__(self, msg=''):
        # Prefix the message with the concrete class name,
        # e.g. "BadRequestError: <msg>".
        super(ChaliceViewError, self).__init__(
            self.__class__.__name__ + ': %s' % msg)
# Concrete view errors, one per supported HTTP error status code.
class BadRequestError(ChaliceViewError):
    STATUS_CODE = 400


class UnauthorizedError(ChaliceViewError):
    STATUS_CODE = 401


class ForbiddenError(ChaliceViewError):
    STATUS_CODE = 403


class NotFoundError(ChaliceViewError):
    STATUS_CODE = 404


class MethodNotAllowedError(ChaliceViewError):
    STATUS_CODE = 405


class RequestTimeoutError(ChaliceViewError):
    STATUS_CODE = 408


class ConflictError(ChaliceViewError):
    STATUS_CODE = 409


class UnprocessableEntityError(ChaliceViewError):
    STATUS_CODE = 422


class TooManyRequestsError(ChaliceViewError):
    STATUS_CODE = 429


# The complete set of view error classes.
# NOTE(review): consumers of ALL_ERRORS are outside this file; confirm
# usage before changing membership or order.
ALL_ERRORS = [
    ChaliceViewError,
    BadRequestError,
    NotFoundError,
    UnauthorizedError,
    ForbiddenError,
    MethodNotAllowedError,
    RequestTimeoutError,
    ConflictError,
    UnprocessableEntityError,
    TooManyRequestsError]
class MultiDict(MutableMapping):  # pylint: disable=too-many-ancestors
    """A mapping of key to list of values.

    Plain item access returns the last value stored under a key;
    getlist() returns a copy of every value stored under that key.
    """

    def __init__(self, mapping):
        # Accept None as "empty"; otherwise keep a reference to the
        # provided mapping of key -> list of values.
        self._dict = {} if mapping is None else mapping

    def __getitem__(self, k):
        values = self._dict[k]
        if not values:
            # An empty value list means the key has no usable value.
            raise KeyError(k)
        return values[-1]

    def __setitem__(self, k, v):
        self._dict[k] = [v]

    def __delitem__(self, k):
        self._dict.pop(k)

    def getlist(self, k):
        """Return a copy of all values associated with *k*."""
        return list(self._dict[k])

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __repr__(self):
        return 'MultiDict(%s)' % self._dict

    def __str__(self):
        return repr(self)
class CaseInsensitiveMapping(Mapping):
    """Case insensitive and read-only mapping."""

    def __init__(self, mapping):
        # Normalize every key to lowercase once, at construction time;
        # None is treated as an empty mapping.
        source = mapping or {}
        self._dict = dict(
            (key.lower(), value) for key, value in source.items())

    def __getitem__(self, key):
        return self._dict[key.lower()]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return 'CaseInsensitiveMapping(%s)' % repr(self._dict)
class Authorizer(object):
    """Base interface for route authorizer configuration objects."""
    name = ''
    scopes = []

    def to_swagger(self):
        # Subclasses return the swagger security-definition dict for
        # this authorizer.
        raise NotImplementedError("to_swagger")

    def with_scopes(self, scopes):
        # Subclasses return a copy of themselves configured with the
        # given OAuth scopes.
        raise NotImplementedError("with_scopes")
class IAMAuthorizer(Authorizer):
    """Authorizer requiring requests to be signed with AWS SigV4 (IAM auth)."""

    _AUTH_TYPE = 'aws_iam'

    def __init__(self):
        self.name = 'sigv4'
        self.scopes = []

    def to_swagger(self):
        return {
            'in': 'header',
            'type': 'apiKey',
            'name': 'Authorization',
            'x-amazon-apigateway-authtype': 'awsSigv4',
        }

    def with_scopes(self, scopes):
        # IAM auth has no notion of OAuth scopes.
        raise NotImplementedError("with_scopes")
class CognitoUserPoolAuthorizer(Authorizer):
    """Authorizer backed by one or more Cognito user pools."""

    _AUTH_TYPE = 'cognito_user_pools'

    def __init__(self, name, provider_arns, header='Authorization',
                 scopes=None):
        self.name = name
        self._header = header
        if not isinstance(provider_arns, list):
            # This class is used directly by users so we're
            # adding some validation to help them troubleshoot
            # potential issues.
            raise TypeError(
                "provider_arns should be a list of ARNs, received: %s"
                % provider_arns)
        self._provider_arns = provider_arns
        self.scopes = scopes or []

    def to_swagger(self):
        return {
            'in': 'header',
            'type': 'apiKey',
            'name': self._header,
            'x-amazon-apigateway-authtype': self._AUTH_TYPE,
            'x-amazon-apigateway-authorizer': {
                'type': self._AUTH_TYPE,
                'providerARNs': self._provider_arns,
            }
        }

    def with_scopes(self, scopes):
        # Return a copy so the original authorizer keeps its own scopes.
        authorizer_with_scopes = copy.deepcopy(self)
        authorizer_with_scopes.scopes = scopes
        return authorizer_with_scopes
class CustomAuthorizer(Authorizer):
    """Token-based custom (Lambda) authorizer configuration."""

    _AUTH_TYPE = 'custom'

    def __init__(self, name, authorizer_uri, ttl_seconds=300,
                 header='Authorization', invoke_role_arn=None, scopes=None):
        self.name = name
        self._header = header
        self._authorizer_uri = authorizer_uri
        # How long API Gateway caches the authorizer's decision.
        self._ttl_seconds = ttl_seconds
        self._invoke_role_arn = invoke_role_arn
        self.scopes = scopes or []

    def to_swagger(self):
        swagger = {
            'in': 'header',
            'type': 'apiKey',
            'name': self._header,
            'x-amazon-apigateway-authtype': self._AUTH_TYPE,
            'x-amazon-apigateway-authorizer': {
                'type': 'token',
                'authorizerUri': self._authorizer_uri,
                'authorizerResultTtlInSeconds': self._ttl_seconds,
            }
        }
        # Credentials are optional; only emit the key when provided.
        if self._invoke_role_arn is not None:
            swagger['x-amazon-apigateway-authorizer'][
                'authorizerCredentials'] = self._invoke_role_arn
        return swagger

    def with_scopes(self, scopes):
        # Return a copy so the original authorizer keeps its own scopes.
        authorizer_with_scopes = copy.deepcopy(self)
        authorizer_with_scopes.scopes = scopes
        return authorizer_with_scopes
class CORSConfig(object):
    """A cors configuration to attach to a route."""

    # Headers that must always be allowed for API Gateway requests.
    _REQUIRED_HEADERS = ['Content-Type', 'X-Amz-Date', 'Authorization',
                         'X-Api-Key', 'X-Amz-Security-Token']

    def __init__(self, allow_origin='*', allow_headers=None,
                 expose_headers=None, max_age=None, allow_credentials=None):
        self.allow_origin = allow_origin
        # User-supplied headers are merged on top of the required set.
        self._allow_headers = set(
            (allow_headers or []) + self._REQUIRED_HEADERS)
        self._expose_headers = expose_headers or []
        self._max_age = max_age
        self._allow_credentials = allow_credentials

    @property
    def allow_headers(self):
        """Comma-joined, sorted list of allowed request headers."""
        return ','.join(sorted(self._allow_headers))

    def get_access_control_headers(self):
        """Build the Access-Control-* response headers for this config."""
        headers = {
            'Access-Control-Allow-Origin': self.allow_origin,
            'Access-Control-Allow-Headers': self.allow_headers,
        }
        if self._expose_headers:
            headers['Access-Control-Expose-Headers'] = ','.join(
                self._expose_headers)
        if self._max_age is not None:
            headers['Access-Control-Max-Age'] = str(self._max_age)
        # Only the literal True enables the credentials header.
        if self._allow_credentials is True:
            headers['Access-Control-Allow-Credentials'] = 'true'
        return headers

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.get_access_control_headers() ==
                other.get_access_control_headers())
class Request(object):
    """The current request from API gateway."""

    def __init__(self, query_params, headers, uri_params, method, body,
                 context, stage_vars, is_base64_encoded):
        # Preserve the distinction between "no query string" (None) and
        # an empty/populated one.
        self.query_params = None if query_params is None \
            else MultiDict(query_params)
        self.headers = CaseInsensitiveMapping(headers)
        self.uri_params = uri_params
        self.method = method
        self._is_base64_encoded = is_base64_encoded
        self._body = body
        #: The parsed JSON from the body. This value should
        #: only be set if the Content-Type header is application/json,
        #: which is the default content type value in chalice.
        self._json_body = None
        self._raw_body = b''
        self.context = context
        self.stage_vars = stage_vars

    def _base64decode(self, encoded):
        # b64decode needs bytes; API Gateway may hand us a str.
        if not isinstance(encoded, bytes):
            encoded = encoded.encode('ascii')
        output = base64.b64decode(encoded)
        return output

    @property
    def raw_body(self):
        """Request body as bytes (base64-decoded when flagged). Cached."""
        if not self._raw_body and self._body is not None:
            if self._is_base64_encoded:
                self._raw_body = self._base64decode(self._body)
            elif not isinstance(self._body, bytes):
                self._raw_body = self._body.encode('utf-8')
            else:
                self._raw_body = self._body
        return self._raw_body

    @property
    def json_body(self):
        """Parsed JSON body, or None (implicit) for non-JSON content types.

        Raises BadRequestError when the body is not valid JSON.
        """
        if self.headers.get('content-type', '').startswith('application/json'):
            if self._json_body is None:
                try:
                    self._json_body = json.loads(self.raw_body)
                except ValueError:
                    raise BadRequestError('Error Parsing JSON')
            return self._json_body

    def to_dict(self):
        # Don't copy internal attributes.
        copied = {k: v for k, v in self.__dict__.items()
                  if not k.startswith('_')}
        # We want the output of `to_dict()` to be
        # JSON serializable, so we need to remove the CaseInsensitive dict.
        copied['headers'] = dict(copied['headers'])
        if copied['query_params'] is not None:
            copied['query_params'] = dict(copied['query_params'])
        return copied
class Response(object):
    """HTTP response returned from a view function."""

    def __init__(self, body, headers=None, status_code=200):
        self.body = body
        if headers is None:
            headers = {}
        self.headers = headers
        self.status_code = status_code

    def to_dict(self, binary_types=None):
        """Serialize to the dict shape API Gateway expects.

        Non-string bodies are JSON-encoded; when *binary_types* is given
        and the content type matches, the body is base64-encoded.
        """
        body = self.body
        if not isinstance(body, _ANY_STRING):
            body = json.dumps(body, separators=(',', ':'),
                              default=handle_extra_types)
        single_headers, multi_headers = self._sort_headers(self.headers)
        response = {
            'headers': single_headers,
            'multiValueHeaders': multi_headers,
            'statusCode': self.status_code,
            'body': body
        }
        if binary_types is not None:
            self._b64encode_body_if_needed(response, binary_types)
        return response

    def _sort_headers(self, all_headers):
        # API Gateway takes list-valued headers in a separate
        # 'multiValueHeaders' map from scalar 'headers'.
        multi_headers = {}
        single_headers = {}
        for name, value in all_headers.items():
            if isinstance(value, list):
                multi_headers[name] = value
            else:
                single_headers[name] = value
        return single_headers, multi_headers

    def _b64encode_body_if_needed(self, response_dict, binary_types):
        response_headers = CaseInsensitiveMapping(response_dict['headers'])
        content_type = response_headers.get('content-type', '')
        body = response_dict['body']
        if _matches_content_type(content_type, binary_types):
            if _matches_content_type(content_type, ['application/json']):
                # There's a special case when a user configures
                # ``application/json`` as a binary type. The default
                # json serialization results in a string type, but for binary
                # content types we need a type bytes(). So we need to special
                # case this scenario and encode the JSON body to bytes().
                body = body if isinstance(body, bytes) \
                    else body.encode('utf-8')
            body = self._base64encode(body)
            response_dict['isBase64Encoded'] = True
        response_dict['body'] = body

    def _base64encode(self, data):
        if not isinstance(data, bytes):
            raise ValueError('Expected bytes type for body with binary '
                             'Content-Type. Got %s type body instead.'
                             % type(data))
        data = base64.b64encode(data)
        return data.decode('ascii')
class RouteEntry(object):
    """Configuration for a single (path, HTTP method) view function."""

    def __init__(self, view_function, view_name, path, method,
                 api_key_required=None, content_types=None,
                 cors=False, authorizer=None):
        self.view_function = view_function
        self.view_name = view_name
        self.uri_pattern = path
        self.method = method
        self.api_key_required = api_key_required
        #: A list of names to extract from path:
        #: e.g, '/foo/{bar}/{baz}/qux -> ['bar', 'baz']
        self.view_args = self._parse_view_args()
        self.content_types = content_types
        # cors is passed as either a boolean or a CORSConfig object. If it is a
        # boolean it needs to be replaced with a real CORSConfig object to
        # pass the typechecker. None in this context will not inject any cors
        # headers, otherwise the CORSConfig object will determine which
        # headers are injected.
        if cors is True:
            cors = CORSConfig()
        elif cors is False:
            cors = None
        self.cors = cors
        self.authorizer = authorizer

    def _parse_view_args(self):
        if '{' not in self.uri_pattern:
            return []
        # The [1:-1] slice is to remove the braces
        # e.g {foobar} -> foobar
        results = [r[1:-1] for r in _PARAMS.findall(self.uri_pattern)]
        return results

    def __eq__(self, other):
        # Structural equality over every attribute (including the
        # view function object itself).
        return self.__dict__ == other.__dict__
class APIGateway(object):
    """Rest-api level configuration: binary media types and global CORS."""

    _DEFAULT_BINARY_TYPES = [
        'application/octet-stream', 'application/x-tar', 'application/zip',
        'audio/basic', 'audio/ogg', 'audio/mp4', 'audio/mpeg', 'audio/wav',
        'audio/webm', 'image/png', 'image/jpg', 'image/jpeg', 'image/gif',
        'video/ogg', 'video/mpeg', 'video/webm',
    ]

    def __init__(self):
        # Each instance starts with its own copy of the defaults, so
        # mutating binary_types never affects the class-level list.
        self.binary_types = self.default_binary_types
        self.cors = False

    @property
    def default_binary_types(self):
        """A fresh copy of the default binary media types."""
        return self._DEFAULT_BINARY_TYPES[:]
class WebsocketAPI(object):
    """Thin wrapper over the API Gateway management API for websockets."""

    _WEBSOCKET_ENDPOINT_TEMPLATE = 'https://{domain_name}/{stage}'

    def __init__(self):
        # Must be assigned a boto3 session by the user before use.
        self.session = None
        self._endpoint = None
        self._client = None

    def configure(self, domain_name, stage):
        """Set the management endpoint; a no-op once already configured."""
        if self._endpoint is not None:
            return
        self._endpoint = self._WEBSOCKET_ENDPOINT_TEMPLATE.format(
            domain_name=domain_name,
            stage=stage,
        )

    def _get_client(self):
        # Validate configuration and lazily build the boto3 client.
        if self.session is None:
            raise ValueError(
                'Assign app.websocket_api.session to a boto3 session before '
                'using the WebsocketAPI'
            )
        if self._endpoint is None:
            raise ValueError(
                'WebsocketAPI.configure must be called before using the '
                'WebsocketAPI'
            )
        if self._client is None:
            self._client = self.session.client(
                'apigatewaymanagementapi',
                endpoint_url=self._endpoint,
            )
        return self._client

    def send(self, connection_id, message):
        """Send *message* to a connection; raise if it has disconnected."""
        client = self._get_client()
        try:
            client.post_to_connection(
                ConnectionId=connection_id,
                Data=message,
            )
        except client.exceptions.GoneException:
            raise WebsocketDisconnectedError(connection_id)

    def close(self, connection_id):
        """Close a connection; raise if it has already disconnected."""
        client = self._get_client()
        try:
            client.delete_connection(
                ConnectionId=connection_id,
            )
        except client.exceptions.GoneException:
            raise WebsocketDisconnectedError(connection_id)

    def info(self, connection_id):
        """Return connection metadata; raise if it has disconnected."""
        client = self._get_client()
        try:
            return client.get_connection(
                ConnectionId=connection_id,
            )
        except client.exceptions.GoneException:
            raise WebsocketDisconnectedError(connection_id)
class DecoratorAPI(object):
    """Mixin providing the user-facing decorators (route, schedule, ...).

    Every decorator funnels through _create_registration_function, which
    wraps the user handler (via _wrap_handler) and hands it to
    _register_handler, implemented by the concrete subclass.
    """

    def authorizer(self, ttl_seconds=None, execution_role=None, name=None):
        return self._create_registration_function(
            handler_type='authorizer',
            name=name,
            registration_kwargs={
                'ttl_seconds': ttl_seconds, 'execution_role': execution_role,
            }
        )

    def on_s3_event(self, bucket, events=None,
                    prefix=None, suffix=None, name=None):
        return self._create_registration_function(
            handler_type='on_s3_event',
            name=name,
            registration_kwargs={
                'bucket': bucket, 'events': events,
                'prefix': prefix, 'suffix': suffix,
            }
        )

    def on_sns_message(self, topic, name=None):
        return self._create_registration_function(
            handler_type='on_sns_message',
            name=name,
            registration_kwargs={'topic': topic}
        )

    def on_sqs_message(self, queue, batch_size=1, name=None):
        return self._create_registration_function(
            handler_type='on_sqs_message',
            name=name,
            registration_kwargs={'queue': queue, 'batch_size': batch_size}
        )

    def on_cw_event(self, event_pattern, name=None):
        return self._create_registration_function(
            handler_type='on_cw_event',
            name=name,
            registration_kwargs={'event_pattern': event_pattern}
        )

    def schedule(self, expression, name=None, description=''):
        return self._create_registration_function(
            handler_type='schedule',
            name=name,
            registration_kwargs={'expression': expression,
                                 'description': description},
        )

    def route(self, path, **kwargs):
        return self._create_registration_function(
            handler_type='route',
            name=kwargs.pop('name', None),
            # This looks a little weird taking kwargs as a key,
            # but we want to preserve keep the **kwargs signature
            # in the route decorator.
            registration_kwargs={'path': path, 'kwargs': kwargs},
        )

    def lambda_function(self, name=None):
        return self._create_registration_function(
            handler_type='lambda_function', name=name)

    def on_ws_connect(self, name=None):
        return self._create_registration_function(
            handler_type='on_ws_connect',
            name=name,
            registration_kwargs={'route_key': '$connect'},
        )

    def on_ws_disconnect(self, name=None):
        return self._create_registration_function(
            handler_type='on_ws_disconnect',
            name=name,
            registration_kwargs={'route_key': '$disconnect'},
        )

    def on_ws_message(self, name=None):
        return self._create_registration_function(
            handler_type='on_ws_message',
            name=name,
            registration_kwargs={'route_key': '$default'},
        )

    def _create_registration_function(self, handler_type, name=None,
                                      registration_kwargs=None):
        # Returns the actual decorator: it wraps the user handler and
        # registers it, then returns the wrapped handler in place of the
        # user's function.
        def _register_handler(user_handler):
            handler_name = name
            if handler_name is None:
                handler_name = user_handler.__name__
            if registration_kwargs is not None:
                kwargs = registration_kwargs
            else:
                kwargs = {}
            wrapped = self._wrap_handler(handler_type, handler_name,
                                         user_handler)
            self._register_handler(handler_type, handler_name,
                                   user_handler, wrapped, kwargs)
            return wrapped
        return _register_handler

    def _wrap_handler(self, handler_type, handler_name, user_handler):
        # Map each handler type to the event class its handler receives.
        event_classes = {
            'on_s3_event': S3Event,
            'on_sns_message': SNSEvent,
            'on_sqs_message': SQSEvent,
            'on_cw_event': CloudWatchEvent,
            'schedule': CloudWatchEvent,
        }
        if handler_type in event_classes:
            return EventSourceHandler(
                user_handler, event_classes[handler_type])
        websocket_event_classes = [
            'on_ws_connect',
            'on_ws_message',
            'on_ws_disconnect',
        ]
        if handler_type in websocket_event_classes:
            return WebsocketEventSourceHandler(
                user_handler, WebsocketEvent,
                self.websocket_api  # pylint: disable=no-member
            )
        if handler_type == 'authorizer':
            # Authorizer is special cased and doesn't quite fit the
            # EventSourceHandler pattern.
            return ChaliceAuthorizer(handler_name, user_handler)
        # Plain routes and lambda functions are invoked unwrapped.
        return user_handler

    def _register_handler(self, handler_type, name,
                          user_handler, wrapped_handler, kwargs, options=None):
        # Implemented by the concrete subclass (e.g. Chalice).
        raise NotImplementedError("_register_handler")
class _HandlerRegistration(object):
    """Bookkeeping for every handler registered through the decorators."""

    def __init__(self):
        # path -> {HTTP method -> RouteEntry}
        self.routes = defaultdict(dict)
        # websocket route key ('$connect', ...) -> handler config
        self.websocket_handlers = {}
        self.builtin_auth_handlers = []
        self.event_sources = []
        self.pure_lambda_functions = []
        self.api = APIGateway()

    def _do_register_handler(self, handler_type, name, user_handler,
                             wrapped_handler, kwargs, options=None):
        # Dispatches to the matching _register_<handler_type> method.
        # `options` is provided when registering through a blueprint.
        url_prefix = None
        name_prefix = None
        module_name = 'app'
        if options is not None:
            name_prefix = options.get('name_prefix')
            if name_prefix is not None:
                name = name_prefix + name
            url_prefix = options.get('url_prefix')
            if url_prefix is not None:
                # Move url_prefix into kwargs so only the
                # route() handler gets a url_prefix kwarg.
                kwargs['url_prefix'] = url_prefix
            # module_name is always provided if options is not None.
            module_name = options['module_name']
        handler_string = '%s.%s' % (module_name, user_handler.__name__)
        getattr(self, '_register_%s' % handler_type)(
            name=name,
            user_handler=user_handler,
            handler_string=handler_string,
            wrapped_handler=wrapped_handler,
            kwargs=kwargs,
        )

    def _attach_websocket_handler(self, handler):
        # Only one handler per websocket route key is allowed.
        route_key = handler.route_key_handled
        decorator_name = {
            '$default': 'on_ws_message',
            '$connect': 'on_ws_connect',
            '$disconnect': 'on_ws_disconnect',
        }.get(route_key)
        if route_key in self.websocket_handlers:
            raise ValueError(
                "Duplicate websocket handler: '%s'. There can only be one "
                "handler for each websocket decorator." % decorator_name
            )
        self.websocket_handlers[route_key] = handler

    def _register_on_ws_connect(self, name, user_handler, handler_string,
                                kwargs, **unused):
        wrapper = WebsocketConnectConfig(
            name=name,
            handler_string=handler_string,
            user_handler=user_handler,
        )
        self._attach_websocket_handler(wrapper)

    def _register_on_ws_message(self, name, user_handler, handler_string,
                                kwargs, **unused):
        route_key = kwargs['route_key']
        wrapper = WebsocketMessageConfig(
            name=name,
            route_key_handled=route_key,
            handler_string=handler_string,
            user_handler=user_handler,
        )
        self._attach_websocket_handler(wrapper)
        # NOTE(review): this assignment is redundant —
        # _attach_websocket_handler already stored the wrapper under
        # route_key_handled, which equals route_key here.
        self.websocket_handlers[route_key] = wrapper

    def _register_on_ws_disconnect(self, name, user_handler,
                                   handler_string, kwargs, **unused):
        wrapper = WebsocketDisconnectConfig(
            name=name,
            handler_string=handler_string,
            user_handler=user_handler,
        )
        self._attach_websocket_handler(wrapper)

    def _register_lambda_function(self, name, user_handler,
                                  handler_string, **unused):
        wrapper = LambdaFunction(
            user_handler, name=name,
            handler_string=handler_string,
        )
        self.pure_lambda_functions.append(wrapper)

    def _register_on_s3_event(self, name, handler_string, kwargs, **unused):
        events = kwargs['events']
        if events is None:
            # Default to every object-created notification.
            events = ['s3:ObjectCreated:*']
        s3_event = S3EventConfig(
            name=name,
            bucket=kwargs['bucket'],
            events=events,
            prefix=kwargs['prefix'],
            suffix=kwargs['suffix'],
            handler_string=handler_string,
        )
        self.event_sources.append(s3_event)

    def _register_on_sns_message(self, name, handler_string, kwargs, **unused):
        sns_config = SNSEventConfig(
            name=name,
            handler_string=handler_string,
            topic=kwargs['topic'],
        )
        self.event_sources.append(sns_config)

    def _register_on_sqs_message(self, name, handler_string, kwargs, **unused):
        sqs_config = SQSEventConfig(
            name=name,
            handler_string=handler_string,
            queue=kwargs['queue'],
            batch_size=kwargs['batch_size'],
        )
        self.event_sources.append(sqs_config)

    def _register_on_cw_event(self, name, handler_string, kwargs, **unused):
        event_source = CloudWatchEventConfig(
            name=name,
            event_pattern=kwargs['event_pattern'],
            handler_string=handler_string
        )
        self.event_sources.append(event_source)

    def _register_schedule(self, name, handler_string, kwargs, **unused):
        event_source = ScheduledEventConfig(
            name=name,
            schedule_expression=kwargs['expression'],
            description=kwargs["description"],
            handler_string=handler_string,
        )
        self.event_sources.append(event_source)

    def _register_authorizer(self, name, handler_string, wrapped_handler,
                             kwargs, **unused):
        actual_kwargs = kwargs.copy()
        ttl_seconds = actual_kwargs.pop('ttl_seconds', None)
        execution_role = actual_kwargs.pop('execution_role', None)
        # Anything left over is an unsupported keyword argument.
        if actual_kwargs:
            raise TypeError(
                'TypeError: authorizer() got unexpected keyword '
                'arguments: %s' % ', '.join(list(actual_kwargs)))
        auth_config = BuiltinAuthConfig(
            name=name,
            handler_string=handler_string,
            ttl_seconds=ttl_seconds,
            execution_role=execution_role,
        )
        wrapped_handler.config = auth_config
        self.builtin_auth_handlers.append(auth_config)

    def _register_route(self, name, user_handler, kwargs, **unused):
        actual_kwargs = kwargs['kwargs']
        path = kwargs['path']
        # url_prefix is injected by _do_register_handler for blueprints.
        url_prefix = kwargs.pop('url_prefix', None)
        if url_prefix is not None:
            path = '/'.join([url_prefix.rstrip('/'),
                             path.strip('/')]).rstrip('/')
        methods = actual_kwargs.pop('methods', ['GET'])
        route_kwargs = {
            'authorizer': actual_kwargs.pop('authorizer', None),
            'api_key_required': actual_kwargs.pop('api_key_required', None),
            'content_types': actual_kwargs.pop('content_types',
                                               ['application/json']),
            'cors': actual_kwargs.pop('cors', self.api.cors),
        }
        if route_kwargs['cors'] is None:
            # An explicit cors=None falls back to the app-wide setting.
            route_kwargs['cors'] = self.api.cors
        if not isinstance(route_kwargs['content_types'], list):
            raise ValueError(
                'In view function "%s", the content_types '
                'value must be a list, not %s: %s' % (
                    name, type(route_kwargs['content_types']),
                    route_kwargs['content_types']))
        if actual_kwargs:
            raise TypeError('TypeError: route() got unexpected keyword '
                            'arguments: %s' % ', '.join(list(actual_kwargs)))
        for method in methods:
            if method in self.routes[path]:
                raise ValueError(
                    "Duplicate method: '%s' detected for route: '%s'\n"
                    "between view functions: \"%s\" and \"%s\". A specific "
                    "method may only be specified once for "
                    "a particular path." % (
                        method, path, self.routes[path][method].view_name,
                        name)
                )
            entry = RouteEntry(user_handler, name, path, method,
                               **route_kwargs)
            self.routes[path][method] = entry
class Chalice(_HandlerRegistration, DecoratorAPI):
FORMAT_STRING = '%(name)s - %(levelname)s - %(message)s'
def __init__(self, app_name, debug=False, configure_logs=True, env=None):
    """Create the application object.

    :param app_name: name used for the logger and deployed resources.
    :param debug: enables DEBUG-level logging (see the debug property).
    :param configure_logs: when True, attach a stdout handler to the log.
    :param env: environment mapping; defaults to os.environ.
    """
    super(Chalice, self).__init__()
    self.app_name = app_name
    self.websocket_api = WebsocketAPI()
    # Populated per-invocation by __call__.
    self.current_request = None
    self.lambda_context = None
    self._debug = debug
    self.configure_logs = configure_logs
    self.log = logging.getLogger(self.app_name)
    if env is None:
        env = os.environ
    self._initialize(env)
    self.experimental_feature_flags = set()
    # This is marked as internal but is intended to be used by
    # any code within Chalice.
    self._features_used = set()
def _initialize(self, env):
    # Configure logging (if enabled) and stamp the chalice version into
    # AWS_EXECUTION_ENV. Note this mutates the passed-in mapping
    # (os.environ by default) as a side effect.
    if self.configure_logs:
        self._configure_logging()
    env['AWS_EXECUTION_ENV'] = '%s aws-chalice/%s' % (
        env.get('AWS_EXECUTION_ENV', 'AWS_Lambda'),
        __version__,
    )
@property
def debug(self):
    """Whether debug logging is enabled."""
    return self._debug

@debug.setter
def debug(self, value):
    # Changing the flag immediately re-syncs the logger level.
    self._debug = value
    self._configure_log_level()
def _configure_logging(self):
    # Idempotent: skip if a stdout handler is already attached.
    if self._already_configured(self.log):
        return
    handler = logging.StreamHandler(sys.stdout)
    # Timestamp is handled by lambda itself so the
    # default FORMAT_STRING doesn't need to include it.
    formatter = logging.Formatter(self.FORMAT_STRING)
    handler.setFormatter(formatter)
    # Don't double-log through the root logger.
    self.log.propagate = False
    self._configure_log_level()
    self.log.addHandler(handler)
def _already_configured(self, log):
    """Return True if *log* already has a StreamHandler writing to stdout."""
    return any(
        isinstance(existing, logging.StreamHandler) and
        existing.stream == sys.stdout
        for existing in log.handlers
    )
def _configure_log_level(self):
    """Sync the logger's level with the current debug flag."""
    self.log.setLevel(logging.DEBUG if self._debug else logging.ERROR)
def register_blueprint(self, blueprint, name_prefix=None, url_prefix=None):
    """Attach a blueprint's handlers to this app.

    name_prefix is prepended to handler names; url_prefix is prepended
    to route paths.
    """
    blueprint.register(self, options={'name_prefix': name_prefix,
                                      'url_prefix': url_prefix})
def _register_handler(self, handler_type, name, user_handler,
                      wrapped_handler, kwargs, options=None):
    # DecoratorAPI hook: delegate to the shared registration logic.
    self._do_register_handler(handler_type, name, user_handler,
                              wrapped_handler, kwargs, options)
def _register_on_ws_connect(self, name, user_handler, handler_string,
                            kwargs, **unused):
    # Track websocket usage before delegating to the base registration.
    self._features_used.add('WEBSOCKETS')
    super(Chalice, self)._register_on_ws_connect(
        name, user_handler, handler_string, kwargs, **unused)
def _register_on_ws_message(self, name, user_handler, handler_string,
                            kwargs, **unused):
    # Track websocket usage before delegating to the base registration.
    self._features_used.add('WEBSOCKETS')
    super(Chalice, self)._register_on_ws_message(
        name, user_handler, handler_string, kwargs, **unused)
def _register_on_ws_disconnect(self, name, user_handler,
                               handler_string, kwargs, **unused):
    # Track websocket usage before delegating to the base registration.
    self._features_used.add('WEBSOCKETS')
    super(Chalice, self)._register_on_ws_disconnect(
        name, user_handler, handler_string, kwargs, **unused)
    def __call__(self, event, context):
        """Lambda entry point for REST requests proxied by API Gateway.

        Resolves the route, builds ``self.current_request``, runs
        content-type and binary-response validation, invokes the view,
        and returns the serialized response dict.
        """
        # This is what's invoked via lambda.
        # Sometimes the event can be something that's not
        # what we specified in our request_template mapping.
        # When that happens, we want to give a better error message here.
        resource_path = event.get('requestContext', {}).get('resourcePath')
        if resource_path is None:
            return error_response(error_code='InternalServerError',
                                  message='Unknown request.',
                                  http_status_code=500)
        http_method = event['requestContext']['httpMethod']
        if resource_path not in self.routes:
            raise ChaliceError("No view function for: %s" % resource_path)
        if http_method not in self.routes[resource_path]:
            # Path exists but not for this verb -> 405.
            return error_response(
                error_code='MethodNotAllowedError',
                message='Unsupported method: %s' % http_method,
                http_status_code=405)
        route_entry = self.routes[resource_path][http_method]
        view_function = route_entry.view_function
        # Only the path parameters the view declared are passed through.
        function_args = {name: event['pathParameters'][name]
                         for name in route_entry.view_args}
        self.lambda_context = context
        self.current_request = Request(
            event['multiValueQueryStringParameters'],
            event['headers'],
            event['pathParameters'],
            event['requestContext']['httpMethod'],
            event['body'],
            event['requestContext'],
            event['stageVariables'],
            event.get('isBase64Encoded', False)
        )
        # We're getting the CORS headers before validation so that error
        # responses below can also carry the desired CORS headers.
        cors_headers = None
        if self._cors_enabled_for_route(route_entry):
            cors_headers = self._get_cors_headers(route_entry.cors)
        # We're doing the header validation after creating the request
        # so can leverage the case insensitive dict that the Request class
        # uses for headers.
        if route_entry.content_types:
            content_type = self.current_request.headers.get(
                'content-type', 'application/json')
            if not _matches_content_type(content_type,
                                         route_entry.content_types):
                return error_response(
                    error_code='UnsupportedMediaType',
                    message='Unsupported media type: %s' % content_type,
                    http_status_code=415,
                    headers=cors_headers
                )
        response = self._get_view_function_response(view_function,
                                                    function_args)
        if cors_headers is not None:
            self._add_cors_headers(response, cors_headers)
        response_headers = CaseInsensitiveMapping(response.headers)
        # A binary Content-Type requires a matching Accept header,
        # otherwise API Gateway can't deliver the payload.
        if not self._validate_binary_response(
                self.current_request.headers, response_headers):
            content_type = response_headers.get('content-type', '')
            return error_response(
                error_code='BadRequest',
                message=('Request did not specify an Accept header with %s, '
                         'The response has a Content-Type of %s. If a '
                         'response has a binary Content-Type then the request '
                         'must specify an Accept header that matches.'
                         % (content_type, content_type)),
                http_status_code=400,
                headers=cors_headers
            )
        response = response.to_dict(self.api.binary_types)
        return response
def _validate_binary_response(self, request_headers, response_headers):
# Validates that a response is valid given the request. If the response
# content-type specifies a binary type, there must be an accept header
# that is a binary type as well.
request_accept_header = request_headers.get('accept')
response_content_type = response_headers.get(
'content-type', 'application/json')
response_is_binary = _matches_content_type(response_content_type,
self.api.binary_types)
expects_binary_response = False
if request_accept_header is not None:
expects_binary_response = _matches_content_type(
request_accept_header, self.api.binary_types)
if response_is_binary and not expects_binary_response:
return False
return True
    def _get_view_function_response(self, view_function, function_args):
        """Invoke the view and map exceptions to HTTP responses.

        ChaliceViewError subclasses become responses with their declared
        STATUS_CODE; any other exception becomes a 500 (with the full
        traceback in the body when debug mode is on).
        """
        try:
            response = view_function(**function_args)
            if not isinstance(response, Response):
                # Views may return plain bodies; wrap them.
                response = Response(body=response)
            self._validate_response(response)
        except ChaliceViewError as e:
            # Any chalice view error should propagate.  These
            # get mapped to various HTTP status codes in API Gateway.
            response = Response(body={'Code': e.__class__.__name__,
                                      'Message': str(e)},
                                status_code=e.STATUS_CODE)
        except Exception:
            headers = {}
            self.log.error("Caught exception for %s", view_function,
                           exc_info=True)
            if self.debug:
                # In debug mode, return the formatted traceback in the
                # body (as text/plain) so the user can see what went wrong.
                stack_trace = ''.join(traceback.format_exc())
                body = stack_trace
                headers['Content-Type'] = 'text/plain'
            else:
                body = {'Code': 'InternalServerError',
                        'Message': 'An internal server error occurred.'}
            response = Response(body=body, headers=headers, status_code=500)
        return response
def _validate_response(self, response):
for header, value in response.headers.items():
if '\n' in value:
raise ChaliceError("Bad value for header '%s': %r" %
(header, value))
def _cors_enabled_for_route(self, route_entry):
return route_entry.cors is not None
    def _get_cors_headers(self, cors):
        # Header construction is delegated to the CORS config object.
        return cors.get_access_control_headers()
def _add_cors_headers(self, response, cors_headers):
for name, value in cors_headers.items():
if name not in response.headers:
response.headers[name] = value
class BuiltinAuthConfig(object):
    """Deploy-time description of a builtin (custom) authorizer."""
    def __init__(self, name, handler_string, ttl_seconds=None,
                 execution_role=None):
        # We'd also support all the misc config options you can set.
        self.name = name
        self.handler_string = handler_string
        # ttl_seconds: how long API Gateway may cache the auth result.
        self.ttl_seconds = ttl_seconds
        self.execution_role = execution_role
# ChaliceAuthorizer is unique in that the runtime component (the thing
# that wraps the decorated function) also needs a reference to the config
# object (the object the describes how to create the resource). In
# most event sources these are separate and don't need to know about
# each other, but ChaliceAuthorizer does. This is because the way
# you associate a builtin authorizer with a view function is by passing
# a direct reference:
#
# @app.authorizer(...)
# def my_auth_function(...): pass
#
# @app.route('/', auth=my_auth_function)
#
# The 'route' part needs to know about the auth function for two reasons:
#
# 1. We use ``view.authorizer`` to figure out how to deploy the app
# 2. We need a reference to the runtime handler for the auth in order
# to support local mode testing.
# I *think* we can refactor things to handle both of those issues but
# we would need more research to know for sure. For now, this is a
# special cased runtime class that knows about its config.
class ChaliceAuthorizer(object):
    """Runtime wrapper around a user-written builtin authorizer function."""
    def __init__(self, name, func, scopes=None):
        self.name = name
        self.func = func
        self.scopes = scopes or []
        # Populated later during @app.authorizer() processing with the
        # corresponding BuiltinAuthConfig.
        self.config = None
    def __call__(self, event, context):
        request = self._transform_event(event)
        outcome = self.func(request)
        if not isinstance(outcome, AuthResponse):
            # The handler returned a raw policy document already.
            return outcome
        return outcome.to_dict(request)
    def _transform_event(self, event):
        """Convert the raw lambda authorizer event into an AuthRequest."""
        return AuthRequest(
            event['type'], event['authorizationToken'], event['methodArn'])
    def with_scopes(self, scopes):
        """Return a copy of this authorizer bound to *scopes*."""
        duplicate = copy.deepcopy(self)
        duplicate.scopes = scopes
        return duplicate
class AuthRequest(object):
    """Event object handed to builtin authorizer functions."""
    def __init__(self, auth_type, token, method_arn):
        self.auth_type = auth_type
        # token: the raw value of the incoming authorization token.
        self.token = token
        # method_arn: ARN of the API Gateway method being authorized.
        self.method_arn = method_arn
class AuthResponse(object):
    """Helper for building an IAM policy response from an authorizer.

    ``routes`` may contain '*' (allow everything), plain path strings
    (allow all methods on that path), or AuthRoute objects (specific
    methods on a path).
    """
    ALL_HTTP_METHODS = ['DELETE', 'HEAD', 'OPTIONS',
                        'PATCH', 'POST', 'PUT', 'GET']
    def __init__(self, routes, principal_id, context=None):
        self.routes = routes
        self.principal_id = principal_id
        # The request is used to generate full qualified ARNs
        # that we need for the resource portion of the returned
        # policy.
        if context is None:
            context = {}
        self.context = context
    def to_dict(self, request):
        """Serialize to the dict shape API Gateway expects back."""
        return {
            'context': self.context,
            'principalId': self.principal_id,
            'policyDocument': self._generate_policy(request),
        }
    def _generate_policy(self, request):
        # Single Allow statement covering every permitted resource ARN.
        allowed_resources = self._generate_allowed_resources(request)
        return {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Action': 'execute-api:Invoke',
                    'Effect': 'Allow',
                    'Resource': allowed_resources,
                }
            ]
        }
    def _generate_allowed_resources(self, request):
        """Expand self.routes into a flat list of resource ARNs."""
        allowed_resources = []
        for route in self.routes:
            if isinstance(route, AuthRoute):
                methods = route.methods
                path = route.path
            elif route == '*':
                # A string route of '*' means that all paths and
                # all HTTP methods are now allowed.
                methods = ['*']
                path = '*'
            else:
                # If 'route' is just a string, then they've
                # opted not to use the AuthRoute(), so we'll
                # generate a policy that allows all HTTP methods.
                methods = ['*']
                path = route
            for method in methods:
                allowed_resources.append(
                    self._generate_arn(path, request, method))
        return allowed_resources
    def _generate_arn(self, route, request, method='*'):
        """Build a full execute-api ARN for *route* from the request ARN."""
        incoming_arn = request.method_arn
        parts = incoming_arn.rsplit(':', 1)
        # "arn:aws:execute-api:us-west-2:123:rest-api-id/dev/GET/needs/auth"
        # Then we pull out the rest-api-id and stage, such that:
        #   base = ['rest-api-id', 'stage']
        base = parts[-1].split('/')[:2]
        # Now we add in the path components and rejoin everything
        # back together to make a full arn.
        # We're also assuming all HTTP methods (via '*') for now.
        # To support per HTTP method routes the API will need to be updated.
        # We also need to strip off the leading ``/`` so it can be
        # '/'.join(...)'d properly.
        base.extend([method, route[1:]])
        last_arn_segment = '/'.join(base)
        if route in ['/', '*']:
            # We have to special case the '/' case.  For whatever
            # reason, API gateway adds an extra '/' to the method_arn
            # of the auth request, so we need to do the same thing.
            # We also have to handle the '*' case which is for wildcards
            last_arn_segment += route
        final_arn = '%s:%s' % (parts[0], last_arn_segment)
        return final_arn
class AuthRoute(object):
    """A (path, methods) pair for fine-grained authorizer policies."""
    def __init__(self, path, methods):
        self.path = path
        self.methods = methods
class LambdaFunction(object):
    """Runtime wrapper for a pure lambda function handler.

    Passes the raw (event, context) invocation pair straight through to
    the user function.
    """
    def __init__(self, func, name, handler_string):
        self.handler_string = handler_string
        self.name = name
        self.func = func
    def __call__(self, event, context):
        handler = self.func
        return handler(event, context)
class BaseEventSourceConfig(object):
    """Common deploy-time config for all lambda event sources."""
    def __init__(self, name, handler_string):
        self.name = name
        # handler_string: 'module.function' path used by the deployer.
        self.handler_string = handler_string
class ScheduledEventConfig(BaseEventSourceConfig):
    """Config for a CloudWatch Events scheduled (rate/cron) handler."""
    def __init__(self, name, handler_string, schedule_expression, description):
        super(ScheduledEventConfig, self).__init__(name, handler_string)
        self.schedule_expression = schedule_expression
        self.description = description
class CloudWatchEventConfig(BaseEventSourceConfig):
    """Config for a CloudWatch Events pattern-matched handler."""
    def __init__(self, name, handler_string, event_pattern):
        super(CloudWatchEventConfig, self).__init__(name, handler_string)
        self.event_pattern = event_pattern
class ScheduleExpression(object):
    """Abstract base for schedule expressions (see Rate and Cron)."""
    def to_string(self):
        raise NotImplementedError("to_string")
class Rate(ScheduleExpression):
    """A ``rate(...)`` schedule expression for CloudWatch Events."""
    MINUTES = 'MINUTES'
    HOURS = 'HOURS'
    DAYS = 'DAYS'
    def __init__(self, value, unit):
        self.value = value
        self.unit = unit
    def to_string(self):
        unit_name = self.unit.lower()
        if self.value == 1:
            # The cloudwatch events API requires the singular form
            # ('minute', not 'minutes') when the value is 1.
            unit_name = unit_name[:-1]
        return 'rate({0} {1})'.format(self.value, unit_name)
class Cron(ScheduleExpression):
    """A ``cron(...)`` schedule expression for CloudWatch Events."""
    def __init__(self, minutes, hours, day_of_month, month, day_of_week, year):
        self.minutes = minutes
        self.hours = hours
        self.day_of_month = day_of_month
        self.month = month
        self.day_of_week = day_of_week
        self.year = year
    def to_string(self):
        fields = (self.minutes, self.hours, self.day_of_month,
                  self.month, self.day_of_week, self.year)
        return 'cron(%s)' % ' '.join('%s' % field for field in fields)
class S3EventConfig(BaseEventSourceConfig):
    """Config for an S3 bucket-notification handler."""
    def __init__(self, name, bucket, events, prefix, suffix, handler_string):
        super(S3EventConfig, self).__init__(name, handler_string)
        self.bucket = bucket
        # events: list of S3 event names to subscribe to.
        self.events = events
        # prefix/suffix: optional object-key filters.
        self.prefix = prefix
        self.suffix = suffix
class SNSEventConfig(BaseEventSourceConfig):
    """Config for an SNS topic subscription handler."""
    def __init__(self, name, handler_string, topic):
        super(SNSEventConfig, self).__init__(name, handler_string)
        self.topic = topic
class SQSEventConfig(BaseEventSourceConfig):
    """Config for an SQS queue handler."""
    def __init__(self, name, handler_string, queue, batch_size):
        super(SQSEventConfig, self).__init__(name, handler_string)
        self.queue = queue
        # batch_size: max number of messages delivered per invocation.
        self.batch_size = batch_size
class WebsocketConnectConfig(BaseEventSourceConfig):
    """Config for the websocket ``$connect`` route handler."""
    CONNECT_ROUTE = '$connect'
    def __init__(self, name, handler_string, user_handler):
        super(WebsocketConnectConfig, self).__init__(name, handler_string)
        self.route_key_handled = self.CONNECT_ROUTE
        self.handler_function = user_handler
class WebsocketMessageConfig(BaseEventSourceConfig):
    """Config for a websocket message route handler."""
    def __init__(self, name, route_key_handled, handler_string, user_handler):
        super(WebsocketMessageConfig, self).__init__(name, handler_string)
        # route_key_handled: caller-supplied route key (unlike connect/
        # disconnect which are fixed).
        self.route_key_handled = route_key_handled
        self.handler_function = user_handler
class WebsocketDisconnectConfig(BaseEventSourceConfig):
    """Config for the websocket ``$disconnect`` route handler."""
    DISCONNECT_ROUTE = '$disconnect'
    def __init__(self, name, handler_string, user_handler):
        super(WebsocketDisconnectConfig, self).__init__(name, handler_string)
        self.route_key_handled = self.DISCONNECT_ROUTE
        self.handler_function = user_handler
class EventSourceHandler(object):
    """Adapts a user handler so lambda's raw (event, context) pair is
    delivered as a single typed event object."""
    def __init__(self, func, event_class):
        self.func = func
        self.event_class = event_class
    def __call__(self, event, context):
        wrapped = self.event_class(event, context)
        return self.func(wrapped)
class WebsocketEventSourceHandler(object):
    """Like EventSourceHandler, but also points the websocket client at
    the endpoint the event arrived on before dispatching."""
    def __init__(self, func, event_class, websocket_api):
        self.func = func
        self.event_class = event_class
        self.websocket_api = websocket_api
    def __call__(self, event, context):
        websocket_event = self.event_class(event, context)
        self.websocket_api.configure(websocket_event.domain_name,
                                     websocket_event.stage)
        self.func(websocket_event)
        # API Gateway only needs an acknowledgement; the handler's
        # return value is intentionally discarded.
        return {'statusCode': 200}
# These classes contain all the event types that are passed
# in as arguments in the lambda event handlers. These are
# part of Chalice's public API and must be backwards compatible.
class BaseLambdaEvent(object):
    """Base for typed lambda event objects (public, backwards compatible).

    Subclasses implement _extract_attributes to pull named attributes
    out of the raw event dict.
    """
    def __init__(self, event_dict, context):
        self._event_dict = event_dict
        self.context = context
        self._extract_attributes(event_dict)
    def _extract_attributes(self, event_dict):
        raise NotImplementedError("_extract_attributes")
    def to_dict(self):
        # Expose the original, untransformed lambda event.
        return self._event_dict
class CloudWatchEvent(BaseLambdaEvent):
    """Typed view of a CloudWatch Events payload."""
    def _extract_attributes(self, event_dict):
        self.version = event_dict['version']
        self.account = event_dict['account']
        self.region = event_dict['region']
        self.detail = event_dict['detail']
        # Note the JSON key is hyphenated: 'detail-type'.
        self.detail_type = event_dict['detail-type']
        self.source = event_dict['source']
        self.time = event_dict['time']
        self.event_id = event_dict['id']
        self.resources = event_dict['resources']
class WebsocketEvent(BaseLambdaEvent):
    """Typed view of an API Gateway websocket event."""
    def __init__(self, event_dict, context):
        super(WebsocketEvent, self).__init__(event_dict, context)
        # Lazily-parsed cache for json_body.
        self._json_body = None
    def _extract_attributes(self, event_dict):
        request_context = event_dict['requestContext']
        self.domain_name = request_context['domainName']
        self.stage = request_context['stage']
        self.connection_id = request_context['connectionId']
        self.body = event_dict.get('body')
    @property
    def json_body(self):
        """The message body parsed as JSON (raises BadRequestError if
        the body is not valid JSON).

        NOTE(review): if ``body`` is absent (None), json.loads raises
        TypeError, which is not mapped to BadRequestError here — confirm
        callers always have a body.
        """
        if self._json_body is None:
            try:
                self._json_body = json.loads(self.body)
            except ValueError:
                raise BadRequestError('Error Parsing JSON')
        return self._json_body
class SNSEvent(BaseLambdaEvent):
    """Typed view of an SNS notification (first record only)."""
    def _extract_attributes(self, event_dict):
        first_record = event_dict['Records'][0]
        self.message = first_record['Sns']['Message']
        self.subject = first_record['Sns']['Subject']
class S3Event(BaseLambdaEvent):
    """Typed view of an S3 notification (first record only)."""
    def _extract_attributes(self, event_dict):
        s3 = event_dict['Records'][0]['s3']
        self.bucket = s3['bucket']['name']
        # Keys arrive URL-quoted in the event; decode before exposing.
        self.key = unquote_str(s3['object']['key'])
class SQSEvent(BaseLambdaEvent):
    """Typed view of an SQS batch; iterate it to get SQSRecord items."""
    def _extract_attributes(self, event_dict):
        # We don't extract anything off the top level
        # event.
        pass
    def __iter__(self):
        for record in self._event_dict['Records']:
            yield SQSRecord(record, self.context)
class SQSRecord(BaseLambdaEvent):
    """A single message within an SQSEvent batch."""
    def _extract_attributes(self, event_dict):
        self.body = event_dict['body']
        # Needed to delete/extend visibility of the message.
        self.receipt_handle = event_dict['receiptHandle']
class Blueprint(DecoratorAPI):
    """A deferred registration container for app handlers.

    Decorator calls made on a blueprint are queued and replayed against
    the real app when ``register`` is invoked (via
    ``app.register_blueprint``).
    """
    def __init__(self, import_name):
        self._import_name = import_name
        # Callables of (app, options) queued until register() runs.
        self._deferred_registrations = []
        self._current_app = None
        self._lambda_context = None
    @property
    def current_request(self):
        """Proxy to the registered app's current_request."""
        if self._current_app is None:
            raise RuntimeError(
                "Can only access Blueprint.current_request if it's registered "
                "to an app."
            )
        return self._current_app.current_request
    @property
    def lambda_context(self):
        """Proxy to the registered app's lambda_context."""
        if self._current_app is None:
            raise RuntimeError(
                "Can only access Blueprint.lambda_context if it's registered "
                "to an app."
            )
        return self._current_app.lambda_context
    def register(self, app, options):
        """Bind this blueprint to *app* and replay queued registrations."""
        self._current_app = app
        all_options = options.copy()
        all_options['module_name'] = self._import_name
        for function in self._deferred_registrations:
            function(app, all_options)
    def _register_handler(self, handler_type, name, user_handler,
                          wrapped_handler, kwargs, options=None):
        # If we go through the public API (app.route, app.schedule, etc) then
        # we have to duplicate either the methods or the params in this
        # class.  We're using _register_handler as a tradeoff for cutting
        # down on the duplication.
        self._deferred_registrations.append(
            # pylint: disable=protected-access
            lambda app, options: app._register_handler(
                handler_type, name, user_handler, wrapped_handler,
                kwargs, options
            )
        )
|
apache-2.0
|
sameerparekh/pants
|
tests/python/pants_test/base/test_source_root.py
|
1
|
11740
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from twitter.common.collections import OrderedSet
from pants.base.address import Address, parse_spec
from pants.base.exceptions import TargetDefinitionException
from pants.base.source_root import SourceRoot, SourceRootTree
from pants.build_graph.target import Target
class TestTarget(Target):
    """Minimal Target subclass used as the 'test' target type here."""
    def __init__(self, spec):
        # Only the target name half of the spec is needed.
        _, target_name = parse_spec(spec)
        super(TestTarget, self).__init__(target_name, Address.parse(spec), None)
class NotTestTarget(Target):
    """A second distinct Target subclass for source-root type checks."""
    def __init__(self, spec):
        # Only the target name half of the spec is needed.
        _, target_name = parse_spec(spec)
        super(NotTestTarget, self).__init__(target_name, Address.parse(spec), None)
class AnotherTarget(Target):
    """A third distinct Target subclass for mutability tests."""
    def __init__(self, spec):
        # Only the target name half of the spec is needed.
        _, target_name = parse_spec(spec)
        super(AnotherTarget, self).__init__(target_name, Address.parse(spec), None)
class SourceRootTest(unittest.TestCase):
  """Tests for SourceRoot. SourceRoot is a singleton so we must make sure this
  test cleans up after itself.
  """

  def tearDown(self):
    SourceRoot.reset()

  def _assert_source_root_empty(self):
    """Assert the singleton holds no registrations at all."""
    self.assertEqual({}, SourceRoot.all_roots())
    with self.assertRaises(KeyError):
      self.assertEqual(set(), SourceRoot.types("tests"))
    with self.assertRaises(KeyError):
      self.assertEqual(set(), SourceRoot.roots(TestTarget))

  def test_register(self):
    self._assert_source_root_empty()
    SourceRoot.register("tests", TestTarget)
    self.assertEquals({"tests": OrderedSet([TestTarget])}, SourceRoot.all_roots())
    self.assertEquals(OrderedSet([TestTarget]), SourceRoot.types("tests"))
    self.assertEquals(OrderedSet(["tests"]), SourceRoot.roots(TestTarget))

  def check_buildroot(self, buildroot_path):
    """Registering the build root itself should normalize to '.'."""
    self._assert_source_root_empty()
    SourceRoot.register(buildroot_path, TestTarget)
    self.assertEquals({".": OrderedSet([TestTarget])}, SourceRoot.all_roots())
    self.assertEquals(OrderedSet([TestTarget]), SourceRoot.types("."))
    self.assertEquals(OrderedSet(["."]), SourceRoot.roots(TestTarget))
    target = TestTarget("//mock/foo/bar:baz")
    self.assertEqual("", SourceRoot.find(target))

  def test_register_buildroot_dot(self):
    self.check_buildroot(".")

  def test_register_buildroot_empty(self):
    self.check_buildroot("")

  def test_register_none(self):
    self._assert_source_root_empty()
    SourceRoot.register("tests", )
    self.assertEquals({"tests": OrderedSet()}, SourceRoot.all_roots())
    self.assertEquals(OrderedSet(), SourceRoot.types("tests"))
    self.assertEquals("tests", SourceRoot.find(TestTarget("//tests/foo/bar:baz")))
    self.assertEquals("tests", SourceRoot.find_by_path("tests/foo/bar"))

  def test_reset(self):
    self._assert_source_root_empty()
    SourceRoot.register("tests", TestTarget)
    self.assertEquals({"tests": OrderedSet([TestTarget])}, SourceRoot.all_roots())
    SourceRoot.reset()
    self._assert_source_root_empty()

  def test_here(self):
    target = TestTarget("//mock/foo/bar:baz")
    self.assertEqual("mock/foo/bar", SourceRoot.find(target))
    SourceRoot("mock/foo").here()
    self.assertEqual("mock/foo", SourceRoot.find(target))

  def check_here_buildroot(self, buildroot_path):
    """Calling here() with the build root should yield an empty root."""
    target = TestTarget("//mock/foo/bar:baz")
    self.assertEqual("mock/foo/bar", SourceRoot.find(target))
    SourceRoot(buildroot_path).here()
    self.assertEqual("", SourceRoot.find(target))

  def test_here_buildroot_dot(self):
    # Bug fix: previously called check_buildroot, so the here() code
    # path for the build root was never exercised.
    self.check_here_buildroot(".")

  def test_here_buildroot_empty(self):
    # Bug fix: previously called check_buildroot (see above).
    self.check_here_buildroot("")

  def test_find(self):
    # When no source_root is registered, it should just return the path from the address
    self.assertEqual("tests/foo/bar", SourceRoot.find(TestTarget("//tests/foo/bar:baz")))
    SourceRoot.register("tests/foo", TestTarget)
    # After the source root is registered, you should get the source root
    self.assertEquals("tests/foo", SourceRoot.find(TestTarget("//tests/foo/bar:baz")))
    with self.assertRaises(TargetDefinitionException):
      SourceRoot.find(NotTestTarget("//tests/foo/foobar:qux"))

  def test_find_by_path(self):
    # No source_root is registered yet
    query = "tests/foo/bar:baz"
    self.assertIsNone(SourceRoot.find_by_path(query),
                      msg="Query {query} Failed for tree: {dump}"
                      .format(query=query, dump=SourceRoot._dump()))
    SourceRoot.register("tests/foo", TestTarget)
    self.assertEquals("tests/foo", SourceRoot.find_by_path(query),
                      msg="Query {query} Failed for tree: {dump}"
                      .format(query=query, dump=SourceRoot._dump()))
    self.assertIsNone(SourceRoot.find_by_path("tests/bar/foobar:qux"),
                      msg="Failed for tree: {dump}"
                      .format(dump=SourceRoot._dump()))

  def test_source_root_tree_node(self):
    root = SourceRootTree.Node("ROOT")
    self.assertIsNone(root.get("child1"))
    self.assertIsNone(root.get("child2"))
    child = root.get_or_add("child1")
    self.assertIsNotNone(child)
    self.assertEquals(child, root.get("child1"))
    self.assertIsNone(root.get("child2"))
    grandchild = child.get_or_add("grandchild")
    self.assertIsNone(root.get("grandchild"))
    self.assertEquals(grandchild, child.get("grandchild"))
    # Retrieve the same object on re-insertion
    self.assertEquals(grandchild, child.get_or_add("grandchild"))

  def test_source_root_tree(self):
    tree = SourceRootTree()
    self.assertEquals((None, None), tree.get_root_and_types(""))
    self.assertEquals((None, None), tree.get_root_and_types("tests/language"))
    self.assertEquals((None, None), tree.get_root_and_types("tests/language/foo"))
    self.assertEquals((None, None), tree.get_root_and_types("src/language"))
    self.assertEquals((None, None), tree.get_root_and_types("src"))
    tree.add_root("tests/language", set([NotTestTarget, TestTarget]))
    self.assertEquals(("tests/language", OrderedSet([NotTestTarget, TestTarget])),
                      tree.get_root_and_types("tests/language"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    root, types = tree.get_root_and_types("tests/language/foo")
    self.assertEquals("tests/language", root,
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals(set(types),
                      set([NotTestTarget, TestTarget]),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals((None, None), tree.get_root_and_types("src"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals((None, None), tree.get_root_and_types("src/bar"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals((None, None), tree.get_root_and_types("s"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    tree.add_root("src/language", set([NotTestTarget]))
    self.assertEquals(("tests/language", OrderedSet([NotTestTarget, TestTarget])),
                      tree.get_root_and_types("tests/language"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals(("tests/language", OrderedSet([NotTestTarget, TestTarget])),
                      tree.get_root_and_types("tests/language/foo"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals(("src/language", OrderedSet([NotTestTarget])),
                      tree.get_root_and_types("src/language"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals(("src/language", OrderedSet([NotTestTarget])),
                      tree.get_root_and_types("src/language/bar"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    self.assertEquals((None, None), tree.get_root_and_types("src"),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))
    with self.assertRaises(SourceRootTree.DuplicateSourceRootError):
      tree.add_root("tests/language", set([NotTestTarget]))
    with self.assertRaises(SourceRootTree.NestedSourceRootError):
      tree.add_root("tests", set([NotTestTarget]))

  def test_mutable(self):
    tree = SourceRootTree()
    tree.add_root("mutable/foo", set([NotTestTarget, TestTarget]), mutable=True)
    tree.add_root("immutable/foo", set([NotTestTarget, TestTarget]), mutable=False)
    with self.assertRaises(SourceRootTree.DuplicateSourceRootError):
      # Can't add an immutable root to a mutable root
      tree.add_root("mutable/foo", set([AnotherTarget]))
    with self.assertRaises(SourceRootTree.DuplicateSourceRootError):
      # Can't add an mutable root to an immutable root
      tree.add_root("immutable/foo", set([AnotherTarget]), mutable=True)
    # But can add a mutable root to a mutable root
    tree.add_root("mutable/foo", set([AnotherTarget]), mutable=True)
    self.assertEquals(set([AnotherTarget, NotTestTarget, TestTarget]),
                      set(tree.get_root_and_types("mutable/foo")[1]),
                      msg="Failed for tree: {dump}".format(dump=tree._dump()))

  def _add_siblings1(self, tree, common_root):
    """Register a standard src/tests java+resources layout under common_root."""
    tree.add_root(os.path.join(common_root, 'src/java'), [NotTestTarget])
    tree.add_root(os.path.join(common_root, 'src/resources'), [NotTestTarget])
    tree.add_root(os.path.join(common_root, 'tests/java'), [NotTestTarget, TestTarget])
    tree.add_root(os.path.join(common_root, 'tests/resources'), [NotTestTarget])

  def test_get_root_siblings(self):
    tree = SourceRootTree()
    self._add_siblings1(tree, "")
    self.assertEquals([], tree.get_root_siblings("foo/bar/baz"))
    self.assertEquals([], tree.get_root_siblings("src"))
    self.assertEquals(["src/java", "src/resources"],
                      tree.get_root_siblings("src/java"))
    self.assertEquals(["src/java", "src/resources"],
                      tree.get_root_siblings("src/resources"))
    self.assertEquals(["src/java", "src/resources"],
                      tree.get_root_siblings("src/java/org/pantsbuild/foo"))
    self.assertEquals(["src/java", "src/resources"],
                      tree.get_root_siblings("src/resources/org/pantsbuild/foo"))
    self.assertEquals([], tree.get_root_siblings("src/foo/bar/baz"))
    self.assertEquals(["tests/java", "tests/resources"],
                      tree.get_root_siblings("tests/java/org/pantsbuild/foo"))
    self.assertEquals(["tests/java", "tests/resources"],
                      tree.get_root_siblings("tests/resources/org/pantsbuild/foo"))
    self.assertEquals([], tree.get_root_siblings("tests/foo/bar/baz"))
    self._add_siblings1(tree, "examples")
    self.assertEquals([], tree.get_root_siblings("foo/bar/baz"))
    self.assertEquals(["src/java", "src/resources"],
                      tree.get_root_siblings("src/java/org/pantsbuild/foo"))
    self.assertEquals(["tests/java", "tests/resources"],
                      tree.get_root_siblings("tests/resources/org/pantsbuild/foo"))
    self.assertEquals(["examples/src/java", "examples/src/resources"],
                      tree.get_root_siblings("examples/src/java/org/pantsbuild/foo"))
    self.assertEquals(["examples/tests/java", "examples/tests/resources"],
                      tree.get_root_siblings("examples/tests/resources/org/pantsbuild/foo"))
|
apache-2.0
|
open-power/op-test-framework
|
common/OpTestUtil.py
|
1
|
89212
|
#!/usr/bin/env python2
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/OpTestUtil.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
import sys
import os
import datetime
import time
import pwd
import string
import subprocess
import random
import re
import telnetlib
import socket
import select
import time
import pty
import pexpect
import commands
import requests
import traceback
from requests.adapters import HTTPAdapter
#from requests.packages.urllib3.util import Retry
from httplib import HTTPConnection
#HTTPConnection.debuglevel = 1 # this will print some additional info to stdout
import urllib3 # setUpChildLogger enables integrated logging with op-test
import json
from OpTestConstants import OpTestConstants as BMC_CONST
from OpTestError import OpTestError
from Exceptions import CommandFailed, RecoverFailed, ConsoleSettings
from Exceptions import HostLocker, AES, ParameterCheck, HTTPCheck, UnexpectedCase
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
# Output fragments that indicate a sudo attempt was rejected.
sudo_responses = ["not in the sudoers",
                  "incorrect password"]
class OpTestUtil():
    def __init__(self, conf=None):
        # conf: the shared test-run configuration object; helpers read
        # CLI arguments and connection details from it.
        self.conf = conf
    def setup(self, config='HostLocker'):
        """Build the Server object for the selected reservation backend.

        config selects 'AES', 'REST' (BMC endpoint), or the default
        HostLocker backend.
        """
        # we need this called AFTER the proper configuration values have been seeded
        if config == 'AES':
            self.conf.util_server = Server(url=self.conf.args.aes_server,
                                           base_url=self.conf.args.aes_base_url,
                                           minutes=None,
                                           proxy=self.build_proxy(self.conf.args.aes_proxy,
                                                                  self.conf.args.aes_no_proxy_ips))
        elif config == 'REST':
            # Talk directly to the BMC's HTTPS interface.
            rest_server = "https://{}".format(self.conf.args.bmc_ip)
            self.conf.util_bmc_server = Server(url=rest_server,
                                               username=self.conf.args.bmc_username,
                                               password=self.conf.args.bmc_password)
        else:
            self.conf.util_server = Server(url=self.conf.args.hostlocker_server,
                                           base_url=self.conf.args.hostlocker_base_url,
                                           minutes=None,
                                           proxy=self.build_proxy(self.conf.args.hostlocker_proxy,
                                                                  self.conf.args.hostlocker_no_proxy_ips))
    def check_lockers(self):
        """Acquire (or query/lock/unlock) lab reservations before a run.

        Handles the HostLocker reservation first, then the AES options:
        ``--aes q|l|u`` perform a query/lock/unlock and then exit(0);
        otherwise (plain ``--aes`` names or ``--aes-search-args``) an
        environment is locked for this run and the reservation details are
        recorded in ``self.conf.lock_dict``.

        :raises AES: when no matching environment could be locked.
        """
        if self.conf.args.hostlocker is not None:
            self.conf.util.hostlocker_lock(self.conf.args)
        if self.conf.args.aes is not None:
            # single-letter flags may precede the environment names;
            # only the FIRST flag found wins
            query = False
            lock = False
            unlock = False
            for i in range(len(self.conf.args.aes)):
                if self.conf.args.aes[i].lower() == 'q':
                    query = True
                    del self.conf.args.aes[i] # remove the q flag in case env name is also q
                    break
                if self.conf.args.aes[i].lower() == 'l':
                    lock = True
                    del self.conf.args.aes[i] # remove the l flag in case env name is also l
                    break
                if self.conf.args.aes[i].lower() == 'u':
                    unlock = True
                    del self.conf.args.aes[i] # remove the u flag in case env name is also u
                    break
            self.conf.args.aes = list(set(self.conf.args.aes)) # removes any duplicates
            if query:
                envs, search_criteria = self.conf.util.aes_get_environments(self.conf.args)
                if envs is not None:
                    self.conf.util.aes_print_environments(envs)
                else:
                    print("NO environments found, (if Environment_Name added its "
                          "probably a syntax problem with --aes q, look at "
                          "--aes-search-args), we used --aes-search-args {}\n"
                          .format(' '.join(search_criteria)))
                self.conf.util.cleanup()
                exit(0)
            if lock:
                envs, search_criteria = self.conf.util.aes_get_environments(self.conf.args)
                if envs is not None and len(envs) > 0:
                    # refuse to lock unless the search narrowed to ONE env
                    if len(envs) <= 1:
                        for env in envs:
                            # working_id should NOT be kept to release upon exit
                            working_id = self.conf.util.aes_lock_env(env=env)
                            if working_id is None:
                                print ("AES shows NOT available to LOCK, "
                                       "Environment_EnvId={} Environment_Name='{}' "
                                       "Environment_State={} res_id={} res_email={}"
                                       .format(env['env_id'], env['name'], env['state'],
                                               env['res_id'], env['res_email']))
                            else:
                                print ("AES LOCKED Environment_EnvId={} "
                                       "Environment_Name='{}' res_id={} aes-add-locktime "
                                       "(in hours, zero is Never Expires) = '{}'"
                                       .format(env['env_id'], env['name'], working_id,
                                               self.conf.args.aes_add_locktime))
                    else:
                        print ("AES LOCK limit imposed, we found {} environments "
                               "using --aes-search-args {} and we must find only "
                               "one to lock here, use --aes q with your "
                               "--aes-search-args to view what we found"
                               .format(len(envs), ' '.join(search_criteria)))
                else:
                    print ("Found NO environments using --aes-search-args {}, "
                           "use --aes q with your --aes-search-args to view "
                           "what we found".format(' '.join(search_criteria)))
                self.conf.util.cleanup()
                exit(0) # exit lock branch
            if unlock:
                envs, search_criteria = self.conf.util.aes_get_environments(self.conf.args)
                if envs is not None:
                    # same single-environment guard as the lock branch
                    if len(envs) <= 1:
                        for env in envs:
                            res_id = self.conf.util.aes_release_reservation(env=env)
                            if res_id is None:
                                print ("AES shows NO LOCK, so skipped UNLOCK "
                                       "Environment_EnvId={} Environment_Name='{}' "
                                       "Environment_State={} res_id={} res_email={}"
                                       .format(env['env_id'], env['name'], env['state'],
                                               env['res_id'], env['res_email']))
                            else:
                                print ("AES UNLOCKed Environment_EnvId={} "
                                       "Environment_Name='{}' res_id={} res_email={}"
                                       .format(env['env_id'], env['name'],
                                               env['res_id'], env['res_email']))
                    else:
                        print ("AES UNLOCK limit imposed, we found {} "
                               "environments and we must only find one to unlock "
                               "here, use --aes-search-args to limit "
                               "serach criteria".format(len(envs)))
                else:
                    print ("NO AES environments found using --aes-search-args {}"
                           .format(' '.join(search_criteria)))
                self.conf.util.cleanup()
                exit(0) # exit unlock branch
            else: # we filtered out all else so now find an env and lock it
                self.conf.lock_dict = self.conf.util.aes_lock(self.conf.args,
                                                              self.conf.lock_dict)
                environments = self.conf.lock_dict.get('envs')
                if self.conf.lock_dict.get('res_id') is None:
                    if self.conf.aes_print_helpers is True:
                        self.conf.util.aes_print_environments(environments)
                    # MESSAGE 'unable to lock' must be kept in same line to be filtered
                    raise AES(message="OpTestSystem AES unable to lock environment "
                              "requested, try --aes q with options for --aes-search-args "
                              "to view availability")
                else:
                    log.info("OpTestSystem AES Reservation for Environment_Name '{}' "
                             "Group_Name={} Reservation id={}"
                             .format(self.conf.lock_dict.get('name'),
                                     self.conf.lock_dict.get('Group_Name'),
                                     self.conf.lock_dict.get('res_id')))
        elif self.conf.args.aes_search_args is not None:
            # no --aes names given, only search criteria: lock whatever matches
            self.conf.lock_dict = self.conf.util.aes_lock(self.conf.args,
                                                          self.conf.lock_dict)
            environments = self.conf.lock_dict.get('envs')
            if self.conf.lock_dict.get('res_id') is None:
                if self.conf.aes_print_helpers is True:
                    self.conf.util.aes_print_environments(environments)
                # MESSAGE 'unable to lock' must be kept in same line to be filtered
                raise AES(message="OpTestSystem AES NO available environments matching "
                          "criteria (see output earlier), unable to lock,"
                          "try running op-test with --aes q "
                          "--aes-search-args Environment_State=A "
                          "to query system availability, if trying to use "
                          "existing reservation the query must be exactly one")
            else:
                log.info("OpTestSystem AES Reservation for Environment_Name '{}' "
                         "Group_Name={} Reservation id={}"
                         .format(self.conf.lock_dict.get('name'),
                                 self.conf.lock_dict.get('Group_Name'),
                                 self.conf.lock_dict.get('res_id')))
    def cleanup(self):
        """Release reservations and tear down helper connections.

        Best-effort teardown: releases the HostLocker lock (unless
        ``--hostlocker-keep-lock``), releases the AES reservation (unless
        ``--aes-keep-lock``), closes the HTTP helper sessions, dumps
        firmware/NVRAM diagnostics once, and finally closes the qemu
        scratch disk.  May be re-entered from a signal handler, so each
        step clears its state after running.
        """
        if self.conf.args.hostlocker is not None:
            if self.conf.args.hostlocker_keep_lock is False:
                try:
                    self.hostlocker_unlock()
                except Exception as e:
                    log.warning("OpTestSystem HostLocker attempted to release "
                                "host '{}' hostlocker-user '{}', please manually "
                                "verify and release".format(self.conf.args.hostlocker,
                                self.conf.args.hostlocker_user))
                # double-check the release actually happened
                rc, lockers = self.hostlocker_locked()
                if rc == 0:
                    # there can be cases during signal handler cleanup
                    # where we get interrupted before the actual lock hit
                    # so this message can be output even though no lock was
                    # actually released, no two phase commits here :0
                    # other cases exist where we confirm no locks held, but
                    # the info message may say released, due to exceptions thrown
                    insert_message = ", host is locked by '{}'".format(lockers)
                    if len(lockers) == 0:
                        insert_message = ""
                    log.info("OpTestSystem HostLocker cleanup for host '{}' "
                             "hostlocker-user '{}' confirms you do not hold the lock{}"
                             .format(self.conf.args.hostlocker,
                             self.conf.args.hostlocker_user, insert_message))
                else: # we only care if user held the lock
                    log.warning("OpTestSystem HostLocker attempted to cleanup "
                                "and release the host '{}' and we were unable to verify, "
                                "please manually verify and release"
                                .format(self.conf.args.hostlocker))
            # clear since signal handler may call and exit path
            self.conf.args.hostlocker = None
        if self.conf.args.aes is not None or self.conf.args.aes_search_args is not None:
            if self.conf.args.aes_keep_lock is False:
                if self.conf.lock_dict.get('res_id') is not None:
                    temp_res_id = self.aes_release_reservation(res_id=self.conf.lock_dict.get('res_id'))
                    if temp_res_id is not None:
                        log.info("OpTestSystem AES releasing reservation {} "
                                 "Environment_Name '{}' Group_Name {}"
                                 .format(self.conf.lock_dict.get('res_id'),
                                 self.conf.lock_dict.get('name'),
                                 self.conf.lock_dict.get('Group_Name')))
                        # clear signal handler may call and exit path
                        self.conf.lock_dict['res_id'] = None
                    else:
                        log.info("OpTestSystem AES attempted to cleanup and release "
                                 "reservation {} Environment_Name '{}' Group_Name {}"
                                 " and we were unable to verify, please manually verify "
                                 "and release".format(self.conf.lock_dict.get('res_id'),
                                 self.conf.lock_dict.get('name'),
                                 self.conf.lock_dict.get('Group_Name')))
        if self.conf.util_server is not None:
            # AES and Hostlocker skip logout
            log.debug("Closing util_server")
            self.conf.util_server.close()
        if self.conf.util_bmc_server is not None:
            log.debug("Logging out of util_bmc_server")
            self.conf.util_bmc_server.logout()
            log.debug("Closing util_bmc_server")
            self.conf.util_bmc_server.close()
        if self.conf.dump:
            self.conf.dump = False # possible for multiple passes here
            self.dump_versions()
            self.dump_nvram_opts()
        # leave closing the qemu scratch disk until last
        # no known reasons at this point, document in future
        try:
            log.debug("self.conf.args.qemu_scratch_disk={}"
                      .format(self.conf.args.qemu_scratch_disk))
            if self.conf.args.qemu_scratch_disk is not None:
                self.conf.args.qemu_scratch_disk.close()
                log.debug("Successfully closed qemu_scratch_disk")
                self.conf.args.qemu_scratch_disk = None # in case we pass here again
        except Exception as e:
            log.debug("self.conf.args.qemu_scratch_disk={} "
                      "closing Exception={}"
                      .format(self.conf.args.qemu_scratch_disk, e))
def dump_versions(self):
log.info("Log Location: {}/*debug*".format(self.conf.output))
log.info("\n----------------------------------------------------------\n"
"OpTestSystem Firmware Versions Tested\n"
"(if flashed things like skiboot.lid, may not be accurate)\n"
"----------------------------------------------------------\n"
"{}\n"
"----------------------------------------------------------\n"
"----------------------------------------------------------\n"
.format(
(None if self.conf.firmware_versions is None \
else ('\n'.join(f for f in self.conf.firmware_versions)))
))
def check_nvram_options(self, console):
try:
console.run_command("which nvram")
except:
log.info("No NVRAM utility available to check options")
return
result = console.run_command("nvram -p ibm,skiboot --print-config")
self.conf.nvram_debug_opts = [o for o in result if "=" in o]
if len(self.conf.nvram_debug_opts) == 0:
log.info("No NVRAM debugging options set")
return
log.warning("{} NVRAM debugging options set".format(len(self.conf.nvram_debug_opts)))
def dump_nvram_opts(self):
if self.conf.nvram_debug_opts is None or len(self.conf.nvram_debug_opts) == 0:
return
log.warning("\n{} NVRAM debugging options set\n"
"These may adversely affect test results; verify these are appropriate if a failure occurs:\n"
"----------------------------------------------------------\n"
"{}\n"
"----------------------------------------------------------\n"
.format(len(self.conf.nvram_debug_opts), '\n'.join(f for f in self.conf.nvram_debug_opts)))
def build_proxy(self, proxy, no_proxy_ips):
if no_proxy_ips is None:
return proxy
for ip in no_proxy_ips:
cmd = 'ip addr show to %s' % ip
try:
output = subprocess.check_output(cmd.split())
except (subprocess.CalledProcessError, OSError) as e:
raise HostLocker(message="Could not run 'ip' to check for no proxy?")
if len(output):
proxy = None
break
return proxy
def get_env_name(self, x):
return x['name']
    def aes_print_environments(self, environments):
        """Pretty-print AES environment records plus search-syntax helpers.

        Output lines are formatted so they can be pasted back as
        ``--aes-search-args`` values.  No-op when *environments* is None.
        """
        if environments is None:
            return
        sorted_env_list = sorted(environments, key=self.get_env_name)
        print "--------------------------------------------------------------------------------"
        for env in sorted_env_list:
            print ("--aes-search-args Environment_Name='{}' Environment_EnvId={} "
                   "Group_Name='{}' Group_GroupId={} Environment_State={} <res_id={} "
                   "res_email={} aes-add-locktime={}>"
                   .format(env['name'], env['env_id'], env['group']['name'],
                           env['group']['group_id'], env['state'], env['res_id'],
                           env['res_email'], env['res_length'], ))
        print "--------------------------------------------------------------------------------"
        print ("\nHELPERS --aes-search-args Server_VersionName=witherspoon|boston|habanero|zz|tuleta"
               "|palmetto|brazos|fleetwood|p8dtu|p9dsu|zaius|stratton|firestone|garrison|romulus|alpine")
        print "  --aes-search-args Server_HardwarePlatform=POWER8|POWER9|openpower"
        print "  --aes-search-args Group_Name=op-test"
        print "  --aes-search-args Environment_State=A|R|M|X|H|E"
        print "A=Available R=Reserved M=Maintenance X=Offline H=HealthCheck E=Exclusive"
        print "AES Environments found = {}".format(len(sorted_env_list))
    def aes_release_reservation(self, res_id=None, env=None):
        """Release an AES reservation.

        :param res_id: reservation id; when None it is taken from
            ``env['res_id']``.
        :param env: AES environment record (used only for its res_id).
        :returns: the res_id that was released (returned even when AES
            could not confirm the release), or None when nothing to do.
        :raises AES: when the HTTP request fails or returns non-OK.
        """
        release_dict = {'result' : None,
                        'status' : None,
                        'message' : None,
                        }
        if res_id is None:
            if env is not None:
                res_id = env.get('res_id')
            if res_id is None:
                return None # nothing to do
        res_payload = { 'res_id': res_id }
        uri = "/release-reservation.php"
        try:
            r = self.conf.util_server.get(uri=uri, params=res_payload)
            if r.status_code != requests.codes.ok:
                raise AES(message="OpTestSystem AES attempted to release "
                          "reservation '{}' but it was NOT found in AES, "
                          "please update and retry".format(res_id))
        except Exception as e:
            raise AES(message="OpTestSystem AES attempted to releasing "
                      "reservation '{}' but encountered an Exception='{}', "
                      "please manually verify and release".format(res_id, e))
        # best-effort verification of the release; failures here only log
        try:
            json_data = r.json()
            release_dict['status'] = json_data.get('status')
            release_dict['result'] = json_data.get('result')
            if json_data.get('result').get('res_id') != res_id:
                log.warning("OpTestSystem AES UNABLE to confirm the release "
                            "of the reservation '{}' in AES, please manually "
                            "verify and release if needed, see details: {}"
                            .format(res_id, release_dict))
        except Exception as e:
            # this seems to be the typical path from AES, not sure what's up
            log.debug("NO JSON object from aes_release_reservation, r.text={}".format(r.text))
            release_dict['message'] = r.text
            log.debug("OpTestSystem AES UNABLE to confirm the release "
                      "of the reservation '{}' in AES, please manually "
                      "verify and release if needed, see details: {}"
                      .format(res_id, release_dict))
        return res_id
    def aes_get_environments(self, args):
        """Query AES for environments matching the configured search args.

        Also initializes the Server request session (first AES call) and
        seeds ``args.aes_user`` from the current OS user when unset.
        Any ``--aes`` names are folded into ``--aes-search-args`` as
        ``Environment_Name=...`` criteria.

        :param args: parsed op-test arguments (mutated as described).
        :returns: tuple (list of environment records or None,
            the aes_search_args used).
        :raises AES: on HTTP failure, server-side SQL errors, or an
            unexpected status in the JSON reply.
        """
        # this method initializes the Server request session
        get_dict = {'result' : None,
                    'status' : None,
                    'message' : None,
                    }
        args_dict = vars(args)
        if self.conf.util_server is None:
            self.setup(config='AES')
        if self.conf.args.aes_search_args is None:
            self.conf.args.aes_search_args = []
            if self.conf.args.aes is not None:
                for i in range(len(self.conf.args.aes)):
                    # add the single env to the list of search
                    self.conf.args.aes_search_args += ("Environment_Name={}"
                                                       .format(self.conf.args.aes[i]).splitlines())
            else:
                return None, None # we should NOT have gotten here
        else:
            if self.conf.args.aes is not None:
                for i in range(len(self.conf.args.aes)):
                    self.conf.args.aes_search_args += ("Environment_Name={}"
                                                       .format(self.conf.args.aes[i]).splitlines())
        uri = "/get-environments.php"
        payload = { 'query_params[]': self.conf.args.aes_search_args}
        r = self.conf.util_server.get(uri=uri, params=payload)
        if r.status_code != requests.codes.ok:
            raise AES(message="OpTestSystem AES UNABLE to find the environment '{}' "
                      "in AES, please update and retry".format(self.conf.args.aes))
        # SQL issues can cause various problems which come back as ok=200
        filter_list = ["have an error"]
        matching = [xs for xs in filter_list if xs in r.text]
        if len(matching):
            raise AES(message="OpTestSystem AES encountered an error,"
                      " check the syntax of your query and retry, Exception={}"
                      .format(r.text))
        # we need this here to set the aes_user for subsequent calls
        if self.conf.args.aes_user is None:
            self.conf.args.aes_user = pwd.getpwuid(os.getuid()).pw_name
        aes_response_json = r.json()
        get_dict['status'] = aes_response_json.get('status')
        if aes_response_json.get('status') == 0:
            get_dict['result'] = aes_response_json.get('result')
        else:
            get_dict['message'] = aes_response_json.get('message')
            raise AES(message="Something unexpected happened, "
                      "see details: {}".format(get_dict))
        return get_dict.get('result'), self.conf.args.aes_search_args
def aes_get_env(self, env):
uri = "/get-environment-info.php"
env_payload = { 'env_id': env['env_id'] }
r = self.conf.util_server.get(uri=uri, params=env_payload)
if r.status_code != requests.codes.ok:
raise AES(message="OpTestSystem AES UNABLE to find the environment '{}' "
"in AES, please update and retry".format(env['env_id']))
aes_response_json = r.json()
if aes_response_json.get('status') == 0:
return aes_response_json['result'][0]
    def aes_add_time(self, env=None, locktime=24):
        """Extend an existing AES reservation by *locktime* hours.

        :param env: environment record whose 'res_id' identifies the
            reservation to extend.
        :param locktime: hours to add; 0 is treated as 24 (a command-line
            default meaning "never expires" elsewhere).
        :returns: dict with 'status'/'result'/'message' from AES.
        :raises AES: when the reservation is unknown or cannot be extended.
        """
        # Sept 10, 2018 - seems to be some issue with add-res-time.php
        # even in Web UI the Add an Hour is not working
        # locktime number of hours to add
        # if aes_add_time called when AES reservation is
        # in expiration window this fails
        # not sure how that calculation is done yet
        time_dict = {'result' : None,
                     'status' : None,
                     'message' : None,
                     }
        if locktime == 0:
            # if default, add some time
            # allows user to specify command line override
            locktime = 24
        uri = "/add-res-time.php"
        res_payload = { 'res_id': env.get('res_id'),
                        'hours': float(locktime),
                      }
        r = self.conf.util_server.get(uri=uri, params=res_payload)
        if r.status_code != requests.codes.ok:
            raise AES(message="OpTestSystem AES UNABLE to find the reservation "
                      "res_id '{}' in AES, please update and retry".format(env['res_id']))
        aes_response_json = r.json()
        time_dict['status'] = aes_response_json.get('status')
        if aes_response_json.get('status') == 0:
            time_dict['result'] = aes_response_json.get('result')
        else:
            time_dict['message'] = aes_response_json.get('message')
            raise AES(message="OpTestSystem AES UNABLE to add time to existing "
                      "reservation, the reservation may be about to expire or "
                      "conflict exists, see details: {}".format(time_dict))
        return time_dict
    def aes_get_creds(self, env, args):
        """Copy credentials/settings from an AES environment into *args*.

        Maps AES server-record fields onto op-test argument names (see the
        tables below); 'version_name' is additionally translated into an
        op-test bmc_type.  Mutates *args* in place via vars().

        :param env: detailed AES environment record (from aes_get_env).
        :param args: parsed op-test arguments to be populated.
        :raises AES: when the environment does not have exactly one
            server record.
        """
        # version_mappings used for bmc_type
        #        AES             op-test
        version_mappings = { 'witherspoon'    : 'OpenBMC',
                             'zaius'          : 'OpenBMC',
                             'boston'         : 'SMC',
                             'stratton'       : 'SMC',
                             'p9dsu'          : 'SMC',
                             'p8dtu'          : 'SMC',
                             'firestone'      : 'AMI',
                             'garrison'       : 'AMI',
                             'habanero'       : 'AMI',
                             'palmetto'       : 'AMI',
                             'romulus'        : 'AMI',
                             'alpine'         : 'FSP',
                             'brazos'         : 'FSP',
                             'fleetwood'      : 'FSP',
                             'tuleta'         : 'FSP',
                             'zz'             : 'FSP',
                             'unknown'        : 'unknown',
                             'qemu'           : 'qemu',
                           }
        # aes_mappings used for configuration parameters
        #        AES             op-test
        aes_mappings = { 'os_password'        : 'host_password',
                         'os_username'        : 'host_user',
                         'os_host'            : 'host_ip',
                         'net_mask'           : 'host_submask',
                         'os_mac_address'     : 'host_mac',
                         'def_gateway'        : 'host_gateway',
                         'mac_address'        : 'bmc_mac',
                         'password'           : 'bmc_password',
                         'username'           : 'bmc_username',
                         'host_name'          : 'bmc_ip',
                         'ipmi_username'      : 'bmc_usernameipmi',
                         'ipmi_password'      : 'bmc_passwordipmi',
                         'version_name'       : 'bmc_type',
                         'hardware_platform'  : 'platform',
                         'attached_disk'      : 'host_scratch_disk',
                       }
        args_dict = vars(args) # we store credentials to the args
        if len(env['servers']) != 1:
            # we may not yet have output a message about reservation
            # but we will get the release message
            self.cleanup()
            raise AES(message="AES credential problem, check AES definitions "
                      "for server record, we either have no server record or more "
                      "than one, check FSPs and BMCs")
        # only overwrite args with non-empty AES values
        for key, value in aes_mappings.items():
            if env['servers'][0].get(key) is not None and env['servers'][0].get(key) != '':
                if key == 'version_name':
                    args_dict[aes_mappings[key]] = version_mappings.get(env['servers'][0][key].lower())
                else:
                    args_dict[aes_mappings[key]] = env['servers'][0][key]
    def aes_lock_env(self, env=None):
        """Try to reserve one AES environment.

        For an Available ('A') environment a reservation is enqueued; for
        an environment already Reserved ('R') by this user, the existing
        reservation is extended instead.

        :param env: environment record to lock; None is a no-op.
        :returns: the (new or existing) reservation id, or None when the
            environment could not be locked.
        :raises AES: on HTTP failure or server-side query errors.
        """
        if env is None:
            return
        new_res_id = None
        res_payload = { 'email' : self.conf.args.aes_user,
                        'query_params[]': None,
                        'needs_claim' : False,
                        'length' : float(self.conf.args.aes_add_locktime),
                        'rel_on_expire' : self.conf.args.aes_rel_on_expire,
                      }
        if env.get('state') == 'A':
            uri = "/enqueue-reservation.php"
            res_payload['query_params[]'] = 'Environment_EnvId={}'.format(env.get('env_id'))
            r = self.conf.util_server.get(uri=uri, params=res_payload)
            if r.status_code != requests.codes.ok:
                raise AES(message="Problem with AES trying to enqueue a reservation "
                          "for environment '{}', please retry".format(env.get('env_id')))
            # SQL issues can cause various problems which come back as ok=200
            filter_list = ["have an error"]
            matching = [xs for xs in filter_list if xs in r.text]
            if len(matching):
                raise AES(message="OpTestSystem AES encountered an error,"
                          " check the syntax of your query and retry, Exception={}"
                          .format(r.text))
            aes_response_json = r.json()
            if aes_response_json['status'] == 0:
                new_res_id = aes_response_json['result']
            return new_res_id # None if status not zero
        else:
            # already reserved: extend only when it is OUR reservation
            if env.get('state') == 'R' and \
                env.get('res_email') == self.conf.args.aes_user and \
                self.conf.args.aes_add_locktime != 0:
                time_dict = self.aes_add_time(env=env,
                                              locktime=self.conf.args.aes_add_locktime)
                return env.get('res_id')
        return new_res_id # return None, nothing works
    def aes_lock(self, args, lock_dict):
        """Find and lock one AES environment matching the search args.

        Walks the matching environments, locking the first one available;
        when exactly one environment matched and it is already reserved by
        this user, the existing reservation is reused/extended.

        :param args: parsed op-test arguments (credentials get filled in
            via aes_get_creds).
        :param lock_dict: caller's dict; updated with 'res_id', 'name',
            'Group_Name' and 'envs' (res_id None when nothing was locked).
        :returns: the updated lock_dict.
        """
        environments, search_criteria = self.aes_get_environments(args)
        for env in environments:
            # store the new reservation id in the callers instance
            # since we need to cleanup if aes_get_creds fails
            lock_dict['res_id'] = self.aes_lock_env(env=env)
            if lock_dict['res_id'] is not None:
                # get the database join info for the env
                creds_env = self.aes_get_env(env)
                # we need lock_dict filled in here
                # in case exception thrown in aes_get_creds
                lock_dict['name'] = env.get('name')
                lock_dict['Group_Name'] = env.get('group').get('name')
                lock_dict['envs'] = environments
                self.aes_get_creds(creds_env, args)
                return lock_dict
            else: # it was not Available
                # if only one environment, was it us ?
                # if so extend the reservation
                if len(environments) == 1:
                    if env.get('res_email') == self.conf.args.aes_user:
                        if env.get('state') == 'R':
                            if env.get('res_length') != 0:
                                lock_dict['res_id'] = env.get('res_id')
                                # aes_add_time can fail if reservation
                                # about to expire or conflicts
                                time_dict = self.aes_add_time(env=env,
                                                              locktime=self.conf.args.aes_add_locktime)
                            creds_env = self.aes_get_env(env)
                            # we need lock_dict filled in here
                            # in case exception thrown in aes_get_creds
                            lock_dict['res_id'] = env.get('res_id')
                            lock_dict['name'] = env.get('name')
                            lock_dict['Group_Name'] = env.get('group').get('name')
                            lock_dict['envs'] = environments
                            self.aes_get_creds(creds_env, args)
                            return lock_dict
        lock_dict['res_id'] = None
        lock_dict['name'] = None
        lock_dict['Group_Name'] = None
        lock_dict['envs'] = environments
        # we did not find anything able to be reserved
        # return the list we looked thru
        return lock_dict
    def hostlocker_lock(self, args):
        """Lock the configured host in HostLocker and import its config.

        Queries the host record, parses op-test ``key: value`` settings out
        of the host's HostLocker comment field (anything after an optional
        ``[op-test]`` marker) into *args*, then posts the lock request.

        :param args: parsed op-test arguments; mutated with values found
            in the HostLocker comment.
        :raises HostLocker: on connectivity problems, unknown host/user,
            or when the host is already locked or unusable.
        """
        args_dict = vars(args)
        # we need hostlocker_user first thing in case exceptions
        if self.conf.args.hostlocker_user is None:
            self.conf.args.hostlocker_user = pwd.getpwuid(os.getuid()).pw_name
        if self.conf.util_server is None:
            self.setup()
        uri = "/host/{}/".format(self.conf.args.hostlocker)
        try:
            r = self.conf.util_server.get(uri=uri)
        except Exception as e:
            log.debug("hostlocker_lock unable to query Exception={}".format(e))
            raise HostLocker(message="OpTestSystem HostLocker unable to query "
                             "HostLocker, check that your VPN/SSH tunnel is properly"
                             " configured and open, proxy configured as '{}' Exception={}"
                             .format(self.conf.args.hostlocker_proxy, e))
        if r.status_code != requests.codes.ok:
            raise HostLocker(message="OpTestSystem did NOT find the host '{}' "
                             "in HostLocker, please update and retry"
                             .format(self.conf.args.hostlocker))
        # parse the hostlocker comment for op-test settings
        host = r.json()[0]
        hostlocker_comment = []
        hostlocker_comment = host['comment'].splitlines()
        # Ignore anything before the [op-test] marker, as a fallback we try
        # to parse everything if there's no marker.
        offset = 0;
        for i, line in enumerate(hostlocker_comment):
            if line.find("[op-test]") == 0:
                offset = i
                break
        # NOTE(review): the slice below is [offset:-1], so the very last
        # comment line is never parsed — confirm whether that is intended
        for key in args_dict.keys():
            for l in hostlocker_comment[offset:-1]:
                line = l.strip()
                if line.startswith(key + ":"):
                    value = re.sub(key + ':', "", line).strip()
                    args_dict[key] = value
                    if "password" in key:
                        log_value = "<hidden>"
                    else:
                        log_value = value
                    log.debug("Hostlocker config: {} = {}".format(key, log_value))
                    break
        uri = "/lock/"
        payload = {'host' : self.conf.args.hostlocker,
                   'user' : self.conf.args.hostlocker_user,
                   'expiry_time' : self.conf.args.hostlocker_locktime}
        try:
            r = self.conf.util_server.post(uri=uri, data=payload)
        except Exception as e:
            raise HostLocker(message="OpTestSystem HostLocker unable to "
                             "acquire lock from HostLocker, see Exception={}".format(e))
        if r.status_code == requests.codes.locked: # 423
            rc, lockers = self.hostlocker_locked()
            # MESSAGE 'unable to lock' string must be kept in same line to be filtered
            raise HostLocker(message="OpTestSystem HostLocker unable to lock"
                             " Host '{}' is locked by '{}', please unlock and retry"
                             .format(self.conf.args.hostlocker, lockers))
        elif r.status_code == requests.codes.conflict: # 409
            raise HostLocker(message="OpTestSystem HostLocker Host '{}' is "
                             "unusable, please pick another host and retry"
                             .format(self.conf.args.hostlocker))
        elif r.status_code == requests.codes.bad_request: # 400
            raise HostLocker(message=r.text)
        elif r.status_code == requests.codes.not_found: # 404
            msg = ("OpTestSystem HostLocker unknown hostlocker_user '{}', "
                   "you need to have logged in to HostLocker via the web"
                   " at least once prior, please log in to HostLocker via the web"
                   " and then retry or check configuration."
                   .format(self.conf.args.hostlocker_user))
            raise HostLocker(message=msg)
        log.info("OpTestSystem HostLocker reserved host '{}' "
                 "hostlocker-user '{}'".format(self.conf.args.hostlocker,
                                               self.conf.args.hostlocker_user))
    def hostlocker_locked(self):
        """Check whether WE currently hold the HostLocker lock.

        :returns: tuple (rc, lockers) where rc is 1 when the user holds
            the lock (or the state could not be confirmed) and 0 when the
            user verifiably does not; lockers is the (possibly partial)
            list of lock holders.  NOTE(review): the final except path
            falls off the end and implicitly returns None — callers
            unpacking the tuple would fail there; confirm intent.
        :raises HostLocker: when the host itself is unknown.
        """
        # if called during signal handler cleanup
        # we may not have user yet
        if self.conf.args.hostlocker_user is None:
            return 1, []
        if self.conf.util_server is None:
            self.setup()
        uri = "/host/{}/".format(self.conf.args.hostlocker)
        try:
            r = self.conf.util_server.get(uri=uri)
        except HTTPCheck as check:
            log.debug("HTTPCheck Exception={} check.message={}".format(check, check.message))
            raise HostLocker(message="OpTestSystem HostLocker unknown host '{}'"
                             .format(self.conf.args.hostlocker))
        except Exception as e:
            log.debug("hostlocker_locked did NOT get any host details for '{}', "
                      "please manually verify and release, Exception={}"
                      .format(self.conf.args.hostlocker, e))
            return 1, [] # if unable to confirm, flag it
        uri = "/lock/"
        payload = {"host" : self.conf.args.hostlocker}
        try:
            r = self.conf.util_server.get(uri=uri,
                                          params=payload)
            locks = r.json()
        except Exception as e:
            log.debug("hostlocker_locked did NOT get any lock details for "
                      "host '{}', please manually verify and release, Exception={}"
                      .format(self.conf.args.hostlocker, e))
            return 1, [] # if unable to confirm, flag it
        lockers = []
        log.debug("locks JSON: {}".format(locks))
        try:
            for l in locks:
                lockers.append(str(l.get('locker')))
                if l.get('locker') == self.conf.args.hostlocker_user:
                    # lockers list is incomplete but only if we don't
                    # find hostlocker_user do we care
                    return 1, lockers
            return 0, lockers
        except Exception as e:
            log.debug("LOCKERS lockers={} Exception={}".format(lockers, e))
    def hostlocker_unlock(self):
        """Release this user's HostLocker lock on the configured host.

        Queries the lock records for (host, user) and deletes the matching
        lock.  Best-effort: most failure paths only log and return so that
        cleanup can continue.

        :raises HostLocker: on an HTTPCheck failure while querying or
            deleting the lock.
        """
        if self.conf.util_server is None:
            self.setup()
        uri = "/lock/"
        payload = {"host" : self.conf.args.hostlocker,
                   "user" : self.conf.args.hostlocker_user}
        try:
            r = self.conf.util_server.get(uri=uri,
                                          params=payload)
        except HTTPCheck as check:
            log.debug("HTTPCheck Exception={} check.message={}".format(check, check.message))
            msg = ("OpTestSystem HostLocker unexpected case hostlocker-user '{}', "
                   "you would need to have logged in to HostLocker via the web"
                   " at least once prior, manually verify and release, see Exception={}"
                   .format(self.conf.args.hostlocker_user, check))
            raise HostLocker(message=msg)
        except Exception as e:
            log.info("OpTestSystem HostLocker hostlocker_unlock tried to "
                     "unlock host '{}' hostlocker-user '{}' but encountered a problem, "
                     "manually verify and release, see Exception={}"
                     .format(self.conf.args.hostlocker,
                             self.conf.args.hostlocker_user, e))
            return
        locks = r.json()
        if len(locks) == 0:
            # Host is not locked, so just return
            log.debug("hostlocker_unlock tried to delete a lock but it was "
                      "NOT there, see details={}".format(locks))
            return
        if len(locks) > 1:
            # this may never happen, but it came up in debug
            # with hardcoded changes to check error paths
            log.warning("hostlocker_unlock tried to delete lock for "
                        "host '{}' but we found multiple locks and we should "
                        "have only received hostlocker-user '{}' we queried "
                        "for, please manually verify and release"
                        .format(self.conf.args.hostlocker,
                                self.conf.args.hostlocker_user))
            return
        # NOTE(review): a locker mismatch below is only logged — the delete
        # still proceeds; confirm that is the intended behavior
        if locks[0].get('locker') != self.conf.args.hostlocker_user:
            log.debug("hostlocker_unlock found that the locker did not "
                      "match the hostlocker_user '{}'".format(self.conf.args.hostlocker_user))
        uri = "/lock/{}".format(locks[0].get('id'))
        try:
            r = self.conf.util_server.delete(uri=uri)
        except HTTPCheck as check:
            log.debug("HTTPCheck hostlocker_unlock tried to delete a lock"
                      " but encountered an HTTP problem, "
                      "Exception={} check.message={}".format(check, check.message))
            raise HostLocker(message="hostlocker_unlock tried to delete a lock "
                             "but it was NOT there")
        except Exception as e:
            log.debug("hostlocker_unlock tried to delete a lock but it was "
                      "NOT there, see Exception={}".format(e))
##
# @brief Pings 2 packages to system under test
#
# @param i_ip @type string: ip address of system under test
# @param i_try @type int: number of times the system is
# pinged before returning Failed
#
# @return BMC_CONST.PING_SUCCESS when PASSED or
# raise OpTestError when FAILED
#
def PingFunc(self, i_ip, i_try=1, totalSleepTime=BMC_CONST.HOST_BRINGUP_TIME):
if i_ip == None:
raise ParameterCheck(message="PingFunc has i_ip set to 'None', "
"check your configuration and setup")
sleepTime = 0;
while(i_try != 0):
p1 = subprocess.Popen(["ping", "-c 2", str(i_ip)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout_value, stderr_value = p1.communicate()
if(stdout_value.__contains__("2 received")):
log.debug(i_ip + " is pinging")
return BMC_CONST.PING_SUCCESS
else:
# need to print message otherwise no interactive feedback
# and user left guessing something is not happening
log.info("PingFunc is not pinging '{}', waited {} of {}, {} "
"more loop cycles remaining, you may start to check "
"your configuration for bmc_ip or host_ip"
.format(i_ip, sleepTime, totalSleepTime, i_try))
log.debug("%s is not pinging (Waited %d of %d, %d more "
"loop cycles remaining)" % (i_ip, sleepTime,
totalSleepTime, i_try))
time.sleep(1)
sleepTime += 1
if (sleepTime == totalSleepTime):
i_try -= 1
sleepTime = 0
log.error("'{}' is not pinging and we tried many times, "
"check your configuration and setup.".format(i_ip))
raise ParameterCheck(message="PingFunc fails to ping '{}', "
"check your configuration and setup and manually "
"verify and release any reservations".format(i_ip))
def copyFilesToDest(self, hostfile, destid, destName, destPath, passwd):
arglist = (
"sshpass",
"-p", passwd,
"/usr/bin/scp",
"-o","UserKnownHostsFile=/dev/null",
"-o","StrictHostKeyChecking=no",
hostfile,
"{}@{}:{}".format(destid,destName,destPath))
log.debug(' '.join(arglist))
subprocess.check_call(arglist)
def copyFilesFromDest(self, destid, destName, destPath, passwd, sourcepath):
arglist = (
"sshpass",
"-p", passwd,
"/usr/bin/scp",
"-r",
"-o","UserKnownHostsFile=/dev/null",
"-o","StrictHostKeyChecking=no",
"{}@{}:{}".format(destid,destName,destPath),
sourcepath)
log.debug(' '.join(arglist))
subprocess.check_output(arglist)
# It waits for a ping to fail, Ex: After a BMC/FSP reboot
def ping_fail_check(self, i_ip):
cmd = "ping -c 1 " + i_ip + " 1> /dev/null; echo $?"
count = 0
while count < 500:
output = commands.getstatusoutput(cmd)
if output[1] != '0':
log.debug("IP %s Comes down" % i_ip)
break
count = count + 1
time.sleep(2)
else:
log.debug("IP %s keeps on pinging up" % i_ip)
return False
return True
def build_prompt(self, prompt=None):
if prompt:
built_prompt = prompt
else:
built_prompt = "\[console-expect\]#"
return built_prompt
def clear_state(self, track_obj):
track_obj.PS1_set = 0
track_obj.SUDO_set = 0
track_obj.LOGIN_set = 0
def clear_system_state(self, system_obj):
# clears attributes of the system object
# called when OpTestSystem transitions states
# unique from when track_obj's need clearing
if system_obj.cronus_capable():
system_obj.conf.cronus.env_ready = False
system_obj.conf.cronus.cronus_ready = False
    def try_recover(self, term_obj, counter=3):
        """Attempt to recover a wedged console by reconnect + Ctrl-C.

        Up to *counter* times: reconnect, send Ctrl-C, and expect either a
        shell/Petitboot/login prompt.  Returns silently on success.

        :raises RecoverFailed: when no known prompt appears after all
            attempts.
        """
        # callers beware that the connect can affect previous states and objects
        for i in range(counter):
            log.warning("OpTestSystem detected something, working on recovery")
            pty = term_obj.connect()
            log.debug("USING TR Expect Buffer ID={}".format(hex(id(pty))))
            pty.sendcontrol('c')
            time.sleep(1)
            try_rc = pty.expect([".*#", "Petitboot", "login: ", pexpect.TIMEOUT, pexpect.EOF], timeout=10)
            log.debug("try_rc={}".format(try_rc))
            log.debug("pty.before={}".format(pty.before))
            log.debug("pty.after={}".format(pty.after))
            if try_rc in [0,1,2]:
                # matched one of the known prompts: recovered
                log.warning("OpTestSystem recovered from temporary issue, continuing")
                return
            else:
                log.warning("OpTestSystem Unable to recover from temporary issue, calling close and continuing")
                term_obj.close()
        log.warning("OpTestSystem Unable to recover to known state, raised Exception RecoverFailed but continuing")
        raise RecoverFailed(before=pty.before, after=pty.after, msg='Unable to recover to known state, retry')
    def try_sendcontrol(self, term_obj, command, counter=3):
        """Recover a console that timed out while running *command*.

        Sends Ctrl-C and looks for a generic '#' prompt.  If the prompt
        comes back, returns (partial_output_lines, echo_rc=1) so the
        caller can triage; otherwise escalates through try_recover and
        raises CommandFailed (the command's output is lost either way).
        """
        pty = term_obj.get_console()
        log.debug("USING TSC Expect Buffer ID={}".format(hex(id(pty))))
        # snapshot whatever output we already have before poking the console
        res = pty.before
        log.warning("OpTestSystem detected something, working on recovery")
        pty.sendcontrol('c')
        time.sleep(1)
        try_list = []
        rc = pty.expect([".*#", pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if rc != 0:
            term_obj.close()
            self.try_recover(term_obj, counter)
            # if we get back here we still fail but have a working prompt to give back
            log.warning("OpTestSystem recovered from temporary issue, but the command output is unavailable,"
                        " raised Exception CommandFailed but continuing")
            raise CommandFailed(command, "run_command TIMEOUT in try_sendcontrol, we recovered the prompt,"
                                " but the command output is unavailable", -1)
        else:
            # may have lost prompt
            log.warning('OpTestSystem recovered from a temporary issue, continuing')
            try_list = res.splitlines() # give back what we do have for triage
            echo_rc = 1
        return try_list, echo_rc
    def get_versions(self, term_obj, pty, expect_prompt):
        """Capture firmware versions once and cache them on the config.

        Runs lsprop on the device-tree firmware-versions node and stores
        the output lines in term_obj.system.conf.firmware_versions.
        No-op if the cache is already populated.  Best-effort: failures
        only log, they never raise.
        """
        # sentinel strings meaning the device-tree node isn't there
        check_list = ["No such file or directory",
                      "not found",
                      ]
        if term_obj.system.conf.firmware_versions is None:
            # "date" is a throwaway command, presumably to kick/flush the
            # console buffer before the real query — TODO confirm
            pty.sendline("date")
            rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
            pty.sendline("lsprop /sys/firmware/devicetree/base/ibm,firmware-versions")
            time.sleep(1)
            rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
            if rc == 0:
                term_obj.system.conf.firmware_versions = \
                    pty.before.replace("\r\r\n","\n").splitlines()
                # any sentinel in the output means lsprop/path was missing
                matching = [xs for xs in check_list if any(xs in xa for xa in term_obj.system.conf.firmware_versions)]
                if len(matching):
                    term_obj.system.conf.firmware_versions = ["Firmware Versions Unavailable"]
            else:
                log.debug("OpTestSystem unable to dump firmware versions tested")
def set_PS1(self, term_obj, pty, prompt):
# prompt comes in as the string desired, needs to be pre-built
# on success caller is returned 1, otherwise exception thrown
# order of execution and commands are sensitive here to provide reliability
if term_obj.setup_term_disable == 1:
return -1
expect_prompt = prompt + "$"
pty.sendline("which bash && exec bash --norc --noprofile")
time.sleep(0.2)
pty.sendline('PS1=' + prompt)
time.sleep(0.2)
rc = pty.expect([prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
pty.sendline("which stty && stty cols 300;which stty && stty rows 30")
time.sleep(0.2)
rc = pty.expect([prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
# mambo echos twice so turn off
if term_obj.system.disable_stty_echo():
pty.sendline("which stty && stty -echo")
time.sleep(0.2)
rc = pty.expect([prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
pty.sendline("export LANG=C")
rc = pty.expect([prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
time.sleep(0.2)
pty.sendline() # needed to sync buffers later on
time.sleep(0.2) # pause for first time setup, buffers you know, more sensitive in petitboot shell, pexpect or console buffer not sure
rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
if rc == 0:
log.debug("Shell prompt changed")
self.get_versions(term_obj, pty, expect_prompt)
return 1 # caller needs to save state
else: # we don't seem to have anything so try to get something
term_obj.close()
try:
# special case to allow calls back to connect which is where we probably came from
self.orig_system_setup_term = term_obj.get_system_setup_term()
self.orig_block_setup_term = term_obj.get_block_setup_term()
term_obj.set_system_setup_term(1) # block so the new connect will not try to come back here
term_obj.set_block_setup_term(1) # block so the new connect will not try to come back here
self.try_recover(term_obj, counter=3) # if try_recover bails we leave things blocked, they'll get reset
# if we get back here we have a new prompt and unknown console
# in future if state can change or block flags can change this needs revisted
pty = term_obj.connect() # need a new pty since we recovered
term_obj.set_system_setup_term = self.orig_system_setup_term
term_obj.set_block_setup_term = self.orig_block_setup_term
pty.sendline("which bash && exec bash --norc --noprofile")
time.sleep(0.2)
pty.sendline('PS1=' + prompt)
time.sleep(0.2)
pty.sendline("which stty && stty cols 300;which stty && stty rows 30")
time.sleep(0.2)
pty.sendline("export LANG=C")
time.sleep(0.2)
pty.sendline() # needed to sync buffers later on
time.sleep(0.2) # pause for first time setup, buffers you know, more sensitive in petitboot shell, pexpect or console buffer not sure
rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
if rc == 0:
log.debug("Shell prompt changed")
self.get_versions(term_obj, pty, expect_prompt)
return 1 # caller needs to save state
else:
if term_obj.setup_term_quiet == 0:
log.warning("OpTestSystem Change of shell prompt not completed after last final retry,"
" probably a connection issue, raised Exception ConsoleSettings but continuing")
raise ConsoleSettings(before=pty.before, after=pty.after,
msg="Change of shell prompt not completed after last final retry, probably a connection issue, retry")
else:
term_obj.setup_term_disable = 1
return -1
except RecoverFailed as e:
if term_obj.setup_term_quiet == 0:
log.warning("OpTestSystem Change of shell prompt not completed after last retry,"
" probably a connection issue, raised Exception ConsoleSettings but continuing")
raise ConsoleSettings(before=pty.before, after=pty.after,
msg="Change of shell prompt not completed after last retry, probably a connection issue, retry")
else:
term_obj.setup_term_disable = 1
return -1
    def get_login(self, host, term_obj, pty, prompt):
        """Log in on the console and set the custom PS1 prompt.

        *prompt* comes in as the string desired, needs to be pre-built.
        Tries the login/password sequence once, and on timeout/EOF makes
        one full second attempt before giving up.

        :returns: (PS1_set, LOGIN_set) state flags for the caller to
            save; (-1, -1) when terminal setup is disabled/quiet.
        :raises ConsoleSettings: on credential/connection problems when
            not in quiet mode.
        """
        if term_obj.setup_term_disable == 1:
            return -1, -1
        my_user = host.username()
        my_pwd = host.password()
        # empty sendline kicks the console into (re)printing the login prompt
        pty.sendline()
        rc = pty.expect(['login: ', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if rc == 0:
            pty.sendline(my_user)
            time.sleep(0.1)
            rc = pty.expect([r"[Pp]assword:", pexpect.TIMEOUT, pexpect.EOF], timeout=10)
            if rc == 0:
                pty.sendline(my_pwd)
                time.sleep(0.5)
                # indices 1-3 are shell prompts (success); 0 means login
                # re-appeared (bad credentials), 4 is Petitboot
                rc = pty.expect(['login: $', ".*#$", ".*# $", ".*\$", 'Petitboot', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
                if rc not in [1,2,3]:
                    if term_obj.setup_term_quiet == 0:
                        log.warning("OpTestSystem Problem with the login and/or password prompt,"
                                    " raised Exception ConsoleSettings but continuing")
                        raise ConsoleSettings(before=pty.before, after=pty.after,
                                              msg="Problem with the login and/or password prompt, probably a connection or credential issue, retry")
                    else:
                        term_obj.setup_term_disable = 1
                        return -1, -1
            else:
                if term_obj.setup_term_quiet == 0:
                    log.warning("OpTestSystem Problem with the login and/or password prompt, raised Exception ConsoleSettings but continuing")
                    raise ConsoleSettings(before=pty.before, after=pty.after,
                                          msg="Problem with the login and/or password prompt, probably a connection or credential issue, retry")
                else:
                    term_obj.setup_term_disable = 1
                    return -1, -1
            my_PS1_set = self.set_PS1(term_obj, pty, prompt)
            my_LOGIN_set = 1
        else: # timeout eof
            # first probe failed: nudge the console once more and repeat
            # the identical login sequence as a second (final) attempt
            pty.sendline()
            rc = pty.expect(['login: ', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
            if rc == 0:
                pty.sendline(my_user)
                time.sleep(0.1)
                rc = pty.expect([r"[Pp]assword:", pexpect.TIMEOUT, pexpect.EOF], timeout=10)
                if rc == 0:
                    pty.sendline(my_pwd)
                    time.sleep(0.5)
                    rc = pty.expect(['login: $', ".*#$", ".*# $", ".*\$", 'Petitboot', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
                    if rc not in [1,2,3]:
                        if term_obj.setup_term_quiet == 0:
                            log.warning("OpTestSystem Problem with the login and/or password prompt,"
                                        " raised Exception ConsoleSettings but continuing")
                            raise ConsoleSettings(before=pty.before, after=pty.after,
                                                  msg="Problem with the login and/or password prompt, probably a connection or credential issue, retry")
                        else:
                            term_obj.setup_term_disable = 1
                            return -1, -1
                else:
                    if term_obj.setup_term_quiet == 0:
                        log.warning("OpTestSystem Problem with the login and/or password prompt after a secondary connection issue,"
                                    " raised Exception ConsoleSettings but continuing")
                        raise ConsoleSettings(before=pty.before, after=pty.after,
                                              msg="Problem with the login and/or password prompt after a secondary connection or credential issue, retry")
                    else:
                        term_obj.setup_term_disable = 1
                        return -1, -1
                my_PS1_set = self.set_PS1(term_obj, pty, prompt)
                my_LOGIN_set = 1
            else: # timeout eof
                if term_obj.setup_term_quiet == 0:
                    log.warning("OpTestSystem Problem with the login and/or password prompt after a previous connection issue,"
                                " raised Exception ConsoleSettings but continuing")
                    raise ConsoleSettings(before=pty.before, after=pty.after,
                                          msg="Problem with the login and/or password prompt last try, probably a connection or credential issue, retry")
                else:
                    term_obj.setup_term_disable = 1
                    return -1, -1
        return my_PS1_set, my_LOGIN_set # caller needs to save state
    def check_root(self, pty, prompt):
        """Best-effort check that the console shell is running as root.

        Runs ``whoami`` then ``echo $?`` and only trusts the whoami
        output when its exit status was 0.

        :returns: True when whoami reported root; False otherwise
            (including when verification itself was inconclusive).

        NOTE(review): if parsing the whoami output raises, ``whoami``
        stays unbound and the later ``"root" in whoami`` would raise
        NameError when echo_rc == 0 — confirm this path can't happen.
        """
        # we do the best we can to verify, but if not oh well
        expect_prompt = prompt + "$"
        pty.sendline("date") # buffer kicker needed
        pty.sendline("which whoami && whoami")
        time.sleep(2)
        rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        log.debug("check_root rc={}".format(rc))
        log.debug("check_root before={}".format(pty.before))
        log.debug("check_root after={}".format(pty.after))
        if rc == 0:
            before = pty.before.replace("\r\r\n", "\n")
            try:
                # last line of the buffer should be the whoami result
                whoami = before.splitlines()[-1]
                log.debug("check_root whoami={}".format(whoami))
            except Exception:
                pass
            pty.sendline("echo $?")
            time.sleep(1)
            rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
            log.debug("check_root 2 rc={}".format(rc))
            log.debug("check_root 2 before={}".format(pty.before))
            log.debug("check_root 2 after={}".format(pty.after))
            before = pty.before.replace("\r\r\n", "\n")
            if rc == 0:
                try:
                    echo_rc = int(before.splitlines()[-1])
                    log.debug("check_root echo_rc={}".format(echo_rc))
                except Exception as e:
                    echo_rc = -1
                if echo_rc == 0:
                    # Owing to hscroot being user name for HMC
                    if "root" in whoami:
                        log.debug("OpTestSystem now running as root")
                        return True
                    else:
                        log.warning("OpTestSystem running as \'{}\' not root".format(whoami))
                else:
                    log.debug("OpTestSystem should be running as root, unable to verify")
        return False
    def get_sudo(self, host, term_obj, pty, prompt):
        """Elevate the console shell to root via ``sudo -s``.

        Preconditions: PS1/expect_prompt already set and the session is
        already logged in.  *prompt* comes in as the string desired,
        needs to be pre-built.

        :returns: (PS1_set, SUDO_set) flags for the caller to save;
            (1, 1) immediately when already root; (-1, -1) when terminal
            setup is disabled/quiet.
        :raises ConsoleSettings: when root access can't be set up and
            quiet mode is off.
        """
        if term_obj.setup_term_disable == 1:
            return -1, -1
        pty.sendline()
        if self.check_root(pty, prompt) is True:
            # If we logged in as root or we're in the Petitboot shell we may
            # already be root.
            return 1, 1
        my_pwd = host.password()
        pty.sendline("which sudo && sudo -s")
        rc = pty.expect([r"[Pp]assword for", pexpect.TIMEOUT, pexpect.EOF], timeout=5)
        # we must not add # prompt to the expect, we get false hit when complicated user login prompt and control chars,
        # we need to cleanly ignore everything but password and then we blindly next do PS1 setup, ignoring who knows what
        if rc == 0:
            pty.sendline(my_pwd)
            time.sleep(0.5) # delays for next call
            my_PS1_set = self.set_PS1(term_obj, pty, prompt)
            self.check_root(pty, prompt)
            my_SUDO_set = 1
            return my_PS1_set, my_SUDO_set # caller needs to save state
        elif rc == 1: # we must have been root, we first filter out password prompt above
            my_PS1_set = self.set_PS1(term_obj, pty, prompt)
            self.check_root(pty, prompt)
            my_SUDO_set = 1
            return my_PS1_set, my_SUDO_set # caller needs to save state
        else:
            if term_obj.setup_term_quiet == 0:
                log.warning("OpTestSystem Unable to setup root access, probably a connection issue,"
                            " raised Exception ConsoleSettings but continuing")
                raise ConsoleSettings(before=pty.before, after=pty.after,
                                      msg='Unable to setup root access, probably a connection issue, retry')
            else:
                term_obj.setup_term_disable = 1
                return -1, -1
    def setup_term(self, system, pty, ssh_obj=None, block=0):
        """Identify the console's current state and drive login/PS1/sudo setup.

        Probes the console for a login prompt, an existing shell prompt,
        or Petitboot, then delegates to get_login/set_PS1/get_sudo as
        appropriate, saving the resulting flags on the tracking object.
        """
        # Login and/or setup any terminal
        # pty needs to be the opexpect object
        # This will behave correctly even if already logged in
        # Petitboot Menu is special case to NOT participate in this setup, conditionally checks if system state is PETITBOOT and skips
        # CANNOT CALL GET_CONSOLE OR CONNECT from here since get_console and connect call into setup_term
        if block == 1:
            return
        if ssh_obj is not None:
            # ssh connections track their own flags and act as their own terminal
            track_obj = ssh_obj
            term_obj = ssh_obj
            system_obj = ssh_obj.system
        else:
            track_obj = system
            term_obj = system.console
            system_obj = system
        if system_obj.state == 3: # OpSystemState.PETITBOOT
            return
        # rc: 0=login prompt, 1-4=some shell prompt, 5=Petitboot, 6=TIMEOUT, 7=EOF
        rc = pty.expect(['login: $', ".*#$", ".*# $", ".*\$", "~>", 'Petitboot', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if rc == 0:
            track_obj.PS1_set, track_obj.LOGIN_set = self.get_login(system_obj.cv_HOST, term_obj, pty, self.build_prompt(system_obj.prompt))
            track_obj.PS1_set, track_obj.SUDO_set = self.get_sudo(system_obj.cv_HOST, term_obj, pty, self.build_prompt(system_obj.prompt))
            return
        if rc in [1,2,3,4]:
            track_obj.PS1_set = self.set_PS1(term_obj, pty, self.build_prompt(system_obj.prompt))
            track_obj.LOGIN_set = 1 # ssh port 22 can get in which uses sshpass or Petitboot, do this after set_PS1 to make sure we have something
            track_obj.PS1_set, track_obj.SUDO_set = self.get_sudo(system_obj.cv_HOST, term_obj, pty, self.build_prompt(system_obj.prompt))
            return
        if rc == 5:
            return # Petitboot so nothing to do
        if rc == 7: # EOF
            term_obj.close() # mark as bad
            raise ConsoleSettings(before=pty.before, after=pty.after,
                                  msg="Getting login and sudo not successful, probably connection or credential issue, retry")
        # now just timeout
        if system_obj.state == 6: # OpSystemState.OS
            # The login prompt doesn't respond properly to Ctrl-L
            pty.sendline()
        else:
            pty.sendcontrol('l')
        # Ctrl-L may cause a esc[J (erase) character to appear in the buffer.
        # Include this in the patterns that expect $ (end of line)
        rc = pty.expect(['login: (\x1b\[J)*$', ".*#(\x1b\[J)*$", ".*# (\x1b\[J)*$", ".*\$(\x1b\[J)*", "~>(\x1b\[J)", 'Petitboot', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if rc == 0:
            track_obj.PS1_set, track_obj.LOGIN_set = self.get_login(system_obj.cv_HOST, term_obj, pty, self.build_prompt(system_obj.prompt))
            track_obj.PS1_set, track_obj.SUDO_set = self.get_sudo(system_obj.cv_HOST, term_obj, pty, self.build_prompt(system_obj.prompt))
            return
        if rc in [1,2,3,4]:
            track_obj.LOGIN_set = track_obj.PS1_set = self.set_PS1(term_obj, pty, self.build_prompt(system_obj.prompt))
            track_obj.PS1_set, track_obj.SUDO_set = self.get_sudo(system_obj.cv_HOST, term_obj, pty, self.build_prompt(system_obj.prompt))
            return
        if rc == 5:
            return # Petitboot do nothing
        else:
            # rc is TIMEOUT or EOF on the second probe
            if term_obj.setup_term_quiet == 0:
                term_obj.close() # mark as bad
                raise ConsoleSettings(before=pty.before, after=pty.after,
                                      msg="Getting login and sudo not successful, probably connection issue, retry")
            else:
                # this case happens when detect_target sets the quiet flag and we are timing out
                log.info("OpTestSystem detected something, checking if your system is powered off, will retry")
    def set_env(self, term_obj, pty):
        """Re-establish bash + custom PS1 after an env-losing command (e.g. sudo -s).

        :returns: the console output lines produced while switching
            shells, with the trailing matched prompt removed.
        :raises ConsoleSettings: when the new prompt never shows up.
        """
        set_env_list = []
        pty.sendline("which bash && exec bash --norc --noprofile")
        expect_prompt = self.build_prompt(term_obj.prompt) + "$"
        pty.sendline('PS1=' + self.build_prompt(term_obj.prompt))
        rc = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if rc == 0:
            combo_io = (pty.before + pty.after).replace("\r\r\n", "\n").lstrip()
            set_env_list += combo_io.splitlines()
            # remove the expect prompt since matched generic #
            del set_env_list[-1]
            return set_env_list
        else:
            raise ConsoleSettings(before=pty.before, after=pty.after,
                                  msg="Setting environment for sudo command not successful, probably connection issue, retry")
    def retry_password(self, term_obj, pty, command):
        """Re-send the sudo password after a 'try again.' rejection.

        Loops re-sending the host password, collecting output.  Relies
        on the module-level ``sudo_responses`` list to detect failure
        text echoed back by sudo.

        :returns: (output_lines, echo_rc) where echo_rc 0 means the
            password was finally accepted, 1 means it was not.
        :raises CommandFailed: on expect TIMEOUT.
        :raises ConsoleSettings: on EOF (session dropped).

        NOTE(review): the ``a == 2`` break caps this at two attempts even
        though the loop condition allows three — confirm intent.
        """
        retry_list_output = []
        a = 0
        while a < 3:
            a += 1
            pty.sendline(term_obj.system.cv_HOST.password())
            rc = pty.expect([".*#", "try again.", pexpect.TIMEOUT, pexpect.EOF])
            if (rc == 0) or (rc == 1):
                combo_io = pty.before + pty.after
                retry_list_output += combo_io.replace("\r\r\n","\n").splitlines()
                # sudo failure text anywhere in the match means rejection,
                # even if we matched the generic '#' prompt
                matching = [xs for xs in sudo_responses if any(xs in xa for xa in pty.after.splitlines())]
                if len(matching):
                    echo_rc = 1
                    rc = -1 # use to flag the failure next
            if rc == 0:
                # password accepted: restore bash/PS1 environment
                retry_list_output += self.set_env(term_obj, pty)
                echo_rc = 0
                break
            elif a == 2:
                echo_rc = 1
                break
            elif (rc == 2):
                raise CommandFailed(command, 'Retry Password TIMEOUT ' + ''.join(retry_list_output), -1)
            elif (rc == 3):
                term_obj.close()
                raise ConsoleSettings(before=pty.before, after=pty.after,
                                      msg='SSH session/console issue, probably connection issue, retry')
        return retry_list_output, echo_rc
    def handle_password(self, term_obj, pty, command):
        """Answer a sudo password prompt raised while running *command*.

        Used by run_command for 'sudo -s' or the like.  Sends the host
        password, detects sudo rejection text (module-level
        ``sudo_responses``), and retries via retry_password when needed.

        :returns: (output_lines, echo_rc) — echo_rc 0 on success, 1 when
            the password was rejected.
        :raises SSHSessionDisconnected: on console EOF.
        :raises CommandFailed: on TIMEOUT.
        """
        # this is for run_command 'sudo -s' or the like
        handle_list_output = []
        failure_list_output = []
        # keep the output that led up to the password prompt
        pre_combo_io = pty.before + pty.after
        pty.sendline(term_obj.system.cv_HOST.password())
        rc = pty.expect([".*#$", "try again.", pexpect.TIMEOUT, pexpect.EOF])
        if (rc == 0) or (rc == 1):
            combo_io = pre_combo_io + pty.before + pty.after
            handle_list_output += combo_io.replace("\r\r\n","\n").splitlines()
            matching = [xs for xs in sudo_responses if any(xs in xa for xa in pty.after.splitlines())]
            if len(matching):
                # remove the expect prompt since matched generic #
                del handle_list_output[-1]
                echo_rc = 1
                rc = -1 # use this to flag the failure next
        if rc == 0:
            # with unknown prompts and unknown environment unable to capture echo $?
            echo_rc = 0
            self.set_env(term_obj, pty)
            list_output = handle_list_output
        elif rc == 1:
            retry_list_output, echo_rc = self.retry_password(term_obj, pty, command)
            list_output = (handle_list_output + retry_list_output)
        else:
            if (rc == 2) or (rc == 3):
                failure_list_output += ['Password Problem/TIMEOUT ']
                failure_list_output += pre_combo_io.replace("\r\r\n","\n").splitlines()
            # timeout path needs access to output
            # handle_list_output empty if timeout or EOF
            failure_list_output += handle_list_output
            if (rc == 3):
                term_obj.close()
                raise SSHSessionDisconnected("SSH session/console exited early!")
            else:
                raise CommandFailed(command, ''.join(failure_list_output), -1)
        return list_output, echo_rc
def run_command(self, term_obj, command, timeout=60, retry=0):
# retry=0 will perform one pass
counter = 0
while counter <= retry:
try:
output = self.try_command(term_obj, command, timeout)
return output
except CommandFailed as cf:
log.debug("CommandFailed cf={}".format(cf))
if counter == retry:
raise cf
else:
counter += 1
log.debug("run_command retry sleeping 2 seconds, before retry")
time.sleep(2)
log.debug("Retry command={}".format(command))
log.info("\n \nOpTestSystem detected a command issue, we will retry the command,"
" this will be retry \"{:02}\" of a total of \"{:02}\"\n \n".format(counter, retry))
    def try_command(self, term_obj, command, timeout=60):
        """Single attempt at running *command* and capturing rc via echo $?.

        Handles three special paths: a sudo password prompt (delegated
        to handle_password), a 'sudo -s' env loss (restored via
        set_env), and a timeout (recovered via try_sendcontrol).

        :returns: the command's output lines.
        :raises CommandFailed: when the command's echoed exit status is
            non-zero, unobtainable, or the console dies.
        """
        running_sudo_s = False
        extra_sudo_output = False
        expect_prompt = self.build_prompt(term_obj.prompt) + "$"
        pty = term_obj.get_console() # if previous caller environment leaves buffer hung can show up here, e.g. PS2 prompt
        pty.sendline(command)
        if command == 'sudo -s':
            running_sudo_s = True
            # special case to catch loss of env
            rc = pty.expect([".*#", r"[Pp]assword for", pexpect.TIMEOUT, pexpect.EOF], timeout=timeout)
        else:
            rc = pty.expect([expect_prompt, r"[Pp]assword for", pexpect.TIMEOUT, pexpect.EOF], timeout=timeout)
        output_list = []
        output_list += pty.before.replace("\r\r\n","\n").splitlines()
        try:
            del output_list[:1] # remove command from the list
        except Exception as e:
            pass # nothing there
        # if we are running 'sudo -s' as root then catch on generic # prompt, restore env
        if running_sudo_s and (rc == 0):
            extra_sudo_output = True
            set_env_list = self.set_env(term_obj, pty)
        if rc == 0:
            if extra_sudo_output:
                output_list += set_env_list
            # fetch the command's exit status via the shell
            pty.sendline("echo $?")
            rc2 = pty.expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=timeout)
            if rc2 == 0:
                echo_output = []
                echo_output += pty.before.replace("\r\r\n","\n").splitlines()
                try:
                    del echo_output[:1] # remove command from the list
                except Exception as e:
                    pass # nothing there
                try:
                    echo_rc = int(echo_output[-1])
                except Exception as e:
                    echo_rc = -1
            else:
                raise CommandFailed(command, "run_command echo TIMEOUT, the command may have been ok,"
                                    " but unable to get echo output to confirm result", -1)
        elif rc == 1:
            # sudo asked for a password
            handle_output_list, echo_rc = self.handle_password(term_obj, pty, command)
            # remove the expect prompt since matched generic #
            del handle_output_list[-1]
            output_list = handle_output_list
        elif rc == 2: # timeout
            output_list, echo_rc = self.try_sendcontrol(term_obj, command) # original raw buffer if it holds any clues
        else:
            term_obj.close()
            raise CommandFailed(command, "run_command TIMEOUT or EOF, the command timed out or something,"
                                " probably a connection issue, retry", -1)
        res = output_list
        if echo_rc != 0:
            raise CommandFailed(command, res, echo_rc)
        return res
# This command just runs and returns the output & ignores the failure
# A straight copy of what's in OpTestIPMI
def run_command_ignore_fail(self, term_obj, command, timeout=60, retry=0):
try:
output = self.run_command(term_obj, command, timeout, retry)
except CommandFailed as cf:
output = cf.output
return output
    def mambo_run_command(self, term_obj, command, timeout=60, retry=0):
        """Run a command at the Mambo simulator ('systemsim %') prompt.

        Unlike run_command there is no exit-status checking and no
        retries (*retry* is accepted for signature parity but unused).

        :returns: the command's output lines with the echoed command
            stripped.
        """
        expect_prompt = "systemsim %"
        term_obj.get_console().sendline(command)
        rc = term_obj.get_console().expect([expect_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=timeout)
        output_list = []
        output_list += term_obj.get_console().before.replace("\r\r\n","\n").splitlines()
        try:
            del output_list[:1] # remove command from the list
        except Exception as e:
            pass # nothing there
        return output_list
    def mambo_enter(self, term_obj):
        """Break into the Mambo simulator prompt with Ctrl-C.

        :raises UnexpectedCase: when the 'systemsim %' prompt does not
            appear within 10 seconds.
        """
        term_obj.get_console().sendcontrol('c')
        rc = term_obj.get_console().expect(["systemsim %", pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if rc != 0:
            raise UnexpectedCase(state="Mambo", message="We tried to send control-C"
                                 " to Mambo and we failed, probably just retry")
def mambo_exit(self, term_obj):
# this method will remove the mysim go from the output
expect_prompt = self.build_prompt(term_obj.prompt) + "$"
term_obj.get_console().sendline("mysim go")
rc = term_obj.get_console().expect(["mysim go", pexpect.TIMEOUT, pexpect.EOF], timeout=10)
output_list = []
output_list += term_obj.get_console().before.replace("\r\r\n","\n").splitlines()
try:
del output_list[:1] # remove command from the list
except Exception as e:
pass # nothing there
return output_list
    def cronus_subcommand(self, command=None, minutes=2):
        """Run a Cronus shell command locally with a polling deadline.

        Launches *command* through ``bash -c`` and polls for completion
        for up to *minutes* (1-120); the process is killed on overrun.

        :returns: the command's stdout (stderr is only logged).
        :raises UnexpectedCase: on overrun or any subprocess failure.
        """
        # OpTestCronus class calls this, so be cautious on recursive calls
        assert 0 < minutes <= 120, (
            "cronus_subcommand minutes='{}' is out of the desired range of 1-120"
            .format(minutes))
        completed = False
        try:
            p1 = subprocess.Popen(["bash", "-c", command],
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            # set the polling appropriate
            # > 5 minutes: poll every 60s; otherwise poll every second
            if minutes > 5:
                sleep_period = 60
                custom_range = minutes
            else:
                sleep_period = 1
                custom_range = minutes*60
            log.debug("cronus_subcommand sleep_period seconds='{}' number of periods to wait (custom_range)='{}'\n"
                      " Waiting for minutes='{}' which is seconds='{}')"
                      .format(sleep_period, custom_range, minutes, minutes*60))
            for t in range(custom_range):
                log.debug("polling t={}".format(t))
                time.sleep(sleep_period)
                if p1.poll() is not None:
                    log.debug("polling completed=True")
                    completed = True
                    break
            if not completed:
                log.warning("cronus_subcommand did NOT complete in '{}' minutes, rc={}".format(minutes, p1.returncode))
                p1.kill()
                log.warning("cronus_subcommand killed command='{}'".format(command))
                raise UnexpectedCase(message="Cronus issue rc={}".format(p1.returncode))
            else:
                log.debug("cronus_subcommand rc={}".format(p1.returncode))
            # communicate() drains the pipes; process already exited here
            stdout_value, stderr_value = p1.communicate()
            log.debug("command='{}' p1.returncode={}"
                      .format(command, p1.returncode))
            if p1.returncode:
                log.warning("RC={} cronus_subcommand='{}', debug log contains stdout/stderr"
                            .format(p1.returncode, command))
            log.debug("cronus_subcommand command='{}' stdout='{}' stderr='{}'"
                      .format(command, stdout_value, stderr_value))
            if stderr_value:
                # some calls get stderr which is noise
                log.debug("Unknown if this is a problem, Command '{}' stderr='{}'".format(command, stderr_value))
            return stdout_value
        except subprocess.CalledProcessError as e:
            tb = traceback.format_exc()
            log.debug("cronus_subcommand issue CalledProcessError={}, Traceback={}".format(e, tb))
            raise UnexpectedCase(message="Cronus issue rc={} output={}".format(e.returncode, e.output))
        except Exception as e:
            tb = traceback.format_exc()
            log.debug("cronus_subcommand issue Exception={}, Traceback={}".format(e, tb))
            raise UnexpectedCase(message="cronus_subcommand issue Exception={}, Traceback={}".format(e, tb))
    def cronus_run_command(self, command=None, minutes=2):
        """Run a Cronus command, lazily setting up the Cronus environment.

        Callers should assure it's not too early in system life to call:
        we need a system object plus OpTestConfiguration's env_ready and
        cronus_ready flags.

        :returns: stdout from cronus_subcommand.
        :raises UnexpectedCase: when Cronus cannot be set up.
        :raises ParameterCheck: when *command* is empty.
        """
        assert 0 < minutes <= 120, (
            "cronus_run_command minutes='{}' is out of the desired range of 1-120"
            .format(minutes))
        log.debug("env_ready={} cronus_ready={}"
                  .format(self.conf.cronus.env_ready, self.conf.cronus.cronus_ready))
        if not self.conf.cronus.env_ready or not self.conf.cronus.cronus_ready:
            log.debug("Cronus not ready, calling setup")
            self.conf.cronus.setup()
        # re-check: setup() may have failed silently
        if not self.conf.cronus.env_ready or not self.conf.cronus.cronus_ready:
            log.warning("We tried to setup Cronus, either Cronus is not installed"
                        " on your op-test box or target system is NOT supported yet"
                        " (only OpenBMC so far), "
                        "or some other system problem, checking further")
            if self.conf.cronus.cv_SYSTEM is not None:
                cronus_state = self.conf.cronus.cv_SYSTEM.get_state()
                log.warning("cronus_state={} capable={}"
                            .format(cronus_state, self.conf.cronus.capable))
                raise UnexpectedCase(state=cronus_state,
                                     message="We tried to setup Cronus and something is "
                                     "not working, check the debug log")
            else:
                log.warning("We do not have a system object yet, it "
                            "may be too early to call cronus_run_command")
                raise UnexpectedCase(message="We do not have a system "
                                     "object yet, it may be too early to call cronus_run_command")
        if not command:
            log.warning("cronus_run_command requires a command to run")
            raise ParameterCheck(message="cronus_run_command requires a command to run")
        self.conf.cronus.dump_env()
        log.debug("cronus_run_command='{}' target='{}'"
                  .format(command, self.conf.cronus.current_target))
        stdout_value = self.cronus_subcommand(command=command, minutes=minutes)
        return stdout_value
class Server(object):
'''
Generic Server Requests Session Object to abstract retry and error
handling logic. There are two common uses of the requests
session object:
1 - Single Request with no retry. Create the Server instance with
minutes set to None. This will flag the calls to cycle once and
return non-OK requests back to the caller for handling.
Special case is the login needed, that case will be caught and
login attempted and original request retried.
2 - Request calls which need to be tolerant of communication
glitches and possible server disconnections. Caller must create
the Server instance with minutes set to a value. If the caller
wants to modify the minutes it must be done on a call by call
basis (create the Server instance with a default minutes value
and if longer time needed make the change on the specific call).
Login is done for the caller, so no need to call login, just
make the GET/PUT/POST/DELETE call.
'''
def __init__(self, url=None,
base_url=None,
proxy=None,
username=None,
password=None,
verify=False,
minutes=3,
timeout=30):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
OpTestLogger.optest_logger_glob.setUpChildLogger("urllib3")
self.username = username
self.password = password
self.session = requests.Session()
if self.username is not None and self.password is not None:
self.session.auth = (self.username, self.password)
self.session.verify = verify
self.jsonHeader = {'Content-Type' : 'application/json'}
self.xAuthHeader = {}
self.timeout = timeout
self.minutes = minutes
self.session.mount('https://', HTTPAdapter(max_retries=5))
# value.max_retries for future debug if needed
# for key, value in self.session.adapters.items():
# log.debug("max_retries={}".format(value.max_retries))
if proxy:
self.session.proxies = {"http" : proxy}
else:
self.session.proxies = {}
self.base_url = url + (base_url if base_url else "")
def _url(self, suffix):
return ''.join([self.base_url, suffix])
    def login(self, username=None, password=None):
        """POST to /login directly (bypassing loop_it) and cache the session token.

        Falls back to the credentials given at construction.  On success
        the X-Auth-Token parsed from the Set-Cookie header is merged
        into jsonHeader for subsequent requests.

        :returns: the requests Response.
        :raises HTTPCheck: on any non-OK status, bad JSON status, or
            transport exception.
        """
        if username is None:
            username = self.username
        if password is None:
            password = self.password
        uri = "/login"
        payload = {"data": [username, password]}
        # make direct call to requests post, by-pass loop_it
        try:
            r = self.session.post(self._url(uri),
                                  headers=self.jsonHeader,
                                  json=payload)
            if r.status_code != requests.codes.ok:
                log.debug("Requests post problem with logging "
                          "in, r.status_code={} r.text={} r.headers={} "
                          "r.request.headers={}"
                          .format(r.status_code, r.text,
                                  r.headers, r.request.headers))
                raise HTTPCheck(message="Requests post problem logging in,"
                                " check that your credentials are properly setup,"
                                " r.status_code={} r.text={} r.headers={} "
                                " r.request.headers={} username={} password={}"
                                .format(r.status_code, r.text, r.headers,
                                        r.request.headers, username, password))
            # pull the session token out of the Set-Cookie header
            cookie = r.headers['Set-Cookie']
            match = re.search('SESSION=(\w+);', cookie)
            if match:
                self.xAuthHeader['X-Auth-Token'] = match.group(1)
                self.jsonHeader.update(self.xAuthHeader)
            json_data = json.loads(r.text)
            log.debug("r.status_code={} json_data['status']={}"
                      " r.text={} r.headers={} r.request.headers={}"
                      .format(r.status_code, json_data['status'],
                              r.text, r.headers, r.request.headers))
            if (json_data['status'] != "ok"):
                log.debug("Requests COOKIE post problem logging in,"
                          " check that your credentials are properly setup,"
                          " r.status_code={} r.text={} r.headers={} "
                          " r.request.headers={} username={} password={}"
                          .format(r.status_code, r.text, r.headers,
                                  r.request.headers, username, password))
                raise HTTPCheck(message="Requests COOKIE post problem logging in,"
                                " check that your credentials are properly setup,"
                                " r.status_code={} r.text={} r.headers={} "
                                " r.request.headers={} username={} password={}"
                                .format(r.status_code, r.text, r.headers,
                                        r.request.headers, username, password))
        except Exception as e:
            # NOTE: this also re-wraps the HTTPChecks raised above
            log.debug("Requests post problem, check that your "
                      "credentials are properly setup URL={} username={} "
                      "password={}, Exception={}"
                      .format(self._url(uri), username, password, e))
            raise HTTPCheck(message="Requests post problem, check that your "
                            "credentials are properly setup URL={} username={} "
                            "password={}, Exception={}"
                            .format(self._url(uri), username, password, e))
        return r
def logout(self, uri=None):
uri = "/logout"
payload = {"data" : []}
try:
# make direct call to requests post, by-pass loop_it
r = self.session.post(self._url(uri), json=payload)
if r.status_code != requests.codes.ok:
log.debug("Requests post problem with logging "
"out, r.status_code={} r.text={} r.headers={} "
"r.request.headers={}"
.format(r.status_code, r.text,
r.headers, r.request.headers))
return r
except Exception as e:
log.debug("Requests post problem logging out"
" URL={} Exception={}".format(self._url(uri), e))
def get(self, **kwargs):
kwargs['cmd'] = 'get'
r = self.loop_it(**kwargs)
return r
def put(self, **kwargs):
kwargs['cmd'] = 'put'
r = self.loop_it(**kwargs)
return r
def post(self, **kwargs):
kwargs['cmd'] = 'post'
r = self.loop_it(**kwargs)
return r
def delete(self, **kwargs):
kwargs['cmd'] = 'delete'
r = self.loop_it(**kwargs)
return r
def loop_it(self, **kwargs):
default_vals = {'cmd' : None, 'uri' : None, 'data' : None,
'json' : None, 'params' : None, 'minutes' : None,
'files' : None, 'stream' : False,
'verify' : False, 'headers' : None}
for key in default_vals:
if key not in kwargs.keys():
kwargs[key] = default_vals[key]
command_dict = { 'get' : self.session.get,
'put' : self.session.put,
'post' : self.session.post,
'delete' : self.session.delete,
}
if kwargs['minutes'] is not None:
loop_time = time.time() + 60*kwargs['minutes']
else:
loop_time = time.time() + 60*5 # enough time to cycle
while True:
if time.time() > loop_time:
raise HTTPCheck(message="HTTP \"{}\" problem, we timed out "
"trying URL={} PARAMS={} DATA={} JSON={} Files={}, we "
"waited {} minutes, check the debug log for more details"
.format(kwargs['cmd'], self._url(kwargs['uri']),
kwargs['params'], kwargs['data'], kwargs['json'],
kwargs['files'], kwargs['minutes']))
try:
r = command_dict[kwargs['cmd']](self._url(kwargs['uri']),
params=kwargs['params'],
data=kwargs['data'],
json=kwargs['json'],
files=kwargs['files'],
stream=kwargs['stream'],
verify=kwargs['verify'],
headers=kwargs['headers'],
timeout=self.timeout)
except Exception as e:
# caller did not want any retry so give them the exception
log.debug("loop_it Exception={}".format(e))
if kwargs['minutes'] is None:
raise e
time.sleep(5)
continue
if r.status_code == requests.codes.unauthorized: # 401
try:
log.debug("loop_it unauthorized, trying to login")
self.login()
continue
except Exception as e:
log.debug("Unauthorized login failed, Exception={}".format(e))
if kwargs['minutes'] is None:
# caller did not want retry so give them the exception
raise e
time.sleep(5)
continue
if r.status_code == requests.codes.ok:
log.debug("OpTestSystem HTTP r={} r.status_code={} r.text={}"
.format(r, r.status_code, r.text))
return r
else:
if kwargs['minutes'] is None:
# caller did not want any retry so give them what we have
log.debug("OpTestSystem HTTP (no retry) r={} r.status_code={} r.text={}"
.format(r, r.status_code, r.text))
return r
time.sleep(5)
    def close(self):
        # Shut down the underlying requests.Session, releasing its pooled
        # HTTP connections.
        self.session.close()
|
apache-2.0
|
mbedmicro/mbed
|
drivers/tests/TESTS/host_tests/usb_device_hid.py
|
13
|
24283
|
"""
mbed SDK
Copyright (c) 2019 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import functools
import time
import threading
import uuid
import sys
import mbed_host_tests
import usb.core
from usb.util import (
CTRL_IN,
CTRL_OUT,
CTRL_TYPE_STANDARD,
CTRL_TYPE_CLASS,
CTRL_RECIPIENT_DEVICE,
CTRL_RECIPIENT_INTERFACE,
DESC_TYPE_CONFIG,
build_request_type)
if sys.platform.startswith('win'):
# Use libusb0 on Windows. libusb1 implementation for Windows
# does not support all features necessary for testing.
import usb.backend.libusb0
USB_BACKEND = usb.backend.libusb0.get_backend()
else:
# Use a default backend on other platforms.
USB_BACKEND = None
try:
import hid
except ImportError:
CYTHON_HIDAPI_PRESENT = False
else:
CYTHON_HIDAPI_PRESENT = True
# USB device -- device classes
USB_CLASS_HID = 0x03
# USB device -- standard requests
USB_REQUEST_GET_DESCRIPTOR = 0x06
# USB device -- HID class requests
HID_REQUEST_GET_REPORT = 0x01
HID_REQUEST_SET_REPORT = 0x09
HID_REQUEST_GET_IDLE = 0x02
HID_REQUEST_SET_IDLE = 0x0A
HID_REQUEST_GET_PROTOCOL = 0x03
HID_REQUEST_SET_PROTOCOL = 0x0B
# USB device -- HID class descriptors
DESC_TYPE_HID_HID = 0x21
DESC_TYPE_HID_REPORT = 0x22
DESC_TYPE_HID_PHYSICAL = 0x23
# USB device -- HID class descriptor lengths
DESC_LEN_HID_HID = 0x09
# USB device -- descriptor fields offsets
DESC_OFFSET_BLENGTH = 0
DESC_OFFSET_BDESCRIPTORTYPE = 1
# USB device -- HID subclasses
HID_SUBCLASS_NONE = 0
HID_SUBCLASS_BOOT = 1
# USB device -- HID protocols
HID_PROTOCOL_NONE = 0
HID_PROTOCOL_KEYBOARD = 1
HID_PROTOCOL_MOUSE = 2
# Greentea message keys used for callbacks
MSG_KEY_DEVICE_READY = 'dev_ready'
MSG_KEY_HOST_READY = 'host_ready'
MSG_KEY_SERIAL_NUMBER = 'usb_dev_sn'
MSG_KEY_TEST_GET_DESCRIPTOR_HID = 'test_get_desc_hid'
MSG_KEY_TEST_GET_DESCRIPTOR_CFG = 'test_get_desc_cfg'
MSG_KEY_TEST_REQUESTS = 'test_requests'
MSG_KEY_TEST_RAW_IO = 'test_raw_io'
# Greentea message keys used to notify DUT of test status
MSG_KEY_TEST_CASE_FAILED = 'fail'
MSG_KEY_TEST_CASE_PASSED = 'pass'
MSG_VALUE_DUMMY = '0'
MSG_VALUE_NOT_SUPPORTED = 'not_supported'
# Constants for the tests.
KEYBOARD_IDLE_RATE_TO_SET = 0x00 # Duration = 0 (indefinite)
HID_PROTOCOL_TO_SET = 0x01 # Protocol = 1 (Report Protocol)
RAW_IO_REPS = 16 # Number of loopback test reps.
def build_get_desc_value(desc_type, desc_index):
    """Compose a wValue field: descriptor type in the high byte,
    descriptor index in the low byte (USB 2.0, Get_Descriptor)."""
    high_byte = desc_type << 8
    return high_byte | desc_index
def usb_hid_path(serial_number):
    """Return the system path of the USB HID device with serial_number.

    Returns None when the hidapi bindings are unavailable or when no
    enumerated HID device reports a matching serial number.
    """
    if not CYTHON_HIDAPI_PRESENT:
        return None
    matching = (info['path'] for info in hid.enumerate()  # pylint: disable=no-member
                if info.get('serial_number') == serial_number)
    return next(matching, None)
def get_descriptor_types(desc):
    """Return a list of all bDescriptorType values found in desc.

    desc is expected to be a sequence of bytes, i.e. array.array('B')
    returned from usb.core.

    From the USB 2.0 spec, paragraph 9.5:
    Each descriptor begins with a byte-wide field that contains the total
    number of bytes in the descriptor followed by a byte-wide field that
    identifies the descriptor type.
    """
    tmp_desc = desc[DESC_OFFSET_BLENGTH:]
    desc_types = []
    while True:
        try:
            bLength = tmp_desc[DESC_OFFSET_BLENGTH]  # pylint: disable=invalid-name
            bDescriptorType = tmp_desc[DESC_OFFSET_BDESCRIPTORTYPE]  # pylint: disable=invalid-name
            desc_types.append(int(bDescriptorType))
            if int(bLength) == 0:
                # A malformed descriptor with bLength == 0 would make the
                # slice below a no-op and spin this loop forever; bail out.
                break
            tmp_desc = tmp_desc[int(bLength):]
        except IndexError:
            # Ran off the end of the buffer -- no more descriptors.
            break
    return desc_types
def get_hid_descriptor_parts(hid_descriptor):
    """Extract (bNumDescriptors, bDescriptorType, wDescriptorLength)
    from a HID class descriptor.

    Raises TypeError when the buffer is too short, holds non-integer
    content, or its bDescriptorType byte is not the HID class value.
    """
    err_msg = 'Invalid HID class descriptor'
    try:
        if hid_descriptor[1] != DESC_TYPE_HID_HID:
            raise TypeError(err_msg)
        num_descriptors = int(hid_descriptor[5])
        descriptor_type = int(hid_descriptor[6])
        # wDescriptorLength is little-endian: low byte at 7, high at 8.
        low_byte = hid_descriptor[7]
        high_byte = hid_descriptor[8]
        descriptor_length = int((high_byte << 8) | low_byte)
    except (IndexError, ValueError):
        raise TypeError(err_msg)
    return num_descriptors, descriptor_type, descriptor_length
def get_usbhid_dev_type(intf):
    """Classify intf as 'boot_keyboard', 'boot_mouse' or 'other_device'.

    Returns None when intf is not a usb.core.Interface or is not a HID
    class interface.
    """
    if not isinstance(intf, usb.core.Interface):
        return None
    if intf.bInterfaceClass != USB_CLASS_HID:
        # USB Device Class Definition for HID, v1.11, paragraphs 4.1, 4.2 & 4.3:
        # the class is specified in the Interface descriptor
        # and not the Device descriptor.
        return None
    boot_types = {
        (HID_SUBCLASS_BOOT, HID_PROTOCOL_KEYBOARD): 'boot_keyboard',
        (HID_SUBCLASS_BOOT, HID_PROTOCOL_MOUSE): 'boot_mouse',
    }
    # Determining any other HID dev type, like a non-boot_keyboard or
    # a non-boot_mouse, requires getting and parsing a HID Report
    # descriptor for intf.  Only boot_keyboard, boot_mouse and
    # other_device are needed by this greentea test suite.
    subclass_and_protocol = (intf.bInterfaceSubClass, intf.bInterfaceProtocol)
    return boot_types.get(subclass_and_protocol, 'other_device')
class RetryError(Exception):
    """Exception raised by retry_fun_call() once every attempt has failed."""
def retry_fun_call(fun, num_retries=3, retry_delay=0.0):
    """Invoke fun, retrying on any exception.

    fun is attempted at most num_retries times with retry_delay seconds
    slept after each failure.  The first successful return value is
    passed through; RetryError is raised when every attempt fails.
    """
    verbose = False
    last_exc = None
    attempt = 0
    while attempt < num_retries:
        attempt += 1
        try:
            return fun()  # pylint: disable=not-callable
        except Exception as exc:  # pylint: disable=broad-except
            last_exc = exc
            if verbose:
                print('Retry {}/{} failed ({})'
                      .format(attempt, num_retries, str(fun)))
            time.sleep(retry_delay)
    err_msg = 'Failed with "{}". Tried {} times.'
    raise RetryError(err_msg.format(last_exc, num_retries))
def raise_if_different(expected, actual, text=''):
    """Raise a RuntimeError (prefixed with text) unless actual == expected."""
    if expected == actual:
        return
    raise RuntimeError('{}Got {!r}, expected {!r}.'.format(text, actual, expected))
def raise_if_false(expression, text):
    """Raise a RuntimeError carrying text when expression is falsy."""
    if expression:
        return
    raise RuntimeError(text)
class USBHIDTest(mbed_host_tests.BaseHostTest):
    """Host side test for USB device HID class."""
    @staticmethod
    def get_usb_hid_path(usb_id_str):
        """Get a USB HID device path as registered in the system.
        Search is based on the unique USB SN generated by the host
        during test suite setup.
        Raises RuntimeError if the device is not found.
        """
        hid_path = usb_hid_path(usb_id_str)
        if hid_path is None:
            err_msg = 'USB HID device (SN={}) not found.'
            raise RuntimeError(err_msg.format(usb_id_str))
        return hid_path
    @staticmethod
    def get_usb_dev(usb_id_str):
        """Get a usb.core.Device instance.
        Search is based on the unique USB SN generated by the host
        during test suite setup.
        Raises RuntimeError if the device is not found.
        """
        usb_dev = usb.core.find(custom_match=lambda d: d.serial_number == usb_id_str, backend=USB_BACKEND)
        if usb_dev is None:
            err_msg = 'USB device (SN={}) not found.'
            raise RuntimeError(err_msg.format(usb_id_str))
        return usb_dev
    def __init__(self):
        super(USBHIDTest, self).__init__()
        # Background thread used by callbacks that delegate HID I/O;
        # managed by start_bg_task().
        self.__bg_task = None
        # Unique SN the DUT will present on the USB bus; sent to the
        # device in cb_device_ready().
        self.dut_usb_dev_sn = uuid.uuid4().hex # 32 hex digit string
    def notify_error(self, msg):
        """Terminate the test with an error msg."""
        self.log('TEST ERROR: {}'.format(msg))
        # None signals the greentea framework that the test ended in error.
        self.notify_complete(None)
    def notify_failure(self, msg):
        """Report a host side test failure to the DUT."""
        self.log('TEST FAILED: {}'.format(msg))
        self.send_kv(MSG_KEY_TEST_CASE_FAILED, MSG_VALUE_DUMMY)
    def notify_success(self, value=None, msg=''):
        """Report a host side test success to the DUT."""
        if msg:
            self.log('TEST PASSED: {}'.format(msg))
        if value is None:
            value = MSG_VALUE_DUMMY
        self.send_kv(MSG_KEY_TEST_CASE_PASSED, value)
    def cb_test_get_hid_desc(self, key, value, timestamp):
        """Verify the device handles Get_Descriptor request correctly.
        Two requests are tested for every HID interface:
        1. Get_Descriptor(HID),
        2. Get_Descriptor(Report).
        Details in USB Device Class Definition for HID, v1.11, paragraph 7.1.
        """
        kwargs_hid_desc_req = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE),
            'bRequest': USB_REQUEST_GET_DESCRIPTOR,
            # Descriptor Index (part of wValue) is reset to zero for
            # HID class descriptors other than Physical ones.
            'wValue': build_get_desc_value(DESC_TYPE_HID_HID, 0x00),
            # wIndex is replaced with the Interface Number in the loop.
            'wIndex': None,
            'data_or_wLength': DESC_LEN_HID_HID}
        kwargs_report_desc_req = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE),
            'bRequest': USB_REQUEST_GET_DESCRIPTOR,
            # Descriptor Index (part of wValue) is reset to zero for
            # HID class descriptors other than Physical ones.
            'wValue': build_get_desc_value(DESC_TYPE_HID_REPORT, 0x00),
            # wIndex is replaced with the Interface Number in the loop.
            'wIndex': None,
            # wLength is replaced with the Report Descriptor Length in the loop.
            'data_or_wLength': None}
        mbed_hid_dev = None
        report_desc_lengths = []
        try:
            # The DUT may still be (re)enumerating; poll for it briefly.
            mbed_hid_dev = retry_fun_call(
                fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn), # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        try:
            for intf in mbed_hid_dev.get_active_configuration(): # pylint: disable=not-callable
                if intf.bInterfaceClass != USB_CLASS_HID:
                    continue
                try:
                    # Take the interface away from the OS HID driver so we
                    # can issue control requests; unsupported on some
                    # platforms/backends, in which case it is skipped.
                    if mbed_hid_dev.is_kernel_driver_active(intf.bInterfaceNumber):
                        mbed_hid_dev.detach_kernel_driver(intf.bInterfaceNumber) # pylint: disable=not-callable
                except (NotImplementedError, AttributeError):
                    pass
                # Request the HID descriptor.
                kwargs_hid_desc_req['wIndex'] = intf.bInterfaceNumber
                hid_desc = mbed_hid_dev.ctrl_transfer(**kwargs_hid_desc_req) # pylint: disable=not-callable
                try:
                    bNumDescriptors, bDescriptorType, wDescriptorLength = get_hid_descriptor_parts(hid_desc) # pylint: disable=invalid-name
                except TypeError as exc:
                    self.notify_error(exc)
                    return
                raise_if_different(1, bNumDescriptors, 'Exactly one HID Report descriptor expected. ')
                raise_if_different(DESC_TYPE_HID_REPORT, bDescriptorType, 'Invalid HID class descriptor type. ')
                raise_if_false(wDescriptorLength > 0, 'Invalid HID Report descriptor length. ')
                # Request the Report descriptor.
                kwargs_report_desc_req['wIndex'] = intf.bInterfaceNumber
                kwargs_report_desc_req['data_or_wLength'] = wDescriptorLength
                report_desc = mbed_hid_dev.ctrl_transfer(**kwargs_report_desc_req) # pylint: disable=not-callable
                raise_if_different(wDescriptorLength, len(report_desc),
                                   'The size of data received does not match the HID Report descriptor length. ')
                report_desc_lengths.append(len(report_desc))
        except usb.core.USBError as exc:
            self.notify_failure('Get_Descriptor request failed. {}'.format(exc))
        except RuntimeError as exc:
            self.notify_failure(exc)
        else:
            # Send the report desc len to the device.
            # USBHID::report_desc_length() returns uint16_t
            # NOTE(review): assumes at least one HID interface was found;
            # max() on an empty list would raise ValueError here.
            msg_value = '{0:04x}'.format(max(report_desc_lengths))
            self.notify_success(msg_value)
    def cb_test_get_cfg_desc(self, key, value, timestamp):
        """Verify the device provides required HID descriptors.
        USB Device Class Definition for HID, v1.11, paragraph 7.1:
        When a Get_Descriptor(Configuration) request is issued, it
        returns (...), and the HID descriptor for each interface.
        """
        kwargs_cfg_desc_req = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE),
            'bRequest': USB_REQUEST_GET_DESCRIPTOR,
            # Descriptor Index (part of wValue) is reset to zero.
            'wValue': build_get_desc_value(DESC_TYPE_CONFIG, 0x00),
            # wIndex is reset to zero.
            'wIndex': 0x00,
            # wLength unknown, set to 1024.
            'data_or_wLength': 1024}
        mbed_hid_dev = None
        try:
            # The DUT may still be (re)enumerating; poll for it briefly.
            mbed_hid_dev = retry_fun_call(
                fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn), # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        try:
            # Request the Configuration descriptor.
            cfg_desc = mbed_hid_dev.ctrl_transfer(**kwargs_cfg_desc_req) # pylint: disable=not-callable
            raise_if_false(DESC_TYPE_HID_HID in get_descriptor_types(cfg_desc),
                           'No HID class descriptor in the Configuration descriptor.')
        except usb.core.USBError as exc:
            self.notify_failure('Get_Descriptor request failed. {}'.format(exc))
        except RuntimeError as exc:
            self.notify_failure(exc)
        else:
            self.notify_success()
    def cb_test_class_requests(self, key, value, timestamp):
        """Verify all required HID requests are supported.
        USB Device Class Definition for HID, v1.11, Appendix G:
        1. Get_Report -- required for all types,
        2. Set_Report -- not required if dev doesn't declare an Output Report,
        3. Get_Idle -- required for keyboards,
        4. Set_Idle -- required for keyboards,
        5. Get_Protocol -- required for boot_keyboard and boot_mouse,
        6. Set_Protocol -- required for boot_keyboard and boot_mouse.
        Details in USB Device Class Definition for HID, v1.11, paragraph 7.2.
        """
        kwargs_get_report_request = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_GET_REPORT,
            # wValue: ReportType = Input, ReportID = 0 (not used)
            'wValue': (0x01 << 8) | 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            # wLength: unknown, set to 1024
            'data_or_wLength': 1024}
        kwargs_get_idle_request = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_GET_IDLE,
            # wValue: 0, ReportID = 0 (not used)
            'wValue': (0x00 << 8) | 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 1}
        kwargs_set_idle_request = {
            'bmRequestType': build_request_type(
                CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_SET_IDLE,
            # wValue: Duration, ReportID = 0 (all input reports)
            'wValue': (KEYBOARD_IDLE_RATE_TO_SET << 8) | 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 0}
        kwargs_get_protocol_request = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_GET_PROTOCOL,
            'wValue': 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 1}
        kwargs_set_protocol_request = {
            'bmRequestType': build_request_type(
                CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_SET_PROTOCOL,
            'wValue': HID_PROTOCOL_TO_SET,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 0}
        mbed_hid_dev = None
        try:
            # The DUT may still be (re)enumerating; poll for it briefly.
            mbed_hid_dev = retry_fun_call(
                fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn), # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        # Track which device type / request is in flight so failure
        # messages below can name the exact request that failed.
        hid_dev_type = None
        tested_request_name = None
        try:
            for intf in mbed_hid_dev.get_active_configuration(): # pylint: disable=not-callable
                hid_dev_type = get_usbhid_dev_type(intf)
                if hid_dev_type is None:
                    continue
                try:
                    # Take the interface away from the OS HID driver so we
                    # can issue control requests; unsupported on some
                    # platforms/backends, in which case it is skipped.
                    if mbed_hid_dev.is_kernel_driver_active(intf.bInterfaceNumber):
                        mbed_hid_dev.detach_kernel_driver(intf.bInterfaceNumber) # pylint: disable=not-callable
                except (NotImplementedError, AttributeError):
                    pass
                if hid_dev_type == 'boot_keyboard':
                    # 4. Set_Idle
                    tested_request_name = 'Set_Idle'
                    kwargs_set_idle_request['wIndex'] = intf.bInterfaceNumber
                    mbed_hid_dev.ctrl_transfer(**kwargs_set_idle_request) # pylint: disable=not-callable
                    # 3. Get_Idle
                    tested_request_name = 'Get_Idle'
                    kwargs_get_idle_request['wIndex'] = intf.bInterfaceNumber
                    idle_rate = mbed_hid_dev.ctrl_transfer(**kwargs_get_idle_request) # pylint: disable=not-callable
                    raise_if_different(KEYBOARD_IDLE_RATE_TO_SET, idle_rate, 'Invalid idle rate received. ')
                if hid_dev_type in ('boot_keyboard', 'boot_mouse'):
                    # 6. Set_Protocol
                    tested_request_name = 'Set_Protocol'
                    kwargs_set_protocol_request['wIndex'] = intf.bInterfaceNumber
                    mbed_hid_dev.ctrl_transfer(**kwargs_set_protocol_request) # pylint: disable=not-callable
                    # 5. Get_Protocol
                    tested_request_name = 'Get_Protocol'
                    kwargs_get_protocol_request['wIndex'] = intf.bInterfaceNumber
                    protocol = mbed_hid_dev.ctrl_transfer(**kwargs_get_protocol_request) # pylint: disable=not-callable
                    raise_if_different(HID_PROTOCOL_TO_SET, protocol, 'Invalid protocol received. ')
                # 1. Get_Report
                tested_request_name = 'Get_Report'
                kwargs_get_report_request['wIndex'] = intf.bInterfaceNumber
                mbed_hid_dev.ctrl_transfer(**kwargs_get_report_request) # pylint: disable=not-callable
        except usb.core.USBError as exc:
            self.notify_failure('The {!r} does not support the {!r} HID class request ({}).'
                                .format(hid_dev_type, tested_request_name, exc))
        except RuntimeError as exc:
            self.notify_failure('Set/Get data mismatch for {!r} for the {!r} HID class request ({}).'
                                .format(hid_dev_type, tested_request_name, exc))
        else:
            self.notify_success()
    def raw_loopback(self, report_size):
        """Send every input report back to the device."""
        mbed_hid_path = None
        mbed_hid = hid.device()
        try:
            # Resolve the device path and open it; both polled because the
            # DUT may still be enumerating.
            mbed_hid_path = retry_fun_call(
                fun=functools.partial(self.get_usb_hid_path, self.dut_usb_dev_sn), # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
            retry_fun_call(
                fun=functools.partial(mbed_hid.open_path, mbed_hid_path), # pylint: disable=not-callable
                num_retries=10,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        # Notify the device it can send reports now.
        self.send_kv(MSG_KEY_HOST_READY, MSG_VALUE_DUMMY)
        try:
            for _ in range(RAW_IO_REPS):
                # There are no Report ID tags in the Report descriptor.
                # Receiving only the Report Data, Report ID is omitted.
                report_in = mbed_hid.read(report_size)
                report_out = report_in[:]
                # Set the Report ID to 0x00 (not used).
                report_out.insert(0, 0x00)
                mbed_hid.write(report_out)
        except (ValueError, IOError) as exc:
            self.notify_failure('HID Report transfer failed. {}'.format(exc))
        finally:
            mbed_hid.close()
    def setup(self):
        # Wire greentea message keys to their handler callbacks.
        self.register_callback(MSG_KEY_DEVICE_READY, self.cb_device_ready)
        self.register_callback(MSG_KEY_TEST_GET_DESCRIPTOR_HID, self.cb_test_get_hid_desc)
        self.register_callback(MSG_KEY_TEST_GET_DESCRIPTOR_CFG, self.cb_test_get_cfg_desc)
        self.register_callback(MSG_KEY_TEST_REQUESTS, self.cb_test_class_requests)
        self.register_callback(MSG_KEY_TEST_RAW_IO, self.cb_test_raw_io)
    def cb_device_ready(self, key, value, timestamp):
        """Send a unique USB SN to the device.
        DUT uses this SN every time it connects to host as a USB device.
        """
        self.send_kv(MSG_KEY_SERIAL_NUMBER, self.dut_usb_dev_sn)
    def start_bg_task(self, **thread_kwargs):
        """Start a new daemon thread.
        Some callbacks delegate HID dev handling to a background task to
        prevent any delays in the device side assert handling. Only one
        background task is kept running to prevent multiple access
        to the HID device.
        """
        try:
            # Wait for any previous task; AttributeError when none was
            # started yet, RuntimeError when joining an unstarted thread.
            self.__bg_task.join()
        except (AttributeError, RuntimeError):
            pass
        self.__bg_task = threading.Thread(**thread_kwargs)
        self.__bg_task.daemon = True
        self.__bg_task.start()
    def cb_test_raw_io(self, key, value, timestamp):
        """Receive HID reports and send them back to the device."""
        if not CYTHON_HIDAPI_PRESENT:
            # Without the hidapi bindings the host cannot take part;
            # tell the DUT to skip this case.
            self.send_kv(MSG_KEY_HOST_READY, MSG_VALUE_NOT_SUPPORTED)
            return
        try:
            # The size of input and output reports used in test.
            report_size = int(value)
        except ValueError as exc:
            self.notify_error(exc)
            return
        self.start_bg_task(
            target=self.raw_loopback,
            args=(report_size, ))
|
apache-2.0
|
XiaodunServerGroup/ddyedx
|
lms/djangoapps/bulk_email/tasks.py
|
12
|
33434
|
"""
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import re
import random
import json
from time import sleep
from dogapi import dog_stats_api
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery import task, current_task
from celery.utils.log import get_task_logger
from celery.states import SUCCESS, FAILURE, RETRY
from celery.exceptions import RetryTaskError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.urlresolvers import reverse
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SEND_TO_MYSELF, SEND_TO_ALL, TO_OPTIONS,
)
from courseware.courses import get_course, course_image_url
from student.roles import CourseStaffRole, CourseInstructorRole
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
SubtaskStatus,
queue_subtasks_for_query,
check_subtask_is_valid,
update_subtask_status,
)
from xmodule.modulestore import Location
log = get_task_logger(__name__)
# Errors that an individual email is failing to be sent, and should just
# be treated as a fail.
SINGLE_EMAIL_FAILURE_ERRORS = (
SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted.
SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot.
SESIllegalAddressError, # Raised when an illegal address is encountered.
SESLocalAddressCharacterError, # An address contained a control or whitespace character.
)
# Exceptions that, if caught, should cause the task to be re-tried.
# These errors will be caught a limited number of times before the task fails.
LIMITED_RETRY_ERRORS = (
SMTPConnectError,
SMTPServerDisconnected,
AWSConnectionError,
)
# Errors that indicate that a mailing task should be retried without limit.
# An example is if email is being sent too quickly, but may succeed if sent
# more slowly. When caught by a task, it triggers an exponential backoff and retry.
# Retries happen continuously until the email is sent.
# Note that the SMTPDataErrors here are only those within the 4xx range.
# Those not in this range (i.e. in the 5xx range) are treated as hard failures
# and thus like SINGLE_EMAIL_FAILURE_ERRORS.
INFINITE_RETRY_ERRORS = (
SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded.
SMTPDataError,
)
# Errors that are known to indicate an inability to send any more emails,
# and should therefore not be retried. For example, exceeding a quota for emails.
# Also, any SMTP errors that are not explicitly enumerated above.
BULK_EMAIL_FAILURE_ERRORS = (
SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet.
SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet.
SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM.
SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded.
SMTPException,
)
def _get_recipient_queryset(user_id, to_option, course_id, course_location):
    """
    Build the queryset of Users who should receive this bulk email.

    `to_option` must be one of TO_OPTIONS (SEND_TO_MYSELF, SEND_TO_STAFF,
    or SEND_TO_ALL).  Recipients who fall into more than one category
    (e.g. enrolled students who are also staff or self) are deduplicated
    via distinct(), and the result is ordered by primary key.
    """
    if to_option not in TO_OPTIONS:
        log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
        raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
    if to_option == SEND_TO_MYSELF:
        # Only the requesting user receives the email.
        recipients = User.objects.filter(id=user_id)
    else:
        # Staff and instructors are included for both the staff-only and
        # the send-to-all options.
        staff = CourseStaffRole(course_location).users_with_role()
        instructors = CourseInstructorRole(course_location).users_with_role()
        recipients = staff | instructors
        if to_option == SEND_TO_ALL:
            # We also require students to have activated their accounts to
            # provide verification that the provided email address is valid.
            enrolled = User.objects.filter(
                is_active=True,
                courseenrollment__course_id=course_id,
                courseenrollment__is_active=True
            )
            recipients = recipients | enrolled
        recipients = recipients.distinct()
    return recipients.order_by('pk')
def _get_course_email_context(course):
    """
    Build the template context shared by every recipient of a course
    email (titles and absolute URLs), independent of recipient.
    """
    course_root = reverse('course_root', kwargs={'course_id': course.id})
    return {
        'course_title': course.display_name,
        'course_url': 'https://{}{}'.format(settings.SITE_NAME, course_root),
        'course_image_url': 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course)),
        'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),
        'platform_name': settings.PLATFORM_NAME,
    }
def perform_delegate_email_batches(entry_id, course_id, task_input, action_name):
    """
    Delegates emails by querying for the list of recipients who should
    get the mail, chopping up into batches of no more than settings.BULK_EMAIL_EMAILS_PER_TASK
    in size, and queueing up worker jobs.

    Arguments:
     * `entry_id`: id of the InstructorTask object progress is recorded on.
     * `course_id`: course the email belongs to; must agree with both the
       InstructorTask entry and the CourseEmail being sent.
     * `task_input`: dict carrying the 'email_id' of the CourseEmail.
     * `action_name`: label stored with subtask progress records.

    Returns the progress dict produced by queue_subtasks_for_query (or the
    previously recorded progress when subtasks were already queued).
    Raises ValueError on course id mismatches and re-raises
    CourseEmail.DoesNotExist when the email record is missing.
    """
    entry = InstructorTask.objects.get(pk=entry_id)
    # Get inputs to use in this task from the entry.
    user_id = entry.requester.id
    task_id = entry.task_id
    # Perfunctory check, since expansion is made for convenience of other task
    # code that doesn't need the entry_id.
    if course_id != entry.course_id:
        format_msg = u"Course id conflict: explicit value {} does not match task value {}"
        log.warning("Task %s: %s", task_id, format_msg.format(course_id, entry.course_id))
        raise ValueError("Course id conflict: explicit value does not match task value")
    # Fetch the CourseEmail.
    email_id = task_input['email_id']
    try:
        email_obj = CourseEmail.objects.get(id=email_id)
    except CourseEmail.DoesNotExist:
        # The CourseEmail object should be committed in the view function before the task
        # is submitted and reaches this point.
        log.warning("Task %s: Failed to get CourseEmail with id %s", task_id, email_id)
        raise
    # Check to see if email batches have already been defined. This seems to
    # happen sometimes when there is a loss of connection while a task is being
    # queued. When this happens, the same task gets called again, and a whole
    # new raft of subtasks gets queued up. We will assume that if subtasks
    # have already been defined, there is no need to redefine them below.
    # So we just return right away. We don't raise an exception, because we want
    # the current task to be marked with whatever it had been marked with before.
    if len(entry.subtasks) > 0 and len(entry.task_output) > 0:
        log.warning("Task %s has already been processed for email %s! InstructorTask = %s", task_id, email_id, entry)
        progress = json.loads(entry.task_output)
        return progress
    # Sanity check that course for email_obj matches that of the task referencing it.
    if course_id != email_obj.course_id:
        format_msg = u"Course id conflict: explicit value {} does not match email value {}"
        # Fix: log the email's course_id (the value the message claims to
        # show); entry.course_id was already proven equal to course_id above,
        # so logging it here hid the actual conflicting value.
        log.warning("Task %s: %s", task_id, format_msg.format(course_id, email_obj.course_id))
        raise ValueError("Course id conflict: explicit value does not match email value")
    # Fetch the course object.
    try:
        course = get_course(course_id)
    except ValueError:
        log.exception("Task %s: course not found: %s", task_id, course_id)
        raise
    # Get arguments that will be passed to every subtask.
    to_option = email_obj.to_option
    global_email_context = _get_course_email_context(course)
    def _create_send_email_subtask(to_list, initial_subtask_status):
        """Creates a subtask to send email to a given recipient list."""
        subtask_id = initial_subtask_status.task_id
        new_subtask = send_course_email.subtask(
            (
                entry_id,
                email_id,
                to_list,
                global_email_context,
                initial_subtask_status.to_dict(),
            ),
            task_id=subtask_id,
            routing_key=settings.BULK_EMAIL_ROUTING_KEY,
        )
        return new_subtask
    recipient_qset = _get_recipient_queryset(user_id, to_option, course_id, course.location)
    recipient_fields = ['profile__name', 'email']
    log.info(u"Task %s: Preparing to queue subtasks for sending emails for course %s, email %s, to_option %s",
             task_id, course_id, email_id, to_option)
    progress = queue_subtasks_for_query(
        entry,
        action_name,
        _create_send_email_subtask,
        recipient_qset,
        recipient_fields,
        settings.BULK_EMAIL_EMAILS_PER_QUERY,
        settings.BULK_EMAIL_EMAILS_PER_TASK
    )
    # We want to return progress here, as this is what will be stored in the
    # AsyncResult for the parent task as its return value.
    # The AsyncResult will then be marked as SUCCEEDED, and have this return value as its "result".
    # That's okay, for the InstructorTask will have the "real" status, and monitoring code
    # should be using that instead.
    return progress
@task(default_retry_delay=settings.BULK_EMAIL_DEFAULT_RETRY_DELAY, max_retries=settings.BULK_EMAIL_MAX_RETRIES)  # pylint: disable=E1102
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
    """
    Sends an email to a list of recipients.

    Inputs are:
      * `entry_id`: id of the InstructorTask object to which progress should be recorded.
      * `email_id`: id of the CourseEmail model that is to be emailed.
      * `to_list`: list of recipients.  Each is represented as a dict with the following keys:
        - 'profile__name': full name of User.
        - 'email': email address of User.
        - 'pk': primary key of User model.
      * `global_email_context`: dict containing values that are unique for this email but the same
        for all recipients of this email.  This dict is to be used to fill in slots in email
        template.  It does not include 'name' and 'email', which will be provided by the to_list.
      * `subtask_status_dict` : dict containing values representing current status.  Keys are:
          'task_id' : id of subtask.  This is used to pass task information across retries.
          'attempted' : number of attempts -- should equal succeeded plus failed
          'succeeded' : number that succeeded in processing
          'skipped' : number that were not processed.
          'failed' : number that failed during processing
          'retried_nomax' : number of times the subtask has been retried for conditions that
              should not have a maximum count applied
          'retried_withmax' : number of times the subtask has been retried for conditions that
              should have a maximum count applied
          'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
        Most values will be zero on initial call, but may be different when the task is
        invoked as part of a retry.

    Sends to all addresses contained in to_list that are not also in the Optout table.
    Emails are sent multi-part, in both plain text and html.  Updates InstructorTask object
    with status information (sends, failures, skips) and updates number of subtasks completed.

    Returns the final subtask status serialized back to a dict (via `to_dict()`),
    so that Celery can store it as the task's JSON result.
    """
    subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
    current_task_id = subtask_status.task_id
    num_to_send = len(to_list)
    log.info("Preparing to send email %s to %d recipients as subtask %s for instructor task %d: context = %s, status=%s",
             email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status)

    # Check that the requested subtask is actually known to the current InstructorTask entry.
    # If this fails, it throws an exception, which should fail this subtask immediately.
    # This can happen when the parent task has been run twice, and results in duplicate
    # subtasks being created for the same InstructorTask entry.  This can happen when Celery
    # loses its connection to its broker, and any current tasks get requeued.
    # We hope to catch this condition in perform_delegate_email_batches() when it's the parent
    # task that is resubmitted, but just in case we fail to do so there, we check here as well.
    # There is also a possibility that this task will be run twice by Celery, for the same reason.
    # To deal with that, we need to confirm that the task has not already been completed.
    check_subtask_is_valid(entry_id, current_task_id, subtask_status)

    send_exception = None
    new_subtask_status = None
    try:
        course_title = global_email_context['course_title']
        with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
            new_subtask_status, send_exception = _send_course_email(
                entry_id,
                email_id,
                to_list,
                global_email_context,
                subtask_status,
            )
    except Exception:
        # Unexpected exception.  Try to write out the failure to the entry before failing.
        log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
        # We got here for really unexpected reasons.  Since we don't know how far
        # the task got in emailing, we count all recipients as having failed.
        # It at least keeps the counts consistent.
        subtask_status.increment(failed=num_to_send, state=FAILURE)
        update_subtask_status(entry_id, current_task_id, subtask_status)
        raise

    if send_exception is None:
        # Update the InstructorTask object that is storing its progress.
        log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
    elif isinstance(send_exception, RetryTaskError):
        # If retrying, a RetryTaskError needs to be returned to Celery.
        # We assume that the the progress made before the retry condition
        # was encountered has already been updated before the retry call was made,
        # so we only log here.
        log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
        raise send_exception  # pylint: disable=E0702
    else:
        log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
        raise send_exception  # pylint: disable=E0702

    # return status in a form that can be serialized by Celery into JSON:
    log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
    return new_subtask_status.to_dict()
def _filter_optouts_from_recipients(to_list, course_id):
    """
    Filters a recipient list based on student opt-outs for a given course.

    Returns the filtered recipient list, as well as the number of optouts
    removed from the list.
    """
    recipient_pks = [recipient['pk'] for recipient in to_list]
    optout_emails = set(
        Optout.objects.filter(
            course_id=course_id,
            user__in=recipient_pks,
        ).values_list('user__email', flat=True)
    )
    # Only count the num_optout for the first time the optouts are calculated.
    # We assume that the number will not change on retries, and so we don't need
    # to calculate it each time.
    num_optout = len(optout_emails)
    remaining = [recipient for recipient in to_list if recipient['email'] not in optout_emails]
    return remaining, num_optout
def _get_source_address(course_id, course_title):
    """
    Calculates an email address to be used as the 'from-address' for sent emails.

    Makes a unique from name and address for each course, e.g.
        "COURSE_TITLE" Course Staff <coursenum-no-reply@courseupdates.edx.org>
    """
    # Double quotes would break the quoted display-name, so drop them.
    title_without_quotes = re.sub(r'"', '', course_title)
    # The course_id is assumed to be in the form 'org/course_num/run',
    # so pull out the course_num.  Then make sure that it can be used
    # in an email address, by substituting a '_' anywhere a non-(ascii, period, or dash)
    # character appears.
    raw_course_num = Location.parse_course_id(course_id)['course']
    safe_course_num = re.sub(r"[^\w.-]", '_', raw_course_num)
    return u'"{0}" Course Staff <{1}-{2}>'.format(
        title_without_quotes,
        safe_course_num,
        settings.BULK_EMAIL_DEFAULT_FROM_EMAIL,
    )
def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):
    """
    Performs the email sending task.

    Sends an email to a list of recipients.

    Inputs are:
      * `entry_id`: id of the InstructorTask object to which progress should be recorded.
      * `email_id`: id of the CourseEmail model that is to be emailed.
      * `to_list`: list of recipients.  Each is represented as a dict with the following keys:
        - 'profile__name': full name of User.
        - 'email': email address of User.
        - 'pk': primary key of User model.
      * `global_email_context`: dict containing values that are unique for this email but the same
        for all recipients of this email.  This dict is to be used to fill in slots in email
        template.  It does not include 'name' and 'email', which will be provided by the to_list.
      * `subtask_status` : object of class SubtaskStatus representing current status.

    Sends to all addresses contained in to_list that are not also in the Optout table.
    Emails are sent multi-part, in both plain text and html.

    Returns a tuple of two values:
      * First value is a SubtaskStatus object which represents current progress at the end of this call.
      * Second value is an exception returned by the innards of the method, indicating a fatal error.
        In this case, the number of recipients that were not sent have already been added to the
        'failed' count above.
    """
    # Get information from current task's request:
    task_id = subtask_status.task_id

    try:
        course_email = CourseEmail.objects.get(id=email_id)
    except CourseEmail.DoesNotExist as exc:
        log.exception("Task %s: could not find email id:%s to send.", task_id, email_id)
        raise

    # Exclude optouts (if not a retry):
    # Note that we don't have to do the optout logic at all if this is a retry,
    # because we have presumably already performed the optout logic on the first
    # attempt.  Anyone on the to_list on a retry has already passed the filter
    # that existed at that time, and we don't need to keep checking for changes
    # in the Optout list.
    if subtask_status.get_retry_count() == 0:
        to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)
        subtask_status.increment(skipped=num_optout)

    course_title = global_email_context['course_title']
    subject = "[" + course_title + "] " + course_email.subject
    from_addr = _get_source_address(course_email.course_id, course_title)
    course_email_template = CourseEmailTemplate.get_template()
    try:
        connection = get_connection()
        connection.open()

        # Define context values to use in all course emails:
        email_context = {'name': '', 'email': ''}
        email_context.update(global_email_context)

        while to_list:
            # Update context with user-specific values from the user at the end of the list.
            # At the end of processing this user, they will be popped off of the to_list.
            # That way, the to_list will always contain the recipients remaining to be emailed.
            # This is convenient for retries, which will need to send to those who haven't
            # yet been emailed, but not send to those who have already been sent to.
            current_recipient = to_list[-1]
            email = current_recipient['email']
            email_context['email'] = email
            email_context['name'] = current_recipient['profile__name']

            # Construct message content using templates and context:
            plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)
            html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)

            # Create email:
            email_msg = EmailMultiAlternatives(
                subject,
                plaintext_msg,
                from_addr,
                [email],
                connection=connection
            )
            email_msg.attach_alternative(html_msg, 'text/html')

            # Throttle if we have gotten the rate limiter.  This is not very high-tech,
            # but if a task has been retried for rate-limiting reasons, then we sleep
            # for a period of time between all emails within this task.  Choice of
            # the value depends on the number of workers that might be sending email in
            # parallel, and what the SES throttle rate is.
            if subtask_status.retried_nomax > 0:
                sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)

            try:
                log.debug('Email with id %s to be sent to %s', email_id, email)
                with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):
                    connection.send_messages([email_msg])
            except SMTPDataError as exc:
                # According to SMTP spec, we'll retry error codes in the 4xx range.  5xx range indicates hard failure.
                if exc.smtp_code >= 400 and exc.smtp_code < 500:
                    # This will cause the outer handler to catch the exception and retry the entire task.
                    raise exc
                else:
                    # This will fall through and not retry the message.
                    log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)
                    dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
                    subtask_status.increment(failed=1)
            except SINGLE_EMAIL_FAILURE_ERRORS as exc:
                # This will fall through and not retry the message.
                log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)
                dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
                subtask_status.increment(failed=1)
            else:
                dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])
                if settings.BULK_EMAIL_LOG_SENT_EMAILS:
                    log.info('Email with id %s sent to %s', email_id, email)
                else:
                    log.debug('Email with id %s sent to %s', email_id, email)
                subtask_status.increment(succeeded=1)

            # Pop the user that was emailed off the end of the list only once they have
            # successfully been processed.  (That way, if there were a failure that
            # needed to be retried, the user is still on the list.)
            to_list.pop()
    except INFINITE_RETRY_ERRORS as exc:
        dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])
        # Increment the "retried_nomax" counter, update other counters with progress to date,
        # and set the state to RETRY:
        subtask_status.increment(retried_nomax=1, state=RETRY)
        return _submit_for_retry(
            entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True
        )
    except LIMITED_RETRY_ERRORS as exc:
        # Errors caught here cause the email to be retried.  The entire task is actually retried
        # without popping the current recipient off of the existing list.
        # Errors caught are those that indicate a temporary condition that might succeed on retry.
        dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
        # Increment the "retried_withmax" counter, update other counters with progress to date,
        # and set the state to RETRY:
        subtask_status.increment(retried_withmax=1, state=RETRY)
        return _submit_for_retry(
            entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
        )
    except BULK_EMAIL_FAILURE_ERRORS as exc:
        dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
        num_pending = len(to_list)
        log.exception('Task %s: email with id %d caused send_course_email task to fail with "fatal" exception.  %d emails unsent.',
                      task_id, email_id, num_pending)
        # Update counters with progress to date, counting unsent emails as failures,
        # and set the state to FAILURE:
        subtask_status.increment(failed=num_pending, state=FAILURE)
        return subtask_status, exc
    except Exception as exc:
        # Errors caught here cause the email to be retried.  The entire task is actually retried
        # without popping the current recipient off of the existing list.
        # These are unexpected errors.  Since they might be due to a temporary condition that might
        # succeed on retry, we give them a retry.
        dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
        log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception.  Generating retry.',
                      task_id, email_id)
        # Increment the "retried_withmax" counter, update other counters with progress to date,
        # and set the state to RETRY:
        subtask_status.increment(retried_withmax=1, state=RETRY)
        return _submit_for_retry(
            entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
        )
    else:
        # All went well.  Update counters with progress to date,
        # and set the state to SUCCESS:
        subtask_status.increment(state=SUCCESS)
        # Successful completion is marked by an exception value of None.
        return subtask_status, None
    finally:
        # Clean up at the end.
        connection.close()
def _get_current_task():
    """
    Stub to make it easier to test without actually running Celery.

    This is a wrapper around celery.current_task, which provides access
    to the top of the stack of Celery's tasks.  When running tests, however,
    it doesn't seem to work to mock current_task directly, so this wrapper
    is used to provide a hook to mock in tests, while providing the real
    `current_task` in production.
    """
    # `current_task` is the module-level proxy imported from Celery.
    return current_task
def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):
    """
    Helper function to requeue a task for retry, using the new version of arguments provided.

    Inputs are the same as for running a task, plus two extra indicating the state at the time of retry.
    These include the `current_exception` that the task encountered that is causing the retry attempt,
    and the `subtask_status` (a SubtaskStatus object) that is to be returned.  A third extra argument
    `skip_retry_max` indicates whether the current retry should be subject to a maximum test.

    Returns a tuple of two values:
      * First value is the SubtaskStatus object which represents current progress.  Its counters
        ('attempted', 'succeeded', 'skipped', 'failed', 'retried_nomax', 'retried_withmax') and
        'state' reflect the point at which the retry was requested.
      * Second value is an exception returned by the innards of the method.  If the retry was
        successfully submitted, this value will be the RetryTaskError that retry() returns.
        Otherwise, it (ought to be) the current_exception passed in.
    """
    task_id = subtask_status.task_id
    log.info("Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)",
             task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)

    # Calculate time until we retry this task (in seconds):
    # The value for max_retries is increased by the number of times an "infinite-retry" exception
    # has been retried.  We want the regular retries to trigger max-retry checking, but not these
    # special retries.  So we count them separately.
    max_retries = _get_current_task().max_retries + subtask_status.retried_nomax
    base_delay = _get_current_task().default_retry_delay
    if skip_retry_max:
        # once we reach five retries, don't increase the countdown further.
        retry_index = min(subtask_status.retried_nomax, 5)
        exception_type = 'sending-rate'
        # if we have a cap, after all, apply it now:
        if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):
            retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax
            max_retries = min(max_retries, retry_cap)
    else:
        retry_index = subtask_status.retried_withmax
        exception_type = 'transient'

    # Skew the new countdown value by a random factor, so that not all
    # retries are deferred by the same amount.
    countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)
    log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',
                task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)

    # we make sure that we update the InstructorTask with the current subtask status
    # *before* actually calling retry(), to be sure that there is no race
    # condition between this update and the update made by the retried task.
    update_subtask_status(entry_id, task_id, subtask_status)

    # Now attempt the retry.  If it succeeds, it returns a RetryTaskError that
    # needs to be returned back to Celery.  If it fails, we return the existing
    # exception.
    try:
        send_course_email.retry(
            args=[
                entry_id,
                email_id,
                to_list,
                global_email_context,
                subtask_status.to_dict(),
            ],
            exc=current_exception,
            countdown=countdown,
            max_retries=max_retries,
            throw=True,
        )
    except RetryTaskError as retry_error:
        # If the retry call is successful, update with the current progress:
        log.exception('Task %s: email with id %d caused send_course_email task to retry.',
                      task_id, email_id)
        return subtask_status, retry_error
    except Exception as retry_exc:
        # If there are no more retries, because the maximum has been reached,
        # we expect the original exception to be raised.  We catch it here
        # (and put it in retry_exc just in case it's different, but it shouldn't be),
        # and update status as if it were any other failure.  That means that
        # the recipients still in the to_list are counted as failures.
        log.exception('Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',
                      task_id, email_id, [i['email'] for i in to_list])
        num_failed = len(to_list)
        # BUGFIX: previously this call also passed `subtask_status` as a stray
        # positional argument (subtask_status.increment(subtask_status, ...)),
        # unlike every other increment() call site in this module; drop it so
        # the counters are updated with the intended keyword values only.
        subtask_status.increment(failed=num_failed, state=FAILURE)
        return subtask_status, retry_exc
def _statsd_tag(course_title):
"""
Calculate the tag we will use for DataDog.
"""
tag = u"course_email:{0}".format(course_title)
return tag[:200]
|
agpl-3.0
|
ashutrix03/inteygrate_flaskapp-master
|
google/protobuf/unittest_mset_wire_format_pb2.py
|
43
|
3837
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_mset_wire_format.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE(review): this module is generated by protoc ("DO NOT EDIT" per the file
# header); only explanatory comments are added here -- no code is changed.

# Default symbol database, used to register the file descriptor and messages.
_sym_db = _symbol_database.Default()

# Serialized FileDescriptorProto for unittest_mset_wire_format.proto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/protobuf/unittest_mset_wire_format.proto',
  package='proto2_wireformat_unittest',
  syntax='proto2',
  serialized_pb=_b('\n/google/protobuf/unittest_mset_wire_format.proto\x12\x1aproto2_wireformat_unittest\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"d\n!TestMessageSetWireFormatContainer\x12?\n\x0bmessage_set\x18\x01 \x01(\x0b\x32*.proto2_wireformat_unittest.TestMessageSetB)H\x01\xf8\x01\x01\xaa\x02!Google.ProtocolBuffers.TestProtos')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Descriptor for the extendable TestMessageSet message (message_set_wire_format).
_TESTMESSAGESET = _descriptor.Descriptor(
  name='TestMessageSet',
  full_name='proto2_wireformat_unittest.TestMessageSet',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001')),
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(4, 2147483647), ],
  oneofs=[
  ],
  serialized_start=79,
  serialized_end=109,
)

# Descriptor for the container message holding a single TestMessageSet field.
_TESTMESSAGESETWIREFORMATCONTAINER = _descriptor.Descriptor(
  name='TestMessageSetWireFormatContainer',
  full_name='proto2_wireformat_unittest.TestMessageSetWireFormatContainer',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='message_set', full_name='proto2_wireformat_unittest.TestMessageSetWireFormatContainer.message_set', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=111,
  serialized_end=211,
)

# Wire up cross-references and register the message types with the descriptor.
_TESTMESSAGESETWIREFORMATCONTAINER.fields_by_name['message_set'].message_type = _TESTMESSAGESET
DESCRIPTOR.message_types_by_name['TestMessageSet'] = _TESTMESSAGESET
DESCRIPTOR.message_types_by_name['TestMessageSetWireFormatContainer'] = _TESTMESSAGESETWIREFORMATCONTAINER

# Build the concrete Python message classes from the descriptors.
TestMessageSet = _reflection.GeneratedProtocolMessageType('TestMessageSet', (_message.Message,), dict(
  DESCRIPTOR = _TESTMESSAGESET,
  __module__ = 'google.protobuf.unittest_mset_wire_format_pb2'
  # @@protoc_insertion_point(class_scope:proto2_wireformat_unittest.TestMessageSet)
  ))
_sym_db.RegisterMessage(TestMessageSet)

TestMessageSetWireFormatContainer = _reflection.GeneratedProtocolMessageType('TestMessageSetWireFormatContainer', (_message.Message,), dict(
  DESCRIPTOR = _TESTMESSAGESETWIREFORMATCONTAINER,
  __module__ = 'google.protobuf.unittest_mset_wire_format_pb2'
  # @@protoc_insertion_point(class_scope:proto2_wireformat_unittest.TestMessageSetWireFormatContainer)
  ))
_sym_db.RegisterMessage(TestMessageSetWireFormatContainer)

# Apply serialized file- and message-level options.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\001\370\001\001\252\002!Google.ProtocolBuffers.TestProtos'))
_TESTMESSAGESET.has_options = True
_TESTMESSAGESET._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001'))
# @@protoc_insertion_point(module_scope)
|
gpl-3.0
|
sunlianqiang/kbengine
|
kbe/res/scripts/common/Lib/encodings/cp855.py
|
272
|
33850
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions delegate to the C-level charmap
    # functions using the module's static tables (encoding_map /
    # decoding_table, defined elsewhere in this file).

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # charmap encoding is context-free, so each chunk can be encoded
    # independently; only the [0] element (the bytes) is returned.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding needs no state between chunks; only the
    # [0] element (the decoded text) is returned.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrapper: inherits encode() from Codec, buffering from codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream wrapper: inherits decode() from Codec, buffering from codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry the encodings package registers for 'cp855'."""
    return codecs.CodecInfo(
        name='cp855',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Start from the identity mapping for all 256 byte values, then override the
# high half (0x80-0xFF) with the CP855 (Cyrillic) assignments; bytes 0x00-0x7F
# keep their ASCII meanings.  Generated by gencodec.py -- data must not be
# hand-edited.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
    0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
    0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
    0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
    0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
    0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
    0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
    0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
    0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
    0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
    0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
    0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
    0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
    0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
    0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
    0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
    0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
    0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
    0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
    0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
    0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
    0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
    0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
    0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
    0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
    0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
    0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
    0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
    0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
    0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
    0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
    0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
    0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
    0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
    0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
    0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
    0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
    0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
    0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
    0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
    0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
    0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
    0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
    0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
    0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
    0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
    0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
    0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
    0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
    0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
    0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x00a4, # CURRENCY SIGN
    0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
    0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
    0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
    0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
    0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
    0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
    0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
    0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
    0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
    0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
    0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
    0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
    0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
    0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
    0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
    0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
    0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
    0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
    0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
    0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
    0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
    0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
    0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
    0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
    0x00ef: 0x2116, # NUMERO SIGN
    0x00f0: 0x00ad, # SOFT HYPHEN
    0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
    0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
    0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
    0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
    0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
    0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
    0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
    0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
    0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
    0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
    0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
    0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
    0x00fd: 0x00a7, # SECTION SIGN
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u2116' # 0x00ef -> NUMERO SIGN
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
'\xa7' # 0x00fd -> SECTION SIGN
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
lgpl-3.0
|
discosultan/quake-console
|
Samples/Sandbox/Lib/encodings/cp1252.py
|
594
|
13767
|
""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1252 codec backed by this module's charmap tables."""

    def encode(self, input, errors='strict'):
        # Map each character through encoding_table; unmapped characters
        # are handled according to *errors*.
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # Map each byte through decoding_table.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; a charmap codec keeps no cross-call state."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, consumed); only the bytes matter here.
        encoded, _length = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; a charmap codec keeps no cross-call state."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, consumed); only the text matters here.
        decoded, _length = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer combining this codec's encode with codecs.StreamWriter."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader combining this codec's decode with codecs.StreamReader."""
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec as 'cp1252'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1252',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverse of decoding_table, built once at import time for charmap_encode.
encoding_table=codecs.charmap_build(decoding_table)
|
mit
|
ofilipowicz/owndb
|
owndb/settings.py
|
1
|
3421
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded here; move to an environment variable or
# local_settings.py before deploying.
SECRET_KEY = 'lv^ce%zv9ppcl(v-mix+-&x2q#1mtq3@qxl==_bvyqy-k9soru'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1', 'localhost']

# django-crispy-forms template pack (was assigned twice; kept once here).
CRISPY_TEMPLATE_PACK = 'bootstrap3'

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'crispy_forms',
    'allauth.socialaccount',
    #'allauth.socialaccount.providers.facebook',
    'imagekit',
    'friendship',
    'store',
    'pages'
)

SITE_ID = 1

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware'
)

LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)

# Dummy gettext marker so the language names below are tagged for
# translation without importing django.utils.translation at settings load.
_ = lambda s: s
LANGUAGES = (
    ('en', _('English')),
    ('pl', _('Polski')),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.static',
    "django.core.context_processors.request",
    "allauth.account.context_processors.account",
    "django.contrib.auth.context_processors.auth",
    "django.contrib.messages.context_processors.messages",
    "allauth.socialaccount.context_processors.socialaccount",
)

AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    "django.contrib.auth.backends.ModelBackend",
    # `allauth` specific authentication methods, such as login by e-mail
    "allauth.account.auth_backends.AuthenticationBackend",
)

LOGIN_REDIRECT_URL = '/store'

SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
    'facebook': {
        'SCOPE': ['email', 'publish_stream'],
        'METHOD': 'js_sdk'  # instead of 'oauth2'
    }
}

ROOT_URLCONF = 'owndb.urls'
WSGI_APPLICATION = 'owndb.wsgi.application'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'owndb1',
        'USER': 'postgres',
        'PASSWORD': '',
        'HOST': 'localhost',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',  # Set to empty string for default.
    }
}

# E-mail settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'owndb.mail@gmail.com'

# Internationalization
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True

STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)

LOGIN_URL = '/accounts/login'
MEDIA_URL = '/media/'

from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# Machine-specific overrides are optional; missing local_settings is fine.
try:
    from owndb.local_settings import *
except ImportError:
    pass
|
mit
|
PrashantJalan/Mail-Cat
|
dict_create.py
|
3
|
1060
|
import os
from collections import Counter
from collections import OrderedDict
import nltk
from nltk.tokenize import *
from nltk.probability import *
def main():
    """Rebuild the word/document-frequency dictionary from class0..class2.

    Reads the existing 'dictionary2' file (one "<word> <count>" pair per
    line), adds a count for every distinct token found in each corpus
    document, then rewrites 'dictionary2' in place.  Python 2 script.
    """
    ran = range(3)
    # Load the previous dictionary into parallel word/count lists.
    d_file = open('dictionary2').readlines()
    dict_key = []
    dict_occ = []
    for line in d_file:
        tmp = line.strip()
        tmp = tmp.split()
        dict_key.append(tmp[0])
        dict_occ.append(int(tmp[1]))
    # Reopen for writing: this truncates the file; it is rewritten below.
    d = open('dictionary2', 'w')
    docs = 0
    for labl in ran:
        path = 'class'+str(labl)+'/'
        for files in os.listdir(path):
            if "file" in files:
                inp = open(path+files).read().lower()
                # Tokens: e-mail addresses, http(s) URLs, or plain words.
                tokenizer = RegexpTokenizer('[\w\.]+(@)[\w\.]+|[\w]*(http)[s]?[^"<>\s]+|\w+')
                words = tokenizer.tokenize(inp)
                fdist = FreqDist(words)
                # Each distinct token counts once per document (document
                # frequency, not term frequency).
                for it in fdist.keys():
                    if it in dict_key:
                        dict_occ[dict_key.index(it)] += 1
                    else:
                        dict_key.append(it)
                        dict_occ.append(1)
                docs = docs+1
    i=0
    for it in dict_key:
        d.write(it+' '+str(dict_occ[i])+'\n')
        i = i+1
    print "Total no. of Documents = "+str(docs)
|
apache-2.0
|
fauferoth/assignment
|
.mywaflib/waflib/extras/c_dumbpreproc.py
|
5
|
1647
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
Dumb C/C++ preprocessor for finding dependencies
It will look at all include files it can find after removing the comments, so the following
will always add the dependency on both "a.h" and "b.h"::
#include "a.h"
#ifdef B
#include "b.h"
#endif
int main() {
return 0;
}
To use::
def configure(conf):
conf.load('compiler_c')
conf.load('c_dumbpreproc')
"""
import re
from waflib.Tools import c_preproc
# Matches one `#include <...>` / `#include "..."` directive per line (the
# `%:` digraph for `#` is also accepted); group(2) is the keyword and
# group(3) the include path.
re_inc = re.compile(
	'^[ \t]*(#|%:)[ \t]*(include)[ \t]*[<"](.*)[>"]\r*$',
	re.IGNORECASE | re.MULTILINE)
def lines_includes(node):
	"""Return the (keyword, path) pairs of every #include directive in *node*.

	Trigraphs are expanded first (when enabled), then line continuations
	and comments are stripped, so includes hidden behind #ifdef blocks are
	still reported -- which is the point of this dumb preprocessor.
	"""
	code = node.read()
	if c_preproc.use_trigraphs:
		for (a, b) in c_preproc.trig_def:
			# BUG FIX: was `code = code.split(a).join(b)`, which raises
			# AttributeError (a list has no .join); substitute the trigraph.
			code = code.replace(a, b)
	code = c_preproc.re_nl.sub('', code)
	code = c_preproc.re_cpp.sub(c_preproc.repl, code)
	return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)]
# Keep a reference to waf's real parser class so we can subclass it below.
parser = c_preproc.c_parser
class dumb_parser(parser):
	# Queue the include entries of *node*; a POPFILE sentinel marks where
	# the file's scope ends so the node stack can be unwound.
	def addlines(self, node):
		if node in self.nodes[:-1]:
			return
		self.currentnode_stack.append(node.parent)
		# Avoid reading the same files again
		try:
			lines = self.parse_cache[node]
		except KeyError:
			lines = self.parse_cache[node] = lines_includes(node)
		self.lines = lines + [(c_preproc.POPFILE, '')] + self.lines
	# Entry point: resolve each queued (keyword, path) pair relative to the
	# current node stack; the cache lives on the build context.
	def start(self, node, env):
		try:
			self.parse_cache = node.ctx.parse_cache
		except AttributeError:
			self.parse_cache = node.ctx.parse_cache = {}
		self.addlines(node)
		while self.lines:
			(x, y) = self.lines.pop(0)
			if x == c_preproc.POPFILE:
				self.currentnode_stack.pop()
				continue
			self.tryfind(y)
# Replace waf's full preprocessor with the dumb variant.
c_preproc.c_parser = dumb_parser
|
bsd-3-clause
|
tadashi-aikawa/gemini
|
tests/addons/reqs2reqs/test_add.py
|
1
|
3227
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from jumeaux.addons.reqs2reqs.add import Executor
from jumeaux.models import Reqs2ReqsAddOnPayload
class TestExec:
    """Tests for the reqs2reqs `add` add-on: new requests are inserted at the
    head by default (or the tail with location="tail"), and missing request
    fields are filled with defaults (GET, empty headers/qs, utf-8)."""
    def test(self):
        # Two added requests keep their given order and precede the original.
        payload: Reqs2ReqsAddOnPayload = Reqs2ReqsAddOnPayload.from_dict(
            {
                "requests": [
                    {"path": "/origin", "headers": {"h1": "header1"}, "qs": {"q1": ["query1"]}}
                ]
            }
        )
        actual: Reqs2ReqsAddOnPayload = Executor(
            {
                "reqs": [
                    {"path": "/added1", "qs": {"q2": ["query2"]}},
                    {"path": "/added2", "headers": {"h2": "header2"}},
                ]
            }
        ).exec(payload, None)
        assert actual.to_dict() == {
            "requests": [
                {
                    "method": "GET",
                    "path": "/added1",
                    "headers": {},
                    "qs": {"q2": ["query2"]},
                    "url_encoding": "utf-8",
                },
                {
                    "method": "GET",
                    "path": "/added2",
                    "headers": {"h2": "header2"},
                    "qs": {},
                    "url_encoding": "utf-8",
                },
                {
                    "method": "GET",
                    "path": "/origin",
                    "headers": {"h1": "header1"},
                    "qs": {"q1": ["query1"]},
                    "url_encoding": "utf-8",
                },
            ]
        }
    def test_head(self):
        # Default location: the added request goes before the original.
        payload: Reqs2ReqsAddOnPayload = Reqs2ReqsAddOnPayload.from_dict(
            {"requests": [{"path": "/origin", "headers": {}, "qs": {}}]}
        )
        actual: Reqs2ReqsAddOnPayload = Executor({"reqs": [{"path": "/added"}]}).exec(payload, None)
        assert actual.to_dict() == {
            "requests": [
                {
                    "method": "GET",
                    "path": "/added",
                    "headers": {},
                    "qs": {},
                    "url_encoding": "utf-8",
                },
                {
                    "method": "GET",
                    "path": "/origin",
                    "headers": {},
                    "qs": {},
                    "url_encoding": "utf-8",
                },
            ]
        }
    def test_tail(self):
        # location="tail": the added request goes after the original.
        payload: Reqs2ReqsAddOnPayload = Reqs2ReqsAddOnPayload.from_dict(
            {"requests": [{"path": "/origin", "headers": {}, "qs": {}}]}
        )
        actual: Reqs2ReqsAddOnPayload = Executor(
            {"location": "tail", "reqs": [{"path": "/added"}]}
        ).exec(payload, None)
        assert actual.to_dict() == {
            "requests": [
                {
                    "method": "GET",
                    "path": "/origin",
                    "headers": {},
                    "qs": {},
                    "url_encoding": "utf-8",
                },
                {
                    "method": "GET",
                    "path": "/added",
                    "headers": {},
                    "qs": {},
                    "url_encoding": "utf-8",
                },
            ]
        }
|
mit
|
zhufenggood/shadowsocks
|
shadowsocks/asyncdns.py
|
655
|
17416
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# DNS record type codes (RFC 1035; AAAA from RFC 3596) and the IN class code.
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
    """Encode a hostname into DNS wire format (length-prefixed labels).

    Returns the encoded bytes terminated by a zero octet, or None when any
    single label exceeds the 63-octet limit of RFC 1035.
    """
    encoded = []
    for label in address.strip(b'.').split(b'.'):
        size = len(label)
        if size > 63:
            return None
        encoded.append(common.chr(size))
        encoded.append(label)
    encoded.append(b'\0')
    return b''.join(encoded)
def build_request(address, qtype):
    """Build a DNS query packet for *address* with record type *qtype*.

    Header flags: RD=1 (recursion desired), QDCOUNT=1, everything else 0.
    The transaction id is two random bytes.
    """
    header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
    question = build_address(address) + struct.pack('!HH', qtype, QCLASS_IN)
    return os.urandom(2) + header + question
def parse_ip(addrtype, data, length, offset):
    """Decode the RDATA of a resource record at data[offset:offset+length].

    A/AAAA records yield a printable address string, CNAME/NS records yield
    the referenced domain name, anything else is returned as raw bytes.
    """
    chunk = data[offset:offset + length]
    if addrtype == QTYPE_A:
        return socket.inet_ntop(socket.AF_INET, chunk)
    if addrtype == QTYPE_AAAA:
        return socket.inet_ntop(socket.AF_INET6, chunk)
    if addrtype in (QTYPE_CNAME, QTYPE_NS):
        return parse_name(data, offset)[1]
    return chunk
def parse_name(data, offset):
    """Decode a (possibly compressed) domain name starting at *offset*.

    Returns (bytes_consumed, name).  Compression pointers (RFC 1035
    section 4.1.4) are followed recursively, but only the 2 pointer bytes
    count toward the caller's consumed length.
    """
    p = offset
    labels = []
    l = common.ord(data[p])
    while l > 0:
        if (l & (128 + 64)) == (128 + 64):
            # pointer
            pointer = struct.unpack('!H', data[p:p + 2])[0]
            pointer &= 0x3FFF
            r = parse_name(data, pointer)
            labels.append(r[1])
            p += 2
            # pointer is the end
            return p - offset, b'.'.join(labels)
        else:
            labels.append(data[p + 1:p + 1 + l])
            p += 1 + l
            l = common.ord(data[p])
    # +1 accounts for the terminating zero-length label.
    return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
    """Parse one resource record (or question entry) at *offset*.

    Returns (bytes_consumed, (name, rdata, type, class, ttl)).  Question
    entries carry only TYPE and CLASS (RFC 1035 section 4.1.2), so their
    rdata and ttl slots are None.
    """
    nlen, name = parse_name(data, offset)
    if question:
        record_type, record_class = struct.unpack(
            '!HH', data[offset + nlen:offset + nlen + 4]
        )
        # FIX: keep the tuple arity consistent with the answer branch
        # (the original returned a 6-tuple here, a 5-tuple below);
        # callers only index elements 0..3 of question entries.
        return nlen + 4, (name, None, record_type, record_class, None)
    record_type, record_class, record_ttl, record_rdlength = struct.unpack(
        '!HHiH', data[offset + nlen:offset + nlen + 10]
    )
    ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
    return nlen + 10 + record_rdlength, \
        (name, ip, record_type, record_class, record_ttl)
def parse_header(data):
    """Unpack the fixed 12-byte DNS header; return None if *data* is short.

    Returns (id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount).
    The flag fields keep their raw (unshifted) bit values, exactly as
    masked out of the two flag bytes.
    """
    if len(data) < 12:
        return None
    res_id, flags1, flags2, qd, an, ns, ar = struct.unpack('!HBBHHHH',
                                                           data[:12])
    return (res_id,
            flags1 & 128,   # QR
            flags1 & 2,     # TC
            flags2 & 128,   # RA
            flags2 & 15,    # RCODE
            qd, an, ns, ar)
def parse_response(data):
    """Parse a raw DNS response packet into a DNSResponse, or None on error."""
    try:
        if len(data) >= 12:
            header = parse_header(data)
            if not header:
                return None
            res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
                res_ancount, res_nscount, res_arcount = header
            qds = []
            ans = []
            offset = 12
            # Question section.
            for i in range(0, res_qdcount):
                l, r = parse_record(data, offset, True)
                offset += l
                if r:
                    qds.append(r)
            # Answer section.
            for i in range(0, res_ancount):
                l, r = parse_record(data, offset)
                offset += l
                if r:
                    ans.append(r)
            # Authority and additional sections are discarded, but still
            # walked so the offset stays consistent.
            for i in range(0, res_nscount):
                l, r = parse_record(data, offset)
                offset += l
            for i in range(0, res_arcount):
                l, r = parse_record(data, offset)
                offset += l
            response = DNSResponse()
            if qds:
                response.hostname = qds[0][0]
            for an in qds:
                response.questions.append((an[1], an[2], an[3]))
            for an in ans:
                response.answers.append((an[1], an[2], an[3]))
            return response
    except Exception as e:
        shell.print_exception(e)
        return None
def is_valid_hostname(hostname):
    """Return True if *hostname* (bytes) is a syntactically valid hostname.

    Enforces the 255-octet total limit and validates every dot-separated
    label against VALID_HOSTNAME; a single trailing dot (FQDN form) is
    allowed and stripped first.
    """
    if len(hostname) > 255:
        return False
    # BUG FIX: on Python 3, indexing bytes yields an int, so the original
    # `hostname[-1] == b'.'` was always False and trailing-dot FQDNs were
    # rejected; endswith() works on both Python 2 and 3.
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
    return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
    """Result of parse_response(): the queried hostname plus record tuples."""
    def __init__(self):
        self.hostname = None
        self.questions = []  # each: (addr, type, class)
        self.answers = []  # each: (addr, type, class)
    def __str__(self):
        return '%s: %s' % (self.hostname, str(self.answers))
# Per-hostname resolution state: an A query is tried first, then AAAA.
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
    """Non-blocking DNS client driven by the shadowsocks event loop.

    Resolution order: static /etc/hosts entries, then a 300s LRU response
    cache, then a UDP query (A first, AAAA as fallback) to the configured
    servers.  Results are delivered through callback((hostname, ip), error).
    """
    def __init__(self, server_list=None):
        self._loop = None
        self._hosts = {}  # static entries loaded from the hosts file
        self._hostname_status = {}  # hostname -> STATUS_IPV4 / STATUS_IPV6
        self._hostname_to_cb = {}
        self._cb_to_hostname = {}
        self._cache = lru_cache.LRUCache(timeout=300)
        self._sock = None
        if server_list is None:
            self._servers = None
            self._parse_resolv()
        else:
            self._servers = server_list
        self._parse_hosts()
        # TODO monitor hosts change and reload hosts
        # TODO parse /etc/gai.conf and follow its rules
    def _parse_resolv(self):
        """Load nameserver addresses from /etc/resolv.conf (IPv4 only)."""
        self._servers = []
        try:
            with open('/etc/resolv.conf', 'rb') as f:
                content = f.readlines()
                for line in content:
                    line = line.strip()
                    if line:
                        if line.startswith(b'nameserver'):
                            parts = line.split()
                            if len(parts) >= 2:
                                server = parts[1]
                                if common.is_ip(server) == socket.AF_INET:
                                    if type(server) != str:
                                        server = server.decode('utf8')
                                    self._servers.append(server)
        except IOError:
            pass
        if not self._servers:
            # Fall back to Google public DNS.
            self._servers = ['8.8.4.4', '8.8.8.8']
    def _parse_hosts(self):
        """Load static hostname -> IP mappings from the system hosts file."""
        etc_path = '/etc/hosts'
        if 'WINDIR' in os.environ:
            etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
        try:
            with open(etc_path, 'rb') as f:
                for line in f.readlines():
                    line = line.strip()
                    parts = line.split()
                    if len(parts) >= 2:
                        ip = parts[0]
                        if common.is_ip(ip):
                            for i in range(1, len(parts)):
                                hostname = parts[i]
                                if hostname:
                                    self._hosts[hostname] = ip
        except IOError:
            self._hosts['localhost'] = '127.0.0.1'
    def add_to_loop(self, loop):
        """Attach the resolver's UDP socket and periodic cache sweep to *loop*."""
        if self._loop:
            raise Exception('already add to loop')
        self._loop = loop
        # TODO when dns server is IPv6
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                   socket.SOL_UDP)
        self._sock.setblocking(False)
        loop.add(self._sock, eventloop.POLL_IN, self)
        loop.add_periodic(self.handle_periodic)
    def _call_callback(self, hostname, ip, error=None):
        """Deliver a result (or error) to every callback waiting on *hostname*."""
        callbacks = self._hostname_to_cb.get(hostname, [])
        for callback in callbacks:
            if callback in self._cb_to_hostname:
                del self._cb_to_hostname[callback]
            if ip or error:
                callback((hostname, ip), error)
            else:
                callback((hostname, None),
                         Exception('unknown hostname %s' % hostname))
        if hostname in self._hostname_to_cb:
            del self._hostname_to_cb[hostname]
        if hostname in self._hostname_status:
            del self._hostname_status[hostname]
    def _handle_data(self, data):
        """Process one response; retry with AAAA if the A query found nothing."""
        response = parse_response(data)
        if response and response.hostname:
            hostname = response.hostname
            ip = None
            for answer in response.answers:
                if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
                        answer[2] == QCLASS_IN:
                    ip = answer[0]
                    break
            if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
                    == STATUS_IPV4:
                self._hostname_status[hostname] = STATUS_IPV6
                self._send_req(hostname, QTYPE_AAAA)
            else:
                if ip:
                    self._cache[hostname] = ip
                    self._call_callback(hostname, ip)
                elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
                    # AAAA round also failed: report the hostname as unknown.
                    for question in response.questions:
                        if question[1] == QTYPE_AAAA:
                            self._call_callback(hostname, None)
                            break
    def handle_event(self, sock, fd, event):
        """Event-loop hook: re-create the socket on error, otherwise read."""
        if sock != self._sock:
            return
        if event & eventloop.POLL_ERR:
            logging.error('dns socket err')
            self._loop.remove(self._sock)
            self._sock.close()
            # TODO when dns server is IPv6
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                       socket.SOL_UDP)
            self._sock.setblocking(False)
            self._loop.add(self._sock, eventloop.POLL_IN, self)
        else:
            data, addr = sock.recvfrom(1024)
            if addr[0] not in self._servers:
                # Ignore datagrams that did not come from our own servers.
                logging.warn('received a packet other than our dns')
                return
            self._handle_data(data)
    def handle_periodic(self):
        """Periodic hook: expire stale entries from the response cache."""
        self._cache.sweep()
    def remove_callback(self, callback):
        """Detach *callback*; drop the hostname's state when no waiters remain."""
        hostname = self._cb_to_hostname.get(callback)
        if hostname:
            del self._cb_to_hostname[callback]
            arr = self._hostname_to_cb.get(hostname, None)
            if arr:
                arr.remove(callback)
                if not arr:
                    del self._hostname_to_cb[hostname]
                    if hostname in self._hostname_status:
                        del self._hostname_status[hostname]
    def _send_req(self, hostname, qtype):
        """Send one query for *hostname*/*qtype* to every configured server."""
        req = build_request(hostname, qtype)
        for server in self._servers:
            logging.debug('resolving %s with type %d using server %s',
                          hostname, qtype, server)
            self._sock.sendto(req, (server, 53))
    def resolve(self, hostname, callback):
        """Resolve *hostname*, invoking callback((hostname, ip), error)."""
        if type(hostname) != bytes:
            hostname = hostname.encode('utf8')
        if not hostname:
            callback(None, Exception('empty hostname'))
        elif common.is_ip(hostname):
            # Already an IP literal: answer immediately.
            callback((hostname, hostname), None)
        elif hostname in self._hosts:
            logging.debug('hit hosts: %s', hostname)
            ip = self._hosts[hostname]
            callback((hostname, ip), None)
        elif hostname in self._cache:
            logging.debug('hit cache: %s', hostname)
            ip = self._cache[hostname]
            callback((hostname, ip), None)
        else:
            if not is_valid_hostname(hostname):
                callback(None, Exception('invalid hostname: %s' % hostname))
                return
            arr = self._hostname_to_cb.get(hostname, None)
            if not arr:
                # First waiter for this hostname: start an A query.
                self._hostname_status[hostname] = STATUS_IPV4
                self._send_req(hostname, QTYPE_A)
                self._hostname_to_cb[hostname] = [callback]
                self._cb_to_hostname[callback] = hostname
            else:
                arr.append(callback)
                # TODO send again only if waited too long
                self._send_req(hostname, QTYPE_A)
    def close(self):
        """Tear down the socket and unregister from the event loop."""
        if self._sock:
            if self._loop:
                self._loop.remove_periodic(self.handle_periodic)
                self._loop.remove(self._sock)
            self._sock.close()
            self._sock = None
def test():
    """Manual smoke test: resolve a batch of names (needs network access)."""
    dns_resolver = DNSResolver()
    loop = eventloop.EventLoop()
    dns_resolver.add_to_loop(loop)
    global counter
    counter = 0
    def make_callback():
        global counter
        def callback(result, error):
            global counter
            # TODO: what can we assert?
            print(result, error)
            counter += 1
            # Stop the loop once all nine resolutions have answered.
            if counter == 9:
                dns_resolver.close()
                loop.stop()
        a_callback = callback
        return a_callback
    # Each call must produce a distinct callback object (used as a dict key).
    assert(make_callback() != make_callback())
    dns_resolver.resolve(b'google.com', make_callback())
    dns_resolver.resolve('google.com', make_callback())
    dns_resolver.resolve('example.com', make_callback())
    dns_resolver.resolve('ipv6.google.com', make_callback())
    dns_resolver.resolve('www.facebook.com', make_callback())
    dns_resolver.resolve('ns2.google.com', make_callback())
    dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
    # Single label longer than 63 octets: must be rejected.
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    # Whole name longer than 255 octets: must be rejected.
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    loop.run()
if __name__ == '__main__':
    test()
|
apache-2.0
|
afourmy/pyNMS
|
pyNMS/views/geographical_view.py
|
2
|
7551
|
# Copyright (C) 2017 Antoine Fourmy <antoine dot fourmy at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
from os.path import join
from .base_view import BaseView
from math import asin, cos, radians, sin, sqrt
try:
import shapefile
import shapely.geometry
from pyproj import Proj
except ImportError as e:
import warnings
warnings.warn(str(e))
warnings.warn('SHP librairies missing: pyNMS will not start')
warnings.warn('please install "pyshp", "shapely", and "pyproj" with pip')
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import (
QBrush,
QPen,
QColor,
QDrag,
QPainter,
QPixmap
)
from PyQt5.QtWidgets import (
QFrame,
QPushButton,
QWidget,
QApplication,
QLabel,
QGraphicsItem,
QGraphicsLineItem,
QGraphicsPixmapItem,
QGroupBox,
)
class GeographicalView(BaseView):
    """View whose nodes carry both logical (canvas) and geographical positions."""
    def __init__(self, controller):
        super().__init__(controller)
        # initialize the map
        self.world_map = Map(self)
    def update_geographical_coordinates(self, *gnodes):
        """Write each graphical node's canvas position back as lon/lat."""
        for gnode in gnodes:
            lon, lat = self.world_map.to_geographical_coordinates(gnode.x, gnode.y)
            gnode.node.longitude, gnode.node.latitude = lon, lat
    def update_logical_coordinates(self, *gnodes):
        """Persist each graphical node's canvas position as logical coordinates."""
        for gnode in gnodes:
            gnode.node.logical_x, gnode.node.logical_y = gnode.x, gnode.y
    def move_to_geographical_coordinates(self, *gnodes):
        """Place nodes on the canvas according to their stored lon/lat.

        With no arguments, all nodes of the view are moved.
        """
        if not gnodes:
            gnodes = self.all_gnodes()
        for gnode in gnodes:
            gnode.x, gnode.y = self.world_map.to_canvas_coordinates(
                gnode.node.longitude,
                gnode.node.latitude
            )
    def move_to_logical_coordinates(self, *gnodes):
        """Place nodes on the canvas according to their stored logical position."""
        if not gnodes:
            gnodes = self.all_gnodes()
        for gnode in gnodes:
            gnode.x, gnode.y = gnode.node.logical_x, gnode.node.logical_y
    def haversine_distance(self, s, d):
        """Great-circle distance in kilometres between node objects *s* and *d*."""
        coord = (s.longitude, s.latitude, d.longitude, d.latitude)
        # decimal degrees to radians conversion
        lon_s, lat_s, lon_d, lat_d = map(radians, coord)
        delta_lon = lon_d - lon_s
        delta_lat = lat_d - lat_s
        a = sin(delta_lat/2)**2 + cos(lat_s)*cos(lat_d)*sin(delta_lon/2)**2
        c = 2*asin(sqrt(a))
        # radius of earth (km)
        r = 6371
        return c*r
class Map():
    """Draws the world map (water plus land polygons) on the view's scene and
    converts between canvas coordinates and longitude/latitude."""
    # Available pyproj projections, keyed by their display name.
    projections = OrderedDict([
    ('Spherical', Proj('+proj=ortho +lat_0=48 +lon_0=17')),
    ('Mercator', Proj(init='epsg:3395')),
    ('WGS84', Proj(init='epsg:3857')),
    ('ETRS89 - LAEA Europe', Proj("+init=EPSG:3035"))
    ])
    def __init__(self, view):
        self.view = view
        self.proj = 'Spherical'
        # Scale factor from projected metres to pixels, and canvas offset.
        self.ratio, self.offset = 1/1000, (0, 0)
        self.display = True
        self.polygons = self.view.scene.createItemGroup([])
        # brush for water and lands
        self.water_brush = QBrush(QColor(64, 164, 223))
        self.land_brush = QBrush(QColor(52, 165, 111))
        self.land_pen = QPen(QColor(52, 165, 111))
    def to_geographical_coordinates(self, x, y):
        """Invert the current projection: canvas (x, y) -> (longitude, latitude)."""
        px, py = (x - self.offset[0])/self.ratio, (self.offset[1] - y)/self.ratio
        return self.projections[self.proj](px, py, inverse=True)
    def to_canvas_coordinates(self, longitude, latitude):
        """Project (longitude, latitude) into scaled, offset canvas coordinates."""
        px, py = self.projections[self.proj](longitude, latitude)
        return px*self.ratio + self.offset[0], -py*self.ratio + self.offset[1]
    def draw_water(self):
        """Draw the water background: a disc for globe-like projections,
        otherwise a rectangle covering the Mercator projected bounds."""
        if self.proj in ('Spherical', 'ETRS89 - LAEA Europe'):
            cx, cy = self.to_canvas_coordinates(17, 48)
            # if the projection is ETRS89, we need the diameter and not the radius
            R = 6371000*self.ratio*(1 if self.proj == 'Spherical' else 2)
            earth_water = QtWidgets.QGraphicsEllipseItem(cx - R, cy - R, 2*R, 2*R)
            earth_water.setZValue(0)
            earth_water.setBrush(self.water_brush)
            self.polygons.addToGroup(earth_water)
        else:
            # we compute the projected bounds of the Mercator (3395) projection
            # upper-left corner x and y coordinates:
            ulc_x, ulc_y = self.to_canvas_coordinates(-180, 84)
            # lower-right corner x and y coordinates
            lrc_x, lrc_y = self.to_canvas_coordinates(180, -84.72)
            # width and height of the map (required for the QRectItem)
            width, height = lrc_x - ulc_x, lrc_y - ulc_y
            earth_water = QtWidgets.QGraphicsRectItem(ulc_x, ulc_y, width, height)
            earth_water.setZValue(0)
            earth_water.setBrush(self.water_brush)
            self.polygons.addToGroup(earth_water)
    def draw_polygons(self):
        """Yield one QGraphicsPolygonItem per land polygon in the shapefile."""
        # NOTE(review): relies on self.shapefile being assigned by the caller
        # before this runs -- it is not set in __init__; confirm the caller.
        sf = shapefile.Reader(self.shapefile)
        polygons = sf.shapes()
        for polygon in polygons:
            # convert shapefile geometries into shapely geometries
            # to extract the polygons of a multipolygon
            polygon = shapely.geometry.shape(polygon)
            # if it is a polygon, we use a list to make it iterable
            if polygon.geom_type == 'Polygon':
                polygon = [polygon]
            for land in polygon:
                qt_polygon = QtGui.QPolygonF()
                longitudes, latitudes = land.exterior.coords.xy
                for lon, lat in zip(longitudes, latitudes):
                    px, py = self.to_canvas_coordinates(lon, lat)
                    # Skip points the projection sent to infinity (back side
                    # of the globe in the orthographic projection).
                    if px > 1e+10:
                        continue
                    qt_polygon.append(QtCore.QPointF(px, py))
                polygon_item = QtWidgets.QGraphicsPolygonItem(qt_polygon)
                polygon_item.setBrush(self.land_brush)
                polygon_item.setPen(self.land_pen)
                polygon_item.setZValue(1)
                yield polygon_item
    def show_hide_map(self):
        """Toggle visibility of the whole map item group."""
        self.display = not self.display
        self.polygons.show() if self.display else self.polygons.hide()
    def delete_map(self):
        """Remove every map item from the scene."""
        self.view.scene.removeItem(self.polygons)
    def redraw_map(self):
        """Rebuild land polygons and water, then reposition nodes geographically."""
        self.delete_map()
        self.polygons = self.view.scene.createItemGroup(self.draw_polygons())
        self.draw_water()
        # replace the nodes at their geographical location
        self.view.move_to_geographical_coordinates()
|
gpl-3.0
|
KellyChan/Python
|
python/pygame/tetris/lib/shape.py
|
3
|
8684
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import pygame
from pygame.locals import *
import util
class Tile:
    """One board cell: its fill color and an optional image surface."""

    def __init__(self, color, image=None):
        self.color = color
        self.image = image
class Shape(object):
    """A falling tetromino: holds the piece's shape matrices, position on the
    board, and pre-rendered surfaces for the current and next piece.

    Python 2 code (tuple parameters in signatures, xrange, integer division).
    """
    SHAPE_WIDTH = 4
    SHAPE_HEIGHT = 4
    # One entry per piece type; each entry lists the piece's rotations as
    # 4x4 0/1 matrices (row-major, 1 = tile present).
    SHAPES = (
        ( ((0,0,0,0), #
           (0,1,1,0), # [][]
           (0,1,1,0), # [][]
           (0,0,0,0),), #
        ),
        ( ((0,0,0,0), #
           (1,1,1,1), # [][][][]
           (0,0,0,0), #
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (0,1,0,0), # []
           (0,1,0,0), # []
           (0,1,0,0),), # []
        ),
        ( ((0,0,0,0), #
           (0,1,1,0), # [][]
           (1,1,0,0), # [][]
           (0,0,0,0),), #
          ((1,0,0,0), # []
           (1,1,0,0), # [][]
           (0,1,0,0), # []
           (0,0,0,0),),
        ),
        ( ((0,0,0,0), #
           (1,1,0,0), # [][]
           (0,1,1,0), # [][]
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (1,1,0,0), # [][]
           (1,0,0,0), # []
           (0,0,0,0),), #
        ),
        ( ((0,0,0,0), #
           (1,1,1,0), # [][][]
           (1,0,0,0), # []
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (0,1,0,0), # []
           (0,1,1,0), # [][]
           (0,0,0,0),), #
          ((0,0,1,0), # []
           (1,1,1,0), # [][][]
           (0,0,0,0), #
           (0,0,0,0),), #
          ((1,1,0,0), # [][]
           (0,1,0,0), # []
           (0,1,0,0), # []
           (0,0,0,0),), #
        ),
        ( ((0,0,0,0), #
           (1,1,1,0), # [][][]
           (0,0,1,0), # []
           (0,0,0,0),), #
          ((0,1,1,0), # [][]
           (0,1,0,0), # []
           (0,1,0,0), # []
           (0,0,0,0),), #
          ((1,0,0,0), # []
           (1,1,1,0), # [][][]
           (0,0,0,0), #
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (0,1,0,0), # []
           (1,1,0,0), # [][]
           (0,0,0,0),), #
        ),
        ( ((0,0,0,0), #
           (1,1,1,0), # [][][]
           (0,1,0,0), # []
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (0,1,1,0), # [][]
           (0,1,0,0), # []
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (1,1,1,0), # [][][]
           (0,0,0,0), #
           (0,0,0,0),), #
          ((0,1,0,0), # []
           (1,1,0,0), # [][]
           (0,1,0,0), # []
           (0,0,0,0),), #
        ),
    )
    # One RGB color per piece type, indexed like SHAPES.
    COLORS = ((0xcc, 0x66, 0x66),(0x66, 0xcc, 0x66), \
              (0x66, 0x66, 0xcc),(0xcc, 0xcc, 0x66), \
              (0xcc, 0x66, 0xcc),(0x66, 0xcc, 0xcc), \
              (0xda, 0xaa, 0x00))
    def __init__(self, board_start, (board_w, board_h), (w, h)):
        self.start = board_start
        self.W, self.H = w, h #width of board
        self.length = board_w / w #width/height of a title
        self.x = 0
        self.y = 0
        self.index = 0 # the type of shape
        self.indexN = 0 # the type of next shape
        self.subindex = 0 # the index in the specil shape
        self.shapes = [] # record the shapes
        self.color = ()
        self.shape = None
        self.image = pygame.Surface((self.length*self.SHAPE_WIDTH, \
            self.length*self.SHAPE_HEIGHT), SRCALPHA, 32)
        self.image_next = pygame.Surface((self.length*self.SHAPE_WIDTH, \
            self.length*self.SHAPE_HEIGHT), SRCALPHA, 32)
        self.board = [] #the current board stat
        self.new()
    def set_board(self, board):
        # The board is a H x W grid of occupied/empty cells, shared with the game.
        self.board = board
    def new(self):
        """Promote the queued piece to current (centered at the top) and
        pick a random type for the next one."""
        self.x = self.W /2 - 2
        self.y = 0
        self.index = self.indexN
        self.shapes = self.SHAPES[self.index]
        self.subindex = random.randint(0, len(self.shapes) - 1)
        self.color = self.COLORS[self.index]
        self.shape = self.shapes[self.subindex]
        self.indexN = random.randint(0, len(self.SHAPES) - 1)
        self.draw_current_shape()
        self.draw_next_shape()
    def rotate(self):
        """Advance to the next rotation; roll back if it would collide."""
        self.subindex = (self.subindex + 1) % len(self.shapes)
        self.shape = self.shapes[self.subindex]
        if self.check_legal():
            pass
        else:
            self.subindex = (self.subindex - 1) % len(self.shapes)
            self.shape = self.shapes[self.subindex]
        self.draw_current_shape()
    def check_legal(self, r=0, c=0):
        """Return True if the piece, shifted by (r, c), stays on the board
        and overlaps no occupied cell."""
        for x in xrange(self.SHAPE_WIDTH):
            for y in xrange(self.SHAPE_HEIGHT):
                if (self.shape[y][x] and # a tile there
                    (self.x+x+r < 0 or # left outside
                     self.x+x+r >= self.W or # right outside
                     self.y+y+c >= self.H or # bottom outside
                     self.board[self.y+y+c][self.x+x+r] # tile cover
                     )):
                    return False
        return True
    def move(self, r, c):
        # Shift by (r, c) columns/rows, only if the target position is legal.
        if self.check_legal(r, c):
            self.x += r
            self.y += c
    def at_bottom(self):
        """Return True if the piece rests on the floor or on settled tiles."""
        for x in xrange(self.SHAPE_WIDTH):
            for y in xrange(self.SHAPE_HEIGHT - 1, -1, -1):
                if (self.shape[y][x] and \
                    (self.y+y+1 >= self. H or \
                     self.board[self.y+y+1][self.x+x])):
                    return True
        return False
    def draw_current_shape(self):
        self._draw_shape(self.image, self.index, self.subindex)
    def draw_next_shape(self):
        self._draw_shape(self.image_next, self.indexN)
    def _draw_shape(self, surface, index, subindex = -1):
        """ Draw the shape to surface """
        surface.fill((0, 0, 0, 0))
        if subindex == -1:
            subindex = 0
        shape = self.SHAPES[index][subindex]
        color = self.COLORS[index]
        for x in xrange(self.SHAPE_HEIGHT):
            for y in xrange(self.SHAPE_WIDTH):
                if shape[x][y]:
                    surface.fill(color, \
                        (y*self.length, x*self.length, \
                         self.length, self.length))
                    pygame.draw.rect(surface, \
                        (255, 255, 255, 100), \
                        (y*self.length, x*self.length, self.length, self.length), \
                        1)
    def draw(self, screen):
        # Blit the pre-rendered piece at its board-relative pixel position.
        screen.blit(self.image, (self.start[0]+self.length*self.x, \
            self.start[1]+self.length*self.y))
class Shape2(Shape):
    """Degenerate piece: a single full-board-width horizontal bar."""
    def __init__(self, board_start, (board_width, board_height), (w, h)):
        # Replace the piece table with one 1 x w bar before Shape.__init__
        # picks from it.
        self.SHAPES = ((((1,) * w, ), ), )
        self.SHAPE_WIDTH = w
        self.SHAPE_HEIGHT = 1
        super(Shape2, self).__init__(board_start, \
            (board_width, board_height), (w, h))
    def new(self):
        """Spawn the bar at the top-left corner (x starts at 0, not centered)."""
        self.x = 0
        self.y = 0
        self.index = self.indexN
        self.shapes = self.SHAPES[self.index]
        self.subindex = random.randint(0, len(self.shapes) - 1)
        self.color = self.COLORS[self.index]
        self.shape = self.shapes[self.subindex]
        self.indexN = random.randint(0, len(self.SHAPES) - 1)
        self.draw_current_shape()
        self.draw_next_shape()
    def draw_next_shape(self):
        # The bar has no "next piece" preview.
        pass
class Shape4(Shape):
def __init__(self, board_start, (board_width, board_height), (w, h)):
self.SHAPES += (
(((1,1,1,1), (1,1,1,1), (1,1,1,1), (1,1,1,1)),),
)
self.COLORS += ((0, 0, 0),)
self._image = {}
self.image[7] = pygame.image.load(util.file_path('neko.png')).convert_alpha()
super(Shape4, self).__init__(board_start, (board_width, board_height), (w, h))
def _draw_shape(self, surface, index, subindex = -1):
surface.fill((0, 0, 0,0))
if index > 6:
surface.blit(self._image[index], (0, 0))
else:
super(Shape4, self)._draw_shape(surface, index, subindex)
def get_part_image(self, x, y):
return self._image[self.index].subsurface( \
(y*self.length, x*self.length), (self.length, self.length))
|
mit
|
FireWRT/OpenWrt-Firefly-Libraries
|
staging_dir/host/lib/python2.7/atexit.py
|
336
|
1705
|
"""
atexit.py - allow programmer to define multiple exit functions to be executed
upon normal program termination.
One public function, register, is defined.
"""
__all__ = ["register"]
import sys
_exithandlers = []
def _run_exitfuncs():
    """run any registered exit functions
    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """
    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            # Remember the SystemExit but keep draining the handlers.
            exc_info = sys.exc_info()
        except:
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()
    if exc_info is not None:
        # Re-raise the last exception; the Python 2 three-argument raise
        # preserves its original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]
def register(func, *targs, **kargs):
    """Schedule *func* to run at normal interpreter exit.

    Any positional and keyword arguments given here are forwarded to
    *func* when it is invoked.  The function itself is returned, so
    register() can also be used as a decorator.
    """
    entry = (func, targs, kargs)
    _exithandlers.append(entry)
    return func
# If something installed its own sys.exitfunc before this module loaded,
# keep it by registering it as an ordinary handler, then take over the hook.
if hasattr(sys, "exitfunc"):
    # Assume it's another registered exit function - append it to our list
    register(sys.exitfunc)
sys.exitfunc = _run_exitfuncs
if __name__ == "__main__":
    # Smoke-test demo: the three handlers print in reverse registration
    # order at interpreter exit (Python 2 print statements).
    def x1():
        print "running x1"
    def x2(n):
        print "running x2(%r)" % (n,)
    def x3(n, kwd=None):
        print "running x3(%r, kwd=%r)" % (n, kwd)
    register(x1)
    register(x2, 12)
    register(x3, 5, "bar")
    register(x3, "no kwd args")
|
gpl-2.0
|
chafique-delli/OpenUpgrade
|
addons/l10n_fr_rib/__init__.py
|
433
|
1046
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
badele/home-assistant
|
homeassistant/components/isy994.py
|
1
|
7195
|
"""
homeassistant.components.isy994
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Connects to an ISY-994 controller and loads relevant components to control its
devices. Also contains the base classes for ISY Sensors, Lights, and Switches.
For configuration details please visit the documentation for this component at
https://home-assistant.io/components/isy994/
"""
import logging
from urllib.parse import urlparse
from homeassistant import bootstrap
from homeassistant.loader import get_component
from homeassistant.helpers import validate_config
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, EVENT_PLATFORM_DISCOVERED,
EVENT_HOMEASSISTANT_STOP, ATTR_SERVICE, ATTR_DISCOVERED,
ATTR_FRIENDLY_NAME)
DOMAIN = "isy994"
DEPENDENCIES = []
REQUIREMENTS = ['PyISY==1.0.5']
DISCOVER_LIGHTS = "isy994.lights"
DISCOVER_SWITCHES = "isy994.switches"
DISCOVER_SENSORS = "isy994.sensors"
ISY = None
SENSOR_STRING = 'Sensor'
HIDDEN_STRING = '{HIDE ME}'
CONF_TLS_VER = 'tls'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """
    Setup ISY994 component.

    Validates configuration, connects to the ISY controller via PyISY,
    and bootstraps the sensor/light/switch platforms for its devices.
    Returns True on success, False on any configuration/connection error.
    """
    try:
        import PyISY
    except ImportError:
        _LOGGER.error("Error while importing dependency PyISY.")
        return False
    # pylint: disable=global-statement
    # check for required values in configuration file
    if not validate_config(config,
                           {DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
                           _LOGGER):
        return False
    # pull and parse standard configuration
    user = config[DOMAIN][CONF_USERNAME]
    password = config[DOMAIN][CONF_PASSWORD]
    host = urlparse(config[DOMAIN][CONF_HOST])
    addr = host.geturl()
    if host.scheme == 'http':
        addr = addr.replace('http://', '')
        https = False
    elif host.scheme == 'https':
        addr = addr.replace('https://', '')
        https = True
    else:
        _LOGGER.error('isy994 host value in configuration file is invalid.')
        return False
    port = host.port
    # NOTE(review): when no port is given, host.port is None and this replace
    # is a no-op; presumably PyISY accepts port=None — confirm upstream.
    addr = addr.replace(':{}'.format(port), '')
    # pull and parse optional configuration
    global SENSOR_STRING
    global HIDDEN_STRING
    SENSOR_STRING = str(config[DOMAIN].get('sensor_string', SENSOR_STRING))
    HIDDEN_STRING = str(config[DOMAIN].get('hidden_string', HIDDEN_STRING))
    tls_version = config[DOMAIN].get(CONF_TLS_VER, None)
    # connect to ISY controller
    global ISY
    ISY = PyISY.ISY(addr, port, user, password, use_https=https,
                    tls_ver=tls_version, log=_LOGGER)
    if not ISY.connected:
        return False
    # listen for HA stop to disconnect
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
    # Load components for the devices in the ISY controller that we support
    for comp_name, discovery in ((('sensor', DISCOVER_SENSORS),
                                  ('light', DISCOVER_LIGHTS),
                                  ('switch', DISCOVER_SWITCHES))):
        component = get_component(comp_name)
        bootstrap.setup_component(hass, component.DOMAIN, config)
        # Fire a discovery event so each platform picks up its ISY devices.
        hass.bus.fire(EVENT_PLATFORM_DISCOVERED,
                      {ATTR_SERVICE: discovery,
                       ATTR_DISCOVERED: {}})
    # Start streaming state updates from the controller.
    ISY.auto_update = True
    return True
def stop(event):
    """ Cleanup the ISY subscription. """
    # Stop streaming events from the controller when Home Assistant shuts down.
    ISY.auto_update = False
class ISYDeviceABC(ToggleEntity):
    """ Abstract Class for an ISY device. """

    # map: state-attribute name -> name of the property supplying its value
    _attrs = {}
    # names of kwargs forwarded (in order) to node.on() when turning on
    _onattrs = []
    # optional (on_label, off_label) pair reported by the `state` property
    _states = []
    # explicit data type ('analog'/'binary'); inferred from units when None
    _dtype = None
    # Home Assistant domain of the entity (e.g. 'light', 'sensor')
    _domain = None
    # optional friendly-name override; falls back to the ISY node's name
    _name = None

    def __init__(self, node):
        """Wrap an ISY node and subscribe to its status updates."""
        # setup properties
        self.node = node
        self.hidden = HIDDEN_STRING in self.raw_name
        # track changes
        self._change_handler = self.node.status. \
            subscribe('changed', self.on_update)

    def __del__(self):
        """ cleanup subscriptions because it is the right thing to do. """
        self._change_handler.unsubscribe()

    @property
    def domain(self):
        """ Returns the domain of the entity. """
        return self._domain

    @property
    def dtype(self):
        """ Returns the data type of the entity (binary or analog). """
        if self._dtype in ['analog', 'binary']:
            return self._dtype
        # Infer: nodes that report units are analog, the rest binary.
        return 'binary' if self.unit_of_measurement is None else 'analog'

    @property
    def should_poll(self):
        """ Tells Home Assistant not to poll this entity. """
        return False

    @property
    def value(self):
        """ Returns the unclean value from the controller. """
        # pylint: disable=protected-access
        return self.node.status._val

    @property
    def state_attributes(self):
        """ Returns the state attributes for the node. """
        attr = {ATTR_FRIENDLY_NAME: self.name}
        for name, prop in self._attrs.items():
            attr[name] = getattr(self, prop)
        attr = self._attr_filter(attr)
        return attr

    def _attr_filter(self, attr):
        """ Placeholder for attribute filters. """
        # pylint: disable=no-self-use
        return attr

    @property
    def unique_id(self):
        """ Returns the id of this ISY sensor. """
        # pylint: disable=protected-access
        return self.node._id

    @property
    def raw_name(self):
        """ Returns the unclean node name. """
        return str(self._name) \
            if self._name is not None else str(self.node.name)

    @property
    def name(self):
        """ Returns the cleaned name of the node. """
        return self.raw_name.replace(HIDDEN_STRING, '').strip() \
            .replace('_', ' ')

    def update(self):
        """ Update state of the sensor. """
        # ISY objects are automatically updated by the ISY's event stream
        pass

    def on_update(self, event):
        """ Handles the update received event. """
        self.update_ha_state()

    @property
    def is_on(self):
        """ Returns boolean response if the node is on. """
        return bool(self.value)

    @property
    def is_open(self):
        """ Returns boolean response if the node is open. On = Open. """
        return self.is_on

    @property
    def state(self):
        """ Returns the state of the node. """
        if len(self._states) > 0:
            return self._states[0] if self.is_on else self._states[1]
        return self.value

    def turn_on(self, **kwargs):
        """ Turns the device on. """
        # Fixed: was `self.domain is not 'sensor'` — identity comparison with
        # a str literal only works via CPython interning (SyntaxWarning on
        # 3.8+); use value equality instead.
        if self.domain != 'sensor':
            attrs = [kwargs.get(name) for name in self._onattrs]
            self.node.on(*attrs)
        else:
            _LOGGER.error('ISY cannot turn on sensors.')

    def turn_off(self, **kwargs):
        """ Turns the device off. """
        # Fixed: same `is not` -> `!=` literal-comparison bug as turn_on().
        if self.domain != 'sensor':
            self.node.off()
        else:
            _LOGGER.error('ISY cannot turn off sensors.')

    @property
    def unit_of_measurement(self):
        """ Returns the defined units of measurement or None. """
        try:
            return self.node.units
        except AttributeError:
            return None
|
mit
|
DougFirErickson/qgisSpaceSyntaxToolkit
|
esstoolkit/external/pyqtgraph/graphicsItems/GraphicsWidgetAnchor.py
|
54
|
4080
|
from ..Qt import QtGui, QtCore
from ..Point import Point
class GraphicsWidgetAnchor(object):
    """
    Mixin that pins a GraphicsWidget to a fixed position on its parent.
    Whenever the parent's geometry changes, the item is repositioned so the
    chosen anchor points stay aligned. Used, for example, by LegendItem to
    stick to a corner of its parent PlotItem.
    """

    def __init__(self):
        self.__parent = None
        self.__parentAnchor = None
        self.__itemAnchor = None
        self.__offset = (0, 0)
        # Re-anchor whenever our own geometry changes, if the subclass
        # provides the geometryChanged signal.
        if hasattr(self, 'geometryChanged'):
            self.geometryChanged.connect(self.__geometryChanged)

    def anchor(self, itemPos, parentPos, offset=(0, 0)):
        """
        Pin this item's local *itemPos* to *parentPos* on its parent.
        Both positions are fractions of the item/parent size: 0 means the
        left/top edge, 1 the right/bottom edge. *offset* adds an absolute
        displacement.

        Example — fix the upper-right corner 10px left and 10px down from
        the parent's upper-right corner::

            box.anchor(itemPos=(1,0), parentPos=(1,0), offset=(-10,10))
        """
        par = self.parentItem()
        if par is None:
            raise Exception("Cannot anchor; parent is not set.")
        if self.__parent is not par:
            # Re-wire the geometry signal from the old parent to the new one.
            if self.__parent is not None:
                self.__parent.geometryChanged.disconnect(self.__geometryChanged)
            self.__parent = par
            par.geometryChanged.connect(self.__geometryChanged)
        self.__itemAnchor = itemPos
        self.__parentAnchor = parentPos
        self.__offset = offset
        self.__geometryChanged()

    def autoAnchor(self, pos, relative=True):
        """
        Place this item at *pos* by deriving anchor settings automatically.

        With relative=True, the corner of the item nearest the parent's
        boundary is anchored to the matching fractional position on the
        parent, with no offset. With relative=False, that corner is anchored
        to the same corner of the parent and an absolute offset makes up the
        difference.
        """
        pos = Point(pos)
        br = self.mapRectToParent(self.boundingRect()).translated(pos - self.pos())
        pbr = self.parentItem().boundingRect()
        anchorPos = [0, 0]
        parentPos = Point()
        itemPos = Point()
        edges = [
            (br.left(), br.right(), pbr.left(), pbr.right()),
            (br.top(), br.bottom(), pbr.top(), pbr.bottom()),
        ]
        for axis, (lo, hi, plo, phi) in enumerate(edges):
            # Anchor whichever edge of the item sits closest to the
            # corresponding parent edge on this axis.
            if abs(lo - plo) < abs(hi - phi):
                anchorPos[axis] = 0
                parentPos[axis] = plo
                itemPos[axis] = lo
            else:
                anchorPos[axis] = 1
                parentPos[axis] = phi
                itemPos[axis] = hi
        if relative:
            relPos = [(itemPos[0] - pbr.left()) / pbr.width(),
                      (itemPos[1] - pbr.top()) / pbr.height()]
            self.anchor(anchorPos, relPos)
        else:
            self.anchor(anchorPos, anchorPos, itemPos - parentPos)

    def __geometryChanged(self):
        if self.__parent is None or self.__itemAnchor is None:
            return
        # Where our local origin currently maps to, in parent coordinates.
        origin = self.mapToParent(Point(0, 0))
        # Our anchor point, mapped into parent coordinates.
        anchorPt = self.mapToParent(
            self.boundingRect().bottomRight() * Point(self.__itemAnchor))
        # The target point on the parent.
        target = self.__parent.boundingRect().bottomRight() * Point(self.__parentAnchor)
        self.setPos(target + (origin - anchorPt) + Point(self.__offset))
|
gpl-3.0
|
thjashin/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py
|
22
|
4097
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops.bijectors import affine_linear_operator as affine_linear_operator_lib
from tensorflow.python.platform import test
class AffineLinearOperatorTest(test.TestCase):
  """Consistency checks for the AffineLinearOperator bijector."""

  def _check_bijector(self, affine, x, y, ildj):
    """Asserts name, forward/inverse round trip, and log-det Jacobians."""
    self.assertEqual(affine.name, "affine_linear_operator")
    self.assertAllClose(y, affine.forward(x).eval())
    self.assertAllClose(x, affine.inverse(y).eval())
    self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())
    self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),
                        affine.forward_log_det_jacobian(x).eval())

  def testIdentity(self):
    # No shift, no scale: the bijector is the identity with zero log-det.
    with self.test_session():
      affine = affine_linear_operator_lib.AffineLinearOperator(
          validate_args=True)
      x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
      self._check_bijector(affine, x, x, 0.)

  def testDiag(self):
    # Diagonal scale: y = diag * x + shift, ildj = -sum(log|diag|).
    with self.test_session():
      shift = np.array([-1, 0, 1], dtype=np.float32)
      diag = np.array([[1, 2, 3],
                       [2, 5, 6]], dtype=np.float32)
      scale = linalg.LinearOperatorDiag(diag, is_non_singular=True)
      affine = affine_linear_operator_lib.AffineLinearOperator(
          shift=shift, scale=scale, validate_args=True)
      x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
      ildj = -np.sum(np.log(np.abs(diag)), axis=-1)
      self._check_bijector(affine, x, diag * x + shift, ildj)

  def testTriL(self):
    # Lower-triangular scale: y = tril @ x + shift (batched matvec).
    with self.test_session():
      shift = np.array([-1, 0, 1], dtype=np.float32)
      tril = np.array([[[1, 0, 0],
                        [2, -1, 0],
                        [3, 2, 1]],
                       [[2, 0, 0],
                        [3, -2, 0],
                        [4, 3, 2]]],
                      dtype=np.float32)
      scale = linalg.LinearOperatorTriL(tril, is_non_singular=True)
      affine = affine_linear_operator_lib.AffineLinearOperator(
          shift=shift, scale=scale, validate_args=True)
      x = np.array([[[1, 0, -1],
                     [2, 3, 4]],
                    [[4, 1, -7],
                     [6, 9, 8]]],
                   dtype=np.float32)
      # If we made the bijector do x*A+b then this would be simplified to:
      # y = np.matmul(x, tril) + shift.
      y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
      ildj = -np.sum(np.log(np.abs(np.diagonal(
          tril, axis1=-2, axis2=-1))),
                     axis=-1)
      self._check_bijector(affine, x, y, ildj)
# Allow running this test module directly; delegates to the TF test runner.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
os2webscanner/os2webscanner
|
django-os2webscanner/os2webscanner/migrations/0040_auto_20180711_1457.py
|
1
|
1657
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-11 12:57
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
    """Auto-generated migration: adds ExchangeScanner and tightens the
    `columns` field validators on Scan and Scanner (digits separated by
    commas only). Do not hand-edit the operations."""

    dependencies = [
        ('os2webscanner', '0039_exchangedomain'),
    ]
    operations = [
        # New multi-table-inheritance child of Scanner for Exchange scans.
        migrations.CreateModel(
            name='ExchangeScanner',
            fields=[
                ('scanner_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='os2webscanner.Scanner')),
                ('domains', models.ManyToManyField(related_name='exchangedomains', to='os2webscanner.ExchangeDomain', verbose_name='Exchange Domæner')),
            ],
            options={
                'db_table': 'os2webscanner_exchangescanner',
            },
            bases=('os2webscanner.scanner',),
        ),
        # Regex flag 32 is re.UNICODE (kept numeric by the generator).
        migrations.AlterField(
            model_name='scan',
            name='columns',
            field=models.CharField(blank=True, max_length=128, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z', 32), code='invalid', message='Enter only digits separated by commas.')]),
        ),
        migrations.AlterField(
            model_name='scanner',
            name='columns',
            field=models.CharField(blank=True, max_length=128, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z', 32), code='invalid', message='Enter only digits separated by commas.')]),
        ),
    ]
|
mpl-2.0
|
ksrajkumar/openerp-6.1
|
openerp/addons/base_report_designer/base_report_designer.py
|
9
|
3544
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from openerp_sxw2rml import sxw2rml
from StringIO import StringIO
import base64
import pooler
import addons
class report_xml(osv.osv):
    """Extend ir.actions.report.xml with OpenOffice report-designer uploads."""
    _inherit = 'ir.actions.report.xml'

    # XSL stylesheet used to normalize each supported source format to RML.
    _XSL_BY_TYPE = {
        'sxw': 'normalized_oo2rml.xsl',
        'odt': 'normalized_odt2rml.xsl',
    }

    def _sxw_to_rml(self, file_sxw, file_type):
        """Convert base64-encoded sxw/odt report data to an RML string.

        :param file_sxw: base64-encoded document content
        :param file_type: 'sxw' or 'odt'
        :raises ValueError: if file_type is not supported (previously this
            crashed with an UnboundLocalError on ``fp``)
        """
        try:
            xsl_name = self._XSL_BY_TYPE[file_type]
        except KeyError:
            raise ValueError("Unsupported report file type: %r" % (file_type,))
        sxwval = StringIO(base64.decodestring(file_sxw))
        xsl_path = addons.get_module_resource(
            'base_report_designer', 'openerp_sxw2rml', xsl_name)
        # Close the stylesheet handle deterministically (it leaked before).
        fp = open(xsl_path, 'rb')
        try:
            return str(sxw2rml(sxwval, xsl=fp.read()))
        finally:
            fp.close()

    def sxwtorml(self, cr, uid, file_sxw, file_type):
        '''
        The use of this function is to get rml file from sxw file.
        '''
        return {'report_rml_content': self._sxw_to_rml(file_sxw, file_type)}

    def upload_report(self, cr, uid, report_id, file_sxw, file_type, context=None):
        '''
        Store the uploaded sxw/odt source and its RML rendering on the report.
        '''
        pool = pooler.get_pool(cr.dbname)
        pool.get('ir.actions.report.xml').write(cr, uid, [report_id], {
            'report_sxw_content': base64.decodestring(file_sxw),
            'report_rml_content': self._sxw_to_rml(file_sxw, file_type),
        })
        # Refresh the report registry so the new RML is used immediately.
        pool.get('ir.actions.report.xml').register_all(cr)
        return True

    def report_get(self, cr, uid, report_id, context=None):
        """Return the report's stored sxw and rml content, base64-encoded."""
        if context is None:
            context = {}
        # skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
        context.update(bin_raw=True)
        report = self.browse(cr, uid, report_id, context=context)
        sxw_data = report.report_sxw_content
        rml_data = report.report_rml_content
        # Python 2: browse records may hand back unicode; re-encode to bytes
        # before base64-encoding.
        if isinstance(sxw_data, unicode):
            sxw_data = sxw_data.encode("iso-8859-1", "replace")
        if isinstance(rml_data, unicode):
            rml_data = rml_data.encode("iso-8859-1", "replace")
        return {
            'file_type' : report.report_type,
            'report_sxw_content': sxw_data and base64.encodestring(sxw_data) or False,
            'report_rml_content': rml_data and base64.encodestring(rml_data) or False
        }
report_xml()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
aviciimaxwell/odoo
|
addons/l10n_cr/__openerp__.py
|
178
|
3140
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# __openerp__.py
# l10n_cr_account
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
{
'name': 'Costa Rica - Accounting',
'version': '0.1',
'url': 'http://launchpad.net/openerp-costa-rica',
'author': 'ClearCorp S.A.',
'website': 'http://clearcorp.co.cr',
'category': 'Localization/Account Charts',
'description': """
Chart of accounts for Costa Rica.
=================================
Includes:
---------
* account.type
* account.account.template
* account.tax.template
* account.tax.code.template
* account.chart.template
Everything is in English with Spanish translation. Further translations are welcome,
please go to http://translations.launchpad.net/openerp-costa-rica.
""",
'depends': ['account', 'account_chart', 'base'],
'demo': [],
'data': [
'l10n_cr_base_data.xml',
'data/account_account_type.xml',
'data/account_account_template.xml',
'data/account_tax_code_template.xml',
'data/account_chart_template.xml',
'data/account_tax_template.xml',
'l10n_wizard.xml',
],
'license': 'Other OSI approved licence',
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Edraak/edraak-platform
|
lms/djangoapps/courseware/tests/test_date_summary.py
|
1
|
39412
|
# -*- coding: utf-8 -*-
"""Tests for course home page date summary blocks."""
from datetime import datetime, timedelta
from unittest import expectedFailure
import ddt
import waffle
from django.contrib.messages.middleware import MessageMiddleware
from django.urls import reverse
from django.test import RequestFactory
from freezegun import freeze_time
from mock import patch
from nose.plugins.attrib import attr
from pytz import utc
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from courseware.courses import get_course_date_blocks
from courseware.date_summary import (
CertificateAvailableDate,
CourseEndDate,
CourseStartDate,
TodaysDate,
VerificationDeadlineDate,
VerifiedUpgradeDeadlineDate
)
from courseware.models import (
CourseDynamicUpgradeDeadlineConfiguration,
DynamicUpgradeDeadlineConfiguration,
OrgDynamicUpgradeDeadlineConfiguration
)
from lms.djangoapps.commerce.models import CommerceConfiguration
from lms.djangoapps.verify_student.models import VerificationDeadline
from lms.djangoapps.verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.schedules.signals import CREATE_SCHEDULE_WAFFLE_FLAG
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.features.course_experience import UNIFIED_COURSE_TAB_FLAG, UPGRADE_DEADLINE_MESSAGE, CourseHomeMessages
from student.tests.factories import TEST_PASSWORD, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
@ddt.ddt
class CourseDateSummaryTest(SharedModuleStoreTestCase):
"""Tests for course date summary blocks."""
    def setUp(self):
        """Enable course-home improvements so date summary blocks render."""
        super(CourseDateSummaryTest, self).setUp()
        SelfPacedConfiguration.objects.create(enable_course_home_improvements=True)
def test_course_info_feature_flag(self):
SelfPacedConfiguration(enable_course_home_improvements=False).save()
course = create_course_run()
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
self.client.login(username=user.username, password=TEST_PASSWORD)
url = reverse('info', args=(course.id,))
response = self.client.get(url)
self.assertNotIn('date-summary', response.content)
def test_course_home_logged_out(self):
course = create_course_run()
url = reverse('openedx.course_experience.course_home', args=(course.id,))
response = self.client.get(url)
self.assertEqual(200, response.status_code)
# Tests for which blocks are enabled
def assert_block_types(self, course, user, expected_blocks):
"""Assert that the enabled block types for this course are as expected."""
blocks = get_course_date_blocks(course, user)
self.assertEqual(len(blocks), len(expected_blocks))
self.assertEqual(set(type(b) for b in blocks), set(expected_blocks))
    @ddt.data(
        # Verified enrollment with no photo-verification before course start
        ({}, {}, (CourseEndDate, CourseStartDate, TodaysDate, VerificationDeadlineDate)),
        # Verified enrollment with `approved` photo-verification after course end
        ({'days_till_start': -10,
          'days_till_end': -5,
          'days_till_upgrade_deadline': -6,
          'days_till_verification_deadline': -5,
          },
         {'verification_status': 'approved'},
         (TodaysDate, CourseEndDate)),
        # Verified enrollment with `expired` photo-verification during course run
        ({'days_till_start': -10},
         {'verification_status': 'expired'},
         (TodaysDate, CourseEndDate, VerificationDeadlineDate)),
        # Verified enrollment with `approved` photo-verification during course run
        ({'days_till_start': -10, },
         {'verification_status': 'approved'},
         (TodaysDate, CourseEndDate)),
        # Verified enrollment with *NO* course end date
        ({'days_till_end': None},
         {},
         (CourseStartDate, TodaysDate, VerificationDeadlineDate)),
        # Verified enrollment with no photo-verification during course run
        ({'days_till_start': -1},
         {},
         (TodaysDate, CourseEndDate, VerificationDeadlineDate)),
        # Verification approved
        ({'days_till_start': -10,
          'days_till_upgrade_deadline': -1,
          'days_till_verification_deadline': 1,
          },
         {'verification_status': 'approved'},
         (TodaysDate, CourseEndDate)),
        # After upgrade deadline
        ({'days_till_start': -10,
          'days_till_upgrade_deadline': -1},
         {},
         (TodaysDate, CourseEndDate, VerificationDeadlineDate)),
        # After verification deadline
        ({'days_till_start': -10,
          'days_till_upgrade_deadline': -2,
          'days_till_verification_deadline': -1},
         {},
         (TodaysDate, CourseEndDate, VerificationDeadlineDate)),
    )
    @ddt.unpack
    def test_enabled_block_types(self, course_kwargs, user_kwargs, expected_blocks):
        """Each (course state, verification state) pair enables the expected blocks."""
        course = create_course_run(**course_kwargs)
        user = create_user(**user_kwargs)
        CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
        self.assert_block_types(course, user, expected_blocks)
    @ddt.data(
        # Course not started
        ({}, (CourseStartDate, TodaysDate, CourseEndDate)),
        # Course active
        ({'days_till_start': -1}, (TodaysDate, CourseEndDate)),
        # Course ended
        ({'days_till_start': -10, 'days_till_end': -5},
         (TodaysDate, CourseEndDate)),
    )
    @ddt.unpack
    def test_enabled_block_types_without_enrollment(self, course_kwargs, expected_blocks):
        """Unenrolled users still see the date blocks keyed to course state."""
        course = create_course_run(**course_kwargs)
        user = create_user()
        self.assert_block_types(course, user, expected_blocks)
def test_enabled_block_types_with_non_upgradeable_course_run(self):
course = create_course_run(days_till_start=-10, days_till_verification_deadline=None)
user = create_user()
CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).delete()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
self.assert_block_types(course, user, (TodaysDate, CourseEndDate))
def test_todays_date_block(self):
"""
Helper function to test that today's date block renders correctly
and displays the correct time, accounting for daylight savings
"""
with freeze_time('2015-01-02'):
course = create_course_run()
user = create_user()
block = TodaysDate(course, user)
self.assertTrue(block.is_enabled)
self.assertEqual(block.date, datetime.now(utc))
self.assertEqual(block.title, 'current_datetime')
    @ddt.data(
        'info',
        'openedx.course_experience.course_home',
    )
    @override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=True)
    def test_todays_date_no_timezone(self, url_name):
        """Without a timezone preference, today's date renders with timezone None."""
        with freeze_time('2015-01-02'):
            course = create_course_run()
            user = create_user()
            self.client.login(username=user.username, password=TEST_PASSWORD)
            html_elements = [
                '<h3 class="hd hd-6 handouts-header">Important Course Dates</h3>',
                '<div class="date-summary-container">',
                '<div class="date-summary date-summary-todays-date">',
                '<span class="hd hd-6 heading localized-datetime"',
                'data-datetime="2015-01-02 00:00:00+00:00"',
                'data-string="Today is {date}"',
                'data-timezone="None"'
            ]
            url = reverse(url_name, args=(course.id,))
            response = self.client.get(url, follow=True)
            for html in html_elements:
                self.assertContains(response, html)
    @ddt.data(
        'info',
        'openedx.course_experience.course_home',
    )
    @override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=True)
    def test_todays_date_timezone(self, url_name):
        """A saved timezone preference is passed through to the date markup."""
        with freeze_time('2015-01-02'):
            course = create_course_run()
            user = create_user()
            self.client.login(username=user.username, password=TEST_PASSWORD)
            set_user_preference(user, 'time_zone', 'America/Los_Angeles')
            url = reverse(url_name, args=(course.id,))
            response = self.client.get(url, follow=True)
            html_elements = [
                '<h3 class="hd hd-6 handouts-header">Important Course Dates</h3>',
                '<div class="date-summary-container">',
                '<div class="date-summary date-summary-todays-date">',
                '<span class="hd hd-6 heading localized-datetime"',
                'data-datetime="2015-01-02 00:00:00+00:00"',
                'data-string="Today is {date}"',
                'data-timezone="America/Los_Angeles"'
            ]
            for html in html_elements:
                self.assertContains(response, html)
## Tests Course Start Date
def test_course_start_date(self):
course = create_course_run()
user = create_user()
block = CourseStartDate(course, user)
self.assertEqual(block.date, course.start)
    @ddt.data(
        'info',
        'openedx.course_experience.course_home',
    )
    @override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=True)
    def test_start_date_render(self, url_name):
        """An upcoming start date renders as a relative localized string."""
        with freeze_time('2015-01-02'):
            course = create_course_run()
            user = create_user()
            self.client.login(username=user.username, password=TEST_PASSWORD)
            url = reverse(url_name, args=(course.id,))
            response = self.client.get(url, follow=True)
            html_elements = [
                'data-string="in 1 day - {date}"',
                'data-datetime="2015-01-03 00:00:00+00:00"'
            ]
            for html in html_elements:
                self.assertContains(response, html)
    @ddt.data(
        'info',
        'openedx.course_experience.course_home',
    )
    @override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=True)
    def test_start_date_render_time_zone(self, url_name):
        """The start date markup carries the user's timezone preference."""
        with freeze_time('2015-01-02'):
            course = create_course_run()
            user = create_user()
            self.client.login(username=user.username, password=TEST_PASSWORD)
            set_user_preference(user, 'time_zone', 'America/Los_Angeles')
            url = reverse(url_name, args=(course.id,))
            response = self.client.get(url, follow=True)
            html_elements = [
                'data-string="in 1 day - {date}"',
                'data-datetime="2015-01-03 00:00:00+00:00"',
                'data-timezone="America/Los_Angeles"'
            ]
            for html in html_elements:
                self.assertContains(response, html)
## Tests Course End Date Block
def test_course_end_date_for_certificate_eligible_mode(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = CourseEndDate(course, user)
self.assertEqual(
block.description,
'To earn a certificate, you must complete all requirements before this date.'
)
def test_course_end_date_for_non_certificate_eligible_mode(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
block = CourseEndDate(course, user)
self.assertEqual(
block.description,
'After this date, course content will be archived.'
)
self.assertEqual(block.title, 'Course End')
    def test_course_end_date_after_course(self):
        """After the end date, the block explains the course is archived."""
        course = create_course_run(days_till_start=-2, days_till_end=-1)
        user = create_user()
        CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
        block = CourseEndDate(course, user)
        self.assertEqual(
            block.description,
            'This course is archived, which means you can review course content but it is no longer active.'
        )
        self.assertEqual(block.title, 'Course End')
def test_ecommerce_checkout_redirect(self):
"""Verify the block link redirects to ecommerce checkout if it's enabled."""
sku = 'TESTSKU'
configuration = CommerceConfiguration.objects.create(checkout_on_ecommerce_service=True)
course = create_course_run()
user = create_user()
course_mode = CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED)
course_mode.sku = sku
course_mode.save()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerifiedUpgradeDeadlineDate(course, user)
self.assertEqual(block.link, '{}?sku={}'.format(configuration.basket_checkout_page, sku))
## CertificateAvailableDate
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_no_certificate_available_date(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
block = CertificateAvailableDate(course, user)
self.assertEqual(block.date, None)
self.assertFalse(block.is_enabled)
## CertificateAvailableDate
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_no_certificate_available_date_for_self_paced(self):
course = create_self_paced_course_run()
verified_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=verified_user, mode=CourseMode.VERIFIED)
course.certificate_available_date = datetime.now(utc) + timedelta(days=7)
course.save()
block = CertificateAvailableDate(course, verified_user)
self.assertNotEqual(block.date, None)
self.assertFalse(block.is_enabled)
def test_no_certificate_available_date_for_audit_course(self):
"""
Tests that Certificate Available Date is not visible in the course "Important Course Dates" section
if the course only has audit mode.
"""
course = create_course_run()
audit_user = create_user()
# Enroll learner in the audit mode and verify the course only has 1 mode (audit)
CourseEnrollmentFactory(course_id=course.id, user=audit_user, mode=CourseMode.AUDIT)
CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).delete()
all_course_modes = CourseMode.modes_for_course(course.id)
self.assertEqual(len(all_course_modes), 1)
self.assertEqual(all_course_modes[0].slug, CourseMode.AUDIT)
course.certificate_available_date = datetime.now(utc) + timedelta(days=7)
course.save()
# Verify Certificate Available Date is not enabled for learner.
block = CertificateAvailableDate(course, audit_user)
self.assertFalse(block.is_enabled)
self.assertNotEqual(block.date, None)
@waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
def test_certificate_available_date_defined(self):
course = create_course_run()
audit_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=audit_user, mode=CourseMode.AUDIT)
verified_user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=verified_user, mode=CourseMode.VERIFIED)
course.certificate_available_date = datetime.now(utc) + timedelta(days=7)
enable_course_certificates(course)
CertificateAvailableDate(course, audit_user)
for block in (CertificateAvailableDate(course, audit_user), CertificateAvailableDate(course, verified_user)):
self.assertIsNotNone(course.certificate_available_date)
self.assertEqual(block.date, course.certificate_available_date)
self.assertTrue(block.is_enabled)
## VerificationDeadlineDate
def test_no_verification_deadline(self):
course = create_course_run(days_till_start=-1, days_till_verification_deadline=None)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
self.assertFalse(block.is_enabled)
def test_no_verified_enrollment(self):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.AUDIT)
block = VerificationDeadlineDate(course, user)
self.assertFalse(block.is_enabled)
def test_verification_deadline_date_upcoming(self):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-1)
user = create_user()
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
self.assertEqual(block.css_class, 'verification-deadline-upcoming')
self.assertEqual(block.title, 'Verification Deadline')
self.assertEqual(block.date, datetime.now(utc) + timedelta(days=14))
self.assertEqual(
block.description,
'You must successfully complete verification before this date to qualify for a Verified Certificate.'
)
self.assertEqual(block.link_text, 'Verify My Identity')
self.assertEqual(block.link, reverse('verify_student_verify_now', args=(course.id,)))
def test_verification_deadline_date_retry(self):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-1)
user = create_user(verification_status='denied')
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
self.assertEqual(block.css_class, 'verification-deadline-retry')
self.assertEqual(block.title, 'Verification Deadline')
self.assertEqual(block.date, datetime.now(utc) + timedelta(days=14))
self.assertEqual(
block.description,
'You must successfully complete verification before this date to qualify for a Verified Certificate.'
)
self.assertEqual(block.link_text, 'Retry Verification')
self.assertEqual(block.link, reverse('verify_student_reverify'))
def test_verification_deadline_date_denied(self):
with freeze_time('2015-01-02'):
course = create_course_run(days_till_start=-10, days_till_verification_deadline=-1)
user = create_user(verification_status='denied')
CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
block = VerificationDeadlineDate(course, user)
self.assertEqual(block.css_class, 'verification-deadline-passed')
self.assertEqual(block.title, 'Missed Verification Deadline')
self.assertEqual(block.date, datetime.now(utc) + timedelta(days=-1))
self.assertEqual(
block.description,
"Unfortunately you missed this course's deadline for a successful verification."
)
self.assertEqual(block.link_text, 'Learn More')
self.assertEqual(block.link, '')
    @ddt.data(
        (-1, '1 day ago - {date}'),
        (1, 'in 1 day - {date}')
    )
    @ddt.unpack
    def test_render_date_string_past(self, delta, expected_date_string):
        # relative_datestring should phrase the deadline as past ("... ago")
        # or future ("in ...") depending on which side of "now" it falls.
        with freeze_time('2015-01-02'):
            course = create_course_run(days_till_start=-10, days_till_verification_deadline=delta)
            user = create_user(verification_status='denied')
            CourseEnrollmentFactory(course_id=course.id, user=user, mode=CourseMode.VERIFIED)
            block = VerificationDeadlineDate(course, user)
            self.assertEqual(block.relative_datestring, expected_date_string)
@attr(shard=1)
@ddt.ddt
class TestDateAlerts(SharedModuleStoreTestCase):
    """
    Unit tests for date alerts.

    Each test freezes time at a point relative to the fixture course (which
    starts 2017-07-01 09:00 UTC, ends two weeks later, and makes certificates
    available three weeks after the start) and checks which alert, if any, is
    registered on the course home page.
    """
    def setUp(self):
        super(TestDateAlerts, self).setUp()
        with freeze_time('2017-07-01 09:00:00'):
            # Course starts "now"; certificate becomes available 21 days later.
            self.course = create_course_run(days_till_start=0)
            self.course.certificate_available_date = self.course.start + timedelta(days=21)
            enable_course_certificates(self.course)
            self.enrollment = CourseEnrollmentFactory(course_id=self.course.id, mode=CourseMode.AUDIT)
            self.request = RequestFactory().request()
            self.request.session = {}
            self.request.user = self.enrollment.user
            # MessageMiddleware is required so CourseHomeMessages can attach
            # messages to this request.
            MessageMiddleware().process_request(self.request)
    @ddt.data(
        ['2017-01-01 09:00:00', u'in 6 months on <span class="date localized-datetime" data-format="shortDate"'],
        ['2017-06-17 09:00:00', u'in 2 weeks on <span class="date localized-datetime" data-format="shortDate"'],
        ['2017-06-30 10:00:00', u'in 1 day at <span class="date localized-datetime" data-format="shortTime"'],
        ['2017-07-01 08:00:00', u'in 1 hour at <span class="date localized-datetime" data-format="shortTime"'],
        ['2017-07-01 08:55:00', u'in 5 minutes at <span class="date localized-datetime" data-format="shortTime"'],
        ['2017-07-01 09:00:00', None],
        ['2017-08-01 09:00:00', None],
    )
    @ddt.unpack
    def test_start_date_alert(self, current_time, expected_message_html):
        """
        Verify that course start date alerts are registered.
        """
        # No alert is expected once the course has started (None cases above).
        with freeze_time(current_time):
            block = CourseStartDate(self.course, self.request.user)
            block.register_alerts(self.request, self.course)
            messages = list(CourseHomeMessages.user_messages(self.request))
            if expected_message_html:
                self.assertEqual(len(messages), 1)
                self.assertIn(expected_message_html, messages[0].message_html)
            else:
                self.assertEqual(len(messages), 0)
    @ddt.data(
        ['2017-06-30 09:00:00', None],
        ['2017-07-01 09:00:00', u'in 2 weeks on <span class="date localized-datetime" data-format="shortDate"'],
        ['2017-07-14 10:00:00', u'in 1 day at <span class="date localized-datetime" data-format="shortTime"'],
        ['2017-07-15 08:00:00', u'in 1 hour at <span class="date localized-datetime" data-format="shortTime"'],
        ['2017-07-15 08:55:00', u'in 5 minutes at <span class="date localized-datetime" data-format="shortTime"'],
        ['2017-07-15 09:00:00', None],
        ['2017-08-15 09:00:00', None],
    )
    @ddt.unpack
    def test_end_date_alert(self, current_time, expected_message_html):
        """
        Verify that course end date alerts are registered.
        """
        # Alerts only appear while the course is running and before it ends.
        with freeze_time(current_time):
            block = CourseEndDate(self.course, self.request.user)
            block.register_alerts(self.request, self.course)
            messages = list(CourseHomeMessages.user_messages(self.request))
            if expected_message_html:
                self.assertEqual(len(messages), 1)
                self.assertIn(expected_message_html, messages[0].message_html)
            else:
                self.assertEqual(len(messages), 0)
    @ddt.data(
        ['2017-06-20 09:00:00', None],
        ['2017-06-21 09:00:00', u'Don&#39;t forget, you have 2 weeks left to upgrade to a Verified Certificate.'],
        ['2017-07-04 10:00:00', u'Don&#39;t forget, you have 1 day left to upgrade to a Verified Certificate.'],
        ['2017-07-05 08:00:00', u'Don&#39;t forget, you have 1 hour left to upgrade to a Verified Certificate.'],
        ['2017-07-05 08:55:00', u'Don&#39;t forget, you have 5 minutes left to upgrade to a Verified Certificate.'],
        ['2017-07-05 09:00:00', None],
        ['2017-08-05 09:00:00', None],
    )
    @ddt.unpack
    @override_waffle_flag(UPGRADE_DEADLINE_MESSAGE, active=True)
    def test_verified_upgrade_deadline_alert(self, current_time, expected_message_html):
        """
        Verify the verified upgrade deadline alerts.
        """
        # Expected strings are HTML-escaped (&#39; is an apostrophe).
        with freeze_time(current_time):
            block = VerifiedUpgradeDeadlineDate(self.course, self.request.user)
            block.register_alerts(self.request, self.course)
            messages = list(CourseHomeMessages.user_messages(self.request))
            if expected_message_html:
                self.assertEqual(len(messages), 1)
                self.assertIn(expected_message_html, messages[0].message_html)
            else:
                self.assertEqual(len(messages), 0)
    @expectedFailure # Edraak: Details of alert message has been deleted. Also see date_summary.py
    @ddt.data(
        ['2017-07-15 08:00:00', None],
        ['2017-07-15 09:00:00', u'If you have earned a certificate, you will be able to access it 1 week from now.'],
        ['2017-07-21 09:00:00', u'If you have earned a certificate, you will be able to access it 1 day from now.'],
        ['2017-07-22 08:00:00', u'If you have earned a certificate, you will be able to access it 1 hour from now.'],
        ['2017-07-22 09:00:00', None],
        ['2017-07-23 09:00:00', None],
    )
    @ddt.unpack
    @waffle.testutils.override_switch('certificates.auto_certificate_generation', True)
    def test_certificate_availability_alert(self, current_time, expected_message_html):
        """
        Verify the certificate availability alerts.
        """
        with freeze_time(current_time):
            block = CertificateAvailableDate(self.course, self.request.user)
            block.register_alerts(self.request, self.course)
            messages = list(CourseHomeMessages.user_messages(self.request))
            if expected_message_html:
                self.assertEqual(len(messages), 1)
                self.assertIn(expected_message_html, messages[0].message_html)
            else:
                self.assertEqual(len(messages), 0)
@ddt.ddt
@attr(shard=1)
class TestScheduleOverrides(SharedModuleStoreTestCase):
    """
    Tests for the interaction between Schedules and the dynamic upgrade
    deadline: global, org-level, and course-level configuration overrides.
    """
    def setUp(self):
        super(TestScheduleOverrides, self).setUp()
        # Schedule creation signals look up the current site; stub it out so
        # tests don't depend on the sites framework state.
        patcher = patch('openedx.core.djangoapps.schedules.signals.get_current_site')
        mock_get_current_site = patcher.start()
        self.addCleanup(patcher.stop)
        mock_get_current_site.return_value = SiteFactory.create()
    @override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True)
    def test_date_with_self_paced_with_enrollment_before_course_start(self):
        """ Enrolling before a course begins should result in the upgrade deadline being set relative to the
        course start date. """
        global_config = DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
        course = create_self_paced_course_run(days_till_start=3)
        overview = CourseOverview.get_from_id(course.id)
        expected = overview.start + timedelta(days=global_config.deadline_days)
        enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        self.assertEqual(block.date, expected)
        self._check_text(block)
    def _check_text(self, upgrade_date_summary):
        # Helper: assert the standard upgrade-block copy.
        self.assertEqual(upgrade_date_summary.title, 'Upgrade to Verified Certificate')
        self.assertEqual(
            upgrade_date_summary.description,
            'Don\'t miss the opportunity to highlight your new knowledge and skills by earning a verified'
            ' certificate.'
        )
        self.assertEqual(upgrade_date_summary.relative_datestring, 'by {date}')
    @override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True)
    def test_date_with_self_paced_with_enrollment_after_course_start(self):
        """ Enrolling after a course begins should result in the upgrade deadline being set relative to the
        enrollment date.
        Additionally, OrgDynamicUpgradeDeadlineConfiguration should override the number of days until the deadline,
        and CourseDynamicUpgradeDeadlineConfiguration should override the org-level override.
        """
        global_config = DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
        course = create_self_paced_course_run(days_till_start=-1, org_id='TestOrg')
        # Global config: deadline relative to enrollment creation.
        enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        expected = enrollment.created + timedelta(days=global_config.deadline_days)
        self.assertEqual(block.date, expected)
        # Orgs should be able to override the deadline
        org_config = OrgDynamicUpgradeDeadlineConfiguration.objects.create(
            enabled=True, org_id=course.org, deadline_days=4
        )
        enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        expected = enrollment.created + timedelta(days=org_config.deadline_days)
        self.assertEqual(block.date, expected)
        # Courses should be able to override the deadline (and the org-level override)
        course_config = CourseDynamicUpgradeDeadlineConfiguration.objects.create(
            enabled=True, course_id=course.id, deadline_days=3
        )
        enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        expected = enrollment.created + timedelta(days=course_config.deadline_days)
        self.assertEqual(block.date, expected)
    @override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True)
    def test_date_with_self_paced_without_dynamic_upgrade_deadline(self):
        """ Disabling the dynamic upgrade deadline functionality should result in the verified mode's
        expiration date being returned. """
        DynamicUpgradeDeadlineConfiguration.objects.create(enabled=False)
        course = create_self_paced_course_run()
        expected = CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).expiration_datetime
        enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        self.assertEqual(block.date, expected)
    @override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True)
    def test_date_with_existing_schedule(self):
        """ If a schedule is created while deadlines are disabled, they shouldn't magically appear once the feature is
        turned on. """
        course = create_self_paced_course_run(days_till_start=-1)
        DynamicUpgradeDeadlineConfiguration.objects.create(enabled=False)
        course_config = CourseDynamicUpgradeDeadlineConfiguration.objects.create(enabled=False, course_id=course.id)
        enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT)
        # The enrollment has a schedule, but the upgrade deadline should be None
        self.assertIsNone(enrollment.schedule.upgrade_deadline)
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        expected = CourseMode.objects.get(course_id=course.id, mode_slug=CourseMode.VERIFIED).expiration_datetime
        self.assertEqual(block.date, expected)
        # Now if we turn on the feature for this course, this existing enrollment should be unaffected
        course_config.enabled = True
        course_config.save()
        block = VerifiedUpgradeDeadlineDate(course, enrollment.user)
        self.assertEqual(block.date, expected)
    @ddt.data(
        # (enroll before configs, org enabled, org opt-out, course enabled, course opt-out, expected dynamic deadline)
        (False, False, False, False, False, True),
        (False, False, False, False, True, True),
        (False, False, False, True, False, True),
        (False, False, False, True, True, False),
        (False, False, True, False, False, True),
        (False, False, True, False, True, True),
        (False, False, True, True, False, True),
        (False, False, True, True, True, False),
        (False, True, False, False, False, True),
        (False, True, False, False, True, True),
        (False, True, False, True, False, True),
        (False, True, False, True, True, False), # course-level overrides org-level
        (False, True, True, False, False, False),
        (False, True, True, False, True, False),
        (False, True, True, True, False, True), # course-level overrides org-level
        (False, True, True, True, True, False),
        (True, False, False, False, False, True),
        (True, False, False, False, True, True),
        (True, False, False, True, False, True),
        (True, False, False, True, True, False),
        (True, False, True, False, False, True),
        (True, False, True, False, True, True),
        (True, False, True, True, False, True),
        (True, False, True, True, True, False),
        (True, True, False, False, False, True),
        (True, True, False, False, True, True),
        (True, True, False, True, False, True),
        (True, True, False, True, True, False), # course-level overrides org-level
        (True, True, True, False, False, False),
        (True, True, True, False, True, False),
        (True, True, True, True, False, True), # course-level overrides org-level
        (True, True, True, True, True, False),
    )
    @ddt.unpack
    @override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True)
    def test_date_with_org_and_course_config_overrides(self, enroll_first, org_config_enabled, org_config_opt_out,
                                                       course_config_enabled, course_config_opt_out,
                                                       expected_dynamic_deadline):
        """ Runs through every combination of org-level plus course-level DynamicUpgradeDeadlineConfiguration enabled
        and opt-out states to verify that course-level overrides the org-level config. """
        course = create_self_paced_course_run(days_till_start=-1, org_id='TestOrg')
        DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
        # Enrolling before the org/course configs exist exercises the case
        # where the schedule predates the configuration.
        if enroll_first:
            enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT, course__self_paced=True)
        OrgDynamicUpgradeDeadlineConfiguration.objects.create(
            enabled=org_config_enabled, opt_out=org_config_opt_out, org_id=course.id.org
        )
        CourseDynamicUpgradeDeadlineConfiguration.objects.create(
            enabled=course_config_enabled, opt_out=course_config_opt_out, course_id=course.id
        )
        if not enroll_first:
            enrollment = CourseEnrollmentFactory(course_id=course.id, mode=CourseMode.AUDIT, course__self_paced=True)
        # The enrollment has a schedule, and the upgrade_deadline is set when expected_dynamic_deadline is True
        if not enroll_first:
            self.assertEqual(enrollment.schedule.upgrade_deadline is not None, expected_dynamic_deadline)
        # The CourseEnrollment.upgrade_deadline property method is checking the configs
        self.assertEqual(enrollment.dynamic_upgrade_deadline is not None, expected_dynamic_deadline)
def create_user(verification_status=None):
    """ Create a new User instance.
    Arguments:
        verification_status (str): User's verification status. If this value is set an instance of
            SoftwareSecurePhotoVerification will be created for the user with the specified status.
    """
    new_user = UserFactory()
    if verification_status is None:
        return new_user
    SoftwareSecurePhotoVerificationFactory.create(user=new_user, status=verification_status)
    return new_user
def create_course_run(
    days_till_start=1, days_till_end=14, days_till_upgrade_deadline=4, days_till_verification_deadline=14,
):
    """ Create a new course run and course modes.
    All date-related arguments are relative to the current date-time (now) unless otherwise specified.
    Both audit and verified `CourseMode` objects will be created for the course run.
    Arguments:
        days_till_end (int): Number of days until the course ends.
        days_till_start (int): Number of days until the course starts.
        days_till_upgrade_deadline (int): Number of days until the course run's upgrade deadline.
        days_till_verification_deadline (int): Number of days until the course run's verification deadline. If this
            value is set to `None` no verification deadline will be created.
    """
    now = datetime.now(utc)
    course = CourseFactory.create(start=now + timedelta(days=days_till_start))
    # A None end date means the run never ends.
    course.end = None
    if days_till_end is not None:
        course.end = now + timedelta(days=days_till_end)
    CourseModeFactory(course_id=course.id, mode_slug=CourseMode.AUDIT)
    # The verified mode's expiration doubles as the (non-dynamic) upgrade deadline.
    CourseModeFactory(
        course_id=course.id,
        mode_slug=CourseMode.VERIFIED,
        expiration_datetime=now + timedelta(days=days_till_upgrade_deadline)
    )
    if days_till_verification_deadline is not None:
        VerificationDeadline.objects.create(
            course_key=course.id,
            deadline=now + timedelta(days=days_till_verification_deadline)
        )
    return course
def create_self_paced_course_run(days_till_start=1, org_id=None):
    """ Create a new self-paced course run and course modes.
    All date-related arguments are relative to the current date-time (now) unless otherwise specified.
    Both audit and verified `CourseMode` objects will be created for the course run.
    Arguments:
        days_till_start (int): Number of days until the course starts.
        org_id (string): String org id to assign the course to (default: None; use CourseFactory default)
    """
    now = datetime.now(utc)
    run = CourseFactory.create(
        start=now + timedelta(days=days_till_start),
        self_paced=True,
        org=org_id if org_id else 'TestedX',
    )
    # Audit mode never expires; the verified mode expires far in the future.
    CourseModeFactory(course_id=run.id, mode_slug=CourseMode.AUDIT)
    CourseModeFactory(
        course_id=run.id,
        mode_slug=CourseMode.VERIFIED,
        expiration_datetime=now + timedelta(days=100),
    )
    return run
def enable_course_certificates(course):
    """
    Enable course certificate configuration.

    Attaches a single active certificate definition to the course and saves it.
    """
    active_certificate = {
        u'course_title': u'Test',
        u'name': u'',
        u'is_active': True,
    }
    course.certificates = {u'certificates': [active_certificate]}
    course.save()
|
agpl-3.0
|
twalthr/flink
|
flink-python/pyflink/table/examples/batch/word_count.py
|
7
|
2909
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import os
import shutil
import sys
import tempfile
from pyflink.table import EnvironmentSettings, TableEnvironment
from pyflink.table import expressions as expr
def word_count():
    """Batch word-count example: group words and write counts to a CSV filesystem sink."""
    content = "line Licensed to the Apache Software Foundation ASF under one " \
              "line or more contributor license agreements See the NOTICE file " \
              "line distributed with this work for additional information " \
              "line regarding copyright ownership The ASF licenses this file " \
              "to you under the Apache License Version the " \
              "License you may not use this file except in compliance " \
              "with the License"
    table_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())
    # The sink writes into <tmpdir>/result; clear out leftovers from a prior run.
    output_path = tempfile.gettempdir() + '/result'
    if os.path.exists(output_path):
        try:
            remover = os.remove if os.path.isfile(output_path) else shutil.rmtree
            remover(output_path)
        except OSError as e:
            logging.error("Error removing directory: %s - %s.", e.filename, e.strerror)
    logging.info("Results directory: %s", output_path)
    # register Results table in table environment
    sink_ddl = """
        create table Results(
            word VARCHAR,
            `count` BIGINT
        ) with (
            'connector.type' = 'filesystem',
            'format.type' = 'csv',
            'connector.path' = '{}'
        )
        """.format(output_path)
    table_env.execute_sql(sink_ddl)
    # One (word, 1) row per token; the aggregation sums them per word.
    word_rows = [(word, 1) for word in content.split(" ")]
    source = table_env.from_elements(word_rows, ["word", "count"])
    source.group_by(source.word) \
          .select(source.word, expr.lit(1).count.alias('count')) \
          .execute_insert("Results")
if __name__ == '__main__':
    # Log to stdout so the example's progress is visible when run directly.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
    word_count()
|
apache-2.0
|
yanheven/nova
|
nova/api/openstack/compute/image_metadata.py
|
17
|
4909
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
import nova.image
class Controller(object):
    """The image metadata API controller for the OpenStack API."""

    def __init__(self):
        self.image_api = nova.image.API()

    def _get_image(self, context, image_id):
        """Fetch the image dict, translating image-service errors to HTTP errors."""
        try:
            return self.image_api.get(context, image_id)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        except exception.ImageNotFound:
            msg = _("Image not found.")
            raise exc.HTTPNotFound(explanation=msg)

    def index(self, req, image_id):
        """Returns the list of metadata for a given instance."""
        context = req.environ['nova.context']
        metadata = self._get_image(context, image_id)['properties']
        return dict(metadata=metadata)

    def show(self, req, image_id, id):
        """Return a single metadata item, or 404 if the key is absent."""
        context = req.environ['nova.context']
        metadata = self._get_image(context, image_id)['properties']
        if id in metadata:
            return {'meta': {id: metadata[id]}}
        else:
            raise exc.HTTPNotFound()

    def create(self, req, image_id, body):
        """Merge the supplied metadata into the image's properties."""
        context = req.environ['nova.context']
        image = self._get_image(context, image_id)
        if 'metadata' in body:
            # .items() rather than the Python-2-only .iteritems() so this
            # module also runs under Python 3; behavior is identical here.
            for key, value in body['metadata'].items():
                image['properties'][key] = value
        common.check_img_metadata_properties_quota(context,
                                                   image['properties'])
        try:
            image = self.image_api.update(context, image_id, image, data=None,
                                          purge_props=True)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(metadata=image['properties'])

    def update(self, req, image_id, id, body):
        """Replace a single metadata item; the URI key must match the body."""
        context = req.environ['nova.context']
        try:
            meta = body['meta']
        except KeyError:
            expl = _('Incorrect request body format')
            raise exc.HTTPBadRequest(explanation=expl)
        if id not in meta:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)
        if len(meta) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)
        image = self._get_image(context, image_id)
        image['properties'][id] = meta[id]
        common.check_img_metadata_properties_quota(context,
                                                   image['properties'])
        try:
            self.image_api.update(context, image_id, image, data=None,
                                  purge_props=True)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(meta=meta)

    def update_all(self, req, image_id, body):
        """Replace the image's entire metadata dict with the request body's."""
        context = req.environ['nova.context']
        image = self._get_image(context, image_id)
        metadata = body.get('metadata', {})
        common.check_img_metadata_properties_quota(context, metadata)
        image['properties'] = metadata
        try:
            self.image_api.update(context, image_id, image, data=None,
                                  purge_props=True)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(metadata=metadata)

    @wsgi.response(204)
    def delete(self, req, image_id, id):
        """Delete a single metadata item; 404 if the key does not exist."""
        context = req.environ['nova.context']
        image = self._get_image(context, image_id)
        if id not in image['properties']:
            msg = _("Invalid metadata key")
            raise exc.HTTPNotFound(explanation=msg)
        image['properties'].pop(id)
        try:
            self.image_api.update(context, image_id, image, data=None,
                                  purge_props=True)
        except exception.ImageNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
def create_resource():
    """Build the WSGI resource wrapping the image metadata controller."""
    return wsgi.Resource(Controller())
|
apache-2.0
|
pyblish/pyblish-mindbender
|
mindbender/vendor/jsonschema/_format.py
|
7
|
6861
|
import datetime
import re
import socket
from .compat import str_types
from .exceptions import FormatError
class FormatChecker(object):
    """
    A ``format`` property checker.
    JSON Schema does not mandate that the ``format`` property actually do any
    validation. If validation is desired however, instances of this class can
    be hooked into validators to enable format validation.
    :class:`FormatChecker` objects always return ``True`` when asked about
    formats that they do not know how to validate.
    To check a custom format using a function that takes an instance and
    returns a ``bool``, use the :meth:`FormatChecker.checks` or
    :meth:`FormatChecker.cls_checks` decorators.
    :argument iterable formats: the known formats to validate. This argument
                                can be used to limit which formats will be used
                                during validation.
    """

    # Class-level registry used by cls_checks; each instance copies it.
    checkers = {}

    def __init__(self, formats=None):
        if formats is None:
            # Snapshot the class registry so per-instance registrations do
            # not leak back into the class.
            self.checkers = self.checkers.copy()
        else:
            # Restrict this instance to the requested subset of formats.
            self.checkers = dict((fmt, self.checkers[fmt]) for fmt in formats)

    def checks(self, format, raises=()):
        """
        Register a decorated function as validating a new format.
        :argument str format: the format that the decorated function will check
        :argument Exception raises: the exception(s) raised by the decorated
            function when an invalid instance is found. The exception object
            will be accessible as the :attr:`ValidationError.cause` attribute
            of the resulting validation error.
        """
        def _register(func):
            self.checkers[format] = (func, raises)
            return func
        return _register

    cls_checks = classmethod(checks)

    def check(self, instance, format):
        """
        Check whether the instance conforms to the given format.
        :argument instance: the instance to check
        :type: any primitive type (str, number, bool)
        :argument str format: the format that instance should conform to
        :raises: :exc:`FormatError` if instance does not conform to format
        """
        # Unknown formats are deliberately not an error.
        if format not in self.checkers:
            return
        func, raises = self.checkers[format]
        cause = None
        try:
            result = func(instance)
        except raises as e:
            # A declared exception counts as failure; keep it as the cause.
            result, cause = None, e
        if not result:
            raise FormatError(
                "%r is not a %r" % (instance, format), cause=cause,
            )

    def conforms(self, instance, format):
        """
        Check whether the instance conforms to the given format.
        :argument instance: the instance to check
        :type: any primitive type (str, number, bool)
        :argument str format: the format that instance should conform to
        :rtype: bool
        """
        try:
            self.check(instance, format)
        except FormatError:
            return False
        return True
# Format names registered for each metaschema draft.
_draft_checkers = {"draft3": [], "draft4": []}


def _checks_drafts(both=None, draft3=None, draft4=None, raises=()):
    """Register a checker under its draft 3 and/or draft 4 format name(s)."""
    draft3 = draft3 or both
    draft4 = draft4 or both

    def wrap(func):
        # Register draft 3 first, then draft 4, mirroring the declaration order.
        for draft, name in (("draft3", draft3), ("draft4", draft4)):
            if name:
                _draft_checkers[draft].append(name)
                func = FormatChecker.cls_checks(name, raises)(func)
        return func
    return wrap
@_checks_drafts("email")
def is_email(instance):
    """Loosely validate an email address: any string containing an ``@``."""
    # Non-string instances vacuously conform, per JSON Schema format rules.
    return "@" in instance if isinstance(instance, str_types) else True
# Shape check only: four groups of 1-3 digits; range is validated separately.
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")


@_checks_drafts(draft3="ip-address", draft4="ipv4")
def is_ipv4(instance):
    """Validate a dotted-quad IPv4 address with every octet in 0..255."""
    if not isinstance(instance, str_types):
        return True
    if _ipv4_re.match(instance) is None:
        return False
    return all(0 <= int(octet) <= 255 for octet in instance.split("."))
if hasattr(socket, "inet_pton"):
    # inet_pton is not available on every platform, so the ipv6 checker is
    # only registered when the socket module provides it.
    @_checks_drafts("ipv6", raises=socket.error)
    def is_ipv6(instance):
        # inet_pton raises socket.error (declared above) on invalid input,
        # which FormatChecker records as the failure cause.
        if not isinstance(instance, str_types):
            return True
        return socket.inet_pton(socket.AF_INET6, instance)
_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
@_checks_drafts(draft3="host-name", draft4="hostname")
def is_host_name(instance):
    """Validate a hostname: overall shape via regex, labels of <= 63 chars."""
    if not isinstance(instance, str_types):
        return True
    if _host_name_re.match(instance) is None:
        return False
    # Each dot-separated label is limited to 63 characters.
    return all(len(label) <= 63 for label in instance.split("."))
try:
    import rfc3987
except ImportError:
    # Optional dependency: without it, the "uri" format simply is not
    # validated (consistent with best-effort format checking).
    pass
else:
    @_checks_drafts("uri", raises=ValueError)
    def is_uri(instance):
        if not isinstance(instance, str_types):
            return True
        # rfc3987 raises ValueError for strings that are not valid URIs.
        return rfc3987.parse(instance, rule="URI")
# Prefer strict_rfc3339 for "date-time" validation; fall back to isodate,
# and register nothing if neither optional dependency is installed.
try:
    import strict_rfc3339
except ImportError:
    try:
        import isodate
    except ImportError:
        pass
    else:
        @_checks_drafts("date-time", raises=(ValueError, isodate.ISO8601Error))
        def is_date(instance):
            if not isinstance(instance, str_types):
                return True
            # Parse failure raises one of the declared exception types.
            return isodate.parse_datetime(instance)
else:
    @_checks_drafts("date-time")
    def is_date(instance):
        if not isinstance(instance, str_types):
            return True
        # Returns a plain boolean rather than raising on invalid input.
        return strict_rfc3339.validate_rfc3339(instance)
@_checks_drafts("regex", raises=re.error)
def is_regex(instance):
    """Validate a ``regex``: the string must compile under :mod:`re`."""
    if isinstance(instance, str_types):
        # A successful compile returns a truthy pattern object; a failed
        # one raises re.error, declared via ``raises`` above.
        return re.compile(instance)
    return True
@_checks_drafts(draft3="date", raises=ValueError)
def is_date(instance):
    """Validate a draft-3 ``date``: ISO ``YYYY-MM-DD``."""
    if isinstance(instance, str_types):
        # strptime raises ValueError on malformed dates.
        return datetime.datetime.strptime(instance, "%Y-%m-%d")
    return True
@_checks_drafts(draft3="time", raises=ValueError)
def is_time(instance):
    """Validate a draft-3 ``time``: ``HH:MM:SS``."""
    if isinstance(instance, str_types):
        # strptime raises ValueError on malformed times.
        return datetime.datetime.strptime(instance, "%H:%M:%S")
    return True
# Color checkers depend on the optional webcolors package.
try:
    import webcolors
except ImportError:
    pass
else:
    def is_css_color_code(instance):
        # Raises ValueError/TypeError for non-hex input (declared below).
        return webcolors.normalize_hex(instance)
    @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
    def is_css21_color(instance):
        # CSS 2.1 named colors pass directly; everything else must be a
        # valid hex color code.
        if (
            not isinstance(instance, str_types) or
            instance.lower() in webcolors.css21_names_to_hex
        ):
            return True
        return is_css_color_code(instance)
    def is_css3_color(instance):
        # Helper (not registered): CSS3 named colors or hex codes.
        if instance.lower() in webcolors.css3_names_to_hex:
            return True
        return is_css_color_code(instance)
# Ready-made checker instances limited to the formats registered above.
draft3_format_checker = FormatChecker(_draft_checkers["draft3"])
draft4_format_checker = FormatChecker(_draft_checkers["draft4"])
|
mit
|
T3chn3/HFP
|
webapp/cgi-bin/yate.py
|
22
|
1409
|
from string import Template
def start_response(resp="text/html"):
    """Return the CGI response header for *resp*, ending in a blank line."""
    return "Content-type: %s\n\n" % resp
def include_header(the_title):
    """Render templates/header.html, substituting ``$title`` with *the_title*."""
    with open('templates/header.html') as headf:
        head_text = headf.read()
    return Template(head_text).substitute(title=the_title)
def include_footer(the_links):
    """Render templates/footer.html; ``$links`` becomes one anchor per entry.

    *the_links* maps link text to URL; each anchor is followed by a space.
    """
    with open('templates/footer.html') as footf:
        foot_text = footf.read()
    link_string = ''.join('<a href="' + the_links[key] + '">' + key + '</a> '
                          for key in the_links)
    return Template(foot_text).substitute(links=link_string)
def start_form(the_url, form_type="POST"):
    """Open an HTML form submitting to *the_url* (method defaults to POST)."""
    return '<form action="%s" method="%s">' % (the_url, form_type)
def end_form(submit_msg="Submit"):
    """Close the form, preceded by a submit button labelled *submit_msg*."""
    return '<p></p><input type=submit value="%s"></form>' % submit_msg
def radio_button(rb_name, rb_value):
    """Return a radio input whose visible label is its own value."""
    return '<input type="radio" name="%s" value="%s"> %s<br />' % (
        rb_name, rb_value, rb_value)
def u_list(items):
    """Wrap each entry of *items* in ``<li>`` tags inside a ``<ul>``."""
    pieces = ['<ul>']
    for entry in items:
        pieces.append('<li>' + entry + '</li>')
    pieces.append('</ul>')
    return ''.join(pieces)
def header(header_text, header_level=2):
    """Wrap *header_text* in an ``<hN>`` element, N given by *header_level*."""
    level = str(header_level)
    return '<h' + level + '>' + header_text + '</h' + level + '>'
def para(para_text):
    """Wrap *para_text* in a paragraph element."""
    return ''.join(('<p>', para_text, '</p>'))
|
mit
|
metatoaster/mtj.eve.tracker
|
setup.py
|
1
|
1094
|
from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='mtj.eve.tracker',
version=version,
description="EVE Online Tracker",
long_description=open("README.rst").read() + "\n" +
open(os.path.join("docs", "HISTORY.rst")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Tommy Yu',
author_email='y@metatoaster.com',
url='',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['mtj', 'mtj.eve'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
'zope.component',
'zope.interface',
'EVELink>=0.4.0',
'requests',
'mtj.f3u1',
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
gpl-3.0
|
mavit/ansible
|
lib/ansible/modules/remote_management/cobbler/cobbler_system.py
|
30
|
10729
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cobbler_system
version_added: '2.7'
short_description: Manage system objects in Cobbler
description:
- Add, modify or remove systems in Cobbler
options:
host:
description:
- The name or IP address of the Cobbler system.
default: 127.0.0.1
port:
description:
- Port number to be used for REST connection.
- The default value depends on parameter C(use_ssl).
username:
description:
- The username to log in to Cobbler.
default: cobbler
password:
description:
- The password to log in to Cobbler.
required: yes
use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only set to C(no) when used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
name:
description:
- The system name to manage.
properties:
description:
- A dictionary with system properties.
interfaces:
description:
- A list of dictionaries containing interface options.
sync:
description:
- Sync on changes.
- Concurrently syncing Cobbler is bound to fail.
type: bool
default: no
state:
description:
- Whether the system should be present, absent or a query is made.
choices: [ absent, present, query ]
default: present
author:
- Dag Wieers (@dagwieers)
notes:
- Concurrently syncing Cobbler is bound to fail with weird errors.
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''
EXAMPLES = r'''
- name: Ensure the system exists in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: myhost
properties:
profile: CentOS6-x86_64
name_servers: [ 2.3.4.5, 3.4.5.6 ]
name_servers_search: foo.com, bar.com
interfaces:
eth0:
macaddress: 00:01:02:03:04:05
ipaddress: 1.2.3.4
delegate_to: localhost
- name: Enable network boot in Cobbler
cobbler_system:
host: bdsol-aci-cobbler-01
username: cobbler
password: ins3965!
name: bdsol-aci51-apic1.cisco.com
properties:
netboot_enabled: yes
state: present
delegate_to: localhost
- name: Query all systems in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
register: cobbler_systems
delegate_to: localhost
- name: Query a specific system in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: '{{ inventory_hostname }}'
register: cobbler_properties
delegate_to: localhost
- name: Ensure the system does not exist in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: myhost
delegate_to: localhost
'''
RETURN = r'''
systems:
description: List of systems
returned: C(state=query) and C(name) is not provided
type: list
system:
description: (Resulting) information about the system we are working with
returned: when C(name) is provided
type: dict
'''
import copy
import datetime
import ssl
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils._text import to_text
# Maps the flattened interface option names accepted by this module to the
# attribute names Cobbler stores on a system's interface dictionaries.
# NOTE(review): 'ipv6_secondariesu' looks like a typo for
# 'ipv6_secondaries' -- confirm against Cobbler's interface field names.
IFPROPS_MAPPING = dict(
    bondingopts='bonding_opts',
    bridgeopts='bridge_opts',
    connected_mode='connected_mode',
    cnames='cnames',
    dhcptag='dhcp_tag',
    dnsname='dns_name',
    ifgateway='if_gateway',
    interfacetype='interface_type',
    interfacemaster='interface_master',
    ipaddress='ip_address',
    ipv6address='ipv6_address',
    ipv6defaultgateway='ipv6_default_gateway',
    ipv6mtu='ipv6_mtu',
    ipv6prefix='ipv6_prefix',
    ipv6secondaries='ipv6_secondariesu',
    ipv6staticroutes='ipv6_static_routes',
    macaddress='mac_address',
    management='management',
    mtu='mtu',
    netmask='netmask',
    static='static',
    staticroutes='static_routes',
    virtbridge='virt_bridge',
)
def getsystem(conn, name, token):
    """Look up a single Cobbler system by name.

    Returns the first matching system dict, or an empty dict when *name*
    is empty/None or no system matches.
    """
    if not name:
        return dict()
    matches = conn.find_system(dict(name=name), token)
    return matches[0] if matches else dict()
def main():
    """Entry point: create, update, remove or query a Cobbler system.

    Connects to the Cobbler XMLRPC API, applies the requested ``state``
    and finishes through ``module.exit_json``/``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
            name=dict(type='str'),
            interfaces=dict(type='dict'),
            properties=dict(type='dict'),
            sync=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        ),
        supports_check_mode=True,
    )
    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']
    name = module.params['name']
    state = module.params['state']
    # Derive scheme and default port; both feed the URL format below.
    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'
    result = dict(
        changed=False,
    )
    start = datetime.datetime.utcnow()
    ssl_context = None
    if not validate_certs:
        try:  # Python 2.7.9 and newer: per-connection unverified context
            # BUGFIX: the previous code called ssl.create_unverified_context(),
            # which does not exist (the conventional name is
            # ssl._create_unverified_context()), so it always raised
            # AttributeError and certificate validation was never disabled.
            ssl_context = ssl._create_unverified_context()
        except AttributeError:  # Python 2.7.8 and older
            # No SSLContext support; disable verification globally instead.
            ssl._create_default_https_context = ssl._create_unverified_https_context
    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)
    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
    system = getsystem(conn, name, token)
    if state == 'query':
        if name:
            result['system'] = system
        else:
            # Return a list of dictionaries, one per system.
            result['systems'] = conn.get_systems()
    elif state == 'present':
        if system:
            # Update the existing entry field by field.
            system_id = conn.get_system_handle(name, token)
            # BUGFIX: guard against properties=None here too -- the create
            # branch below already guards, but this branch iterated
            # unconditionally and would crash on a bare present call.
            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    if key not in system:
                        module.warn("Property '{0}' is not a valid system property.".format(key))
                    if system[key] != value:
                        try:
                            conn.modify_system(system_id, key, value, token)
                            result['changed'] = True
                        except Exception as e:
                            module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
        else:
            # Create a new entry and set its properties.
            system_id = conn.new_system(token)
            conn.modify_system(system_id, 'name', name, token)
            result['changed'] = True
            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    try:
                        conn.modify_system(system_id, key, value, token)
                    except Exception as e:
                        module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
        # Collect interface changes as '<option>-<device>' keys, the shape
        # Cobbler's modify_interface call expects.
        interface_properties = dict()
        if module.params['interfaces']:
            for device, values in iteritems(module.params['interfaces']):
                for key, value in iteritems(values):
                    if key == 'name':
                        continue
                    if key not in IFPROPS_MAPPING:
                        module.warn("Property '{0}' is not a valid system property.".format(key))
                    if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
                        interface_properties['{0}-{1}'.format(key, device)] = value
        if interface_properties:
            conn.modify_system(system_id, "modify_interface", interface_properties, token)
            result['changed'] = True
        # Only save when the entry was changed
        if not module.check_mode and result['changed']:
            conn.save_system(system_id, token)
    elif state == 'absent':
        if system:
            if not module.check_mode:
                conn.remove_system(name, token)
            result['changed'] = True
    # Concurrent syncs are known to fail (see module notes), hence opt-in.
    if not module.check_mode and module.params['sync'] and result['changed']:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
    if state in ('absent', 'present'):
        # Re-read so the reported system reflects the applied changes.
        result['system'] = getsystem(conn, name, token)
        if module._diff:
            result['diff'] = dict(before=system, after=result['system'])
    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)
if __name__ == '__main__':
    main()
|
gpl-3.0
|
xserty/piDS
|
Desktop/packages/rmutil/UnixDriveDetector.py
|
2
|
2004
|
import threading, sys, os, time, platform, getpass
import wx
# wx.lib.pubsub needs the kwargs protocol selected (via setupkwargs) before
# the first 'pub' import on Linux wx builds; elsewhere the plain import works.
if platform.system() == "Linux":
    from wx.lib.pubsub import setupkwargs
    from wx.lib.pubsub import pub as Publisher
else:
    from wx.lib.pubsub import pub as Publisher
# Removable-media mount root differs per platform.
# NOTE(review): platform.dist() was removed in Python 3.8; this module is
# Python 2 code (see the print statement in BackgroundUSBDetection.run).
if platform.system() == "Linux":
    if 'fedora' in platform.dist():
        user = getpass.getuser()
        VOLUMES_PATH = "/run/media/" + user
    else:
        VOLUMES_PATH = "/media"
else:
    VOLUMES_PATH = "/Volumes"
# Module state shared between waitForUSBDrive() and the detector thread.
bg_thread = None
runFlag = True
volumes = None
def waitForUSBDrive():
    """Block until a newly mounted volume appears under VOLUMES_PATH.

    Starts (at most once) a polling daemon thread and joins it; the thread
    reports the new mount point by publishing 'usb_connected' on the wx
    main loop.
    """
    # load current list of volumes
    global volumes
    volumes = os.listdir(VOLUMES_PATH)
    global bg_thread
    if bg_thread == None:
        bg_thread = BackgroundUSBDetection()
        # daemon=True so a hung detector cannot keep the process alive
        bg_thread.daemon = True
        bg_thread.start()
    bg_thread.join()
    # RESULT CALL --> wx.CallAfter(Publisher.sendMessage, 'usb_connected', path=drive_path)
### THREAD FOR ASYNC USB DETECTION ###
class BackgroundUSBDetection(threading.Thread):
    # Daemon thread that polls VOLUMES_PATH every 2s until a new volume
    # appears, then publishes its path and stops (via the global runFlag).
    def __init__(self):
        # NOTE(review): run_event is created but never set or waited on;
        # shutdown is driven by the module-global runFlag instead.
        self.run_event = threading.Event()
        threading.Thread.__init__(self, name="Mac_Drive_Detector")
    def run(self):
        # Python 2 print statement: this module targets Python 2.
        print "Thread started..."
        global runFlag, volumes
        while runFlag:
            # check volumes
            curVols = os.listdir(VOLUMES_PATH)
            newVol = self.NewVolumes(volumes, curVols)
            # update list of volumes in case a volume was disconnected (e.g. retry plugging USB)
            volumes = curVols
            if len(newVol) > 0:
                # Publish the first newly seen mount point on the wx main
                # thread, then let the loop terminate.
                wx.CallAfter(Publisher.sendMessage, 'usb_connected', path=VOLUMES_PATH + '/' + newVol[0])
                runFlag = False
            time.sleep(2)
    def NewVolumes(self, oldVolumes, curVolumes):
        # Return entries present now but absent in the previous snapshot.
        newVol = []
        for volume in curVolumes:
            if not volume in oldVolumes:
                newVol.append(volume)
        return newVol
if __name__=='__main__':
    # load current list of volumes
    volumes = os.listdir(VOLUMES_PATH)
    waitForUSBDrive()
|
apache-2.0
|
funson/rt-xen
|
tools/python/xen/xend/uuid.py
|
48
|
2410
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
"""Universal Unique Identifiers (UUIDs). By default, UUIDs generated here are
purely random, with no internal structure. However, they are the same size,
and are formatted by the same conventions, as the UUIDs in the Open Software
Foundation's Distributed Computing Environment (OSF DCE). This allows Xend to
be used with UUIDs generated as per the DCE specification, should that be
required. These UUIDs are also, by no coincidence, the same size as the
'handle' stored by the Xen hypervisor along with the domain structure."""
import commands
import random
def getUuidUuidgen(randomly = True):
    """Generate a UUID using the command uuidgen.
    If randomly is true (default) generates a random uuid.
    If randomly is false generates a time-based uuid.
    """
    # NOTE(review): the 'commands' module is Python 2 only (removed in
    # Python 3).  This helper is unused unless uuidFactory is repointed.
    cmd = "uuidgen"
    if randomly:
        cmd += " -r"
    else:
        cmd += " -t"
    # Parse uuidgen's textual output back into the list-of-bytes form.
    return fromString(commands.getoutput(cmd))
def getUuidRandom():
    """Generate a purely random UUID as a list of 16 ints in [0, 255]."""
    return [random.randint(0, 255) for _ in range(16)]
#uuidFactory = getUuidUuidgen
# Default factory: in-process random bytes, avoiding a uuidgen subprocess.
uuidFactory = getUuidRandom
def toString(u):
    """Format a 16-byte UUID list in the DCE 8-4-4-4-12 hex notation."""
    # One %02x per byte, grouped 4/2/2/2/6 with hyphens in between.
    return ("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
            "%02x%02x%02x%02x%02x%02x") % tuple(u)
def fromString(s):
    """Parse a hyphenated hex UUID string into a list of 16 ints."""
    digits = s.replace('-', '')
    # Two hex digits per byte, 16 bytes total.
    return [int(digits[i:i + 2], 16) for i in range(0, 32, 2)]
def create():
    """Create a new UUID via the module-level ``uuidFactory``."""
    return uuidFactory()
def createString():
    """Create a new UUID and return it formatted as a string."""
    return toString(create())
|
gpl-2.0
|
certik/hermes2d
|
doc/exts/math_dollar.py
|
29
|
1813
|
import re
def process_dollars(app, docname, source):
    r"""
    Replace dollar-sign math with Sphinx ``:math:`` roles, in place.

    ``$...$`` becomes ``:math:`...```, and an escaped ``\$`` becomes a
    literal ``$``.  A dollar sign preceded by another ``$`` or a backslash
    is left alone.  ``$...$`` pairs inside a pair of curly braces are
    preserved untouched to avoid creating nested math environments, e.g.
    ``$f(n) = 0 \text{ if $n$ is prime}$``.
    """
    text = "\n".join(source)
    if "$" not in text:
        return
    # Shelve "{...$...$...}" spans behind unique placeholders so the inner
    # dollars are not converted; they are restored verbatim afterwards.
    global _data
    _data = {}
    def _shelve(match):
        global _data
        placeholder = "___XXX_REPL_%d___" % len(_data)
        _data[placeholder] = match.group(0)
        return placeholder
    text = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", _shelve, text)
    # $...$ (not preceded by another $ or a backslash) -> :math:`...`
    text = re.sub(r"(?<!\$)(?<!\\)\$([^\$]+?)\$", r":math:`\1`", text)
    # \$ -> literal $
    text = re.sub(r"\\\$", r"$", text)
    # Restore the shelved brace groups.
    for placeholder in _data:
        text = text.replace(placeholder, _data[placeholder])
    # Hand the result back through the mutable source list.
    source[:] = [text]
def setup(app):
    """Sphinx extension entry point: run dollar processing on source-read."""
    app.connect("source-read", process_dollars)
|
gpl-2.0
|
TheNite/namebench
|
nb_third_party/dns/rdtypes/ANY/SOA.py
|
246
|
5180
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class SOA(dns.rdata.Rdata):
    """SOA record
    @ivar mname: the SOA MNAME (master name) field
    @type mname: dns.name.Name object
    @ivar rname: the SOA RNAME (responsible name) field
    @type rname: dns.name.Name object
    @ivar serial: The zone's serial number
    @type serial: int
    @ivar refresh: The zone's refresh value (in seconds)
    @type refresh: int
    @ivar retry: The zone's retry value (in seconds)
    @type retry: int
    @ivar expire: The zone's expiration value (in seconds)
    @type expire: int
    @ivar minimum: The zone's negative caching time (in seconds, called
    "minimum" for historical reasons)
    @type minimum: int
    @see: RFC 1035"""
    # __slots__ avoids per-instance dicts; zones can hold many rdata objects.
    __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
                 'minimum']
    def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
                 expire, minimum):
        super(SOA, self).__init__(rdclass, rdtype)
        self.mname = mname
        self.rname = rname
        self.serial = serial
        self.refresh = refresh
        self.retry = retry
        self.expire = expire
        self.minimum = minimum
    def to_text(self, origin=None, relativize=True, **kw):
        # Render names relative to origin when requested, then the five
        # 32-bit serial/timer fields in master-file order.
        mname = self.mname.choose_relativity(origin, relativize)
        rname = self.rname.choose_relativity(origin, relativize)
        return '%s %s %d %d %d %d %d' % (
            mname, rname, self.serial, self.refresh, self.retry,
            self.expire, self.minimum )
    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse master-file text: MNAME RNAME serial refresh retry expire minimum.
        mname = tok.get_name()
        rname = tok.get_name()
        mname = mname.choose_relativity(origin, relativize)
        rname = rname.choose_relativity(origin, relativize)
        serial = tok.get_uint32()
        refresh = tok.get_ttl()
        retry = tok.get_ttl()
        expire = tok.get_ttl()
        minimum = tok.get_ttl()
        tok.get_eol()
        return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
                   expire, minimum )
    from_text = classmethod(from_text)
    def to_wire(self, file, compress = None, origin = None):
        # Names first (subject to DNS name compression), then the five
        # fixed 32-bit integers packed big-endian.
        self.mname.to_wire(file, compress, origin)
        self.rname.to_wire(file, compress, origin)
        five_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                self.retry, self.expire, self.minimum)
        file.write(five_ints)
    def to_digestable(self, origin = None):
        # Canonical (uncompressed) wire form, as used for digests.
        return self.mname.to_digestable(origin) + \
            self.rname.to_digestable(origin) + \
            struct.pack('!IIIII', self.serial, self.refresh,
                        self.retry, self.expire, self.minimum)
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        # After both names, exactly 20 octets (5 x 32-bit) must remain.
        if rdlen != 20:
            raise dns.exception.FormError
        five_ints = struct.unpack('!IIIII',
                                  wire[current : current + rdlen])
        if not origin is None:
            mname = mname.relativize(origin)
            rname = rname.relativize(origin)
        return cls(rdclass, rdtype, mname, rname,
                   five_ints[0], five_ints[1], five_ints[2], five_ints[3],
                   five_ints[4])
    from_wire = classmethod(from_wire)
    def choose_relativity(self, origin = None, relativize = True):
        self.mname = self.mname.choose_relativity(origin, relativize)
        self.rname = self.rname.choose_relativity(origin, relativize)
    def _cmp(self, other):
        # Python 2 style three-way compare: names first, then the packed
        # big-endian integers (byte order makes byte comparison equivalent
        # to numeric comparison).
        v = cmp(self.mname, other.mname)
        if v == 0:
            v = cmp(self.rname, other.rname)
            if v == 0:
                self_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                        self.retry, self.expire, self.minimum)
                other_ints = struct.pack('!IIIII', other.serial, other.refresh,
                                         other.retry, other.expire,
                                         other.minimum)
                v = cmp(self_ints, other_ints)
        return v
|
apache-2.0
|
commaai/panda
|
tests/gmlan_harness_test.py
|
1
|
1841
|
#!/usr/bin/env python3
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda # noqa: E402
WHITE_GMLAN_BUS = 3
OTHER_GMLAN_BUS = 1
def set_gmlan(p):
  """Enable GMLAN output on the panda: bus 2 on white hardware, OBD mux otherwise."""
  if p.is_white():
    p.set_gmlan(2)
    return
  p.set_obd(True)
def set_speed_kbps(p, speed):
  """Set the GMLAN bitrate, selecting the bus by hardware type."""
  bus = WHITE_GMLAN_BUS if p.is_white() else OTHER_GMLAN_BUS
  p.set_can_speed_kbps(bus, speed)
def send(p, id_, msg):
  """Transmit one CAN frame on the hardware-appropriate GMLAN bus."""
  bus = WHITE_GMLAN_BUS if p.is_white() else OTHER_GMLAN_BUS
  p.can_send(id_, msg, bus)
if __name__ == "__main__":
  # Hardware loop test: expects exactly two pandas connected, drives GMLAN
  # traffic both ways at both supported bitrates and checks each frame
  # arrives on the peer.
  pl = Panda.list()
  assert(len(pl) == 2)
  p0 = Panda(pl[1])
  p1 = Panda(pl[0])
  p0.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
  p1.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
  print("0: ", p0.get_type())
  print("1: ", p1.get_type())
  set_gmlan(p0)
  set_gmlan(p1)
  p0.can_clear(0xFFFF)
  p1.can_clear(0xFFFF)
  try:
    loops = 0
    while True:
      for speed in [33.3, 83.3]:
        set_speed_kbps(p0, speed)
        set_speed_kbps(p1, speed)
        # Flush both RX queues so the assertions below see only new frames.
        p0.can_clear(0xFFFF)
        p1.can_clear(0xFFFF)
        print(f"Speed: {speed}")
        time.sleep(0.1)
        print("Send 1 -> 0")
        send(p1, 1, b"1to0:" + bytes(str(loops%100), "utf-8"))
        time.sleep(0.05)
        # NOTE(review): x[3] appears to be the source bus and values >= 128
        # look like TX echo frames -- confirm against panda's can_recv format.
        rx = list(filter(lambda x: x[3] < 128, p0.can_recv()))
        print(rx)
        assert(len(rx) == 1)
        print("Send 0 -> 1")
        send(p0, 1, b"0to1:" + bytes(str(loops%100), "utf-8"))
        time.sleep(0.05)
        rx = list(filter(lambda x: x[3] < 128, p1.can_recv()))
        print(rx)
        assert(len(rx) == 1)
        time.sleep(0.5)
      loops += 1
      print(f"Completed {loops} loops")
  except Exception:
    print("Test failed somehow. Did you power the black panda using the GMLAN harness?")
|
mit
|
venthur/wyrm
|
test/test_variance.py
|
2
|
1975
|
from __future__ import division
import unittest
import numpy as np
from wyrm.types import Data
from wyrm.processing import variance
from wyrm.processing import swapaxes
class TestVariance(unittest.TestCase):
    # Fixture: 3 epochs of shape (10 samples x 5 channels) filled with the
    # constants 0, 1 and 2, so every epoch has zero variance over time.
    def setUp(self):
        ones = np.ones((10, 5))
        # epo with 0, 1, 2
        data = np.array([0*ones, ones, 2*ones])
        channels = ['ca1', 'ca2', 'cb1', 'cb2', 'cc1']
        time = np.linspace(0, 1000, 10, endpoint=False)
        classes = [0, 1, 2]
        self.dat = Data(data, [classes, time, channels], ['class', 'time', 'channel'], ['#', 'ms', '#'])
    def test_variance(self):
        """Variance."""
        dat = variance(self.dat)
        # test the resulting dat has one axis less (the middle one)
        self.assertEqual(dat.data.shape, self.dat.data.shape[::2])
        # each epoch should have a variance of zero, test if the var of
        # all epochs is 0
        self.assertEqual(dat.data.var(), 0)
        self.assertEqual(len(dat.axes), len(self.dat.axes)-1)
    def test_variance_with_cnt(self):
        """variance must work with cnt argument."""
        # Drop the leading class axis to emulate continuous (cnt) data.
        data = self.dat.data[1]
        axes = self.dat.axes[1:]
        names = self.dat.names[1:]
        units = self.dat.units[1:]
        dat = self.dat.copy(data=data, axes=axes, names=names, units=units)
        dat = variance(dat)
        self.assertEqual(dat.data.var(), 0)
        self.assertEqual(len(dat.axes), len(self.dat.axes)-2)
    def test_variance_swapaxes(self):
        """variance must work with nonstandard timeaxis."""
        dat = variance(swapaxes(self.dat, 1, 2), timeaxis=2)
        # we don't swap back here as variance removes the timeaxis
        dat2 = variance(self.dat)
        self.assertEqual(dat, dat2)
    def test_variance_copy(self):
        """variance must not modify argument."""
        cpy = self.dat.copy()
        variance(self.dat)
        self.assertEqual(self.dat, cpy)
if __name__ == '__main__':
    unittest.main()
|
mit
|
jerem/django-rcsfield
|
rcsfield/backends/base.py
|
4
|
2535
|
"""
Base-backends for django-rcsfield.
Used to hold common functionality of all backends.
Every backend module implements a very simple API.
The following functions are exported:
* fetch(key, revision): knows how to fetch a specific revision of the entity
referenced by ``key``
* commit(key, data): knows how to commit changed ``data`` to the entity
referenced by ``key``
* initial(): does optional setup needed for the backend to work. called on
``post_syncdb`` signal.
* get_revisions(key): returns a list of revisions in which the entity
  identified by ``key`` was changed.
* move(key_from, key_to): knows how to move an entity from ``key_from``
to ``key_to`` while keeping the history. this method is optional.
* diff(key1, rev1, key2, rev2): returns a unified diff of the contents
of ``key1``@``rev1`` against ``key2``@``rev2``.
"""
import difflib
class NoSuchRevision(Exception):
    """Raised when a backend is asked for a revision it does not know."""
class BaseBackend(object):
    """
    Common base class for all rcsfield backends.

    Concrete backends override :meth:`commit`, :meth:`fetch`,
    :meth:`get_revisions` and optionally :meth:`move`; :meth:`diff` is
    implemented here in terms of :meth:`fetch`.
    """
    def initial(self):
        """
        Hook called on ``post_syncdb``; backends may perform one-time
        setup needed to work correctly.  The default does nothing.
        """
        pass
    def commit(self, key, data):
        """Versionize a change of ``key`` with new ``data``."""
        raise NotImplementedError
    def fetch(self, key, rev):
        """Fetch the data of ``key`` at revision ``rev``."""
        raise NotImplementedError
    def get_revisions(self, key):
        """Return a list of all revisions in which ``key`` changed."""
        raise NotImplementedError
    def move(self, key_from, key_to):
        """
        Move an entity from ``key_from`` to ``key_to`` while keeping its
        history -- useful after the ``rcskey_format`` of a ``RcsTextField``
        changed.  Optional; not every backend supports it.
        """
        raise NotImplementedError
    def diff(self, key1, rev1, key2, rev2):
        """
        Return a textual unified diff of two entities at the specified
        revisions.  Two key names are accepted so renamed files can be
        diffed against each other.
        """
        old = self.fetch(key1, rev1)
        new = self.fetch(key2, rev2)
        # keepends=True so the diff machinery sees complete lines.
        return difflib.unified_diff(
            old.splitlines(True),
            new.splitlines(True),
            'Revision: %s' % rev1,
            'Revision: %s' % rev2,
        )
|
bsd-3-clause
|
ygenc/onlineLDA
|
onlineldavb_new/build/scipy/build/lib.macosx-10.6-intel-2.7/scipy/sparse/sparsetools/csc.py
|
4
|
19953
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.1+capsulehack
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# This file is compatible with both classic and new-style classes.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_csc', [dirname(__file__)])
except ImportError:
import _csc
return _csc
if fp is not None:
try:
_mod = imp.load_module('_csc', fp, pathname, description)
finally:
fp.close()
return _mod
_csc = swig_import_helper()
del swig_import_helper
else:
import _csc
del version_info
# SWIG compatibility shims for old Python versions (generated code).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Route 'thisown'/'this' to the underlying SwigPyObject, then class
    # setter methods, then (if allowed) the instance dict.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Mirror of the setter: 'thisown' then class getter methods.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # NOTE(review): the bare except in this generated helper deliberately
    # swallows any failure to stringify the underlying pointer.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
# ---------------------------------------------------------------------------
# SWIG-generated thin wrappers around the _csc C extension module.
# Each function simply forwards *args to the extension; the docstrings list
# the supported C overloads (one per value dtype), and the extension selects
# the overload from the argument array dtypes at call time.  Do not edit the
# bodies by hand — regenerate with SWIG instead.
# ---------------------------------------------------------------------------
def csc_matmat_pass1(*args):
  """
    csc_matmat_pass1(int n_row, int n_col, int Ap, int Ai, int Bp, int Bi,
        int Cp)
    """
  return _csc.csc_matmat_pass1(*args)
def csc_diagonal(*args):
  """
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax,
        signed char Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
        unsigned char Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
        unsigned short Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
        unsigned int Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax,
        long long Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
        unsigned long long Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax,
        long double Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
        npy_cfloat_wrapper Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
        npy_cdouble_wrapper Yx)
    csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
        npy_clongdouble_wrapper Yx)
    """
  return _csc.csc_diagonal(*args)
def csc_tocsr(*args):
  """
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        int Bp, int Bj, signed char Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        int Bp, int Bj, unsigned char Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
        int Bj, short Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        int Bp, int Bj, unsigned short Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
        int Bj, int Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        int Bp, int Bj, unsigned int Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, long long Ax,
        int Bp, int Bj, long long Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        int Bp, int Bj, unsigned long long Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
        int Bj, float Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
        int Bj, double Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, long double Ax,
        int Bp, int Bj, long double Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        int Bp, int Bj, npy_cfloat_wrapper Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        int Bp, int Bj, npy_cdouble_wrapper Bx)
    csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        int Bp, int Bj, npy_clongdouble_wrapper Bx)
    """
  return _csc.csc_tocsr(*args)
def csc_matmat_pass2(*args):
  """
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        int Bp, int Bi, signed char Bx, int Cp, int Ci,
        signed char Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        int Bp, int Bi, unsigned char Bx, int Cp,
        int Ci, unsigned char Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
        int Bi, short Bx, int Cp, int Ci, short Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        int Bp, int Bi, unsigned short Bx, int Cp,
        int Ci, unsigned short Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
        int Bi, int Bx, int Cp, int Ci, int Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        int Bp, int Bi, unsigned int Bx, int Cp,
        int Ci, unsigned int Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long long Ax,
        int Bp, int Bi, long long Bx, int Cp, int Ci,
        long long Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        int Bp, int Bi, unsigned long long Bx,
        int Cp, int Ci, unsigned long long Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
        int Bi, float Bx, int Cp, int Ci, float Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
        int Bi, double Bx, int Cp, int Ci, double Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long double Ax,
        int Bp, int Bi, long double Bx, int Cp, int Ci,
        long double Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        int Bp, int Bi, npy_cfloat_wrapper Bx,
        int Cp, int Ci, npy_cfloat_wrapper Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        int Bp, int Bi, npy_cdouble_wrapper Bx,
        int Cp, int Ci, npy_cdouble_wrapper Cx)
    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        int Bp, int Bi, npy_clongdouble_wrapper Bx,
        int Cp, int Ci, npy_clongdouble_wrapper Cx)
    """
  return _csc.csc_matmat_pass2(*args)
def csc_matvec(*args):
  """
    csc_matvec(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        signed char Xx, signed char Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        unsigned char Xx, unsigned char Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, short Ax, short Xx,
        short Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        unsigned short Xx, unsigned short Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx,
        int Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        unsigned int Xx, unsigned int Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, long long Ax,
        long long Xx, long long Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        unsigned long long Xx, unsigned long long Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx,
        float Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx,
        double Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, long double Ax,
        long double Xx, long double Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
    csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
    """
  return _csc.csc_matvec(*args)
def csc_matvecs(*args):
  """
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, signed char Ax,
        signed char Xx, signed char Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned char Ax,
        unsigned char Xx, unsigned char Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, short Ax,
        short Xx, short Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned short Ax,
        unsigned short Xx, unsigned short Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, int Ax,
        int Xx, int Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned int Ax,
        unsigned int Xx, unsigned int Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long long Ax,
        long long Xx, long long Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned long long Ax,
        unsigned long long Xx,
        unsigned long long Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, float Ax,
        float Xx, float Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, double Ax,
        double Xx, double Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long double Ax,
        long double Xx, long double Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cfloat_wrapper Ax,
        npy_cfloat_wrapper Xx,
        npy_cfloat_wrapper Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cdouble_wrapper Ax,
        npy_cdouble_wrapper Xx,
        npy_cdouble_wrapper Yx)
    csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        npy_clongdouble_wrapper Xx,
        npy_clongdouble_wrapper Yx)
    """
  return _csc.csc_matvecs(*args)
def csc_elmul_csc(*args):
  """
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        int Bp, int Bi, signed char Bx, int Cp, int Ci,
        signed char Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        int Bp, int Bi, unsigned char Bx, int Cp,
        int Ci, unsigned char Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
        int Bi, short Bx, int Cp, int Ci, short Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        int Bp, int Bi, unsigned short Bx, int Cp,
        int Ci, unsigned short Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
        int Bi, int Bx, int Cp, int Ci, int Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        int Bp, int Bi, unsigned int Bx, int Cp,
        int Ci, unsigned int Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
        int Bp, int Bi, long long Bx, int Cp, int Ci,
        long long Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        int Bp, int Bi, unsigned long long Bx,
        int Cp, int Ci, unsigned long long Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
        int Bi, float Bx, int Cp, int Ci, float Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
        int Bi, double Bx, int Cp, int Ci, double Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
        int Bp, int Bi, long double Bx, int Cp, int Ci,
        long double Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        int Bp, int Bi, npy_cfloat_wrapper Bx,
        int Cp, int Ci, npy_cfloat_wrapper Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        int Bp, int Bi, npy_cdouble_wrapper Bx,
        int Cp, int Ci, npy_cdouble_wrapper Cx)
    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        int Bp, int Bi, npy_clongdouble_wrapper Bx,
        int Cp, int Ci, npy_clongdouble_wrapper Cx)
    """
  return _csc.csc_elmul_csc(*args)
def csc_eldiv_csc(*args):
  """
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        int Bp, int Bi, signed char Bx, int Cp, int Ci,
        signed char Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        int Bp, int Bi, unsigned char Bx, int Cp,
        int Ci, unsigned char Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
        int Bi, short Bx, int Cp, int Ci, short Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        int Bp, int Bi, unsigned short Bx, int Cp,
        int Ci, unsigned short Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
        int Bi, int Bx, int Cp, int Ci, int Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        int Bp, int Bi, unsigned int Bx, int Cp,
        int Ci, unsigned int Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
        int Bp, int Bi, long long Bx, int Cp, int Ci,
        long long Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        int Bp, int Bi, unsigned long long Bx,
        int Cp, int Ci, unsigned long long Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
        int Bi, float Bx, int Cp, int Ci, float Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
        int Bi, double Bx, int Cp, int Ci, double Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
        int Bp, int Bi, long double Bx, int Cp, int Ci,
        long double Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        int Bp, int Bi, npy_cfloat_wrapper Bx,
        int Cp, int Ci, npy_cfloat_wrapper Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        int Bp, int Bi, npy_cdouble_wrapper Bx,
        int Cp, int Ci, npy_cdouble_wrapper Cx)
    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        int Bp, int Bi, npy_clongdouble_wrapper Bx,
        int Cp, int Ci, npy_clongdouble_wrapper Cx)
    """
  return _csc.csc_eldiv_csc(*args)
def csc_plus_csc(*args):
  """
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        int Bp, int Bi, signed char Bx, int Cp, int Ci,
        signed char Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        int Bp, int Bi, unsigned char Bx, int Cp,
        int Ci, unsigned char Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
        int Bi, short Bx, int Cp, int Ci, short Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        int Bp, int Bi, unsigned short Bx, int Cp,
        int Ci, unsigned short Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
        int Bi, int Bx, int Cp, int Ci, int Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        int Bp, int Bi, unsigned int Bx, int Cp,
        int Ci, unsigned int Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
        int Bp, int Bi, long long Bx, int Cp, int Ci,
        long long Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        int Bp, int Bi, unsigned long long Bx,
        int Cp, int Ci, unsigned long long Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
        int Bi, float Bx, int Cp, int Ci, float Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
        int Bi, double Bx, int Cp, int Ci, double Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
        int Bp, int Bi, long double Bx, int Cp, int Ci,
        long double Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        int Bp, int Bi, npy_cfloat_wrapper Bx,
        int Cp, int Ci, npy_cfloat_wrapper Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        int Bp, int Bi, npy_cdouble_wrapper Bx,
        int Cp, int Ci, npy_cdouble_wrapper Cx)
    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        int Bp, int Bi, npy_clongdouble_wrapper Bx,
        int Cp, int Ci, npy_clongdouble_wrapper Cx)
    """
  return _csc.csc_plus_csc(*args)
def csc_minus_csc(*args):
  """
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
        int Bp, int Bi, signed char Bx, int Cp, int Ci,
        signed char Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
        int Bp, int Bi, unsigned char Bx, int Cp,
        int Ci, unsigned char Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
        int Bi, short Bx, int Cp, int Ci, short Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
        int Bp, int Bi, unsigned short Bx, int Cp,
        int Ci, unsigned short Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
        int Bi, int Bx, int Cp, int Ci, int Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
        int Bp, int Bi, unsigned int Bx, int Cp,
        int Ci, unsigned int Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
        int Bp, int Bi, long long Bx, int Cp, int Ci,
        long long Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
        int Bp, int Bi, unsigned long long Bx,
        int Cp, int Ci, unsigned long long Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
        int Bi, float Bx, int Cp, int Ci, float Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
        int Bi, double Bx, int Cp, int Ci, double Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
        int Bp, int Bi, long double Bx, int Cp, int Ci,
        long double Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
        int Bp, int Bi, npy_cfloat_wrapper Bx,
        int Cp, int Ci, npy_cfloat_wrapper Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
        int Bp, int Bi, npy_cdouble_wrapper Bx,
        int Cp, int Ci, npy_cdouble_wrapper Cx)
    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
        int Bp, int Bi, npy_clongdouble_wrapper Bx,
        int Cp, int Ci, npy_clongdouble_wrapper Cx)
    """
  return _csc.csc_minus_csc(*args)
|
gpl-3.0
|
benjaminjkraft/django
|
django/conf/locale/ka/formats.py
|
504
|
2180
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax).
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i a'
DATETIME_FORMAT = 'j F, Y h:i a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday (Django counts days from Sunday = 0)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
    # '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
]
# Number formatting: group digits in threes, separated by a space.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
|
bsd-3-clause
|
LeoZ123/Machine-Learning-Practice
|
Neural_Network/NeuralNetwork.py
|
1
|
3201
|
'''
Created on Mar 28, 2017
@author: Leo Zhong
'''
import numpy as np # for metrics calculation
# --- Activation functions and their derivatives ---------------------------
def tanh(x):
    """Hyperbolic tangent activation."""
    return np.tanh(x)
def tanh_deriv(x):
    """Derivative of tanh: 1 - tanh(x)**2 (tanh evaluated only once)."""
    t = np.tanh(x)
    return 1.0 - t * t
def logistic(x):
    """Logistic (sigmoid) activation: 1 / (1 + e**-x)."""
    return 1/(1 + np.exp(-x))
def logistic_derivative(x):
    """Derivative of the logistic function: s * (1 - s), evaluated once."""
    s = logistic(x)
    return s * (1 - s)
class NeuralNetwork:
    """Minimal fully-connected feed-forward network trained with
    per-sample (stochastic) backpropagation.

    The bias is modelled as an extra always-one input unit, so hidden
    weight matrices carry one extra row and column.
    """
    def __init__(self, layers, activation='tanh'):
        """layers: units per layer, e.g. [2, 2, 1].
        activation: 'tanh' (default) or 'logistic'.
        """
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv
        else:
            # Fail fast instead of crashing later with a missing attribute.
            raise ValueError("unknown activation: %r" % (activation,))
        # Random weights in [-0.25, 0.25): one matrix per layer gap.
        # Hidden matrices keep the bias unit (+1) on both sides; the last
        # matrix maps onto the raw output units.  (The original appended
        # two matrices per loop iteration, producing a wrong number of
        # weight matrices for networks with more than one hidden layer.)
        self.weights = []
        for i in range(1, len(layers) - 1):
            self.weights.append((2*np.random.random((layers[i - 1] + 1, layers[i] + 1))-1)*0.25)
        self.weights.append((2*np.random.random((layers[-2] + 1, layers[-1]))-1)*0.25)
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        """Train with stochastic backprop.

        X: samples (one row each); y: targets; learning_rate in (0, 1];
        epochs: number of single-sample weight updates.
        """
        X = np.atleast_2d(X)
        # Append the constant bias input (a column of ones) to each sample.
        temp = np.ones([X.shape[0], X.shape[1]+1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)
        for k in range(epochs):
            # Draw one random sample per update (SGD).
            i = np.random.randint(X.shape[0])
            a = [X[i]]
            # Forward pass: record every layer's activations.
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            # Output-layer error and its delta.
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]
            # Backward pass: propagate deltas from the last hidden layer
            # down to the first.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))
            deltas.reverse()
            # Gradient step on every weight matrix.
            for j in range(len(self.weights)):
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * layer.T.dot(delta)
    def predict(self, x):
        """Run a forward pass on a single sample; return the output array."""
        x = np.array(x)
        a = np.ones(x.shape[0]+1)  # sample plus the bias unit
        a[0:-1] = x
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
|
mit
|
mchristopher/PokemonGo-DesktopMap
|
app/pywin/Lib/lib2to3/fixes/fix_filter.py
|
326
|
2107
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
    # Eligible for lib2to3's bottom-matcher optimization.
    BM_compatible = True
    # Three alternatives, in order:
    #   filter(lambda fp: xp, it)  -> matched as 'filter_lambda'
    #   filter(None, seq)          -> matched via 'none'
    #   filter(<anything else>)    -> matched via 'args'
    PATTERN = """
    filter_lambda=power<
        'filter'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'filter'
        trailer< '(' arglist< none='None' ',' seq=any > ')' >
    >
    |
    power<
        'filter'
        args=trailer< '(' [any] ')' >
    >
    """
    # Skip modules that already do 'from future_builtins import filter'.
    skip_on = "future_builtins.filter"
    def transform(self, node, results):
        """Rewrite a matched filter() call and return the replacement node."""
        if self.should_skip(node):
            return
        if "filter_lambda" in results:
            # filter(lambda fp: xp, it)  ->  [fp for fp in it if xp]
            new = ListComp(results.get("fp").clone(),
                           results.get("fp").clone(),
                           results.get("it").clone(),
                           results.get("xp").clone())
        elif "none" in results:
            # filter(None, seq)  ->  [_f for _f in seq if _f]
            new = ListComp(Name(u"_f"),
                           Name(u"_f"),
                           results["seq"].clone(),
                           Name(u"_f"))
        else:
            if in_special_context(node):
                # Result is consumed lazily (iter/list/tuple/for ...): leave as is.
                return None
            # Generic case: wrap the whole call in list(...).
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        new.prefix = node.prefix
        return new
|
mit
|
ShinyROM/android_external_chromium_org
|
third_party/protobuf/python/google/protobuf/internal/test_util.py
|
210
|
27885
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Python proto2 tests.
This is intentionally modeled on C++ code in
//google/protobuf/test_util.*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import os.path
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
def SetAllNonLazyFields(message):
  """Sets every non-lazy field in the message to a unique value.

  Value numbering convention (mirrors the C++ test_util): optional fields
  use 1xx, the first repeated element 2xx, the second repeated element 3xx,
  and fields with defaults 4xx.

  Args:
    message: A unittest_pb2.TestAllTypes instance.
  """
  #
  # Optional fields.
  #
  message.optional_int32    = 101
  message.optional_int64    = 102
  message.optional_uint32   = 103
  message.optional_uint64   = 104
  message.optional_sint32   = 105
  message.optional_sint64   = 106
  message.optional_fixed32  = 107
  message.optional_fixed64  = 108
  message.optional_sfixed32 = 109
  message.optional_sfixed64 = 110
  message.optional_float    = 111
  message.optional_double   = 112
  message.optional_bool     = True
  # TODO(robinson): Firmly spec out and test how
  # protos interact with unicode.  One specific example:
  # what happens if we change the literal below to
  # u'115'?  What *should* happen?  Still some discussion
  # to finish with Kenton about bytes vs. strings
  # and forcing everything to be utf8. :-/
  message.optional_string   = '115'
  message.optional_bytes    = '116'
  message.optionalgroup.a = 117
  message.optional_nested_message.bb = 118
  message.optional_foreign_message.c = 119
  message.optional_import_message.d = 120
  message.optional_public_import_message.e = 126
  message.optional_nested_enum = unittest_pb2.TestAllTypes.BAZ
  message.optional_foreign_enum = unittest_pb2.FOREIGN_BAZ
  message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ
  message.optional_string_piece = '124'
  message.optional_cord = '125'
  #
  # Repeated fields.
  #
  message.repeated_int32.append(201)
  message.repeated_int64.append(202)
  message.repeated_uint32.append(203)
  message.repeated_uint64.append(204)
  message.repeated_sint32.append(205)
  message.repeated_sint64.append(206)
  message.repeated_fixed32.append(207)
  message.repeated_fixed64.append(208)
  message.repeated_sfixed32.append(209)
  message.repeated_sfixed64.append(210)
  message.repeated_float.append(211)
  message.repeated_double.append(212)
  message.repeated_bool.append(True)
  message.repeated_string.append('215')
  message.repeated_bytes.append('216')
  message.repeatedgroup.add().a = 217
  message.repeated_nested_message.add().bb = 218
  message.repeated_foreign_message.add().c = 219
  message.repeated_import_message.add().d = 220
  message.repeated_lazy_message.add().bb = 227
  message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR)
  message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAR)
  message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR)
  message.repeated_string_piece.append('224')
  message.repeated_cord.append('225')
  # Add a second one of each field (3xx values).
  message.repeated_int32.append(301)
  message.repeated_int64.append(302)
  message.repeated_uint32.append(303)
  message.repeated_uint64.append(304)
  message.repeated_sint32.append(305)
  message.repeated_sint64.append(306)
  message.repeated_fixed32.append(307)
  message.repeated_fixed64.append(308)
  message.repeated_sfixed32.append(309)
  message.repeated_sfixed64.append(310)
  message.repeated_float.append(311)
  message.repeated_double.append(312)
  message.repeated_bool.append(False)
  message.repeated_string.append('315')
  message.repeated_bytes.append('316')
  message.repeatedgroup.add().a = 317
  message.repeated_nested_message.add().bb = 318
  message.repeated_foreign_message.add().c = 319
  message.repeated_import_message.add().d = 320
  message.repeated_lazy_message.add().bb = 327
  message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAZ)
  message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAZ)
  message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ)
  message.repeated_string_piece.append('324')
  message.repeated_cord.append('325')
  #
  # Fields that have defaults.
  #
  message.default_int32 = 401
  message.default_int64 = 402
  message.default_uint32 = 403
  message.default_uint64 = 404
  message.default_sint32 = 405
  message.default_sint64 = 406
  message.default_fixed32 = 407
  message.default_fixed64 = 408
  message.default_sfixed32 = 409
  message.default_sfixed64 = 410
  message.default_float = 411
  message.default_double = 412
  message.default_bool = False
  message.default_string = '415'
  message.default_bytes = '416'
  message.default_nested_enum = unittest_pb2.TestAllTypes.FOO
  message.default_foreign_enum = unittest_pb2.FOREIGN_FOO
  message.default_import_enum = unittest_import_pb2.IMPORT_FOO
  message.default_string_piece = '424'
  message.default_cord = '425'
def SetAllFields(message):
  """Sets every field, including the lazy one, to a unique value."""
  SetAllNonLazyFields(message)
  message.optional_lazy_message.bb = 127
def SetAllExtensions(message):
  """Sets every extension in the message to a unique value.

  Mirrors SetAllNonLazyFields' numbering: optionals 1xx, first repeated
  element 2xx, second 3xx, defaults 4xx.

  Args:
    message: A unittest_pb2.TestAllExtensions instance.
  """
  extensions = message.Extensions
  pb2 = unittest_pb2
  import_pb2 = unittest_import_pb2
  #
  # Optional fields.
  #
  extensions[pb2.optional_int32_extension] = 101
  extensions[pb2.optional_int64_extension] = 102
  extensions[pb2.optional_uint32_extension] = 103
  extensions[pb2.optional_uint64_extension] = 104
  extensions[pb2.optional_sint32_extension] = 105
  extensions[pb2.optional_sint64_extension] = 106
  extensions[pb2.optional_fixed32_extension] = 107
  extensions[pb2.optional_fixed64_extension] = 108
  extensions[pb2.optional_sfixed32_extension] = 109
  extensions[pb2.optional_sfixed64_extension] = 110
  extensions[pb2.optional_float_extension] = 111
  extensions[pb2.optional_double_extension] = 112
  extensions[pb2.optional_bool_extension] = True
  extensions[pb2.optional_string_extension] = '115'
  extensions[pb2.optional_bytes_extension] = '116'
  extensions[pb2.optionalgroup_extension].a = 117
  extensions[pb2.optional_nested_message_extension].bb = 118
  extensions[pb2.optional_foreign_message_extension].c = 119
  extensions[pb2.optional_import_message_extension].d = 120
  extensions[pb2.optional_public_import_message_extension].e = 126
  extensions[pb2.optional_lazy_message_extension].bb = 127
  # (A duplicate assignment of optional_nested_enum_extension was removed here.)
  extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ
  extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ
  extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ
  extensions[pb2.optional_string_piece_extension] = '124'
  extensions[pb2.optional_cord_extension] = '125'
  #
  # Repeated fields.
  #
  extensions[pb2.repeated_int32_extension].append(201)
  extensions[pb2.repeated_int64_extension].append(202)
  extensions[pb2.repeated_uint32_extension].append(203)
  extensions[pb2.repeated_uint64_extension].append(204)
  extensions[pb2.repeated_sint32_extension].append(205)
  extensions[pb2.repeated_sint64_extension].append(206)
  extensions[pb2.repeated_fixed32_extension].append(207)
  extensions[pb2.repeated_fixed64_extension].append(208)
  extensions[pb2.repeated_sfixed32_extension].append(209)
  extensions[pb2.repeated_sfixed64_extension].append(210)
  extensions[pb2.repeated_float_extension].append(211)
  extensions[pb2.repeated_double_extension].append(212)
  extensions[pb2.repeated_bool_extension].append(True)
  extensions[pb2.repeated_string_extension].append('215')
  extensions[pb2.repeated_bytes_extension].append('216')
  extensions[pb2.repeatedgroup_extension].add().a = 217
  extensions[pb2.repeated_nested_message_extension].add().bb = 218
  extensions[pb2.repeated_foreign_message_extension].add().c = 219
  extensions[pb2.repeated_import_message_extension].add().d = 220
  extensions[pb2.repeated_lazy_message_extension].add().bb = 227
  extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAR)
  extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR)
  extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR)
  extensions[pb2.repeated_string_piece_extension].append('224')
  extensions[pb2.repeated_cord_extension].append('225')
  # Append a second one of each field (3xx values).
  extensions[pb2.repeated_int32_extension].append(301)
  extensions[pb2.repeated_int64_extension].append(302)
  extensions[pb2.repeated_uint32_extension].append(303)
  extensions[pb2.repeated_uint64_extension].append(304)
  extensions[pb2.repeated_sint32_extension].append(305)
  extensions[pb2.repeated_sint64_extension].append(306)
  extensions[pb2.repeated_fixed32_extension].append(307)
  extensions[pb2.repeated_fixed64_extension].append(308)
  extensions[pb2.repeated_sfixed32_extension].append(309)
  extensions[pb2.repeated_sfixed64_extension].append(310)
  extensions[pb2.repeated_float_extension].append(311)
  extensions[pb2.repeated_double_extension].append(312)
  extensions[pb2.repeated_bool_extension].append(False)
  extensions[pb2.repeated_string_extension].append('315')
  extensions[pb2.repeated_bytes_extension].append('316')
  extensions[pb2.repeatedgroup_extension].add().a = 317
  extensions[pb2.repeated_nested_message_extension].add().bb = 318
  extensions[pb2.repeated_foreign_message_extension].add().c = 319
  extensions[pb2.repeated_import_message_extension].add().d = 320
  extensions[pb2.repeated_lazy_message_extension].add().bb = 327
  extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAZ)
  extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ)
  extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ)
  extensions[pb2.repeated_string_piece_extension].append('324')
  extensions[pb2.repeated_cord_extension].append('325')
  #
  # Fields with defaults.
  #
  extensions[pb2.default_int32_extension] = 401
  extensions[pb2.default_int64_extension] = 402
  extensions[pb2.default_uint32_extension] = 403
  extensions[pb2.default_uint64_extension] = 404
  extensions[pb2.default_sint32_extension] = 405
  extensions[pb2.default_sint64_extension] = 406
  extensions[pb2.default_fixed32_extension] = 407
  extensions[pb2.default_fixed64_extension] = 408
  extensions[pb2.default_sfixed32_extension] = 409
  extensions[pb2.default_sfixed64_extension] = 410
  extensions[pb2.default_float_extension] = 411
  extensions[pb2.default_double_extension] = 412
  extensions[pb2.default_bool_extension] = False
  extensions[pb2.default_string_extension] = '415'
  extensions[pb2.default_bytes_extension] = '416'
  extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO
  extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO
  extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO
  extensions[pb2.default_string_piece_extension] = '424'
  extensions[pb2.default_cord_extension] = '425'
def SetAllFieldsAndExtensions(message):
  """Sets every field and extension in the message to a unique value.

  Args:
    message: A unittest_pb2.TestAllExtensions message.
  """
  # Plain fields first, then the two extensions via the Extensions map.
  ext = message.Extensions
  message.my_int = 1
  message.my_string = 'foo'
  message.my_float = 1.0
  ext[unittest_pb2.my_extension_int] = 23
  ext[unittest_pb2.my_extension_string] = 'bar'
def ExpectAllFieldsAndExtensionsInOrder(serialized):
  """Ensures that serialized is the serialization we expect for a message
  filled with SetAllFieldsAndExtensions().  (Specifically, ensures that the
  serialization is in canonical, tag-number order).

  Raises:
    ValueError: If serialized does not match the expected byte sequence.
  """
  my_extension_int = unittest_pb2.my_extension_int
  my_extension_string = unittest_pb2.my_extension_string
  expected_strings = []
  message = unittest_pb2.TestFieldOrderings()
  # Serialize one field at a time, in ascending tag-number order, and
  # concatenate the pieces: the result is the canonical serialization of
  # the full message.
  message.my_int = 1  # Field 1.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.Extensions[my_extension_int] = 23  # Field 5.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.my_string = 'foo'  # Field 11.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.Extensions[my_extension_string] = 'bar'  # Field 50.
  expected_strings.append(message.SerializeToString())
  message.Clear()
  message.my_float = 1.0
  expected_strings.append(message.SerializeToString())
  message.Clear()
  expected = ''.join(expected_strings)

  if expected != serialized:
    raise ValueError('Expected %r, found %r' % (expected, serialized))
def ExpectAllFieldsSet(test_case, message):
  """Check all fields for correct values have after Set*Fields() is called.

  Asserts, via test_case, the exact values written by the Set*Fields()
  helpers: optional fields, both elements of every repeated field, and
  the fields carrying explicit defaults.
  """
  # --- Optional fields: presence -----------------------------------
  for name in ('optional_int32', 'optional_int64', 'optional_uint32',
               'optional_uint64', 'optional_sint32', 'optional_sint64',
               'optional_fixed32', 'optional_fixed64', 'optional_sfixed32',
               'optional_sfixed64', 'optional_float', 'optional_double',
               'optional_bool', 'optional_string', 'optional_bytes',
               'optionalgroup', 'optional_nested_message',
               'optional_foreign_message', 'optional_import_message'):
    test_case.assertTrue(message.HasField(name))
  test_case.assertTrue(message.optionalgroup.HasField('a'))
  test_case.assertTrue(message.optional_nested_message.HasField('bb'))
  test_case.assertTrue(message.optional_foreign_message.HasField('c'))
  test_case.assertTrue(message.optional_import_message.HasField('d'))
  for name in ('optional_nested_enum', 'optional_foreign_enum',
               'optional_import_enum', 'optional_string_piece',
               'optional_cord'):
    test_case.assertTrue(message.HasField(name))

  # --- Optional fields: values (1xx series) ------------------------
  for name, expected in (
      ('optional_int32', 101), ('optional_int64', 102),
      ('optional_uint32', 103), ('optional_uint64', 104),
      ('optional_sint32', 105), ('optional_sint64', 106),
      ('optional_fixed32', 107), ('optional_fixed64', 108),
      ('optional_sfixed32', 109), ('optional_sfixed64', 110),
      ('optional_float', 111), ('optional_double', 112),
      ('optional_bool', True), ('optional_string', '115'),
      ('optional_bytes', '116')):
    test_case.assertEqual(expected, getattr(message, name))
  test_case.assertEqual(117, message.optionalgroup.a)
  test_case.assertEqual(118, message.optional_nested_message.bb)
  test_case.assertEqual(119, message.optional_foreign_message.c)
  test_case.assertEqual(120, message.optional_import_message.d)
  test_case.assertEqual(126, message.optional_public_import_message.e)
  test_case.assertEqual(127, message.optional_lazy_message.bb)
  test_case.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                        message.optional_nested_enum)
  test_case.assertEqual(unittest_pb2.FOREIGN_BAZ,
                        message.optional_foreign_enum)
  test_case.assertEqual(unittest_import_pb2.IMPORT_BAZ,
                        message.optional_import_enum)

  # -----------------------------------------------------------------
  # Repeated fields: every one holds exactly two elements.
  for name in ('repeated_int32', 'repeated_int64', 'repeated_uint32',
               'repeated_uint64', 'repeated_sint32', 'repeated_sint64',
               'repeated_fixed32', 'repeated_fixed64', 'repeated_sfixed32',
               'repeated_sfixed64', 'repeated_float', 'repeated_double',
               'repeated_bool', 'repeated_string', 'repeated_bytes',
               'repeatedgroup', 'repeated_nested_message',
               'repeated_foreign_message', 'repeated_import_message',
               'repeated_nested_enum', 'repeated_foreign_enum',
               'repeated_import_enum', 'repeated_string_piece',
               'repeated_cord'):
    test_case.assertEqual(2, len(getattr(message, name)))

  # Element 0 carries the 2xx series, element 1 the 3xx series; the
  # scalar values follow a fixed offset pattern within each series.
  for index, offset, nested_enum, foreign_enum, import_enum in (
      (0, 200, unittest_pb2.TestAllTypes.BAR, unittest_pb2.FOREIGN_BAR,
       unittest_import_pb2.IMPORT_BAR),
      (1, 300, unittest_pb2.TestAllTypes.BAZ, unittest_pb2.FOREIGN_BAZ,
       unittest_import_pb2.IMPORT_BAZ)):
    test_case.assertEqual(offset + 1, message.repeated_int32[index])
    test_case.assertEqual(offset + 2, message.repeated_int64[index])
    test_case.assertEqual(offset + 3, message.repeated_uint32[index])
    test_case.assertEqual(offset + 4, message.repeated_uint64[index])
    test_case.assertEqual(offset + 5, message.repeated_sint32[index])
    test_case.assertEqual(offset + 6, message.repeated_sint64[index])
    test_case.assertEqual(offset + 7, message.repeated_fixed32[index])
    test_case.assertEqual(offset + 8, message.repeated_fixed64[index])
    test_case.assertEqual(offset + 9, message.repeated_sfixed32[index])
    test_case.assertEqual(offset + 10, message.repeated_sfixed64[index])
    test_case.assertEqual(offset + 11, message.repeated_float[index])
    test_case.assertEqual(offset + 12, message.repeated_double[index])
    test_case.assertEqual(index == 0, message.repeated_bool[index])
    test_case.assertEqual(str(offset + 15), message.repeated_string[index])
    test_case.assertEqual(str(offset + 16), message.repeated_bytes[index])
    test_case.assertEqual(offset + 17, message.repeatedgroup[index].a)
    test_case.assertEqual(offset + 18,
                          message.repeated_nested_message[index].bb)
    test_case.assertEqual(offset + 19,
                          message.repeated_foreign_message[index].c)
    test_case.assertEqual(offset + 20,
                          message.repeated_import_message[index].d)
    test_case.assertEqual(offset + 27,
                          message.repeated_lazy_message[index].bb)
    test_case.assertEqual(nested_enum, message.repeated_nested_enum[index])
    test_case.assertEqual(foreign_enum, message.repeated_foreign_enum[index])
    test_case.assertEqual(import_enum, message.repeated_import_enum[index])

  # -----------------------------------------------------------------
  # Fields with defaults: presence and 4xx-series values.
  for name in ('default_int32', 'default_int64', 'default_uint32',
               'default_uint64', 'default_sint32', 'default_sint64',
               'default_fixed32', 'default_fixed64', 'default_sfixed32',
               'default_sfixed64', 'default_float', 'default_double',
               'default_bool', 'default_string', 'default_bytes',
               'default_nested_enum', 'default_foreign_enum',
               'default_import_enum'):
    test_case.assertTrue(message.HasField(name))

  for name, expected in (
      ('default_int32', 401), ('default_int64', 402),
      ('default_uint32', 403), ('default_uint64', 404),
      ('default_sint32', 405), ('default_sint64', 406),
      ('default_fixed32', 407), ('default_fixed64', 408),
      ('default_sfixed32', 409), ('default_sfixed64', 410),
      ('default_float', 411), ('default_double', 412),
      ('default_bool', False), ('default_string', '415'),
      ('default_bytes', '416')):
    test_case.assertEqual(expected, getattr(message, name))
  test_case.assertEqual(unittest_pb2.TestAllTypes.FOO,
                        message.default_nested_enum)
  test_case.assertEqual(unittest_pb2.FOREIGN_FOO,
                        message.default_foreign_enum)
  test_case.assertEqual(unittest_import_pb2.IMPORT_FOO,
                        message.default_import_enum)
def GoldenFile(filename):
  """Finds the given golden file and returns a file object representing it.

  Searches upward from the current directory for the C++ protobuf source
  tree and opens src/google/protobuf/testdata/<filename> in binary mode.

  Args:
    filename: Name of the golden file inside the testdata directory.

  Returns:
    A binary-mode file object for the golden file.

  Raises:
    RuntimeError: If the protobuf source tree cannot be found.
  """
  # Search up the directory tree looking for the C++ protobuf source code.
  path = '.'
  while os.path.exists(path):
    if os.path.exists(os.path.join(path, 'src/google/protobuf')):
      # Found it.  Load the golden file from the testdata directory.
      full_path = os.path.join(path, 'src/google/protobuf/testdata', filename)
      return open(full_path, 'rb')
    parent = os.path.join(path, '..')
    if os.path.realpath(parent) == os.path.realpath(path):
      # Bug fix: stop at the filesystem root.  The original loop never
      # terminated here because "<root>/.." still exists, which made the
      # RuntimeError below unreachable and the search an infinite loop.
      break
    path = parent
  raise RuntimeError(
      'Could not find golden files. This test must be run from within the '
      'protobuf source package so that it can read test data files from the '
      'C++ source tree.')
def SetAllPackedFields(message):
  """Sets every field in the message to a unique value.

  Args:
    message: A unittest_pb2.TestPackedTypes instance.
  """
  # Two elements per packed field: a 6xx value and its 7xx counterpart.
  for name, values in (
      ('packed_int32', [601, 701]),
      ('packed_int64', [602, 702]),
      ('packed_uint32', [603, 703]),
      ('packed_uint64', [604, 704]),
      ('packed_sint32', [605, 705]),
      ('packed_sint64', [606, 706]),
      ('packed_fixed32', [607, 707]),
      ('packed_fixed64', [608, 708]),
      ('packed_sfixed32', [609, 709]),
      ('packed_sfixed64', [610, 710]),
      ('packed_float', [611.0, 711.0]),
      ('packed_double', [612.0, 712.0]),
      ('packed_bool', [True, False]),
      ('packed_enum', [unittest_pb2.FOREIGN_BAR, unittest_pb2.FOREIGN_BAZ])):
    getattr(message, name).extend(values)
def SetAllPackedExtensions(message):
  """Sets every extension in the message to a unique value.

  Args:
    message: A unittest_pb2.TestPackedExtensions instance.
  """
  extensions = message.Extensions
  pb2 = unittest_pb2
  # Same value scheme as SetAllPackedFields, applied to the extensions.
  for extension, values in (
      (pb2.packed_int32_extension, [601, 701]),
      (pb2.packed_int64_extension, [602, 702]),
      (pb2.packed_uint32_extension, [603, 703]),
      (pb2.packed_uint64_extension, [604, 704]),
      (pb2.packed_sint32_extension, [605, 705]),
      (pb2.packed_sint64_extension, [606, 706]),
      (pb2.packed_fixed32_extension, [607, 707]),
      (pb2.packed_fixed64_extension, [608, 708]),
      (pb2.packed_sfixed32_extension, [609, 709]),
      (pb2.packed_sfixed64_extension, [610, 710]),
      (pb2.packed_float_extension, [611.0, 711.0]),
      (pb2.packed_double_extension, [612.0, 712.0]),
      (pb2.packed_bool_extension, [True, False]),
      (pb2.packed_enum_extension, [pb2.FOREIGN_BAR, pb2.FOREIGN_BAZ])):
    extensions[extension].extend(values)
def SetAllUnpackedFields(message):
  """Sets every field in the message to a unique value.

  Args:
    message: A unittest_pb2.TestUnpackedTypes instance.
  """
  # Mirrors SetAllPackedFields on the unpacked counterparts.
  for name, values in (
      ('unpacked_int32', [601, 701]),
      ('unpacked_int64', [602, 702]),
      ('unpacked_uint32', [603, 703]),
      ('unpacked_uint64', [604, 704]),
      ('unpacked_sint32', [605, 705]),
      ('unpacked_sint64', [606, 706]),
      ('unpacked_fixed32', [607, 707]),
      ('unpacked_fixed64', [608, 708]),
      ('unpacked_sfixed32', [609, 709]),
      ('unpacked_sfixed64', [610, 710]),
      ('unpacked_float', [611.0, 711.0]),
      ('unpacked_double', [612.0, 712.0]),
      ('unpacked_bool', [True, False]),
      ('unpacked_enum', [unittest_pb2.FOREIGN_BAR,
                         unittest_pb2.FOREIGN_BAZ])):
    getattr(message, name).extend(values)
|
bsd-3-clause
|
olologin/scikit-learn
|
sklearn/gaussian_process/tests/test_kernels.py
|
24
|
11602
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
# Fixed, seeded random inputs shared by every test in this module.
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))

# Kernel with a white-noise component.  Kept as a named reference because
# several tests must skip it: for WhiteKernel, k(X) != k(X, X) on the
# diagonal.
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
# Representative collection of kernels (isotropic, anisotropic, fixed
# bounds, sums, products, exponentiation) exercised by all tests below.
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
           ConstantKernel(constant_value=10.0),
           2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * RBF(length_scale=0.5), kernel_white,
           2.0 * RBF(length_scale=[0.5, 2.0]),
           2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * Matern(length_scale=0.5, nu=0.5),
           2.0 * Matern(length_scale=1.5, nu=1.5),
           2.0 * Matern(length_scale=2.5, nu=2.5),
           2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
           3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
           4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
           RationalQuadratic(length_scale=0.5, alpha=1.5),
           ExpSineSquared(length_scale=0.5, periodicity=1.5),
           DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
# Also wrap each pairwise-kernel metric in a PairwiseKernel; the chi2
# variants are excluded — presumably because they constrain their input
# domain (TODO confirm against PAIRWISE_KERNEL_FUNCTIONS docs).
for metric in PAIRWISE_KERNEL_FUNCTIONS:
    if metric in ["additive_chi2", "chi2"]:
        continue
    kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
    """Compare the analytic kernel gradient with a numeric approximation."""
    for kernel in kernels:
        K, K_gradient = kernel(X, eval_gradient=True)

        # Gradient tensor: (n_samples, n_samples, n_hyperparameters).
        n_samples = X.shape[0]
        assert_equal(K_gradient.shape[0], n_samples)
        assert_equal(K_gradient.shape[1], n_samples)
        assert_equal(K_gradient.shape[2], kernel.theta.shape[0])

        # Finite-difference approximation of dK/dtheta.
        K_gradient_approx = _approx_fprime(
            kernel.theta,
            lambda theta: kernel.clone_with_theta(theta)(
                X, eval_gradient=False),
            1e-10)

        assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
    """Check that parameter vector theta of kernel is set correctly."""
    for kernel in kernels:
        if isinstance(kernel, KernelOperator) \
           or isinstance(kernel, Exponentiation):  # skip non-basic kernels
            continue
        theta = kernel.theta
        _, K_gradient = kernel(X, eval_gradient=True)

        # Determine kernel parameters that contribute to theta
        init_sign = signature(kernel.__class__.__init__).parameters.values()
        args = [p.name for p in init_sign if p.name != 'self']
        # Bug fix: str.rstrip("_bounds") strips any trailing run of the
        # *characters* '_', 'b', 'o', 'u', 'n', 'd', 's' — not the suffix
        # string — so a parameter like "period_bounds" would become
        # "perio".  Slice the "_bounds" suffix off explicitly instead.
        theta_vars = [s[:-len("_bounds")]
                      for s in args if s.endswith("_bounds")]
        assert_equal(
            set(hyperparameter.name
                for hyperparameter in kernel.hyperparameters),
            set(theta_vars))

        # Check that values returned in theta are consistent with
        # hyperparameter values (being their logarithms)
        for i, hyperparameter in enumerate(kernel.hyperparameters):
            assert_equal(theta[i],
                         np.log(getattr(kernel, hyperparameter.name)))

        # Fixed kernel parameters must be excluded from theta and gradient.
        for i, hyperparameter in enumerate(kernel.hyperparameters):
            # create copy with i-th hyperparameter fixed
            params = kernel.get_params()
            params[hyperparameter.name + "_bounds"] = "fixed"
            kernel_class = kernel.__class__
            new_kernel = kernel_class(**params)
            # Check that theta and K_gradient are identical with the fixed
            # dimension left out
            _, K_gradient_new = new_kernel(X, eval_gradient=True)
            assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
            assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
            if i > 0:
                assert_equal(theta[:i], new_kernel.theta[:i])
                assert_array_equal(K_gradient[..., :i],
                                   K_gradient_new[..., :i])
            if i + 1 < len(kernel.hyperparameters):
                assert_equal(theta[i + 1:], new_kernel.theta[i:])
                assert_array_equal(K_gradient[..., i + 1:],
                                   K_gradient_new[..., i:])

        # Check that values of theta are modified correctly
        for i, hyperparameter in enumerate(kernel.hyperparameters):
            theta[i] = np.log(42)
            kernel.theta = theta
            assert_almost_equal(getattr(kernel, hyperparameter.name), 42)

            setattr(kernel, hyperparameter.name, 43)
            assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
    """Auto-correlation and cross-correlation should be consistent."""
    for kernel in kernels:
        if kernel == kernel_white:
            # Identity is not satisfied on the diagonal for WhiteKernel.
            continue
        auto = kernel(X)
        cross = kernel(X, X)
        assert_almost_equal(auto, cross, 5)
def test_kernel_diag():
    """diag() must agree with the diagonal of the full kernel matrix."""
    for kernel in kernels:
        full_matrix_diag = np.diag(kernel(X))
        assert_almost_equal(full_matrix_diag, kernel.diag(X), 5)
def test_kernel_operator_commutative():
    """Adding kernels and multiplying kernels should be commutative."""
    # Check addition
    left = (RBF(2.0) + 1.0)(X)
    right = (1.0 + RBF(2.0))(X)
    assert_almost_equal(left, right)
    # Check multiplication
    left = (3.0 * RBF(2.0))(X)
    right = (RBF(2.0) * 3.0)(X)
    assert_almost_equal(left, right)
def test_kernel_anisotropic():
    """Anisotropic kernel should be consistent with isotropic kernels."""
    kernel = 3.0 * RBF([0.5, 2.0])
    K = kernel(X)

    # Rescaling one feature axis and using the matching isotropic length
    # scale must reproduce the anisotropic result.
    X_axis0_scaled = np.array(X)
    X_axis0_scaled[:, 0] *= 4
    assert_almost_equal(K, 3.0 * RBF(2.0)(X_axis0_scaled))

    X_axis1_scaled = np.array(X)
    X_axis1_scaled[:, 1] /= 4
    assert_almost_equal(K, 3.0 * RBF(0.5)(X_axis1_scaled))

    # Check getting and setting via theta
    kernel.theta = kernel.theta + np.log(2)
    assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
    assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
    """Test stationarity of kernels."""
    for kernel in kernels:
        if not kernel.is_stationary():
            continue
        # A constant shift of the second argument must give a constant
        # kernel matrix, so every entry equals the first one.
        shifted = kernel(X, X + 1)
        assert_almost_equal(shifted[0, 0], np.diag(shifted))
def test_kernel_clone():
    """ Test that sklearn's clone works correctly on kernels. """
    for kernel in kernels:
        kernel_cloned = clone(kernel)

        # Clone must compare equal to, but not be the same object as,
        # the original.
        assert_equal(kernel, kernel_cloned)
        assert_not_equal(id(kernel), id(kernel_cloned))
        for attr in kernel.__dict__.keys():
            attr_value = getattr(kernel, attr)
            attr_value_cloned = getattr(kernel_cloned, attr)
            if attr.startswith("hyperparameter_"):
                # Hyperparameter descriptors: compare field by field.
                assert_equal(attr_value.name, attr_value_cloned.name)
                assert_equal(attr_value.value_type,
                             attr_value_cloned.value_type)
                assert_array_equal(attr_value.bounds,
                                   attr_value_cloned.bounds)
                assert_equal(attr_value.n_elements,
                             attr_value_cloned.n_elements)
            elif np.iterable(attr_value):
                # Iterable attributes: compare element-wise, one level of
                # nesting deep (e.g. lists of bounds pairs).
                for i in range(len(attr_value)):
                    if np.iterable(attr_value[i]):
                        assert_array_equal(attr_value[i],
                                           attr_value_cloned[i])
                    else:
                        assert_equal(attr_value[i], attr_value_cloned[i])
            else:
                assert_equal(attr_value, attr_value_cloned)
            if not isinstance(attr_value, Hashable):
                # modifiable attributes must not be identical
                assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
    """Test consistency of Matern kernel for special values of nu."""
    # the diagonal elements of a matern kernel are 1
    K_nu_1_5 = Matern(nu=1.5, length_scale=1.0)(X)
    assert_array_almost_equal(np.diag(K_nu_1_5), np.ones(X.shape[0]))

    # matern kernel for coef0==0.5 is equal to absolute exponential kernel
    K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
    assert_array_almost_equal(Matern(nu=0.5, length_scale=1.0)(X), K_absexp)

    # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
    # result in nearly identical results as the general case for coef0 in
    # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
    tiny = 1e-10
    for nu in (0.5, 1.5, 2.5):
        K_special = Matern(nu=nu, length_scale=1.0)(X)
        K_general = Matern(nu=nu + tiny, length_scale=1.0)(X)
        assert_array_almost_equal(K_special, K_general)
def test_kernel_versus_pairwise():
    """Check that GP kernels can also be used as pairwise kernels."""
    for kernel in kernels:
        # Auto-kernel; skipped for WhiteKernel, where k(X) != k(X, X)
        # which pairwise_kernels assumes.
        if kernel != kernel_white:
            assert_array_almost_equal(kernel(X),
                                      pairwise_kernels(X, metric=kernel))

        # Cross-kernel
        assert_array_almost_equal(kernel(X, Y),
                                  pairwise_kernels(X, Y, metric=kernel))
def _is_fixed(hyperparameter):
    """Return True if the hyperparameter's bounds mark it as "fixed".

    Bug fix: the original used `hyperparameter.bounds is "fixed"`, an
    identity comparison with a string literal, which only works through
    CPython string interning and is not guaranteed.  The isinstance guard
    keeps the check safe when bounds is a numeric array.
    """
    return isinstance(hyperparameter.bounds, str) \
        and hyperparameter.bounds == "fixed"


def test_set_get_params():
    """Check that set_params()/get_params() is consistent with kernel.theta."""
    for kernel in kernels:
        # Test get_params(): exp(theta) must equal the parameter values.
        index = 0
        params = kernel.get_params()
        for hyperparameter in kernel.hyperparameters:
            if _is_fixed(hyperparameter):
                continue
            size = hyperparameter.n_elements
            if size > 1:  # anisotropic kernels
                assert_almost_equal(np.exp(kernel.theta[index:index + size]),
                                    params[hyperparameter.name])
                index += size
            else:
                assert_almost_equal(np.exp(kernel.theta[index]),
                                    params[hyperparameter.name])
                index += 1

        # Test set_params(): setting a parameter must update theta.
        index = 0
        value = 10  # arbitrary value
        for hyperparameter in kernel.hyperparameters:
            if _is_fixed(hyperparameter):
                continue
            size = hyperparameter.n_elements
            if size > 1:  # anisotropic kernels
                kernel.set_params(**{hyperparameter.name: [value] * size})
                assert_almost_equal(np.exp(kernel.theta[index:index + size]),
                                    [value] * size)
                index += size
            else:
                kernel.set_params(**{hyperparameter.name: value})
                assert_almost_equal(np.exp(kernel.theta[index]), value)
                index += 1
|
bsd-3-clause
|
meln1k/marathon
|
tests/system/test_marathon_universe.py
|
2
|
3834
|
"""Marathon acceptance tests for DC/OS."""
import common
import pytest
import retrying
import shakedown
from datetime import timedelta
from dcos import packagemanager, cosmos
# Universe package under test and the default service name its install
# creates (Marathon-on-Marathon).
PACKAGE_NAME = 'marathon'
SERVICE_NAME = 'marathon-user'
# URL of the root Marathon service on the cluster under test.
DCOS_SERVICE_URL = shakedown.dcos_service_url(PACKAGE_NAME)
WAIT_TIME_IN_SECS = 300
def teardown_function(function):
    # Per-test cleanup: remove the custom-named MoM a test may have left.
    uninstall('test-marathon')


def setup_module(module):
    # Module setup: start from a clean slate, then log cluster details.
    uninstall(SERVICE_NAME)
    common.cluster_info()


def teardown_module(module):
    # Module cleanup: remove the default MoM instance.
    uninstall(SERVICE_NAME)
@pytest.mark.skipif("shakedown.ee_version() == 'strict'", reason="MoM doesn't work on a strict cluster")
def test_install_marathon():
    """Install the Marathon package for DC/OS.

    Exercises the full lifecycle: install, wait until the service has
    registered healthy, uninstall, then reinstall.
    """

    # Install (retried because the cluster may not accept it immediately)
    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def install_marathon():
        shakedown.install_package_and_wait(PACKAGE_NAME)

    install_marathon()
    assert shakedown.package_installed(PACKAGE_NAME), 'Package failed to install'

    # 5000ms = 5 seconds, 5 seconds * 60 attempts = 300 seconds = WAIT_TIME_IN_SECS
    @retrying.retry(wait_fixed=5000, stop_max_attempt_number=60, retry_on_exception=common.ignore_exception)
    def assert_service_registration(package, service):
        found = shakedown.get_service(package) is not None
        assert found and shakedown.service_healthy(service), f"Service {package} did not register with DCOS"  # NOQA E999

    assert_service_registration(PACKAGE_NAME, SERVICE_NAME)
    shakedown.deployment_wait()

    # Uninstall
    uninstall('marathon-user')
    shakedown.deployment_wait()

    # Reinstall
    shakedown.install_package_and_wait(PACKAGE_NAME)
    assert shakedown.package_installed(PACKAGE_NAME), 'Package failed to reinstall'
@pytest.mark.skipif("shakedown.ee_version() == 'strict'", reason="MoM doesn't work on a strict cluster")
def test_custom_service_name():
    """ Install MoM with a custom service name.
    """
    cosmos_pm = packagemanager.PackageManager(cosmos.get_cosmos_url())
    # NOTE(review): return value unused — presumably called for a side
    # effect (validating/resolving the package via Cosmos); confirm.
    cosmos_pm.get_package_version('marathon', None)
    # Override the default 'marathon-user' service name.
    options = {
        'service': {'name': "test-marathon"}
    }
    shakedown.install_package('marathon', options_json=options)
    shakedown.deployment_wait()

    assert shakedown.wait_for_service_endpoint('test-marathon')
@pytest.fixture(
    params=[
        pytest.mark.skipif("shakedown.required_private_agents(4) or shakedown.ee_version() == 'strict'")('cassandra')
    ])
def package(request):
    # Parametrized fixture yielding a universe package name; after the
    # test it uninstalls the package and wipes its persistent data.
    package_name = request.param
    yield package_name
    try:
        shakedown.uninstall_package_and_wait(package_name)
        shakedown.delete_persistent_data(
            '{}-role'.format(package_name),
            'dcos-service-{}'.format(package_name))
    except Exception as e:
        # cleanup does NOT fail the test
        print(e)
def test_install_universe_package(package):
    """ Marathon is responsible for installing packages from the universe.
    This test confirms that several packages are installed into a healthy
    state.

    Args:
        package: Fixture-provided universe package name to install.
    """
    shakedown.install_package_and_wait(package)
    assert shakedown.package_installed(package), 'Package failed to install'

    shakedown.deployment_wait(timeout=timedelta(minutes=5).total_seconds())

    assert shakedown.service_healthy(package)
def uninstall(service, package=PACKAGE_NAME):
    """Best-effort uninstall of `service` (an instance of `package`).

    Looks up the service's task, uninstalls the app through Cosmos, waits
    for the deployment to finish and for the service endpoint to be
    removed, then deletes the service's universe ZK node.  All failures
    are swallowed: this runs in teardown paths and must never fail a test.
    """
    try:
        task = shakedown.get_service_task(package, service)
        if task is not None:
            cosmos_pm = packagemanager.PackageManager(cosmos.get_cosmos_url())
            cosmos_pm.uninstall_app(package, True, service)
            shakedown.deployment_wait()
            # Bug fix: wait for removal of the endpoint of the service
            # actually being uninstalled — the original hard-coded
            # 'test-marathon' here, so uninstalls of any other service
            # never really waited for endpoint removal.
            assert shakedown.wait_for_service_endpoint_removal(service)
            shakedown.delete_zk_node('/universe/{}'.format(service))
    except Exception:
        # Deliberate best-effort: see docstring.
        pass
|
apache-2.0
|
jhawkesworth/ansible
|
lib/ansible/modules/network/sros/sros_config.py
|
24
|
11717
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block (schema version 1.1):
# a certified, preview-status module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = """
---
module: sros_config
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Manage Nokia SR OS device configuration
description:
- Nokia SR OS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with SR OS configuration sections in
a deterministic way.
extends_documentation_fragment: sros
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser. The I(lines) argument only supports current
context lines. See EXAMPLES
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line.
If match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
type: bool
version_added: "2.2"
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
version_added: "2.2"
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
version_added: "2.2"
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
type: bool
default: 'no'
aliases: ['detail']
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: 'no'
version_added: "2.2"
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
---
- name: enable rollback location
sros_config:
lines: configure system rollback rollback-location "cf3:/ansible"
provider: "{{ cli }}"
- name: set system name to {{ inventory_hostname }} using one line
sros_config:
lines:
- configure system name "{{ inventory_hostname }}"
provider: "{{ cli }}"
- name: set system name to {{ inventory_hostname }} using parents
sros_config:
lines:
- 'name "{{ inventory_hostname }}"'
parents:
- configure
- system
provider: "{{ cli }}"
backup: yes
- name: load config from file
sros_config:
src: "{{ inventory_hostname }}.cfg"
provider: "{{ cli }}"
save: yes
- name: invalid use of lines
sros_config:
lines:
- service
- vpls 1000 customer foo 1 create
- description "invalid lines example"
provider: "{{ cli }}"
- name: valid use of lines
sros_config:
lines:
- description "invalid lines example"
parents:
- service
- vpls 1000 customer foo 1 create
provider: "{{ cli }}"
- name: configurable backup path
sros_config:
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['config system name "sros01"']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['config system name "sros01"']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/sros_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.sros.sros import sros_argument_spec, check_args
from ansible.module_utils.network.sros.sros import load_config, run_commands, get_config
def get_active_config(module):
    """Return the base configuration to compare the candidate against.

    Prefers the user-supplied C(config) text; otherwise fetches the
    running configuration from the device, adding the C(detail) flag
    when C(defaults) is enabled.
    """
    supplied = module.params['config']
    if supplied:
        return supplied
    flags = ['detail'] if module.params['defaults'] else []
    return get_config(module, flags)
def get_candidate(module):
    """Build the candidate configuration from C(src) or C(lines).

    C(src) takes precedence; otherwise C(lines) are added beneath the
    optional C(parents) hierarchy. Returns an (possibly empty)
    NetworkConfig object.
    """
    candidate = NetworkConfig(indent=4)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        hierarchy = module.params['parents'] or list()
        candidate.add(module.params['lines'], parents=hierarchy)
    return candidate
def run(module, result):
    """Diff the candidate config against the device and apply changes.

    Populates result['commands']/result['updates'] with the command
    list and sets result['changed'] when a change is (or would be)
    made. In check mode the commands are computed but not pushed.
    """
    candidate = get_candidate(module)
    if module.params['match'] == 'none':
        # No comparison requested: push the whole candidate as-is.
        configobjs = candidate.items
    else:
        running = NetworkConfig(indent=4, contents=get_active_config(module))
        configobjs = candidate.difference(running)
    if not configobjs:
        return
    commands = dumps(configobjs, 'commands').split('\n')
    result['commands'] = commands
    result['updates'] = commands
    # Send the configuration commands to the device and merge them
    # with the current running config (skipped in check mode).
    if not module.check_mode:
        load_config(module, commands)
    result['changed'] = True
def main():
    """Main entry point for module execution."""
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path'),
    )
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        match=dict(default='line', choices=['line', 'none']),
        config=dict(),
        defaults=dict(type='bool', default=False, aliases=['detail']),
        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec),
        save=dict(type='bool', default=False),
    )
    argument_spec.update(sros_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[('lines', 'src'), ('parents', 'src')],
        supports_check_mode=True,
    )

    result = dict(changed=False, warnings=list())
    warnings = list()
    check_args(module, warnings)
    if warnings:
        result['warnings'] = warnings

    # Snapshot the running config before any change when backup is requested.
    if module.params['backup']:
        result['__backup__'] = get_config(module)

    run(module, result)

    # Persist the running config to startup config when asked to; this is
    # a device-side action, so it is suppressed in check mode.
    if module.params['save']:
        if not module.check_mode:
            run_commands(module, ['admin save'])
        result['changed'] = True

    module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
theheros/kbengine
|
kbe/res/scripts/common/Lib/encodings/mac_arabic.py
|
37
|
37165
|
""" Python Character Mapping Codec generated from 'VENDORS/APPLE/ARABIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-arabic codec built on the C-level charmap helpers."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding needs no state between calls."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding needs no state between calls."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer; inherits encode() from Codec unchanged."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader; inherits decode() from Codec unchanged."""
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the encodings registry."""
    # The codec is stateless, so a single shared instance serves both
    # directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-arabic',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00a0, # NO-BREAK SPACE, right-left
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x06ba, # ARABIC LETTER NOON GHUNNA
0x008c: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x2026, # HORIZONTAL ELLIPSIS, right-left
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f7, # DIVISION SIGN, right-left
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x0020, # SPACE, right-left
0x00a1: 0x0021, # EXCLAMATION MARK, right-left
0x00a2: 0x0022, # QUOTATION MARK, right-left
0x00a3: 0x0023, # NUMBER SIGN, right-left
0x00a4: 0x0024, # DOLLAR SIGN, right-left
0x00a5: 0x066a, # ARABIC PERCENT SIGN
0x00a6: 0x0026, # AMPERSAND, right-left
0x00a7: 0x0027, # APOSTROPHE, right-left
0x00a8: 0x0028, # LEFT PARENTHESIS, right-left
0x00a9: 0x0029, # RIGHT PARENTHESIS, right-left
0x00aa: 0x002a, # ASTERISK, right-left
0x00ab: 0x002b, # PLUS SIGN, right-left
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0x002d, # HYPHEN-MINUS, right-left
0x00ae: 0x002e, # FULL STOP, right-left
0x00af: 0x002f, # SOLIDUS, right-left
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x00ba: 0x003a, # COLON, right-left
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0x003c, # LESS-THAN SIGN, right-left
0x00bd: 0x003d, # EQUALS SIGN, right-left
0x00be: 0x003e, # GREATER-THAN SIGN, right-left
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x274a, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
0x00c1: 0x0621, # ARABIC LETTER HAMZA
0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x00c7: 0x0627, # ARABIC LETTER ALEF
0x00c8: 0x0628, # ARABIC LETTER BEH
0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
0x00ca: 0x062a, # ARABIC LETTER TEH
0x00cb: 0x062b, # ARABIC LETTER THEH
0x00cc: 0x062c, # ARABIC LETTER JEEM
0x00cd: 0x062d, # ARABIC LETTER HAH
0x00ce: 0x062e, # ARABIC LETTER KHAH
0x00cf: 0x062f, # ARABIC LETTER DAL
0x00d0: 0x0630, # ARABIC LETTER THAL
0x00d1: 0x0631, # ARABIC LETTER REH
0x00d2: 0x0632, # ARABIC LETTER ZAIN
0x00d3: 0x0633, # ARABIC LETTER SEEN
0x00d4: 0x0634, # ARABIC LETTER SHEEN
0x00d5: 0x0635, # ARABIC LETTER SAD
0x00d6: 0x0636, # ARABIC LETTER DAD
0x00d7: 0x0637, # ARABIC LETTER TAH
0x00d8: 0x0638, # ARABIC LETTER ZAH
0x00d9: 0x0639, # ARABIC LETTER AIN
0x00da: 0x063a, # ARABIC LETTER GHAIN
0x00db: 0x005b, # LEFT SQUARE BRACKET, right-left
0x00dc: 0x005c, # REVERSE SOLIDUS, right-left
0x00dd: 0x005d, # RIGHT SQUARE BRACKET, right-left
0x00de: 0x005e, # CIRCUMFLEX ACCENT, right-left
0x00df: 0x005f, # LOW LINE, right-left
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0x0641, # ARABIC LETTER FEH
0x00e2: 0x0642, # ARABIC LETTER QAF
0x00e3: 0x0643, # ARABIC LETTER KAF
0x00e4: 0x0644, # ARABIC LETTER LAM
0x00e5: 0x0645, # ARABIC LETTER MEEM
0x00e6: 0x0646, # ARABIC LETTER NOON
0x00e7: 0x0647, # ARABIC LETTER HEH
0x00e8: 0x0648, # ARABIC LETTER WAW
0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA
0x00ea: 0x064a, # ARABIC LETTER YEH
0x00eb: 0x064b, # ARABIC FATHATAN
0x00ec: 0x064c, # ARABIC DAMMATAN
0x00ed: 0x064d, # ARABIC KASRATAN
0x00ee: 0x064e, # ARABIC FATHA
0x00ef: 0x064f, # ARABIC DAMMA
0x00f0: 0x0650, # ARABIC KASRA
0x00f1: 0x0651, # ARABIC SHADDA
0x00f2: 0x0652, # ARABIC SUKUN
0x00f3: 0x067e, # ARABIC LETTER PEH
0x00f4: 0x0679, # ARABIC LETTER TTEH
0x00f5: 0x0686, # ARABIC LETTER TCHEH
0x00f6: 0x06d5, # ARABIC LETTER AE
0x00f7: 0x06a4, # ARABIC LETTER VEH
0x00f8: 0x06af, # ARABIC LETTER GAF
0x00f9: 0x0688, # ARABIC LETTER DDAL
0x00fa: 0x0691, # ARABIC LETTER RREH
0x00fb: 0x007b, # LEFT CURLY BRACKET, right-left
0x00fc: 0x007c, # VERTICAL LINE, right-left
0x00fd: 0x007d, # RIGHT CURLY BRACKET, right-left
0x00fe: 0x0698, # ARABIC LETTER JEH
0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> CONTROL CHARACTER
'\x01' # 0x0001 -> CONTROL CHARACTER
'\x02' # 0x0002 -> CONTROL CHARACTER
'\x03' # 0x0003 -> CONTROL CHARACTER
'\x04' # 0x0004 -> CONTROL CHARACTER
'\x05' # 0x0005 -> CONTROL CHARACTER
'\x06' # 0x0006 -> CONTROL CHARACTER
'\x07' # 0x0007 -> CONTROL CHARACTER
'\x08' # 0x0008 -> CONTROL CHARACTER
'\t' # 0x0009 -> CONTROL CHARACTER
'\n' # 0x000a -> CONTROL CHARACTER
'\x0b' # 0x000b -> CONTROL CHARACTER
'\x0c' # 0x000c -> CONTROL CHARACTER
'\r' # 0x000d -> CONTROL CHARACTER
'\x0e' # 0x000e -> CONTROL CHARACTER
'\x0f' # 0x000f -> CONTROL CHARACTER
'\x10' # 0x0010 -> CONTROL CHARACTER
'\x11' # 0x0011 -> CONTROL CHARACTER
'\x12' # 0x0012 -> CONTROL CHARACTER
'\x13' # 0x0013 -> CONTROL CHARACTER
'\x14' # 0x0014 -> CONTROL CHARACTER
'\x15' # 0x0015 -> CONTROL CHARACTER
'\x16' # 0x0016 -> CONTROL CHARACTER
'\x17' # 0x0017 -> CONTROL CHARACTER
'\x18' # 0x0018 -> CONTROL CHARACTER
'\x19' # 0x0019 -> CONTROL CHARACTER
'\x1a' # 0x001a -> CONTROL CHARACTER
'\x1b' # 0x001b -> CONTROL CHARACTER
'\x1c' # 0x001c -> CONTROL CHARACTER
'\x1d' # 0x001d -> CONTROL CHARACTER
'\x1e' # 0x001e -> CONTROL CHARACTER
'\x1f' # 0x001f -> CONTROL CHARACTER
' ' # 0x0020 -> SPACE, left-right
'!' # 0x0021 -> EXCLAMATION MARK, left-right
'"' # 0x0022 -> QUOTATION MARK, left-right
'#' # 0x0023 -> NUMBER SIGN, left-right
'$' # 0x0024 -> DOLLAR SIGN, left-right
'%' # 0x0025 -> PERCENT SIGN, left-right
'&' # 0x0026 -> AMPERSAND, left-right
"'" # 0x0027 -> APOSTROPHE, left-right
'(' # 0x0028 -> LEFT PARENTHESIS, left-right
')' # 0x0029 -> RIGHT PARENTHESIS, left-right
'*' # 0x002a -> ASTERISK, left-right
'+' # 0x002b -> PLUS SIGN, left-right
',' # 0x002c -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
'-' # 0x002d -> HYPHEN-MINUS, left-right
'.' # 0x002e -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
'/' # 0x002f -> SOLIDUS, left-right
'0' # 0x0030 -> DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
'2' # 0x0032 -> DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
'3' # 0x0033 -> DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
':' # 0x003a -> COLON, left-right
';' # 0x003b -> SEMICOLON, left-right
'<' # 0x003c -> LESS-THAN SIGN, left-right
'=' # 0x003d -> EQUALS SIGN, left-right
'>' # 0x003e -> GREATER-THAN SIGN, left-right
'?' # 0x003f -> QUESTION MARK, left-right
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET, left-right
'\\' # 0x005c -> REVERSE SOLIDUS, left-right
']' # 0x005d -> RIGHT SQUARE BRACKET, left-right
'^' # 0x005e -> CIRCUMFLEX ACCENT, left-right
'_' # 0x005f -> LOW LINE, left-right
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET, left-right
'|' # 0x007c -> VERTICAL LINE, left-right
'}' # 0x007d -> RIGHT CURLY BRACKET, left-right
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> CONTROL CHARACTER
'\xc4' # 0x0080 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xa0' # 0x0081 -> NO-BREAK SPACE, right-left
'\xc7' # 0x0082 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x0083 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x0084 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x0085 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x0086 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x0087 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x0088 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x0089 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x008a -> LATIN SMALL LETTER A WITH DIAERESIS
'\u06ba' # 0x008b -> ARABIC LETTER NOON GHUNNA
'\xab' # 0x008c -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xe7' # 0x008d -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x008e -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x008f -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x0090 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0091 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x0092 -> LATIN SMALL LETTER I WITH ACUTE
'\u2026' # 0x0093 -> HORIZONTAL ELLIPSIS, right-left
'\xee' # 0x0094 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x0095 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x0096 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x0097 -> LATIN SMALL LETTER O WITH ACUTE
'\xbb' # 0x0098 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xf4' # 0x0099 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x009a -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0x009b -> DIVISION SIGN, right-left
'\xfa' # 0x009c -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x009d -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x009e -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x009f -> LATIN SMALL LETTER U WITH DIAERESIS
' ' # 0x00a0 -> SPACE, right-left
'!' # 0x00a1 -> EXCLAMATION MARK, right-left
'"' # 0x00a2 -> QUOTATION MARK, right-left
'#' # 0x00a3 -> NUMBER SIGN, right-left
'$' # 0x00a4 -> DOLLAR SIGN, right-left
'\u066a' # 0x00a5 -> ARABIC PERCENT SIGN
'&' # 0x00a6 -> AMPERSAND, right-left
"'" # 0x00a7 -> APOSTROPHE, right-left
'(' # 0x00a8 -> LEFT PARENTHESIS, right-left
')' # 0x00a9 -> RIGHT PARENTHESIS, right-left
'*' # 0x00aa -> ASTERISK, right-left
'+' # 0x00ab -> PLUS SIGN, right-left
'\u060c' # 0x00ac -> ARABIC COMMA
'-' # 0x00ad -> HYPHEN-MINUS, right-left
'.' # 0x00ae -> FULL STOP, right-left
'/' # 0x00af -> SOLIDUS, right-left
'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO, right-left (need override)
'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE, right-left (need override)
'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO, right-left (need override)
'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE, right-left (need override)
'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR, right-left (need override)
'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE, right-left (need override)
'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX, right-left (need override)
'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN, right-left (need override)
'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT, right-left (need override)
'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE, right-left (need override)
':' # 0x00ba -> COLON, right-left
'\u061b' # 0x00bb -> ARABIC SEMICOLON
'<' # 0x00bc -> LESS-THAN SIGN, right-left
'=' # 0x00bd -> EQUALS SIGN, right-left
'>' # 0x00be -> GREATER-THAN SIGN, right-left
'\u061f' # 0x00bf -> ARABIC QUESTION MARK
'\u274a' # 0x00c0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
'\u0621' # 0x00c1 -> ARABIC LETTER HAMZA
'\u0622' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0x00c5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0x00c7 -> ARABIC LETTER ALEF
'\u0628' # 0x00c8 -> ARABIC LETTER BEH
'\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0x00ca -> ARABIC LETTER TEH
'\u062b' # 0x00cb -> ARABIC LETTER THEH
'\u062c' # 0x00cc -> ARABIC LETTER JEEM
'\u062d' # 0x00cd -> ARABIC LETTER HAH
'\u062e' # 0x00ce -> ARABIC LETTER KHAH
'\u062f' # 0x00cf -> ARABIC LETTER DAL
'\u0630' # 0x00d0 -> ARABIC LETTER THAL
'\u0631' # 0x00d1 -> ARABIC LETTER REH
'\u0632' # 0x00d2 -> ARABIC LETTER ZAIN
'\u0633' # 0x00d3 -> ARABIC LETTER SEEN
'\u0634' # 0x00d4 -> ARABIC LETTER SHEEN
'\u0635' # 0x00d5 -> ARABIC LETTER SAD
'\u0636' # 0x00d6 -> ARABIC LETTER DAD
'\u0637' # 0x00d7 -> ARABIC LETTER TAH
'\u0638' # 0x00d8 -> ARABIC LETTER ZAH
'\u0639' # 0x00d9 -> ARABIC LETTER AIN
'\u063a' # 0x00da -> ARABIC LETTER GHAIN
'[' # 0x00db -> LEFT SQUARE BRACKET, right-left
'\\' # 0x00dc -> REVERSE SOLIDUS, right-left
']' # 0x00dd -> RIGHT SQUARE BRACKET, right-left
'^' # 0x00de -> CIRCUMFLEX ACCENT, right-left
'_' # 0x00df -> LOW LINE, right-left
'\u0640' # 0x00e0 -> ARABIC TATWEEL
'\u0641' # 0x00e1 -> ARABIC LETTER FEH
'\u0642' # 0x00e2 -> ARABIC LETTER QAF
'\u0643' # 0x00e3 -> ARABIC LETTER KAF
'\u0644' # 0x00e4 -> ARABIC LETTER LAM
'\u0645' # 0x00e5 -> ARABIC LETTER MEEM
'\u0646' # 0x00e6 -> ARABIC LETTER NOON
'\u0647' # 0x00e7 -> ARABIC LETTER HEH
'\u0648' # 0x00e8 -> ARABIC LETTER WAW
'\u0649' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0x00ea -> ARABIC LETTER YEH
'\u064b' # 0x00eb -> ARABIC FATHATAN
'\u064c' # 0x00ec -> ARABIC DAMMATAN
'\u064d' # 0x00ed -> ARABIC KASRATAN
'\u064e' # 0x00ee -> ARABIC FATHA
'\u064f' # 0x00ef -> ARABIC DAMMA
'\u0650' # 0x00f0 -> ARABIC KASRA
'\u0651' # 0x00f1 -> ARABIC SHADDA
'\u0652' # 0x00f2 -> ARABIC SUKUN
'\u067e' # 0x00f3 -> ARABIC LETTER PEH
'\u0679' # 0x00f4 -> ARABIC LETTER TTEH
'\u0686' # 0x00f5 -> ARABIC LETTER TCHEH
'\u06d5' # 0x00f6 -> ARABIC LETTER AE
'\u06a4' # 0x00f7 -> ARABIC LETTER VEH
'\u06af' # 0x00f8 -> ARABIC LETTER GAF
'\u0688' # 0x00f9 -> ARABIC LETTER DDAL
'\u0691' # 0x00fa -> ARABIC LETTER RREH
'{' # 0x00fb -> LEFT CURLY BRACKET, right-left
'|' # 0x00fc -> VERTICAL LINE, right-left
'}' # 0x00fd -> RIGHT CURLY BRACKET, right-left
'\u0698' # 0x00fe -> ARABIC LETTER JEH
'\u06d2' # 0x00ff -> ARABIC LETTER YEH BARREE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # CONTROL CHARACTER
0x0001: 0x0001, # CONTROL CHARACTER
0x0002: 0x0002, # CONTROL CHARACTER
0x0003: 0x0003, # CONTROL CHARACTER
0x0004: 0x0004, # CONTROL CHARACTER
0x0005: 0x0005, # CONTROL CHARACTER
0x0006: 0x0006, # CONTROL CHARACTER
0x0007: 0x0007, # CONTROL CHARACTER
0x0008: 0x0008, # CONTROL CHARACTER
0x0009: 0x0009, # CONTROL CHARACTER
0x000a: 0x000a, # CONTROL CHARACTER
0x000b: 0x000b, # CONTROL CHARACTER
0x000c: 0x000c, # CONTROL CHARACTER
0x000d: 0x000d, # CONTROL CHARACTER
0x000e: 0x000e, # CONTROL CHARACTER
0x000f: 0x000f, # CONTROL CHARACTER
0x0010: 0x0010, # CONTROL CHARACTER
0x0011: 0x0011, # CONTROL CHARACTER
0x0012: 0x0012, # CONTROL CHARACTER
0x0013: 0x0013, # CONTROL CHARACTER
0x0014: 0x0014, # CONTROL CHARACTER
0x0015: 0x0015, # CONTROL CHARACTER
0x0016: 0x0016, # CONTROL CHARACTER
0x0017: 0x0017, # CONTROL CHARACTER
0x0018: 0x0018, # CONTROL CHARACTER
0x0019: 0x0019, # CONTROL CHARACTER
0x001a: 0x001a, # CONTROL CHARACTER
0x001b: 0x001b, # CONTROL CHARACTER
0x001c: 0x001c, # CONTROL CHARACTER
0x001d: 0x001d, # CONTROL CHARACTER
0x001e: 0x001e, # CONTROL CHARACTER
0x001f: 0x001f, # CONTROL CHARACTER
0x0020: 0x0020, # SPACE, left-right
0x0020: 0x00a0, # SPACE, right-left
0x0021: 0x0021, # EXCLAMATION MARK, left-right
0x0021: 0x00a1, # EXCLAMATION MARK, right-left
0x0022: 0x0022, # QUOTATION MARK, left-right
0x0022: 0x00a2, # QUOTATION MARK, right-left
0x0023: 0x0023, # NUMBER SIGN, left-right
0x0023: 0x00a3, # NUMBER SIGN, right-left
0x0024: 0x0024, # DOLLAR SIGN, left-right
0x0024: 0x00a4, # DOLLAR SIGN, right-left
0x0025: 0x0025, # PERCENT SIGN, left-right
0x0026: 0x0026, # AMPERSAND, left-right
0x0026: 0x00a6, # AMPERSAND, right-left
0x0027: 0x0027, # APOSTROPHE, left-right
0x0027: 0x00a7, # APOSTROPHE, right-left
0x0028: 0x0028, # LEFT PARENTHESIS, left-right
0x0028: 0x00a8, # LEFT PARENTHESIS, right-left
0x0029: 0x0029, # RIGHT PARENTHESIS, left-right
0x0029: 0x00a9, # RIGHT PARENTHESIS, right-left
0x002a: 0x002a, # ASTERISK, left-right
0x002a: 0x00aa, # ASTERISK, right-left
0x002b: 0x002b, # PLUS SIGN, left-right
0x002b: 0x00ab, # PLUS SIGN, right-left
0x002c: 0x002c, # COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
0x002d: 0x002d, # HYPHEN-MINUS, left-right
0x002d: 0x00ad, # HYPHEN-MINUS, right-left
0x002e: 0x002e, # FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
0x002e: 0x00ae, # FULL STOP, right-left
0x002f: 0x002f, # SOLIDUS, left-right
0x002f: 0x00af, # SOLIDUS, right-left
0x0030: 0x0030, # DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
0x0032: 0x0032, # DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
0x0033: 0x0033, # DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
0x003a: 0x003a, # COLON, left-right
0x003a: 0x00ba, # COLON, right-left
0x003b: 0x003b, # SEMICOLON, left-right
0x003c: 0x003c, # LESS-THAN SIGN, left-right
0x003c: 0x00bc, # LESS-THAN SIGN, right-left
0x003d: 0x003d, # EQUALS SIGN, left-right
0x003d: 0x00bd, # EQUALS SIGN, right-left
0x003e: 0x003e, # GREATER-THAN SIGN, left-right
0x003e: 0x00be, # GREATER-THAN SIGN, right-left
0x003f: 0x003f, # QUESTION MARK, left-right
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET, left-right
0x005b: 0x00db, # LEFT SQUARE BRACKET, right-left
0x005c: 0x005c, # REVERSE SOLIDUS, left-right
0x005c: 0x00dc, # REVERSE SOLIDUS, right-left
0x005d: 0x005d, # RIGHT SQUARE BRACKET, left-right
0x005d: 0x00dd, # RIGHT SQUARE BRACKET, right-left
0x005e: 0x005e, # CIRCUMFLEX ACCENT, left-right
0x005e: 0x00de, # CIRCUMFLEX ACCENT, right-left
0x005f: 0x005f, # LOW LINE, left-right
0x005f: 0x00df, # LOW LINE, right-left
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET, left-right
0x007b: 0x00fb, # LEFT CURLY BRACKET, right-left
0x007c: 0x007c, # VERTICAL LINE, left-right
0x007c: 0x00fc, # VERTICAL LINE, right-left
0x007d: 0x007d, # RIGHT CURLY BRACKET, left-right
0x007d: 0x00fd, # RIGHT CURLY BRACKET, right-left
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # CONTROL CHARACTER
0x00a0: 0x0081, # NO-BREAK SPACE, right-left
0x00ab: 0x008c, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00bb: 0x0098, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00c4: 0x0080, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0082, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0083, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x0084, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0085, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x0086, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00e0: 0x0088, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x0087, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0089, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x008a, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x008d, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008f, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x008e, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0090, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0091, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x0092, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x0094, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x0095, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x0096, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x0097, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0099, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x009a, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x009b, # DIVISION SIGN, right-left
0x00f9: 0x009d, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x009c, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x009e, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x009f, # LATIN SMALL LETTER U WITH DIAERESIS
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0621: 0x00c1, # ARABIC LETTER HAMZA
0x0622: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x0623: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x0624: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x0625: 0x00c5, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x0627: 0x00c7, # ARABIC LETTER ALEF
0x0628: 0x00c8, # ARABIC LETTER BEH
0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA
0x062a: 0x00ca, # ARABIC LETTER TEH
0x062b: 0x00cb, # ARABIC LETTER THEH
0x062c: 0x00cc, # ARABIC LETTER JEEM
0x062d: 0x00cd, # ARABIC LETTER HAH
0x062e: 0x00ce, # ARABIC LETTER KHAH
0x062f: 0x00cf, # ARABIC LETTER DAL
0x0630: 0x00d0, # ARABIC LETTER THAL
0x0631: 0x00d1, # ARABIC LETTER REH
0x0632: 0x00d2, # ARABIC LETTER ZAIN
0x0633: 0x00d3, # ARABIC LETTER SEEN
0x0634: 0x00d4, # ARABIC LETTER SHEEN
0x0635: 0x00d5, # ARABIC LETTER SAD
0x0636: 0x00d6, # ARABIC LETTER DAD
0x0637: 0x00d7, # ARABIC LETTER TAH
0x0638: 0x00d8, # ARABIC LETTER ZAH
0x0639: 0x00d9, # ARABIC LETTER AIN
0x063a: 0x00da, # ARABIC LETTER GHAIN
0x0640: 0x00e0, # ARABIC TATWEEL
0x0641: 0x00e1, # ARABIC LETTER FEH
0x0642: 0x00e2, # ARABIC LETTER QAF
0x0643: 0x00e3, # ARABIC LETTER KAF
0x0644: 0x00e4, # ARABIC LETTER LAM
0x0645: 0x00e5, # ARABIC LETTER MEEM
0x0646: 0x00e6, # ARABIC LETTER NOON
0x0647: 0x00e7, # ARABIC LETTER HEH
0x0648: 0x00e8, # ARABIC LETTER WAW
0x0649: 0x00e9, # ARABIC LETTER ALEF MAKSURA
0x064a: 0x00ea, # ARABIC LETTER YEH
0x064b: 0x00eb, # ARABIC FATHATAN
0x064c: 0x00ec, # ARABIC DAMMATAN
0x064d: 0x00ed, # ARABIC KASRATAN
0x064e: 0x00ee, # ARABIC FATHA
0x064f: 0x00ef, # ARABIC DAMMA
0x0650: 0x00f0, # ARABIC KASRA
0x0651: 0x00f1, # ARABIC SHADDA
0x0652: 0x00f2, # ARABIC SUKUN
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x066a: 0x00a5, # ARABIC PERCENT SIGN
0x0679: 0x00f4, # ARABIC LETTER TTEH
0x067e: 0x00f3, # ARABIC LETTER PEH
0x0686: 0x00f5, # ARABIC LETTER TCHEH
0x0688: 0x00f9, # ARABIC LETTER DDAL
0x0691: 0x00fa, # ARABIC LETTER RREH
0x0698: 0x00fe, # ARABIC LETTER JEH
0x06a4: 0x00f7, # ARABIC LETTER VEH
0x06af: 0x00f8, # ARABIC LETTER GAF
0x06ba: 0x008b, # ARABIC LETTER NOON GHUNNA
0x06d2: 0x00ff, # ARABIC LETTER YEH BARREE
0x06d5: 0x00f6, # ARABIC LETTER AE
0x2026: 0x0093, # HORIZONTAL ELLIPSIS, right-left
0x274a: 0x00c0, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
}
|
lgpl-3.0
|
gdrouart/MrMoose
|
example_6z.py
|
1
|
6743
|
"""
Example to generate a .fit, .mod and .dat file to feed in MrMoose for
demonstration. The model consists of one single power-laws and two black
bodies, with 15 data points. All is a mixture of unresolved and
blended/spatially identified components, with the black bodies being at
different redshifts (z=2 and z=4).
"""
from models import *
import numpy as np
import mm_utilities as mm
import read_files as rd
# first group of component at same redshift
redshift1 = 2.0
func1a = 'sync_law' # need to make sure function is existing in model.py
norm_sync1 = 7.0 # parameters - normalisation
alpha_sync1 = -2. # parameters - spectral index
func1b = 'BB_law'
norm_bb1 = 1.0 # parameter - normalisation
temp1 = 40 # parameter - temperature [K]
# second group of component at other redshift
redshift2 = 4.0
func2a = 'BB_law'
norm_bb2 = 0.1 # parameter - normalisation
temp2 = 20 # parameter - temperature [K]
# making all in form to build the fake system
# array of the function name
comp_function = np.array([func1a, func1b, func2a])
# array of the redshift of the component
comp_redshift = np.array([redshift1, redshift1, redshift2])
# array of parameter values, organised as sub-arrays respecting function calls
comp_param = np.array([[norm_sync1, alpha_sync1], [norm_bb1, temp1], [norm_bb2, temp2]])
nu = 10**np.linspace(6, 18, 10000) # frequency range
# list of the filters, arrangements and components
filter_name = np.array(['VLA_L', 'VLA_C', 'VLA_C', 'VLA_X', 'VLA_X',
'ATCA_47', 'ALMA_3', 'ALMA_6', 'ALMA_6_nr1',
'laboca_870', 'spire_500', 'spire_350', 'spire_250',
'pacs_160', 'pacs_70'])
data_nature = np.array(['d', 'd', 'd', 'd', 'd',
'd', 'd', 'd', 'd',
'd', 'd', 'd', 'd',
'd', 'd']) # "d" for detections, "u" for upper limit
arrangement = np.array(['1', '1', '1', '1', '1',
'1', '2', '3', '4',
'5', '5', '5', '5',
'5', '5']) # do not forget the "," for the last element!
comp_number = np.array(['0', '0', '0', '0', '0',
'0', '0,1,2', '0,1', '2',
'1,2', '1,2', '1,2', '1,2',
'1,2', '1,2'])
sn_mod = np.array([5., 5., 5., 5., 5.,
5., 5., 5., 5.,
5., 5., 5., 5.,
5., 5.]) # SN detection to estimate noise level for each point
notes = np.array(["'sync'", "'sync'", "'sync'", "'sync'", "'sync'",
"'sync'", "'all'", "'host'", "'comp'",
"'host+comp'", "'host+comp'", "'host+comp'", "'host+comp'",
"'host+comp'", "'host+comp'"]) # notes on observations
RA_list = ['12h00m00s', '12h00m00.1s', '11h59m59.95s', '12h00m00.1s', '11h59m59.95s',
'12h00m00s', '12h00m00s', '12h00m00.1s', '11h59m59.95s',
'12h00m00s', '12h00m00s', '12h00m00s', '12h00m00s',
'12h00m00s', '12h00m00s']
Dec_list = ['-40d00m00s', '-39d59m59s', '-40d00m01s', '-39d59m59s', '-40d00m01s',
'-40d00m00s', '-40d00m00s', '-39d59m59s', '-40d00m00.5s',
'-40d00m00s', '-40d00m00s', '-40d00m00s', '-40d00m00s',
'-40d00m00s', '-40d00m00s']
res_list = [20., 1.0, 1.0, 0.5, 0.5,
10., 3.0, 0.3, 0.3,
15., 35., 25., 17.,
5., 4.]
# create the array to feed in the data file
fnu_mod = np.zeros(filter_name.size)
fnu_err = np.zeros(filter_name.size)
lambda0 = np.zeros(filter_name.size)
# convert the component numbers into integer list to create the combined SED following the provided arrangements
func_index = [map(int, (elem.replace(',', ''))) for elem in comp_number]
# run through the filters to create the simulated data
for i_filter, name_filter in enumerate(filter_name):
# calculate the sum of components for this arrangement
fnu = [globals()[comp_function[j]](nu, comp_param[j], comp_redshift[j]) for j in func_index[i_filter]]
# trick to get rid off the extra dimension
fnu = np.sum(fnu, axis=0)
# read the filter transmission
nu_filter, trans_filter = rd.read_single_filter('filters/'+name_filter+'.fil')
# calculate the lambda0
lambda0[i_filter] = np.average(nu_filter, weights=trans_filter)
# perform the integration
tmp = mm.integrate_filter(nu, fnu, nu_filter, trans_filter)
# add a gaussian noise (depending on the signal to noise defined previously)
fnu_err[i_filter] = tmp/sn_mod[i_filter]
fnu_mod[i_filter] = np.random.normal(tmp, fnu_err[i_filter])
if data_nature[i_filter] == 'u':
fnu_err[i_filter] = fnu_mod[i_filter]
# create the data file
with open('data/fake_source_ex6z.dat', 'wb') as fake:
fake.writelines("# filter RA Dec resolution lambda0 det_type flux "
"flux_error arrangement component component_number \n")
for i in range(filter_name.size-1):
fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10} \n'.format(
filter_name[i], RA_list[i], Dec_list[i], res_list[i],
lambda0[i], data_nature[i], fnu_mod[i], fnu_err[i], arrangement[i], notes[i], comp_number[i]))
fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10}'.format(
filter_name[i+1], RA_list[i+1], Dec_list[i+1], res_list[i+1],
lambda0[i+1], data_nature[i+1], fnu_mod[i+1], fnu_err[i+1], arrangement[i+1], notes[i+1], comp_number[i+1]))
# create the fit file
with open('fake_source_ex6z.fit', 'wb') as fake:
fake.write('source_file: data/fake_source_ex6z.dat \n')
fake.write('model_file: models/fake_source_ex6z.mod \n')
fake.write('all_same_redshift: False \n')
fake.write('redshift: '+"[{:.4f}, {:.4f}, {:.4f}]".format(redshift1, redshift1, redshift2)+'\n')
fake.write('nwalkers: 20 \n')
fake.write('nsteps: 80 \n')
fake.write('nsteps_cut: 78 \n')
fake.write('percentiles: [10., 25., 50., 75., 90.] \n')
fake.write('skip_imaging: False \n')
fake.write('skip_fit: False \n')
fake.write('skip_MCChains: False \n')
fake.write('skip_triangle: False \n')
fake.write('skip_SED: False \n')
fake.write("unit_obs: 'Hz' \n")
fake.write("unit_flux: 'Jy' \n")
# create the model file
with open('models/fake_source_ex6z.mod', 'wb') as fake:
fake.write('sync_law 2 \n')
fake.write('$N_{s1}$ -22 -12 \n')
fake.write('$\\alpha_{s1}$ -3.5 -0.5 \n')
fake.write('BB_law 2 \n')
fake.write('$N_{BB1}$ -28 -18 \n')
fake.write('$T_1$ 10 60 \n')
fake.write('BB_law 2 \n')
fake.write('$N_{BB2}$ -28 -18 \n')
fake.write('$T_2$ 10 40 \n')
|
gpl-3.0
|
sangwonl/audi
|
audi/external/babel/messages/catalog.py
|
32
|
31061
|
# -*- coding: utf-8 -*-
"""
babel.messages.catalog
~~~~~~~~~~~~~~~~~~~~~~
Data structures for message catalogs.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
from cgi import parse_header
from datetime import datetime, time as time_
from difflib import get_close_matches
from email import message_from_string
from copy import copy
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
from babel._compat import string_types, number_types, PY2, cmp
__all__ = ['Message', 'Catalog', 'TranslationError']
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
def _parse_datetime_header(value):
match = re.match(r'^(?P<datetime>.*?)(?P<tzoffset>[+-]\d{4})?$', value)
tt = time.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
dt = datetime.fromtimestamp(ts)
# Separate the offset into a sign component, hours, and # minutes
tzoffset = match.group('tzoffset')
if tzoffset is not None:
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = dt.replace(tzinfo=tzoffset)
return dt
class Message(object):
    """Representation of a single message in a catalog."""

    def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
                 user_comments=(), previous_id=(), lineno=None, context=None):
        """Create the message object.

        :param id: the message ID, or a ``(singular, plural)`` tuple for
                   pluralizable messages
        :param string: the translated message string, or a
                       ``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filenname, lineno)`` tuples
        :param flags: a set or sequence of flags
        :param auto_comments: a sequence of automatic comments for the message
        :param user_comments: a sequence of user comments for the message
        :param previous_id: the previous message ID, or a ``(singular, plural)``
                            tuple for pluralizable messages
        :param lineno: the line number on which the msgid line was found in the
                       PO file, if any
        :param context: the message context
        """
        self.id = id  #: The message ID
        # A pluralizable message with no translation still needs one empty
        # string per plural form.
        if not string and self.pluralizable:
            string = (u'', u'')
        self.string = string  #: The message translation
        self.locations = list(distinct(locations))
        self.flags = set(flags)
        # Keep the 'python-format' flag consistent with the actual msgid text.
        if id and self.python_format:
            self.flags.add('python-format')
        else:
            self.flags.discard('python-format')
        self.auto_comments = list(distinct(auto_comments))
        self.user_comments = list(distinct(user_comments))
        # Normalize previous_id to a list (a bare string becomes a 1-list).
        if isinstance(previous_id, string_types):
            self.previous_id = [previous_id]
        else:
            self.previous_id = list(previous_id)
        self.lineno = lineno
        self.context = context

    def __repr__(self):
        return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
                                        list(self.flags))

    def __cmp__(self, obj):
        """Compare Messages, taking into account plural ids"""
        def values_to_compare():
            # For pluralizable messages, compare on the singular form only.
            if isinstance(obj, Message):
                plural = self.pluralizable
                obj_plural = obj.pluralizable
                if plural and obj_plural:
                    return self.id[0], obj.id[0]
                elif plural:
                    return self.id[0], obj.id
                elif obj_plural:
                    return self.id, obj.id[0]
            return self.id, obj.id
        this, other = values_to_compare()
        return cmp(this, other)

    # Rich comparisons all delegate to __cmp__ (needed on Python 3, where
    # __cmp__ itself is never called implicitly).
    def __gt__(self, other):
        return self.__cmp__(other) > 0

    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __ge__(self, other):
        return self.__cmp__(other) >= 0

    def __le__(self, other):
        return self.__cmp__(other) <= 0

    def __eq__(self, other):
        return self.__cmp__(other) == 0

    def __ne__(self, other):
        return self.__cmp__(other) != 0

    def clone(self):
        # Shallow-copies every attribute so the clone can be mutated
        # independently of the original.
        return Message(*map(copy, (self.id, self.string, self.locations,
                                   self.flags, self.auto_comments,
                                   self.user_comments, self.previous_id,
                                   self.lineno, self.context)))

    def check(self, catalog=None):
        """Run various validation checks on the message.  Some validations
        are only performed if the catalog is provided.  This method returns
        a sequence of `TranslationError` objects.

        :rtype: ``iterator``
        :param catalog: A catalog instance that is passed to the checkers
        :see: `Catalog.check` for a way to perform checks for all messages
              in a catalog.
        """
        # Imported here to avoid a circular import at module load time.
        from babel.messages.checkers import checkers
        errors = []
        for checker in checkers:
            try:
                checker(catalog, self)
            except TranslationError as e:
                errors.append(e)
        return errors

    @property
    def fuzzy(self):
        """Whether the translation is fuzzy.

        >>> Message('foo').fuzzy
        False
        >>> msg = Message('foo', 'foo', flags=['fuzzy'])
        >>> msg.fuzzy
        True
        >>> msg
        <Message 'foo' (flags: ['fuzzy'])>

        :type: `bool`"""
        return 'fuzzy' in self.flags

    @property
    def pluralizable(self):
        """Whether the message is plurizable.

        >>> Message('foo').pluralizable
        False
        >>> Message(('foo', 'bar')).pluralizable
        True

        :type: `bool`"""
        return isinstance(self.id, (list, tuple))

    @property
    def python_format(self):
        """Whether the message contains Python-style parameters.

        >>> Message('foo %(name)s bar').python_format
        True
        >>> Message(('foo %(name)s', 'foo %(name)s')).python_format
        True

        :type: `bool`"""
        ids = self.id
        if not isinstance(ids, (list, tuple)):
            ids = [ids]
        # A message is python-format if any of its forms contains a placeholder.
        return any(PYTHON_FORMAT.search(id) for id in ids)
class TranslationError(Exception):
    """Exception thrown by translation checkers when invalid message
    translations are encountered."""
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""
# RFC 2822-style header parsing, with a Python 2 shim: there,
# email.message_from_string only accepts byte strings, so encode the input
# and decode every header name/value back to unicode.
if PY2:
    def _parse_header(header_string):
        # message_from_string only works for str, not for unicode
        headers = message_from_string(header_string.encode('utf8'))
        decoded_headers = {}
        for name, value in headers.items():
            name = name.decode('utf8')
            value = value.decode('utf8')
            decoded_headers[name] = value
        return decoded_headers
else:
    _parse_header = message_from_string
class Catalog(object):
"""Representation of a message catalog."""
def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
project=None, version=None, copyright_holder=None,
msgid_bugs_address=None, creation_date=None,
revision_date=None, last_translator=None, language_team=None,
charset=None, fuzzy=True):
"""Initialize the catalog object.
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param header_comment: the header comment as string, or `None` for the
default header
:param project: the project's name
:param version: the project's version
:param copyright_holder: the copyright holder of the catalog
:param msgid_bugs_address: the email address or URL to submit bug
reports to
:param creation_date: the date the catalog was created
:param revision_date: the date the catalog was revised
:param last_translator: the name and email of the last translator
:param language_team: the name and email of the language team
:param charset: the encoding to use in the output (defaults to utf-8)
:param fuzzy: the fuzzy bit on the catalog header
"""
self.domain = domain #: The message domain
if locale:
locale = Locale.parse(locale)
self.locale = locale #: The locale or `None`
self._header_comment = header_comment
self._messages = odict()
self.project = project or 'PROJECT' #: The project name
self.version = version or 'VERSION' #: The project version
self.copyright_holder = copyright_holder or 'ORGANIZATION'
self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
"""Name and email address of the last translator."""
self.language_team = language_team or 'LANGUAGE <LL@li.org>'
"""Name and email address of the language team."""
self.charset = charset or 'utf-8'
if creation_date is None:
creation_date = datetime.now(LOCALTZ)
elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
creation_date = creation_date.replace(tzinfo=LOCALTZ)
self.creation_date = creation_date #: Creation date of the template
if revision_date is None:
revision_date = 'YEAR-MO-DA HO:MI+ZONE'
elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
revision_date = revision_date.replace(tzinfo=LOCALTZ)
self.revision_date = revision_date #: Last revision date of the catalog
self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
self.obsolete = odict() #: Dictionary of obsolete messages
self._num_plurals = None
self._plural_expr = None
def _get_header_comment(self):
comment = self._header_comment
year = datetime.now(LOCALTZ).strftime('%Y')
if hasattr(self.revision_date, 'strftime'):
year = self.revision_date.strftime('%Y')
comment = comment.replace('PROJECT', self.project) \
.replace('VERSION', self.version) \
.replace('YEAR', year) \
.replace('ORGANIZATION', self.copyright_holder)
if self.locale:
comment = comment.replace('Translations template', '%s translations'
% self.locale.english_name)
return comment
def _set_header_comment(self, string):
self._header_comment = string
header_comment = property(_get_header_comment, _set_header_comment, doc="""\
The header comment for the catalog.
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> print catalog.header_comment #doctest: +ELLIPSIS
# Translations template for Foobar.
# Copyright (C) ... Foo Company
# This file is distributed under the same license as the Foobar project.
# FIRST AUTHOR <EMAIL@ADDRESS>, ....
#
The header can also be set from a string. Any known upper-case variables
will be replaced when the header is retrieved again:
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> catalog.header_comment = '''\\
... # The POT for my really cool PROJECT project.
... # Copyright (C) 1990-2003 ORGANIZATION
... # This file is distributed under the same license as the PROJECT
... # project.
... #'''
>>> print catalog.header_comment
# The POT for my really cool Foobar project.
# Copyright (C) 1990-2003 Foo Company
# This file is distributed under the same license as the Foobar
# project.
#
:type: `unicode`
""")
def _get_mime_headers(self):
headers = []
headers.append(('Project-Id-Version',
'%s %s' % (self.project, self.version)))
headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
headers.append(('POT-Creation-Date',
format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
locale='en')))
if isinstance(self.revision_date, (datetime, time_) + number_types):
headers.append(('PO-Revision-Date',
format_datetime(self.revision_date,
'yyyy-MM-dd HH:mmZ', locale='en')))
else:
headers.append(('PO-Revision-Date', self.revision_date))
headers.append(('Last-Translator', self.last_translator))
if self.locale is not None:
headers.append(('Language', str(self.locale)))
if (self.locale is not None) and ('LANGUAGE' in self.language_team):
headers.append(('Language-Team',
self.language_team.replace('LANGUAGE',
str(self.locale))))
else:
headers.append(('Language-Team', self.language_team))
if self.locale is not None:
headers.append(('Plural-Forms', self.plural_forms))
headers.append(('MIME-Version', '1.0'))
headers.append(('Content-Type',
'text/plain; charset=%s' % self.charset))
headers.append(('Content-Transfer-Encoding', '8bit'))
headers.append(('Generated-By', 'Babel %s\n' % VERSION))
return headers
def _set_mime_headers(self, headers):
for name, value in headers:
name = name.lower()
if name == 'project-id-version':
parts = value.split(' ')
self.project = u' '.join(parts[:-1])
self.version = parts[-1]
elif name == 'report-msgid-bugs-to':
self.msgid_bugs_address = value
elif name == 'last-translator':
self.last_translator = value
elif name == 'language-team':
self.language_team = value
elif name == 'content-type':
mimetype, params = parse_header(value)
if 'charset' in params:
self.charset = params['charset'].lower()
elif name == 'plural-forms':
_, params = parse_header(' ;' + value)
self._num_plurals = int(params.get('nplurals', 2))
self._plural_expr = params.get('plural', '(n != 1)')
elif name == 'pot-creation-date':
self.creation_date = _parse_datetime_header(value)
elif name == 'po-revision-date':
# Keep the value if it's not the default one
if 'YEAR' not in value:
self.revision_date = _parse_datetime_header(value)
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
The behavior of this property changes slightly depending on whether a locale
is set or not, the latter indicating that the catalog is actually a template
for actual translations.
Here's an example of the output for such a catalog template:
>>> from babel.dates import UTC
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
>>> catalog = Catalog(project='Foobar', version='1.0',
... creation_date=created)
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
Last-Translator: FULL NAME <EMAIL@ADDRESS>
Language-Team: LANGUAGE <LL@li.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
And here's an example of the output when the locale is set:
>>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
>>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
... creation_date=created, revision_date=revised,
... last_translator='John Doe <jd@example.com>',
... language_team='de_DE <de@example.com>')
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: 1990-08-03 12:00+0000
Last-Translator: John Doe <jd@example.com>
Language: de_DE
Language-Team: de_DE <de@example.com>
Plural-Forms: nplurals=2; plural=(n != 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
:type: `list`
""")
@property
def num_plurals(self):
"""The number of plurals used by the catalog or locale.
>>> Catalog(locale='en').num_plurals
2
>>> Catalog(locale='ga').num_plurals
3
:type: `int`"""
if self._num_plurals is None:
num = 2
if self.locale:
num = get_plural(self.locale)[0]
self._num_plurals = num
return self._num_plurals
@property
def plural_expr(self):
"""The plural expression used by the catalog or locale.
>>> Catalog(locale='en').plural_expr
'(n != 1)'
>>> Catalog(locale='ga').plural_expr
'(n==1 ? 0 : n==2 ? 1 : 2)'
:type: `string_types`"""
if self._plural_expr is None:
expr = '(n != 1)'
if self.locale:
expr = get_plural(self.locale)[1]
self._plural_expr = expr
return self._plural_expr
@property
def plural_forms(self):
"""Return the plural forms declaration for the locale.
>>> Catalog(locale='en').plural_forms
'nplurals=2; plural=(n != 1)'
>>> Catalog(locale='pt_BR').plural_forms
'nplurals=2; plural=(n > 1)'
:type: `str`"""
return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
def __contains__(self, id):
"""Return whether the catalog has a message with the specified ID."""
return self._key_for(id) in self._messages
def __len__(self):
"""The number of messages in the catalog.
This does not include the special ``msgid ""`` entry."""
return len(self._messages)
def __iter__(self):
"""Iterates through all the entries in the catalog, in the order they
were added, yielding a `Message` object for every entry.
:rtype: ``iterator``"""
buf = []
for name, value in self.mime_headers:
buf.append('%s: %s' % (name, value))
flags = set()
if self.fuzzy:
flags |= set(['fuzzy'])
yield Message(u'', '\n'.join(buf), flags=flags)
for key in self._messages:
yield self._messages[key]
def __repr__(self):
locale = ''
if self.locale:
locale = ' %s' % self.locale
return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
def __delitem__(self, id):
"""Delete the message with the specified ID."""
self.delete(id)
def __getitem__(self, id):
"""Return the message with the specified ID.
:param id: the message ID
"""
return self.get(id)
def __setitem__(self, id, message):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
If a message with that ID is already in the catalog, it is updated
to include the locations and flags of the new message.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
>>> catalog[u'foo'].locations
[('main.py', 1)]
>>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
>>> catalog[u'foo'].locations
[('main.py', 1), ('utils.py', 5)]
:param id: the message ID
:param message: the `Message` object
"""
assert isinstance(message, Message), 'expected a Message object'
key = self._key_for(id, message.context)
current = self._messages.get(key)
if current:
if message.pluralizable and not current.pluralizable:
# The new message adds pluralization
current.id = message.id
current.string = message.string
current.locations = list(distinct(current.locations +
message.locations))
current.auto_comments = list(distinct(current.auto_comments +
message.auto_comments))
current.user_comments = list(distinct(current.user_comments +
message.user_comments))
current.flags |= message.flags
message = current
elif id == '':
# special treatment for the header message
self.mime_headers = _parse_header(message.string).items()
self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
in message.user_comments])
self.fuzzy = message.fuzzy
else:
if isinstance(id, (list, tuple)):
assert isinstance(message.string, (list, tuple)), \
'Expected sequence but got %s' % type(message.string)
self._messages[key] = message
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog.add(u'foo')
<Message ...>
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
This method simply constructs a `Message` object with the given
arguments and invokes `__setitem__` with that object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
:param locations: a sequence of ``(filenname, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments
:param user_comments: a sequence of user comments
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
message = Message(id, string, list(locations), flags, auto_comments,
user_comments, previous_id, lineno=lineno,
context=context)
self[id] = message
return message
def check(self):
"""Run various validation checks on the translations in the catalog.
For every message which fails validation, this method yield a
``(message, errors)`` tuple, where ``message`` is the `Message` object
and ``errors`` is a sequence of `TranslationError` objects.
:rtype: ``iterator``
"""
for message in self._messages.values():
errors = message.check(catalog=self)
if errors:
yield message, errors
def get(self, id, context=None):
"""Return the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
return self._messages.get(self._key_for(id, context))
def delete(self, id, context=None):
"""Delete the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
key = self._key_for(id, context)
if key in self._messages:
del self._messages[key]
    def update(self, template, no_fuzzy_matching=False):
        """Update the catalog based on the given template catalog.

        >>> from babel.messages import Catalog
        >>> template = Catalog()
        >>> template.add('green', locations=[('main.py', 99)])
        <Message ...>
        >>> template.add('blue', locations=[('main.py', 100)])
        <Message ...>
        >>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
        <Message ...>
        >>> catalog = Catalog(locale='de_DE')
        >>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
        <Message ...>
        >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
        <Message ...>
        >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
        ...             locations=[('util.py', 38)])
        <Message ...>
        >>> catalog.update(template)
        >>> len(catalog)
        3
        >>> msg1 = catalog['green']
        >>> msg1.string
        >>> msg1.locations
        [('main.py', 99)]
        >>> msg2 = catalog['blue']
        >>> msg2.string
        u'blau'
        >>> msg2.locations
        [('main.py', 100)]
        >>> msg3 = catalog['salad']
        >>> msg3.string
        (u'Salat', u'Salate')
        >>> msg3.locations
        [('util.py', 42)]

        Messages that are in the catalog but not in the template are removed
        from the main collection, but can still be accessed via the `obsolete`
        member:

        >>> 'head' in catalog
        False
        >>> catalog.obsolete.values()
        [<Message 'head' (flags: [])>]

        :param template: the reference catalog, usually read from a POT file
        :param no_fuzzy_matching: whether to use fuzzy matching of message IDs
        """
        messages = self._messages
        remaining = messages.copy()
        # Rebuild the catalog from scratch; anything that is not re-added
        # below stays in `remaining` and is moved to `obsolete` at the end.
        self._messages = odict()
        # Prepare for fuzzy matching
        # Maps each translated message's key to its context, so that a
        # close key match found by difflib can reuse the old translation.
        fuzzy_candidates = []
        if not no_fuzzy_matching:
            fuzzy_candidates = dict([
                (self._key_for(msgid), messages[msgid].context)
                for msgid in messages if msgid and messages[msgid].string
            ])
        fuzzy_matches = set()
        def _merge(message, oldkey, newkey):
            # Carry the existing translation over onto the template
            # message, marking it fuzzy when the keys differ (fuzzy key
            # match) or the plural arity no longer lines up.
            message = message.clone()
            fuzzy = False
            if oldkey != newkey:
                fuzzy = True
                fuzzy_matches.add(oldkey)
                oldmsg = messages.get(oldkey)
                if isinstance(oldmsg.id, string_types):
                    message.previous_id = [oldmsg.id]
                else:
                    message.previous_id = list(oldmsg.id)
            else:
                # Exact match: the old message is consumed and must not
                # end up in `obsolete`.
                oldmsg = remaining.pop(oldkey, None)
            message.string = oldmsg.string
            if isinstance(message.id, (list, tuple)):
                if not isinstance(message.string, (list, tuple)):
                    # Singular translation for a now-pluralizable message:
                    # pad with empty plural forms and flag as fuzzy.
                    fuzzy = True
                    message.string = tuple(
                        [message.string] + ([u''] * (len(message.id) - 1))
                    )
                elif len(message.string) != self.num_plurals:
                    fuzzy = True
                    # NOTE(review): this truncates to the *old* string's
                    # length rather than padding/truncating to
                    # ``self.num_plurals`` -- confirm against upstream Babel.
                    message.string = tuple(message.string[:len(oldmsg.string)])
            elif isinstance(message.string, (list, tuple)):
                # Plural translation for a now-singular message: keep only
                # the singular form and flag as fuzzy.
                fuzzy = True
                message.string = message.string[0]
            message.flags |= oldmsg.flags
            if fuzzy:
                message.flags |= set([u'fuzzy'])
            self[message.id] = message
        for message in template:
            if message.id:
                key = self._key_for(message.id, message.context)
                if key in messages:
                    _merge(message, key, key)
                else:
                    if no_fuzzy_matching is False:
                        # do some fuzzy matching with difflib
                        if isinstance(key, tuple):
                            matchkey = key[0] # just the msgid, no context
                        else:
                            matchkey = key
                        matches = get_close_matches(matchkey.lower().strip(),
                                                    fuzzy_candidates.keys(), 1)
                        if matches:
                            newkey = matches[0]
                            newctxt = fuzzy_candidates[newkey]
                            if newctxt is not None:
                                newkey = newkey, newctxt
                            _merge(message, newkey, key)
                            continue
                    self[message.id] = message
        for msgid in remaining:
            # Messages that did not reappear in the template (and were not
            # consumed by a fuzzy match) are kept only as obsolete.
            if no_fuzzy_matching or msgid not in fuzzy_matches:
                self.obsolete[msgid] = remaining[msgid]
        # Make updated catalog's POT-Creation-Date equal to the template
        # used to update the catalog
        self.creation_date = template.creation_date
def _key_for(self, id, context=None):
"""The key for a message is just the singular ID even for pluralizable
messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
messages.
"""
key = id
if isinstance(key, (list, tuple)):
key = id[0]
if context is not None:
key = (key, context)
return key
|
mit
|
kou/zulip
|
zerver/webhooks/opsgenie/view.py
|
2
|
2877
|
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view('OpsGenie')
@has_request_variables
def api_opsgenie_webhook(request: HttpRequest, user_profile: UserProfile,
                         payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Forward an incoming OpsGenie alert webhook as a Zulip message.

    The topic is the OpsGenie integration name; the body links to the
    alert and lists whichever optional fields the payload carries, one
    Markdown bullet each.
    """
    alert = payload['alert']
    bullet_template = "* **{key}**: {value}\n"
    tags = ', '.join('`' + tag + '`' for tag in alert.get('tags', []))
    bullets = []
    # Emit one bullet per optional field, in a fixed display order.
    if 'note' in alert:
        bullets.append(bullet_template.format(key='Note',
                                              value=alert['note']))
    if 'recipient' in alert:
        bullets.append(bullet_template.format(key='Recipient',
                                              value=alert['recipient']))
    if 'addedTags' in alert:
        bullets.append(bullet_template.format(key='Tags added',
                                              value=alert['addedTags']))
    if 'team' in alert:
        bullets.append(bullet_template.format(key='Team added',
                                              value=alert['team']))
    if 'owner' in alert:
        bullets.append(bullet_template.format(key='Assigned owner',
                                              value=alert['owner']))
    # Note: escalationName lives at the top level of the payload, not
    # under 'alert'.
    if 'escalationName' in payload:
        bullets.append(bullet_template.format(key='Escalation',
                                              value=payload['escalationName']))
    if 'removedTags' in alert:
        bullets.append(bullet_template.format(key='Tags removed',
                                              value=alert['removedTags']))
    if 'message' in alert:
        bullets.append(bullet_template.format(key='Message',
                                              value=alert['message']))
    if tags:
        bullets.append(bullet_template.format(key='Tags', value=tags))
    body_template = """
[OpsGenie alert for {integration_name}](https://app.opsgenie.com/alert/V2#/show/{alert_id}):
* **Type**: {alert_type}
{additional_info}
""".strip()
    body = body_template.format(
        integration_name=payload['integrationName'],
        alert_id=alert['alertId'],
        alert_type=payload['action'],
        additional_info=''.join(bullets),
    )
    topic = payload['integrationName']
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
|
apache-2.0
|
Southpaw-TACTIC/Team
|
src/python/Tools/Scripts/xxci.py
|
12
|
2914
|
#! /usr/bin/env python
# xxci
#
# check in files for which rcsdiff returns nonzero exit status
import sys
import os
from stat import *
import fnmatch
EXECMAGIC = '\001\140\000\010'
MAXSIZE = 200*1024 # Files this big must be binaries and are skipped.
def getargs():
args = sys.argv[1:]
if args:
return args
print 'No arguments, checking almost *, in "ls -t" order'
list = []
for file in os.listdir(os.curdir):
if not skipfile(file):
list.append((getmtime(file), file))
list.sort()
if not list:
print 'Nothing to do -- exit 1'
sys.exit(1)
list.sort()
list.reverse()
for mtime, file in list: args.append(file)
return args
def getmtime(file):
    """Return *file*'s modification time, or -1 if it cannot be stat'ed."""
    try:
        return os.stat(file)[ST_MTIME]
    except os.error:
        return -1
badnames = ['tags', 'TAGS', 'xyzzy', 'nohup.out', 'core']
badprefixes = ['.', ',', '@', '#', 'o.']
badsuffixes = \
['~', '.a', '.o', '.old', '.bak', '.orig', '.new', '.prev', '.not', \
'.pyc', '.fdc', '.rgb', '.elc', ',v']
ignore = []
def setup():
    """Build the global ``ignore`` pattern list.

    Combines the literal bad names, prefix and suffix patterns, plus any
    extra patterns listed in an optional ``.xxcign`` file in the current
    directory.
    """
    ignore[:] = badnames
    for p in badprefixes:
        ignore.append(p + '*')
    for p in badsuffixes:
        ignore.append('*' + p)
    try:
        f = open('.xxcign', 'r')
    except IOError:
        # No per-directory ignore file -- nothing more to add.
        return
    try:
        ignore[:] = ignore + f.read().split()
    finally:
        # Bug fix: the file handle used to be leaked.
        f.close()
def skipfile(file):
    """Return 1 if *file* should be skipped, 0 if it should be examined.

    Skips anything matching the ``ignore`` patterns, non-regular files,
    files over MAXSIZE, and files starting with the EXECMAGIC bytes.
    """
    for p in ignore:
        if fnmatch.fnmatch(file, p): return 1
    try:
        st = os.lstat(file)
    except os.error:
        return 1 # Doesn't exist -- skip it
    # Skip non-plain files.
    if not S_ISREG(st[ST_MODE]): return 1
    # Skip huge files -- probably binaries.
    if st[ST_SIZE] >= MAXSIZE: return 1
    # Skip executables
    try:
        # Bug fix: close the file instead of leaking the handle, and
        # catch only I/O errors instead of a bare except that could hide
        # real bugs (e.g. NameError).
        f = open(file, 'r')
        try:
            data = f.read(len(EXECMAGIC))
        finally:
            f.close()
        if data == EXECMAGIC: return 1
    except IOError:
        pass
    return 0
def badprefix(file):
    """Return 1 if *file* begins with one of the skip prefixes."""
    for prefix in badprefixes:
        if file.startswith(prefix): return 1
    return 0
def badsuffix(file):
    """Return 1 if *file* ends with one of the skip suffixes."""
    for suffix in badsuffixes:
        if file.endswith(suffix): return 1
    return 0
def go(args):
for file in args:
print file + ':'
if differing(file):
showdiffs(file)
if askyesno('Check in ' + file + ' ? '):
sts = os.system('rcs -l ' + file) # ignored
sts = os.system('ci -l ' + file)
def differing(file):
    """Return true if the working file differs from its checked-in head."""
    cmd = 'co -p ' + file + ' 2>/dev/null | cmp -s - ' + file
    return os.system(cmd) != 0
def showdiffs(file):
    """Page through the rcsdiff output for *file*."""
    os.system('rcsdiff ' + file + ' 2>&1 | ${PAGER-more}')
def askyesno(prompt):
    """Prompt the user; accept 'y' or 'yes' as an affirmative answer."""
    answer = raw_input(prompt)
    return answer in ['y', 'yes']
if __name__ == '__main__':
    # Build the ignore list, then process each candidate file; a
    # keyboard interrupt aborts cleanly instead of printing a traceback.
    try:
        setup()
        go(getargs())
    except KeyboardInterrupt:
        print '[Intr]'
|
epl-1.0
|
Turbo87/unp
|
unp.py
|
1
|
14021
|
import os
import re
import sys
import tempfile
import mimetypes
import subprocess
import click
FILENAME = object()
OUTPUT_FOLDER = object()
unpackers = []
def register_unpacker(cls):
    """Class decorator that adds *cls* to the global `unpackers` registry."""
    unpackers.append(cls)
    return cls
def fnmatch(pattern, filename):
    """Match *filename*'s basename against the glob *pattern*.

    Returns a regex match object (truthy) whose groups capture the text
    before, between and after each ``*`` wildcard, or ``None`` if the
    name does not match.
    """
    name = os.path.basename(os.path.normcase(filename))
    normalized = os.path.normcase(pattern)
    # Turn each '*' into a lazy capture group, keeping the literal parts
    # as their own groups.
    regex = '^(%s)$' % re.escape(normalized).replace('\\*', ')(.*?)(')
    return re.match(regex, name)
def which(name):
    """Return the full path of executable *name* found on $PATH, or None."""
    search_path = os.environ.get('PATH')
    if not search_path:
        return None
    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.access(candidate, os.X_OK):
            return candidate
def increment_string(string):
    """Return *string* with its trailing integer incremented.

    Strings without a trailing number get ``-2`` appended, so repeated
    calls produce ``name``, ``name-2``, ``name-3``, ...
    """
    match = re.match(r'(.*?)(\d+)$', string)
    if match is None:
        return string + '-2'
    prefix, number = match.groups()
    return prefix + str(int(number) + 1)
def get_mimetype(filename):
    """Guess the mimetype of *filename*.

    Prefers file(1)'s content-based detection when available, falling
    back to extension-based guessing via the `mimetypes` module.
    """
    if which('file') is not None:
        proc = subprocess.Popen(['file', '-b', '--mime-type', filename],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        detected = proc.communicate()[0].strip()
        if detected:
            return detected
    return mimetypes.guess_type(filename)[0]
def line_parser(format):
    # Empty placeholder; nothing in this file calls it.
    # NOTE(review): looks like dead code -- confirm and remove or implement.
    pass
class StreamProcessor(object):
    """Scans one output stream of a subprocess for extracted filenames.

    *format* is a regex with a single capture group; *stream* names the
    subprocess attribute to read ('stdout' or 'stderr').
    """
    def __init__(self, format, stream):
        self.regex = re.compile(format)
        self.stream = stream
    def process(self, p):
        """Yield the capture group of every matching line read from *p*."""
        source = getattr(p, self.stream)
        while True:
            line = source.readline()
            if not line:
                break
            found = self.regex.search(line)
            if found is not None:
                yield found.group(1)
class UnpackerBase(object):
    """Base class for all archive unpacker backends.

    Subclasses describe one archive format through the class attributes
    below; :meth:`unpack` drives the common flow of extracting into a
    hidden temporary directory and then moving the result into place, so
    the target directory is never spammed with loose files.
    """
    name = None              # human-readable name of the format
    executable = None        # external tool that performs the extraction
    filename_patterns = ()   # glob patterns this unpacker handles
    mimetypes = ()           # mimetypes this unpacker handles
    brew_package = None      # homebrew package providing `executable`
    args = ()                # argv template; may contain the sentinels
    cwd = OUTPUT_FOLDER      # working directory for the subprocess

    def __init__(self, filename, silent=False):
        self.filename = filename
        self.silent = silent
        self.assert_available()

    @classmethod
    def filename_matches(cls, filename):
        """Return True if *filename* matches one of our glob patterns."""
        for pattern in cls.filename_patterns:
            if fnmatch(pattern, filename) is not None:
                return True

    @classmethod
    def mimetype_matches(cls, filename):
        """Return True if the detected mimetype is one we handle."""
        mt = get_mimetype(filename)
        return mt in cls.mimetypes

    @classmethod
    def find_executable(cls):
        """Locate the external tool on $PATH, or return None."""
        return which(cls.executable)

    @property
    def basename(self):
        """Archive name with the format suffix stripped, best effort."""
        for pattern in self.filename_patterns:
            match = fnmatch(pattern, self.filename)
            if match is None:
                continue
            pieces = match.groups()
            # The last group is the literal tail of the pattern; drop it
            # when it looks like an extension.
            if pieces and pieces[-1].startswith('.'):
                return ''.join(pieces[:-1])
        return os.path.basename(self.filename).split('.', 1)[0]

    def assert_available(self):
        """Raise a click.UsageError if the external tool is missing."""
        if self.find_executable() is not None:
            return
        msgs = ['Cannot unpack "%s" because %s is not available.' % (
            click.format_filename(self.filename),
            self.executable,
        )]
        if sys.platform == 'darwin' and self.brew_package is not None:
            msgs.extend((
                # Bug fix: "threw" -> "through" in the user-facing hint.
                'You can install the unpacker through brew:',
                '',
                ' $ brew install %s' % self.brew_package,
            ))
        raise click.UsageError('\n'.join(msgs))

    def get_args_and_cwd(self, dst):
        """Expand the argv template and working directory for *dst*.

        FILENAME and OUTPUT_FOLDER sentinels are replaced by the archive
        path and the destination folder respectively.
        """
        def convert_arg(arg):
            if arg is FILENAME:
                return self.filename
            if arg is OUTPUT_FOLDER:
                return dst
            return arg
        args = [self.find_executable()]
        for arg in self.args:
            args.append(convert_arg(arg))
        cwd = convert_arg(self.cwd)
        if cwd is None:
            cwd = '.'
        return args, cwd

    def report_file(self, filename):
        """Echo an extracted filename to stderr unless running silently."""
        if not self.silent:
            click.echo(click.format_filename(filename), err=True)

    def real_unpack(self, dst):
        """Extract the archive into *dst*; implemented by subclasses.

        Bug fix: the abstract signature used to declare an extra
        ``silent`` parameter that no caller or override ever passed.
        """
        raise NotImplementedError()

    def finish_unpacking(self, tmp_dir, dst):
        """Move the unpacked content from *tmp_dir* into *dst*.

        A single unpacked entry keeps its own name when that name is
        free; anything else is moved under a derived, collision-free
        folder name. Returns the final destination path.
        """
        # Calculate the fallback destination
        basename = self.basename
        fallback_dst = os.path.join(os.path.abspath(dst), basename)
        while os.path.isdir(fallback_dst):
            fallback_dst = increment_string(fallback_dst)
        # Find how many unpacked files there are.  If there is more than
        # one, then we have to go to the fallback destination.  Same goes
        # if the intended destination already exists.
        contents = os.listdir(tmp_dir)
        if len(contents) == 1:
            the_one_file = contents[0]
            intended_dst = os.path.join(dst, the_one_file)
        else:
            intended_dst = None
        if intended_dst is None or os.path.exists(intended_dst):
            os.rename(tmp_dir, fallback_dst)
            return fallback_dst
        # Otherwise rename the first thing to the intended destination
        # and remove the temporary directory.
        os.rename(os.path.join(tmp_dir, the_one_file), intended_dst)
        os.rmdir(tmp_dir)
        return intended_dst

    def cleanup(self, dst):
        """Best-effort removal of *dst*, whether file or directory."""
        try:
            os.remove(dst)
        except Exception:
            pass
        try:
            import shutil
            # Bug fix: shutil has no `rmdir`; the old call raised an
            # AttributeError that the bare except silently swallowed, so
            # non-empty temporary directories were never removed.
            shutil.rmtree(dst)
        except Exception:
            pass

    def unpack(self, dst):
        """Unpack the archive into folder *dst* and report the result."""
        if not self.silent:
            click.secho('Unpacking "%s" with %s' % (
                self.filename,
                self.executable,
            ), fg='yellow')
        dst = os.path.abspath(dst)
        try:
            os.makedirs(dst)
        except OSError:
            # Destination already exists -- that is fine.
            pass
        tmp_dir = tempfile.mkdtemp(prefix='.' + self.basename, dir=dst)
        try:
            if self.real_unpack(tmp_dir) != 0:
                raise click.UsageError('Unpacking through %s failed.'
                                       % self.executable)
            final = self.finish_unpacking(tmp_dir, dst)
            if not self.silent:
                click.secho('Extracted to %s' % final, fg='green')
        finally:
            self.cleanup(tmp_dir)

    def dump_command(self, dst):
        """Print (instead of running) the command that would unpack to *dst*."""
        args, cwd = self.get_args_and_cwd(dst)
        for idx, arg in enumerate(args):
            # Quote arguments containing whitespace.
            if arg.split() != [arg]:
                args[idx] = '"%s"' % \
                    arg.replace('\\', '\\\\').replace('"', '\\"')
        click.echo(' '.join(args))

    def __repr__(self):
        return '<Unpacker %r>' % (
            self.name,
        )
class Unpacker(UnpackerBase):
    """Unpacker for tools that list extracted files on one of their streams."""
    stream_processor = None
    def real_unpack(self, dst):
        args, cwd = self.get_args_and_cwd(dst)
        # Capture only the stream the processor wants to scan.
        pipes = {self.stream_processor.stream: subprocess.PIPE}
        proc = subprocess.Popen(args, cwd=cwd, **pipes)
        for extracted in self.stream_processor.process(proc):
            self.report_file(extracted)
        return proc.wait()
class SingleInplaceUnpacker(UnpackerBase):
    """Unpacker for tools that write the single decompressed file to stdout."""
    def real_unpack(self, dst):
        args, cwd = self.get_args_and_cwd(dst)
        target = os.path.join(dst, self.basename)
        with open(target, 'wb') as out:
            status = subprocess.Popen(args, cwd=cwd, stdout=out).wait()
        self.report_file(target)
        return status
# Shared processor: tar prints "x <name>" for each extracted file on stderr.
tar_stream_processor = StreamProcessor(
    format=r'^x (.*?)$',
    stream='stderr',
)
@register_unpacker
class TarUnpacker(Unpacker):
    name = 'Uncompressed Tarballs'
    filename_patterns = ['*.tar']
    executable = 'tar'
    args = ['xvf', FILENAME]
    stream_processor = tar_stream_processor
@register_unpacker
class TarGzUnpacker(Unpacker):
    name = 'Gzip Compressed Tarballs'
    filename_patterns = ['*.tar.gz', '*.tgz']
    executable = 'tar'
    args = ['xvzf', FILENAME]
    stream_processor = tar_stream_processor
@register_unpacker
class TarBz2Unpacker(Unpacker):
    name = 'Bz2 Compressed Tarballs'
    filename_patterns = ['*.tar.bz2']
    executable = 'tar'
    args = ['xvjf', FILENAME]
    stream_processor = tar_stream_processor
# Single-file compressors decompress to stdout (-c), so they use the
# in-place unpacker rather than a stream processor.
@register_unpacker
class GzipUnpacker(SingleInplaceUnpacker):
    name = 'Gzip Compressed Files'
    filename_patterns = ['*.gz']
    executable = 'gunzip'
    args = ['-c', FILENAME]
    mimetypes = ['application/x-gzip']
@register_unpacker
class Bz2Unpacker(SingleInplaceUnpacker):
    name = 'Bz2 Compressed Files'
    filename_patterns = ['*.bz2']
    executable = 'bunzip2'
    args = ['-c', FILENAME]
    mimetypes = ['application/x-bzip2']
@register_unpacker
class ZipUnpacker(Unpacker):
    name = 'Zip Archives'
    filename_patterns = ['*.zip', '*.egg', '*.whl', '*.jar']
    executable = 'unzip'
    args = [FILENAME]
    mimetypes = ['application/zip']
    stream_processor = StreamProcessor(
        format=r'^  inflating: (.*?)$',
        stream='stdout',
    )
@register_unpacker
class RarUnpacker(Unpacker):
    name = 'WinRAR Archives'
    filename_patterns = ['*.rar']
    executable = 'unrar'
    args = ['-idp', '-y', 'x', FILENAME]
    # NOTE(review): 'application/zip' looks copy-pasted from ZipUnpacker;
    # rar files are not reported as application/zip -- confirm intended.
    mimetypes = ['application/zip']
    brew_package = 'unrar'
    stream_processor = StreamProcessor(
        format=r'^Extracting  (.*?)\s+OK\s*$',
        stream='stdout',
    )
@register_unpacker
class P7ZipUnpacker(Unpacker):
    name = '7zip Archives'
    filename_patterns = ['*.7z']
    executable = '7z'
    args = ['-bd', 'x', FILENAME]
    # NOTE(review): same suspicious 'application/zip' as RarUnpacker above.
    mimetypes = ['application/zip']
    brew_package = 'p7zip'
    stream_processor = StreamProcessor(
        format=r'^Extracting  (.*?)$',
        stream='stdout',
    )
class DMGUnpacker(UnpackerBase):
    """Mounts an Apple disk image via hdiutil and copies its contents out."""
    name = 'Apple Disk Image'
    filename_patterns = ['*.dmg', '*.sparseimage']
    executable = 'hdiutil'
    args = ['attach', '-nobrowse', FILENAME]
    def real_unpack(self, dst):
        # Mount the image at a private mountpoint derived from dst.
        mp = dst + '---mp'
        args, cwd = self.get_args_and_cwd(dst)
        args.append('-mountpoint')
        args.append(mp)
        with open('/dev/null', 'wb') as devnull:
            rv = subprocess.Popen(args, cwd=cwd,
                                  stdout=devnull,
                                  stderr=devnull).wait()
        if rv != 0:
            return rv
        # Copy everything out of the mounted image; cp -v prints
        # "src -> dst" lines which we parse to report each file.
        p = subprocess.Popen(['cp', '-vpR', mp + '/', dst],
                             stdout=subprocess.PIPE)
        while 1:
            line = p.stdout.readline()
            if not line:
                break
            # NOTE(review): assumes text-mode (str) pipe output; under
            # Python 3 stdout yields bytes and rstrip('\r\n') would fail
            # -- confirm the supported interpreter version.
            line = line.rstrip('\r\n').split(' -> ', 1)[1]
            if line.startswith(dst + '/'):
                line = line[len(dst) + 1:].strip()
            if line:
                self.report_file(line)
        return p.wait()
    def cleanup(self, dst):
        # Unmount the ---mp mountpoint chosen in real_unpack before the
        # base class removes the temporary directory.
        with open('/dev/null', 'wb') as devnull:
            subprocess.Popen(['umount', dst + '---mp'],
                             stderr=devnull, stdout=devnull).wait()
        UnpackerBase.cleanup(self, dst)
# hdiutil only exists on OS X, so the DMG backend is registered there only.
if sys.platform == 'darwin':
    register_unpacker(DMGUnpacker)
def get_unpacker_class(filename):
    """Pick the unpacker class for *filename*.

    Filename glob patterns are tried first, then content-based mimetype
    detection.

    :raises click.UsageError: if the file is missing or unsupported.
    """
    uifn = click.format_filename(filename)
    if not os.path.isfile(filename):
        raise click.UsageError('Could not find file "%s".' % uifn)
    for candidate in unpackers:
        if candidate.filename_matches(filename):
            return candidate
    for candidate in unpackers:
        if candidate.mimetype_matches(filename):
            return candidate
    raise click.UsageError('Could not determine unpacker for "%s".' % uifn)
def list_unpackers(ctx, param, value):
    """Click callback for ``--list-unpackers``: print all backends and exit.

    Backends are grouped by the executable they shell out to; a trailing
    ``*`` marks executables that were not found on $PATH.
    """
    if not value:
        return
    by_executable = {}
    for cls in unpackers:
        exe = cls.find_executable()
        if exe is None:
            exe = cls.executable + '*'
        by_executable.setdefault(exe, []).append(cls)
    ordered = sorted(by_executable.items(),
                     key=lambda item: os.path.basename(item[0]).lower())
    for idx, (executable, group) in enumerate(ordered):
        if idx:
            click.echo()
        click.echo(executable)
        for cls in group:
            click.echo(' - %s (%s)' % (cls.name,
                                       '; '.join(cls.filename_patterns)))
    ctx.exit()
@click.command()
@click.argument('files', nargs=-1, type=click.Path(), required=True)
@click.option('-q', '--silent', is_flag=True,
              help='If this is enabled, nothing will be printed.')
@click.option('-o', '--output', type=click.Path(),
              help='Defines the output folder. '
              'Defaults to the working directory.')
@click.option('--dump-command', is_flag=True,
              help='Instead of executing the unpacker it prints out the '
              'command that would be executed. This is useful for '
              'debugging broken archives usually. Note that this command '
              'when executed directly might spam your current working '
              'directory!')
@click.option('--list-unpackers', is_flag=True, expose_value=False,
              callback=list_unpackers,
              help='Lists all supported unpackers.')
@click.version_option()
def cli(files, silent, output, dump_command):
    """unp is a super simple command line application that can unpack a lot
    of different archives. No matter if you unpack a zip or tarball, the
    syntax for doing it is the same. Unp will also automatically ensure
    that the unpacking goes into a single folder in case the archive does not
    contain a wrapper directory. This guarantees that you never accidentally
    spam files into your current working directory.
    Behind the scenes unp will shell out to the most appropriate application
    based on filename or guessed mimetype.
    """
    if output is None:
        output = '.'
    # Resolve and validate every archive up front so that errors surface
    # before any extraction has started.
    loaded = []
    for path in files:
        real_path = os.path.realpath(path)
        unpacker_cls = get_unpacker_class(real_path)
        loaded.append(unpacker_cls(real_path, silent=silent))
    for unpacker in loaded:
        if dump_command:
            unpacker.dump_command(output)
        else:
            unpacker.unpack(output)
|
bsd-3-clause
|
FireWRT/OpenWrt-Firefly-Libraries
|
staging_dir/host/lib/python3.4/test/test_mmap.py
|
68
|
27117
|
from test.support import (TESTFN, run_unittest, import_module, unlink,
requires, _2G, _4G, gc_collect, cpython_only)
import unittest
import os
import re
import itertools
import socket
import sys
import weakref
# Skip test if we can't import mmap.
mmap = import_module('mmap')
PAGESIZE = mmap.PAGESIZE
class MmapTests(unittest.TestCase):
    def setUp(self):
        # Start every test with a clean slate: remove any leftover TESTFN.
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
    def tearDown(self):
        # Best-effort removal; the test itself may already have deleted it.
        try:
            os.unlink(TESTFN)
        except OSError:
            pass
    def test_basic(self):
        # Test mmap module on Unix systems and Windows
        # Create a file to be mmap'ed.
        f = open(TESTFN, 'bw+')
        try:
            # Write 2 pages worth of data to the file
            f.write(b'\0'* PAGESIZE)
            f.write(b'foo')
            f.write(b'\0'* (PAGESIZE-3) )
            f.flush()
            m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
        finally:
            f.close()
        # Simple sanity checks
        tp = str(type(m))  # SF bug 128713:  segfaulted on Linux
        self.assertEqual(m.find(b'foo'), PAGESIZE)
        self.assertEqual(len(m), 2*PAGESIZE)
        self.assertEqual(m[0], 0)
        self.assertEqual(m[0:3], b'\0\0\0')
        # Shouldn't crash on boundary (Issue #5292)
        self.assertRaises(IndexError, m.__getitem__, len(m))
        self.assertRaises(IndexError, m.__setitem__, len(m), b'\0')
        # Modify the file's content
        m[0] = b'3'[0]
        m[PAGESIZE +3: PAGESIZE +3+3] = b'bar'
        # Check that the modification worked
        self.assertEqual(m[0], b'3'[0])
        self.assertEqual(m[0:3], b'3\0\0')
        self.assertEqual(m[PAGESIZE-1 : PAGESIZE + 7], b'\0foobar\0')
        m.flush()
        # Test doing a regular expression match in an mmap'ed file
        match = re.search(b'[A-Za-z]+', m)
        if match is None:
            self.fail('regex match on mmap failed!')
        else:
            start, end = match.span(0)
            length = end - start
            self.assertEqual(start, PAGESIZE)
            self.assertEqual(end, PAGESIZE + 6)
        # test seeking around (try to overflow the seek implementation)
        m.seek(0,0)
        self.assertEqual(m.tell(), 0)
        m.seek(42,1)
        self.assertEqual(m.tell(), 42)
        m.seek(0,2)
        self.assertEqual(m.tell(), len(m))
        # Try to seek to negative position...
        self.assertRaises(ValueError, m.seek, -1)
        # Try to seek beyond end of mmap...
        self.assertRaises(ValueError, m.seek, 1, 2)
        # Try to seek to negative position...
        self.assertRaises(ValueError, m.seek, -len(m)-1, 2)
        # Try resizing map
        try:
            m.resize(512)
        except SystemError:
            # resize() not supported
            # No messages are printed, since the output of this test suite
            # would then be different across platforms.
            pass
        else:
            # resize() is supported
            self.assertEqual(len(m), 512)
            # Check that we can no longer seek beyond the new size.
            self.assertRaises(ValueError, m.seek, 513, 0)
            # Check that the underlying file is truncated too
            # (bug #728515)
            f = open(TESTFN, 'rb')
            try:
                f.seek(0, 2)
                self.assertEqual(f.tell(), 512)
            finally:
                f.close()
            self.assertEqual(m.size(), 512)
        m.close()
    def test_access_parameter(self):
        # Test for "access" keyword parameter
        mapsize = 10
        with open(TESTFN, "wb") as fp:
            fp.write(b"a"*mapsize)
        with open(TESTFN, "rb") as f:
            m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
        self.assertEqual(m[:], b'a'*mapsize, "Readonly memory map data incorrect.")
        # ACCESS_READ: every mutation path must raise TypeError.
        # Ensuring that readonly mmap can't be slice assigned
        try:
            m[:] = b'b'*mapsize
        except TypeError:
            pass
        else:
            self.fail("Able to write to readonly memory map")
        # Ensuring that readonly mmap can't be item assigned
        try:
            m[0] = b'b'
        except TypeError:
            pass
        else:
            self.fail("Able to write to readonly memory map")
        # Ensuring that readonly mmap can't be write() to
        try:
            m.seek(0,0)
            m.write(b'abc')
        except TypeError:
            pass
        else:
            self.fail("Able to write to readonly memory map")
        # Ensuring that readonly mmap can't be write_byte() to
        try:
            m.seek(0,0)
            m.write_byte(b'd')
        except TypeError:
            pass
        else:
            self.fail("Able to write to readonly memory map")
        # Ensuring that readonly mmap can't be resized
        try:
            m.resize(2*mapsize)
        except SystemError:   # resize is not universally supported
            pass
        except TypeError:
            pass
        else:
            self.fail("Able to resize readonly memory map")
        with open(TESTFN, "rb") as fp:
            self.assertEqual(fp.read(), b'a'*mapsize,
                             "Readonly memory map data file was modified")
        # Opening mmap with size too big
        with open(TESTFN, "r+b") as f:
            try:
                m = mmap.mmap(f.fileno(), mapsize+1)
            except ValueError:
                # we do not expect a ValueError on Windows
                # CAUTION:  This also changes the size of the file on disk, and
                # later tests assume that the length hasn't changed.  We need to
                # repair that.
                if sys.platform.startswith('win'):
                    self.fail("Opening mmap with size+1 should work on Windows.")
            else:
                # we expect a ValueError on Unix, but not on Windows
                if not sys.platform.startswith('win'):
                    self.fail("Opening mmap with size+1 should raise ValueError.")
                m.close()
            if sys.platform.startswith('win'):
                # Repair damage from the resizing test.
                with open(TESTFN, 'r+b') as f:
                    f.truncate(mapsize)
        # Opening mmap with access=ACCESS_WRITE
        with open(TESTFN, "r+b") as f:
            m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
            # Modifying write-through memory map
            m[:] = b'c'*mapsize
            self.assertEqual(m[:], b'c'*mapsize,
                   "Write-through memory map memory not updated properly.")
            m.flush()
            m.close()
        with open(TESTFN, 'rb') as f:
            stuff = f.read()
        self.assertEqual(stuff, b'c'*mapsize,
               "Write-through memory map data file not updated properly.")
        # Opening mmap with access=ACCESS_COPY
        with open(TESTFN, "r+b") as f:
            m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
            # Modifying copy-on-write memory map
            m[:] = b'd'*mapsize
            self.assertEqual(m[:], b'd' * mapsize,
                             "Copy-on-write memory map data not written correctly.")
            m.flush()
            # Copy-on-write changes must never reach the backing file.
            with open(TESTFN, "rb") as fp:
                self.assertEqual(fp.read(), b'c'*mapsize,
                                 "Copy-on-write test data file should not be modified.")
            # Ensuring copy-on-write maps cannot be resized
            self.assertRaises(TypeError, m.resize, 2*mapsize)
            m.close()
        # Ensuring invalid access parameter raises exception
        with open(TESTFN, "r+b") as f:
            self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize, access=4)
        if os.name == "posix":
            # Try incompatible flags, prot and access parameters.
            with open(TESTFN, "r+b") as f:
                self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize,
                                  flags=mmap.MAP_PRIVATE,
                                  prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
            # Try writing with PROT_EXEC and without PROT_WRITE
            prot = mmap.PROT_READ | getattr(mmap, 'PROT_EXEC', 0)
            with open(TESTFN, "r+b") as f:
                m = mmap.mmap(f.fileno(), mapsize, prot=prot)
                self.assertRaises(TypeError, m.write, b"abcdef")
                self.assertRaises(TypeError, m.write_byte, 0)
                m.close()
def test_bad_file_desc(self):
# Try opening a bad file descriptor...
self.assertRaises(OSError, mmap.mmap, -2, 4096)
def test_tougher_find(self):
# Do a tougher .find() test. SF bug 515943 pointed out that, in 2.2,
# searching for data with embedded \0 bytes didn't work.
with open(TESTFN, 'wb+') as f:
data = b'aabaac\x00deef\x00\x00aa\x00'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
for start in range(n+1):
for finish in range(start, n+1):
slice = data[start : finish]
self.assertEqual(m.find(slice), data.find(slice))
self.assertEqual(m.find(slice + b'x'), -1)
m.close()
def test_find_end(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'wb+')
data = b'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.find(b'one'), 0)
self.assertEqual(m.find(b'ones'), 8)
self.assertEqual(m.find(b'one', 0, -1), 0)
self.assertEqual(m.find(b'one', 1), 8)
self.assertEqual(m.find(b'one', 1, -1), 8)
self.assertEqual(m.find(b'one', 1, -2), -1)
def test_rfind(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'wb+')
data = b'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.rfind(b'one'), 8)
self.assertEqual(m.rfind(b'one '), 0)
self.assertEqual(m.rfind(b'one', 0, -1), 8)
self.assertEqual(m.rfind(b'one', 0, -2), 0)
self.assertEqual(m.rfind(b'one', 1, -1), 8)
self.assertEqual(m.rfind(b'one', 1, -2), -1)
def test_double_close(self):
# make sure a double close doesn't crash on Solaris (Bug# 665913)
f = open(TESTFN, 'wb+')
f.write(2**16 * b'a') # Arbitrary character
f.close()
f = open(TESTFN, 'rb')
mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
mf.close()
mf.close()
f.close()
@unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_entire_file(self):
# test mapping of entire file by passing 0 for map length
f = open(TESTFN, "wb+")
f.write(2**16 * b'm') # Arbitrary character
f.close()
f = open(TESTFN, "rb+")
mf = mmap.mmap(f.fileno(), 0)
self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
self.assertEqual(mf.read(2**16), 2**16 * b"m")
mf.close()
f.close()
@unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_length_0_offset(self):
# Issue #10916: test mapping of remainder of file by passing 0 for
# map length with an offset doesn't cause a segfault.
# NOTE: allocation granularity is currently 65536 under Win64,
# and therefore the minimum offset alignment.
with open(TESTFN, "wb") as f:
f.write((65536 * 2) * b'm') # Arbitrary character
with open(TESTFN, "rb") as f:
with mmap.mmap(f.fileno(), 0, offset=65536, access=mmap.ACCESS_READ) as mf:
self.assertRaises(IndexError, mf.__getitem__, 80000)
@unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_length_0_large_offset(self):
# Issue #10959: test mapping of a file by passing 0 for
# map length with a large offset doesn't cause a segfault.
with open(TESTFN, "wb") as f:
f.write(115699 * b'm') # Arbitrary character
with open(TESTFN, "w+b") as f:
self.assertRaises(ValueError, mmap.mmap, f.fileno(), 0,
offset=2147418112)
    def test_move(self):
        # make move works everywhere (64-bit format problem earlier)
        f = open(TESTFN, 'wb+')
        f.write(b"ABCDEabcde") # Arbitrary character
        f.flush()
        mf = mmap.mmap(f.fileno(), 10)
        mf.move(5, 0, 5)
        self.assertEqual(mf[:], b"ABCDEABCDE", "Map move should have duplicated front 5")
        mf.close()
        f.close()
        # more excessive test
        # Exhaustively compare move() against slice arithmetic for every
        # valid (dest, src, count) combination.
        data = b"0123456789"
        for dest in range(len(data)):
            for src in range(len(data)):
                for count in range(len(data) - max(dest, src)):
                    expected = data[:dest] + data[src:src+count] + data[dest+count:]
                    m = mmap.mmap(-1, len(data))
                    m[:] = data
                    m.move(dest, src, count)
                    self.assertEqual(m[:], expected)
                    m.close()
        # segfault test (Issue 5387)
        # Out-of-range arguments must raise ValueError, never crash.
        m = mmap.mmap(-1, 100)
        offsets = [-100, -1, 0, 1, 100]
        for source, dest, size in itertools.product(offsets, offsets, offsets):
            try:
                m.move(source, dest, size)
            except ValueError:
                pass
        offsets = [(-1, -1, -1), (-1, -1, 0), (-1, 0, -1), (0, -1, -1),
                   (-1, 0, 0), (0, -1, 0), (0, 0, -1)]
        for source, dest, size in offsets:
            self.assertRaises(ValueError, m.move, source, dest, size)
        m.close()
        # Boundary checks on a minimal (single byte) map, including the
        # degenerate zero-length move.
        m = mmap.mmap(-1, 1) # single byte
        self.assertRaises(ValueError, m.move, 0, 0, 2)
        self.assertRaises(ValueError, m.move, 1, 0, 1)
        self.assertRaises(ValueError, m.move, 0, 1, 1)
        m.move(0, 0, 1)
        m.move(0, 0, 0)
def test_anonymous(self):
# anonymous mmap.mmap(-1, PAGE)
m = mmap.mmap(-1, PAGESIZE)
for x in range(PAGESIZE):
self.assertEqual(m[x], 0,
"anonymously mmap'ed contents should be zero")
for x in range(PAGESIZE):
b = x & 0xff
m[x] = b
self.assertEqual(m[x], b)
def test_read_all(self):
m = mmap.mmap(-1, 16)
self.addCleanup(m.close)
# With no parameters, or None or a negative argument, reads all
m.write(bytes(range(16)))
m.seek(0)
self.assertEqual(m.read(), bytes(range(16)))
m.seek(8)
self.assertEqual(m.read(), bytes(range(8, 16)))
m.seek(16)
self.assertEqual(m.read(), b'')
m.seek(3)
self.assertEqual(m.read(None), bytes(range(3, 16)))
m.seek(4)
self.assertEqual(m.read(-1), bytes(range(4, 16)))
m.seek(5)
self.assertEqual(m.read(-2), bytes(range(5, 16)))
m.seek(9)
self.assertEqual(m.read(-42), bytes(range(9, 16)))
def test_read_invalid_arg(self):
m = mmap.mmap(-1, 16)
self.addCleanup(m.close)
self.assertRaises(TypeError, m.read, 'foo')
self.assertRaises(TypeError, m.read, 5.5)
self.assertRaises(TypeError, m.read, [1, 2, 3])
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = bytes(reversed(range(256)))
m = mmap.mmap(-1, len(s))
m[:] = s
self.assertEqual(m[:], s)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(m[start:stop:step],
s[start:stop:step])
def test_extended_set_del_slice(self):
# Test extended slicing by comparing with list slicing.
s = bytes(reversed(range(256)))
m = mmap.mmap(-1, len(s))
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
m[:] = s
self.assertEqual(m[:], s)
L = list(s)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data = bytes(reversed(data))
L[start:stop:step] = data
m[start:stop:step] = data
self.assertEqual(m[:], bytes(L))
def make_mmap_file(self, f, halfsize):
    """Fill *f* with 2*halfsize bytes (b'foo' at the halfway point) and
    return a mmap of the whole file."""
    # Layout: halfsize zero bytes, the three-byte marker, then zero
    # padding so the total file size is exactly 2 * halfsize.
    payload = b'\0' * halfsize + b'foo' + b'\0' * (halfsize - 3)
    f.write(payload)
    f.flush()
    return mmap.mmap(f.fileno(), 0)
def test_empty_file (self):
    # mmapping a zero-length file must be rejected with ValueError.
    f = open (TESTFN, 'w+b')
    f.close()  # leaves TESTFN in place, but empty
    with open(TESTFN, "rb") as f :
        self.assertRaisesRegex(ValueError,
                               "cannot mmap an empty file",
                               mmap.mmap, f.fileno(), 0,
                               access=mmap.ACCESS_READ)
def test_offset(self):
    """Maps created with a non-zero ``offset`` expose the right window,
    reject invalid offsets, and can be resized where supported."""
    f = open(TESTFN, 'w+b')
    try:  # unlink TESTFN no matter what
        halfsize = mmap.ALLOCATIONGRANULARITY
        m = self.make_mmap_file(f, halfsize)
        m.close()
        f.close()

        mapsize = halfsize * 2
        # Negative or non-integer offsets must be rejected outright.
        # (The original used try/except with assertEqual(0, 1) as a
        # failure marker; assertRaises states the intent directly.)
        f = open(TESTFN, "r+b")
        for offset in [-2, -1, None]:
            with self.assertRaises((ValueError, TypeError, OverflowError)):
                mmap.mmap(f.fileno(), mapsize, offset=offset)
        f.close()

        # A valid offset maps only the tail of the file: the b'foo'
        # marker sits at the start of the second half.
        f = open(TESTFN, "r+b")
        m = mmap.mmap(f.fileno(), mapsize - halfsize, offset=halfsize)
        self.assertEqual(m[0:3], b'foo')
        f.close()

        # Try resizing the map; platforms without support raise
        # SystemError, which is tolerated.
        try:
            m.resize(512)
        except SystemError:
            pass
        else:
            # resize() is supported
            self.assertEqual(len(m), 512)
            # Check that we can no longer seek beyond the new size.
            self.assertRaises(ValueError, m.seek, 513, 0)
            # Check that the content is not changed
            self.assertEqual(m[0:3], b'foo')
            # Check that the underlying file is truncated too
            f = open(TESTFN, 'rb')
            f.seek(0, 2)
            self.assertEqual(f.tell(), halfsize + 512)
            f.close()
            self.assertEqual(m.size(), halfsize + 512)
        m.close()
    finally:
        f.close()
        try:
            os.unlink(TESTFN)
        except OSError:
            pass
def test_subclass(self):
class anon_mmap(mmap.mmap):
def __new__(klass, *args, **kwargs):
return mmap.mmap.__new__(klass, -1, *args, **kwargs)
anon_mmap(PAGESIZE)
@unittest.skipUnless(hasattr(mmap, 'PROT_READ'), "needs mmap.PROT_READ")
def test_prot_readonly(self):
    """A PROT_READ-only mapping rejects writes with TypeError."""
    mapsize = 10
    with open(TESTFN, "wb") as fp:
        fp.write(b"a" * mapsize)
    # Context manager so the file is closed even if mmap() raises
    # (the original leaked the handle on that path).
    with open(TESTFN, "rb") as f:
        m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
    # The mapping stays valid after the descriptor is closed.
    self.assertRaises(TypeError, m.write, "foo")
def test_error(self):
self.assertIs(mmap.error, OSError)
def test_io_methods(self):
    # Exercises write_byte/read_byte/read/write and their ValueError
    # behaviour once the position reaches the end of the map.
    data = b"0123456789"
    with open(TESTFN, "wb") as fp:
        fp.write(b"x"*len(data))
    f = open(TESTFN, "r+b")
    m = mmap.mmap(f.fileno(), len(data))
    f.close()
    # Test write_byte()
    for i in range(len(data)):
        self.assertEqual(m.tell(), i)
        m.write_byte(data[i])
        self.assertEqual(m.tell(), i+1)
    # Writing past the end of the map must fail.
    self.assertRaises(ValueError, m.write_byte, b"x"[0])
    self.assertEqual(m[:], data)
    # Test read_byte()
    m.seek(0)
    for i in range(len(data)):
        self.assertEqual(m.tell(), i)
        self.assertEqual(m.read_byte(), data[i])
        self.assertEqual(m.tell(), i+1)
    # Reading past the end of the map must fail.
    self.assertRaises(ValueError, m.read_byte)
    # Test read()
    m.seek(3)
    self.assertEqual(m.read(3), b"345")
    self.assertEqual(m.tell(), 6)
    # Test write()
    m.seek(3)
    m.write(b"bar")
    self.assertEqual(m.tell(), 6)
    self.assertEqual(m[:], b"012bar6789")
    # A write that would run past the end of the map must fail.
    m.seek(8)
    self.assertRaises(ValueError, m.write, b"bar")
def test_non_ascii_byte(self):
for b in (129, 200, 255): # > 128
m = mmap.mmap(-1, 1)
m.write_byte(b)
self.assertEqual(m[0], b)
m.seek(0)
self.assertEqual(m.read_byte(), b)
m.close()
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_tagname(self):
    # On Windows, anonymous maps created with the same tagname alias the
    # same memory; different tagnames get distinct backing storage.
    data1 = b"0123456789"
    data2 = b"abcdefghij"
    assert len(data1) == len(data2)

    # Test same tag
    m1 = mmap.mmap(-1, len(data1), tagname="foo")
    m1[:] = data1
    m2 = mmap.mmap(-1, len(data2), tagname="foo")
    m2[:] = data2
    # Both views observe the last write because they alias each other.
    self.assertEqual(m1[:], data2)
    self.assertEqual(m2[:], data2)
    m2.close()
    m1.close()

    # Test different tag
    m1 = mmap.mmap(-1, len(data1), tagname="foo")
    m1[:] = data1
    m2 = mmap.mmap(-1, len(data2), tagname="boo")
    m2[:] = data2
    # Distinct tagnames: each map keeps its own contents.
    self.assertEqual(m1[:], data1)
    self.assertEqual(m2[:], data2)
    m2.close()
    m1.close()
@cpython_only
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_sizeof(self):
    # A tagged map stores a copy of its tagname (plus a trailing NUL),
    # which must show up in sys.getsizeof().
    m1 = mmap.mmap(-1, 100)
    tagname = "foo"
    m2 = mmap.mmap(-1, 100, tagname=tagname)
    self.assertEqual(sys.getsizeof(m2),
                     sys.getsizeof(m1) + len(tagname) + 1)
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_crasher_on_windows(self):
    # Regression tests: only the absence of an interpreter crash matters
    # here, so any ordinary Python exception is acceptable.
    # Should not crash (Issue 1733986)
    m = mmap.mmap(-1, 1000, tagname="foo")
    try:
        mmap.mmap(-1, 5000, tagname="foo")[:]  # same tagname, but larger size
    except:
        pass
    m.close()

    # Should not crash (Issue 5385)
    with open(TESTFN, "wb") as fp:
        fp.write(b"x"*10)
    f = open(TESTFN, "r+b")
    m = mmap.mmap(f.fileno(), 0)
    f.close()
    try:
        m.resize(0)  # will raise OSError
    except:
        pass
    try:
        # Accessing the map after the failed resize must not crash.
        m[:]
    except:
        pass
    m.close()
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_invalid_descriptor(self):
    # socket file descriptors are valid, but out of range
    # for _get_osfhandle, causing a crash when validating the
    # parameters to _get_osfhandle.
    # The failure must surface as OSError, not a crash.
    s = socket.socket()
    try:
        with self.assertRaises(OSError):
            m = mmap.mmap(s.fileno(), 10)
    finally:
        s.close()
def test_context_manager(self):
with mmap.mmap(-1, 10) as m:
self.assertFalse(m.closed)
self.assertTrue(m.closed)
def test_context_manager_exception(self):
# Test that the OSError gets passed through
with self.assertRaises(Exception) as exc:
with mmap.mmap(-1, 10) as m:
raise OSError
self.assertIsInstance(exc.exception, OSError,
"wrong exception raised in context manager")
self.assertTrue(m.closed, "context manager failed")
def test_weakref(self):
    """mmap objects support weak references."""
    mapping = mmap.mmap(-1, 16)
    ref = weakref.ref(mapping)
    self.assertIs(ref(), mapping)
    del mapping
    gc_collect()
    # Once the last strong reference is gone the weakref must clear.
    self.assertIs(ref(), None)
class LargeMmapTests(unittest.TestCase):
    """Tests that map multi-gigabyte sparse files; skipped where the
    platform or filesystem cannot support them."""

    def setUp(self):
        unlink(TESTFN)

    def tearDown(self):
        unlink(TESTFN)

    def _make_test_file(self, num_zeroes, tail):
        # Windows and OS X do not create sparse files cheaply, so the
        # 'largefile' resource must be explicitly enabled there.
        if sys.platform[:3] == 'win' or sys.platform == 'darwin':
            requires('largefile',
                'test requires %s bytes and a long time to run' % str(0x180000000))
        f = open(TESTFN, 'w+b')
        try:
            # Seeking past EOF and writing creates a sparse file on
            # filesystems that support it.
            f.seek(num_zeroes)
            f.write(tail)
            f.flush()
        except (OSError, OverflowError):
            f.close()
            raise unittest.SkipTest("filesystem does not have largefile support")
        return f

    def test_large_offset(self):
        # Map only the tail beyond a >4 GiB offset and check its content.
        with self._make_test_file(0x14FFFFFFF, b" ") as f:
            with mmap.mmap(f.fileno(), 0, offset=0x140000000, access=mmap.ACCESS_READ) as m:
                self.assertEqual(m[0xFFFFFFF], 32)

    def test_large_filesize(self):
        with self._make_test_file(0x17FFFFFFF, b" ") as f:
            if sys.maxsize < 0x180000000:
                # On 32 bit platforms the file is larger than sys.maxsize so
                # mapping the whole file should fail -- Issue #16743
                with self.assertRaises(OverflowError):
                    mmap.mmap(f.fileno(), 0x180000000, access=mmap.ACCESS_READ)
                with self.assertRaises(ValueError):
                    mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            # A small window still maps, and size() reports the full file.
            with mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ) as m:
                self.assertEqual(m.size(), 0x180000000)

    # Issue 11277: mmap() with large (~4GB) sparse files crashes on OS X.
    def _test_around_boundary(self, boundary):
        # Place a small marker straddling *boundary* and make sure a
        # slice across it returns the marker intact.
        tail = b' DEARdear '
        start = boundary - len(tail) // 2
        end = start + len(tail)
        with self._make_test_file(start, tail) as f:
            with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
                self.assertEqual(m[start:end], tail)

    @unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
    def test_around_2GB(self):
        self._test_around_boundary(_2G)

    @unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
    def test_around_4GB(self):
        self._test_around_boundary(_4G)
def test_main():
    # Entry point used by regrtest: run both test classes.
    run_unittest(MmapTests, LargeMmapTests)

if __name__ == '__main__':
    test_main()
|
gpl-2.0
|
arbn/pysaml2
|
src/saml2/attribute_converter.py
|
1
|
15064
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011 Umeå University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from importlib import import_module
from saml2.s_utils import factory
from saml2.s_utils import do_ava
from saml2 import saml
from saml2 import extension_elements_to_elements
from saml2 import SAMLError
from saml2.saml import NAME_FORMAT_UNSPECIFIED
import logging
logger = logging.getLogger(__name__)
class UnknownNameFormat(SAMLError):
    """Raised when an attribute statement uses a name format that no
    configured AttributeConverter understands."""
    pass


class ConverterError(SAMLError):
    """Raised when an attribute map is malformed or a conversion is
    impossible."""
    pass
def load_maps(dirspec):
    """Load every attribute map found in a directory.

    :param dirspec: path of a directory containing Python modules that
        define attribute maps (dicts with both "to" and "fro" keys)
    :return: dict keyed on each map's "identifier" value, with the map
        dictionary as the value
    """
    maps = {}
    # The modules are imported by bare name, so the directory itself
    # must be on the import path.
    if dirspec not in sys.path:
        sys.path.insert(0, dirspec)
    modules = (import_module(name[:-3]) for name in os.listdir(dirspec)
               if name.endswith(".py"))
    for module in modules:
        for name, value in module.__dict__.items():
            # Skip dunder attributes; keep only dicts that look like
            # attribute maps (they carry both mapping directions).
            if name.startswith("__"):
                continue
            if isinstance(value, dict) and "to" in value and "fro" in value:
                maps[value["identifier"]] = value
    return maps
def _converters_from_module(mod):
    """Build AttributeConverter instances from every attribute-map dict
    (a dict with both "to" and "fro" keys) defined in *mod*."""
    acs = []
    for key, item in mod.__dict__.items():
        if key.startswith("__"):
            continue
        if isinstance(item, dict) and "to" in item and "fro" in item:
            atco = AttributeConverter(item["identifier"])
            atco.from_dict(item)
            acs.append(atco)
    return acs


def ac_factory(path=""):
    """Attribute Converter factory

    :param path: The path to a directory where the attribute maps are expected
        to reside. If empty, the maps bundled with saml2 are used.
    :return: A list of AttributeConverter instances
    """
    acs = []
    if path:
        if path not in sys.path:
            sys.path.insert(0, path)
        for fil in os.listdir(path):
            if fil.endswith(".py"):
                # Same scan as for the bundled maps, shared via the helper.
                acs.extend(_converters_from_module(import_module(fil[:-3])))
    else:
        from saml2 import attributemaps
        for typ in attributemaps.__all__:
            mod = import_module(".%s" % typ, "saml2.attributemaps")
            acs.extend(_converters_from_module(mod))
    return acs
def ac_factory_II(path):
    # Backwards-compatible alias kept for callers of the old name;
    # identical to ac_factory().
    return ac_factory(path)
# def ava_fro(acs, statement):
# """ Translates attributes according to their name_formats into the local
# names.
#
# :param acs: AttributeConverter instances
# :param statement: A SAML statement
# :return: A dictionary with attribute names replaced with local names.
# """
# if not statement:
# return {}
#
# acsdic = dict([(ac.name_format, ac) for ac in acs])
# acsdic[None] = acsdic[NAME_FORMAT_URI]
# return dict([acsdic[a.name_format].ava_from(a) for a in statement])
def to_local(acs, statement, allow_unknown_attributes=False):
    """ Replaces the attribute names in a attribute value assertion with the
    equivalent name from a local name format.

    :param acs: List of Attribute Converters
    :param statement: The Attribute Statement
    :param allow_unknown_attributes: If unknown attributes are allowed
    :return: A key,values dictionary
    """
    if not acs:
        acs = [AttributeConverter()]
        # Map the default converter's (empty) name format to the converter
        # itself -- not to the list, which has no ava_from method and would
        # make the lookup below raise an uncaught AttributeError.
        acsd = {"": acs[0]}
    else:
        acsd = dict([(a.name_format, a) for a in acs])

    ava = {}
    for attr in statement.attribute:
        try:
            _func = acsd[attr.name_format].ava_from
        except KeyError:
            if attr.name_format == NAME_FORMAT_UNSPECIFIED or \
                    allow_unknown_attributes:
                # Fall back to a lossy conversion based on the attribute's
                # own (friendly) name.
                _func = acs[0].lcd_ava_from
            else:
                logger.info("Unsupported attribute name format: %s" % (
                    attr.name_format,))
                continue

        try:
            key, val = _func(attr)
        except KeyError:
            if allow_unknown_attributes:
                key, val = acs[0].lcd_ava_from(attr)
            else:
                logger.info("Unknown attribute name: %s" % (attr,))
                continue
        except AttributeError:
            continue

        # Accumulate values for attributes that occur more than once.
        try:
            ava[key].extend(val)
        except KeyError:
            ava[key] = val

    return ava
def from_local(acs, ava, name_format):
    """Convert a local attribute/value dictionary into Attribute instances.

    :param acs: list of AttributeConverter instances
    :param ava: attribute-value dictionary in the local format
    :param name_format: the name format the result should use
    :return: the matching converter's to_() result, or None when no
        converter handles *name_format*
    """
    # Use the first converter that declares the requested name format.
    converter = next(
        (a for a in acs if a.name_format == name_format), None)
    return converter.to_(ava) if converter is not None else None
def from_local_name(acs, attr, name_format):
    """
    :param acs: List of AttributeConverter instances
    :param attr: attribute name as string
    :param name_format: Which name-format it should be translated to
    :return: An Attribute instance, or *attr* unchanged when no converter
        declares the requested name format
    """
    for converter in acs:
        if converter.name_format != name_format:
            continue
        # First converter with the right name format wins.
        return converter.to_format(attr)
    return attr
def to_local_name(acs, attr):
    """
    :param acs: List of AttributeConverter instances
    :param attr: an Attribute instance
    :return: The local attribute name, falling back to the attribute's
        friendly name when no converter recognizes it
    """
    for converter in acs:
        local = converter.from_format(attr)
        if local:
            return local
    return attr.friendly_name
def d_to_local_name(acs, attr):
    """
    :param acs: List of AttributeConverter instances
    :param attr: an Attribute dictionary
    :return: The local attribute name
    :raises ConverterError: when neither a converter nor the dictionary's
        "friendly_name" entry yields a name
    """
    for converter in acs:
        local = converter.d_from_format(attr)
        if local:
            return local
    # If everything else fails, the friendly name might be good enough.
    try:
        return attr["friendly_name"]
    except KeyError:
        raise ConverterError("Could not find local name for %s" % attr)
class AttributeConverter(object):
    """ Converts from an attribute statement to a key,value dictionary and
    vice-versa.

    Two mapping tables drive the conversion:
      _fro: lower-cased external attribute name -> local attribute name
      _to:  lower-cased local attribute name -> external attribute name
    """

    def __init__(self, name_format=""):
        self.name_format = name_format
        self._to = None
        self._fro = None

    def adjust(self):
        """ If one of the transformations is not defined it is expected to
        be the mirror image of the other.
        """
        if self._fro is None and self._to is not None:
            self._fro = dict(
                [(value.lower(), key) for key, value in self._to.items()])
        # Fixed: this branch used to read the non-existent attribute
        # ``self.fro`` (AttributeError) and used the unbound method
        # ``value.lower`` -- missing the call -- as the dictionary key.
        if self._to is None and self._fro is not None:
            self._to = dict(
                [(value.lower(), key) for key, value in self._fro.items()])

    def from_dict(self, mapdict):
        """ Import the attribute map from a dictionary

        :param mapdict: The dictionary, with an "identifier" entry and at
            least one of "fro"/"to"
        :raises ConverterError: if neither direction is specified
        """
        self.name_format = mapdict["identifier"]
        try:
            self._fro = dict(
                [(k.lower(), v) for k, v in mapdict["fro"].items()])
        except KeyError:
            pass
        try:
            self._to = dict([(k.lower(), v) for k, v in mapdict["to"].items()])
        except KeyError:
            pass
        if self._fro is None and self._to is None:
            raise ConverterError("Missing specifications")
        if self._fro is None or self._to is None:
            # Derive the missing direction from the one that was given.
            self.adjust()

    def lcd_ava_from(self, attribute):
        """ Lowest-common-denominator conversion, used if nothing else works.

        :param attribute: An Attribute instance
        :return: A (name, values) tuple based on the attribute's own
            (friendly) name
        """
        try:
            name = attribute.friendly_name.strip()
        except AttributeError:
            name = attribute.name.strip()
        values = []
        for value in attribute.attribute_value:
            if not value.text:
                values.append('')
            else:
                values.append(value.text.strip())
        return name, values

    def fail_safe_fro(self, statement):
        """ In case there are no formats defined or the name format is
        undefined.

        :param statement: AttributeStatement instance
        :return: A dictionary with names and values
        """
        result = {}
        for attribute in statement.attribute:
            # Only attributes without a (specific) name format are handled.
            if attribute.name_format and \
                    attribute.name_format != NAME_FORMAT_UNSPECIFIED:
                continue
            try:
                name = attribute.friendly_name.strip()
            except AttributeError:
                name = attribute.name.strip()
            # NOTE(review): a repeated attribute name overwrites earlier
            # values here rather than accumulating them.
            result[name] = []
            for value in attribute.attribute_value:
                if not value.text:
                    result[name].append('')
                else:
                    result[name].append(value.text.strip())
        return result

    def ava_from(self, attribute, allow_unknown=False):
        """ Convert one Attribute instance to a (local name, values) pair.

        :param attribute: An Attribute instance
        :param allow_unknown: keep attributes whose name is not in the map
        :return: (local name, list of values)
        :raises KeyError: unknown attribute name and allow_unknown is False
        """
        try:
            attr = self._fro[attribute.name.strip().lower()]
        except AttributeError:
            attr = attribute.friendly_name.strip().lower()
        except KeyError:
            if allow_unknown:
                try:
                    attr = attribute.name.strip().lower()
                except AttributeError:
                    attr = attribute.friendly_name.strip().lower()
            else:
                raise

        val = []
        for value in attribute.attribute_value:
            if value.extension_elements:
                # Complex values: rebuild the SAML extension elements and
                # represent each as {tag: {attribute: value, ...}}.
                ext = extension_elements_to_elements(value.extension_elements,
                                                     [saml])
                for ex in ext:
                    cval = {}
                    for key, (name, typ, mul) in ex.c_attributes.items():
                        exv = getattr(ex, name)
                        if exv:
                            cval[name] = exv
                    if ex.text:
                        cval["value"] = ex.text.strip()
                    val.append({ex.c_tag: cval})
            elif not value.text:
                val.append('')
            else:
                val.append(value.text.strip())

        return attr, val

    def fro(self, statement):
        """ Get the attributes and the attribute values.

        :param statement: The AttributeStatement.
        :return: A dictionary containing attributes and values
        """
        if not self.name_format:
            return self.fail_safe_fro(statement)

        result = {}
        for attribute in statement.attribute:
            # Skip attributes belonging to a different name format.
            if attribute.name_format and self.name_format and \
                    attribute.name_format != self.name_format:
                continue
            try:
                (key, val) = self.ava_from(attribute)
            except (KeyError, AttributeError):
                pass
            else:
                result[key] = val
        return result

    def to_format(self, attr):
        """ Creates an Attribute instance with name, name_format and
        friendly_name

        :param attr: The local name of the attribute
        :return: An Attribute instance (unmapped names keep *attr* as the
            attribute name, with no name format)
        """
        try:
            return factory(saml.Attribute,
                           name=self._to[attr],
                           name_format=self.name_format,
                           friendly_name=attr)
        except KeyError:
            return factory(saml.Attribute, name=attr)

    def from_format(self, attr):
        """ Find out the local name of an attribute

        :param attr: An saml.Attribute instance
        :return: The local attribute name or "" if no mapping could be made
        """
        if attr.name_format:
            if self.name_format == attr.name_format:
                try:
                    return self._fro[attr.name.lower()]
                except KeyError:
                    pass
        else:  # don't know the name format so try all I have
            try:
                return self._fro[attr.name.lower()]
            except KeyError:
                pass
        return ""

    def d_from_format(self, attr):
        """ Find out the local name of an attribute

        :param attr: An Attribute dictionary
        :return: The local attribute name or "" if no mapping could be made
        """
        if attr["name_format"]:
            if self.name_format == attr["name_format"]:
                try:
                    return self._fro[attr["name"].lower()]
                except KeyError:
                    pass
        else:  # don't know the name format so try all I have
            try:
                return self._fro[attr["name"].lower()]
            except KeyError:
                pass
        return ""

    def to_(self, attrvals):
        """ Create a list of Attribute instances.

        :param attrvals: A dictionary of attributes and values
        :return: A list of Attribute instances
        """
        attributes = []
        for key, value in attrvals.items():
            key = key.lower()
            try:
                attributes.append(factory(saml.Attribute,
                                          name=self._to[key],
                                          name_format=self.name_format,
                                          friendly_name=key,
                                          attribute_value=do_ava(value)))
            except KeyError:
                # Unmapped attributes are emitted under their local name.
                attributes.append(factory(saml.Attribute,
                                          name=key,
                                          attribute_value=do_ava(value)))
        return attributes
class AttributeConverterNOOP(AttributeConverter):
    """ Does a NOOP conversion, that is no conversion is made """

    def __init__(self, name_format=""):
        AttributeConverter.__init__(self, name_format)

    def to_(self, attrvals):
        """ Create a list of Attribute instances.

        :param attrvals: A dictionary of attributes and values
        :return: A list of Attribute instances
        """
        # Emit each attribute under its own (lower-cased) name instead of
        # translating it into another name format.
        return [factory(saml.Attribute,
                        name=name.lower(),
                        name_format=self.name_format,
                        attribute_value=do_ava(values))
                for name, values in attrvals.items()]
|
bsd-2-clause
|
francois-berder/PyLetMeCreate
|
letmecreate/core/led.py
|
1
|
4150
|
#!/usr/bin/env python3
"""Python binding of LED wrapper of LetMeCreate library."""
import ctypes
# Handle to the native LetMeCreate core library; every function below is
# a thin wrapper around one of its led_* entry points.
_LIB = ctypes.CDLL('libletmecreate_core.so')

# Bitmasks selecting each of the 8 on-board LEDs.
LED_0 = 0x01
LED_1 = 0x02
LED_2 = 0x04
LED_3 = 0x08
LED_4 = 0x10
LED_5 = 0x20
LED_6 = 0x40
LED_HEARTBEAT = LED_7 = 0x80  # LED 7 doubles as the heartbeat LED
ALL_LEDS = 0xFF  # mask selecting every LED at once
LED_CNT = 8  # number of LEDs

# LED operating modes (see configure_on_off_mode / configure_timer_mode).
ON_OFF_MODE = 0
TIMER_MODE = 1
def init():
    """Initialise all LEDs: switch them off and select ON_OFF_MODE.

    Raises an exception if the native initialisation fails.
    """
    if _LIB.led_init() != 0:
        raise Exception("led init failed")
def switch_on(mask):
    """Switch on the LEDs selected by *mask*.

    The LEDs must be initialised and configured in ON_OFF_MODE.
    mask: 8-bit integer; only LEDs whose bits are set are switched on.
    Raises an exception on failure.
    """
    if _LIB.led_switch_on(mask) < 0:
        raise Exception("led switch_on failed")
def switch_off(mask):
    """Switch off the LEDs selected by *mask*.

    The LEDs must be initialised and configured in ON_OFF_MODE.
    mask: 8-bit integer; only LEDs whose bits are set are switched off.
    Raises an exception on failure.
    """
    if _LIB.led_switch_off(mask) < 0:
        raise Exception("led switch_off failed")
def set_value(mask, value):
    """Switch the LEDs selected by *mask* on or off according to *value*.

    The LEDs must be initialised and configured in ON_OFF_MODE.
    mask: 8-bit integer; only LEDs whose bits are set may change state.
    value: 8-bit integer; a set bit switches the LED on, a clear bit off.
    Raises an exception on failure.
    """
    if _LIB.led_set(mask, value) < 0:
        raise Exception("led set value failed")
def configure_on_off_mode(mask):
    """Put the LEDs selected by *mask* into ON_OFF_MODE.

    The LEDs must be initialised before calling this function.
    mask: 8-bit integer; only LEDs whose bits are set are reconfigured.
    Raises an exception on failure.
    """
    if _LIB.led_configure_on_off_mode(mask) < 0:
        raise Exception("led configure_on_off_mode failed")
def configure_timer_mode(mask):
    """Put the LEDs selected by *mask* into TIMER_MODE.

    The LEDs must be initialised before calling this function.
    mask: 8-bit integer; only LEDs whose bits are set are reconfigured.
    Raises an exception on failure.
    """
    if _LIB.led_configure_timer_mode(mask) < 0:
        raise Exception("led configure_timer_mode failed")
def get_mode(index):
    """Return the mode (ON_OFF_MODE or TIMER_MODE) of one LED.

    The LED must be initialised before calling this function.
    index: integer in range 0..7.
    Raises an exception if the mode cannot be retrieved.
    """
    # The native call writes the mode into a caller-supplied byte.
    mode = ctypes.c_uint8(0)
    if _LIB.led_get_mode(index, ctypes.byref(mode)) < 0:
        raise Exception("led get mode failed")
    return mode.value
def set_delay(mask, delay_on, delay_off):
    """Set the blink timing of the LEDs selected by *mask*.

    The LEDs must be initialised and configured in TIMER_MODE.
    mask: only LEDs whose bits are set have their settings changed.
    delay_on: time in milliseconds the LEDs stay on.
    delay_off: time in milliseconds the LEDs stay off.
    Raises an exception on failure.
    """
    if _LIB.led_set_delay(mask, delay_on, delay_off) < 0:
        raise Exception("led set delay failed")
def release():
    """Release all LEDs, switching them off.

    Raises an exception if the native release fails.
    """
    if _LIB.led_release() < 0:
        raise Exception("led release failed")
|
bsd-3-clause
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/solvers/python/kernel_tests/util_test.py
|
25
|
4551
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UtilTest(test.TestCase):
  """Tests for tensorflow.contrib.solvers util helpers: create_operator,
  identity_operator and the l2norm family."""

  def _testCreateOperator(self, use_static_shape_):
    # Verify apply/apply_adjoint of a dense-matrix operator against
    # numpy, with both statically and dynamically shaped inputs.
    for dtype in np.float32, np.float64:
      a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
      x_np = np.array([[2.], [-3.]], dtype=dtype)
      y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
      with self.cached_session() as sess:
        if use_static_shape_:
          a = constant_op.constant(a_np, dtype=dtype)
          x = constant_op.constant(x_np, dtype=dtype)
          y = constant_op.constant(y_np, dtype=dtype)
        else:
          # Placeholders leave the shapes unknown until run time.
          a = array_ops.placeholder(dtype)
          x = array_ops.placeholder(dtype)
          y = array_ops.placeholder(dtype)
        op = util.create_operator(a)
        ax = op.apply(x)
        aty = op.apply_adjoint(y)
        op_shape = ops.convert_to_tensor(op.shape)
        if use_static_shape_:
          op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
        else:
          op_shape_val, ax_val, aty_val = sess.run(
              [op_shape, ax, aty], feed_dict={a: a_np,
                                              x: x_np,
                                              y: y_np})
        # apply must equal A.x and apply_adjoint must equal A^T.y.
        self.assertAllEqual(op_shape_val, [3, 2])
        self.assertAllClose(ax_val, np.dot(a_np, x_np))
        self.assertAllClose(aty_val, np.dot(a_np.T, y_np))

  def testCreateOperator(self):
    self._testCreateOperator(True)

  def testCreateOperatorUnknownShape(self):
    self._testCreateOperator(False)

  def _testIdentityOperator(self, use_static_shape_):
    # The identity operator must return its input unchanged in both
    # apply and apply_adjoint, while reporting the shape of *a*.
    for dtype in np.float32, np.float64:
      a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
      x_np = np.array([[2.], [-3.]], dtype=dtype)
      y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
      with self.cached_session() as sess:
        if use_static_shape_:
          a = constant_op.constant(a_np, dtype=dtype)
          x = constant_op.constant(x_np, dtype=dtype)
          y = constant_op.constant(y_np, dtype=dtype)
        else:
          a = array_ops.placeholder(dtype)
          x = array_ops.placeholder(dtype)
          y = array_ops.placeholder(dtype)
        id_op = util.identity_operator(a)
        ax = id_op.apply(x)
        aty = id_op.apply_adjoint(y)
        op_shape = ops.convert_to_tensor(id_op.shape)
        if use_static_shape_:
          op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
        else:
          op_shape_val, ax_val, aty_val = sess.run(
              [op_shape, ax, aty], feed_dict={
                  a: a_np,
                  x: x_np,
                  y: y_np
              })
        self.assertAllEqual(op_shape_val, [3, 2])
        self.assertAllClose(ax_val, x_np)
        self.assertAllClose(aty_val, y_np)

  def testIdentityOperator(self):
    self._testIdentityOperator(True)

  def testIdentityOperatorUnknownShape(self):
    self._testIdentityOperator(False)

  def testL2Norm(self):
    # l2norm / l2norm_squared / l2normalize checked against numpy.
    with self.cached_session():
      x_np = np.array([[2], [-3.], [5.]])
      x_norm_np = np.linalg.norm(x_np)
      x_normalized_np = x_np / x_norm_np
      x = constant_op.constant(x_np)
      l2norm = util.l2norm(x)
      l2norm_squared = util.l2norm_squared(x)
      x_normalized, x_norm = util.l2normalize(x)
      self.assertAllClose(l2norm.eval(), x_norm_np)
      self.assertAllClose(l2norm_squared.eval(), np.square(x_norm_np))
      self.assertAllClose(x_norm.eval(), x_norm_np)
      self.assertAllClose(x_normalized.eval(), x_normalized_np)
if __name__ == '__main__':
  # Run the tests via TensorFlow's test runner.
  test.main()
|
apache-2.0
|
CloudVLab/professional-services
|
tools/kunskap/config.py
|
2
|
1957
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration for the CUD/SUD cost-attribution pipeline: BigQuery
# source/destination identifiers, the query template, and which
# allocation method the query should apply.
config_vars = {
    'billing_project_id': 'billing_project',
    'billing_dataset_id': 'billing_dataset',
    'billing_table_name': 'billing_table',
    'output_dataset_id': 'output_dataset',
    'output_table_name': 'output_table',
    'sql_file_path': 'cud_sud_attribution_query.sql',
    # There are two slightly different allocation methods that affect how the
    # Commitment charge is allocated:
    # Method 1: Only UTILIZED commitment charges are allocated to projects.
    # (P_method_1_CUD_commitment_cost): Utilized CUD commitment charges are
    # proportionally allocated to each project based on its share of total
    # eligible VM usage during the time increment (P_usage_percentage). Any
    # unutilized commitment cost remains unallocated
    # (BA_unutilized_commitment_cost) and is allocated to the shell project.
    # Method 2: ALL commitment charges are allocated to projects (regardless of
    # utilization). (P_method_2_CUD_commitment_cost): All CUD commitment charges
    # are proportionally allocated to each project based on its share of total
    # eligible VM usage during the time increment (P_usage_percentage). All
    # commitment cost is allocated into the projects proportionally based on the
    # CUD credits that they consumed, even if the commitment is not fully
    # utilized.
    # NOTE(review): the methods above are documented as
    # 'P_method_*_CUD_commitment_cost', but this value omits "CUD_" --
    # confirm it matches the field name used in
    # cud_sud_attribution_query.sql.
    'allocation_method': 'P_method_2_commitment_cost'
}
|
apache-2.0
|
admetricks/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/networktransaction.py
|
190
|
2926
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import time
import urllib2
_log = logging.getLogger(__name__)
class NetworkTimeout(Exception):
    """Raised by NetworkTransaction.run() when the accumulated retry
    back-off would exceed the configured timeout."""
    def __str__(self):
        return 'NetworkTimeout'
class NetworkTransaction(object):
    """Retries a callable with exponential back-off whenever it raises
    urllib2.HTTPError, giving up with NetworkTimeout once the total sleep
    time would exceed timeout_seconds.  (Python 2 code.)"""

    def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False):
        self._initial_backoff_seconds = initial_backoff_seconds
        self._grown_factor = grown_factor
        self._timeout_seconds = timeout_seconds
        self._convert_404_to_None = convert_404_to_None

    def run(self, request):
        """Call *request* (a zero-argument callable) until it succeeds.

        Returns whatever request() returns; returns None for a 404
        response when convert_404_to_None is set; raises NetworkTimeout
        when the retry budget is exhausted."""
        self._total_sleep = 0
        self._backoff_seconds = self._initial_backoff_seconds
        while True:
            try:
                return request()
            except urllib2.HTTPError, e:
                if self._convert_404_to_None and e.code == 404:
                    return None
                # Give up *before* sleeping if this back-off would blow
                # the time budget.
                self._check_for_timeout()
                _log.warn("Received HTTP status %s loading \"%s\".  Retrying in %s seconds..." % (e.code, e.filename, self._backoff_seconds))
                self._sleep()

    def _check_for_timeout(self):
        if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
            raise NetworkTimeout()

    def _sleep(self):
        time.sleep(self._backoff_seconds)
        self._total_sleep += self._backoff_seconds
        # Exponential back-off: each retry waits grown_factor times longer.
        self._backoff_seconds *= self._grown_factor
|
bsd-3-clause
|
ulrich3110/mfc
|
deb-binarys/builds/mfc1_1.0-5/mfc1-1.0/debian/tmp/usr/share/mfc/mfc1/unity.py
|
8
|
9734
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# With the MindfulClock you turn your device into a Bell of Mindfulness.
# Concept, Design: Marcurs Möller
# <marcus.moeller@outlook.com>
# <http://apps.microsoft.com/windows/de-de/app/
# mindfulclock/58063160-9cc6-4dee-9d92-17df4ce4318a>
# Programming: Andreas Ulrich
# <ulrich3110@gmail.com>,
# <http://erasand.jimdo.com/kontakt/>
#
# This file is part of the "MindfulClock".
# "MindfulClock" is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# "MindfulClock" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# "MindfulClock". If not, see <http://www.gnu.org/licenses/>.
import wx
# Install under unity: <sudo apt-get install python-appindicator>
import appindicator
import gtk
class AppIndicator():
    '''Unity application indicator for the MindfulClock.

    AppIndicator(frame, icon, path, textdic, menutime)
    frame = wx.Window running the clock logic
    icon = icon name (without file extension)
    path = directory that contains the icon
    textdic = dictionary with the menu label texts
    menutime = True shows the remaining time as a menu entry,
               False shows it beside the indicator icon.
    '''

    def __init__(self, frame, icon, path, textdic, menutime):
        # wx.Frame that owns the clock behaviour.
        self.__frame = frame
        # Menu label texts.
        self.__textdic = textdic
        # True: remaining time in the menu, False: beside the icon.
        self.__menutime = menutime
        # Create the application indicator itself.
        self.ind = appindicator.Indicator('MindfulClock', icon,
                                          appindicator.CATEGORY_APPLICATION_STATUS,
                                          path)
        self.ind.set_status(appindicator.STATUS_ACTIVE)
        self.ind.set_attention_icon(icon)
        # Build the GTK menu and attach it to the indicator.
        self.menu_setup()
        self.ind.set_menu(self.menu)

    def main(self):
        '''Run the GTK main loop.'''
        gtk.main()

    def _new_item(self, label, handler=None, sensitive=True):
        '''Create, wire up, show and append a single gtk.MenuItem.'''
        item = gtk.MenuItem(label)
        if handler is not None:
            item.connect('activate', handler)
        item.show()
        item.set_sensitive(sensitive)
        self.menu.append(item)
        return item

    def menu_setup(self):
        '''Build the indicator menu.'''
        self.menu = gtk.Menu()
        if self.__menutime:
            # Passive entry showing the remaining time.
            self.remain = self._new_item('--:--')
        # Start/stop the clock.
        self.clock = self._new_item(self.__textdic['start'],
                                    handler=self.on_start_stop)
        # Pause/continue, greyed out until the clock runs.
        self.pause = self._new_item(self.__textdic['pause'],
                                    handler=self.on_pause_continue,
                                    sensitive=False)
        # Show/hide the main window.
        self.show = self._new_item(self.__textdic['hide'],
                                   handler=self.on_show_hide)
        # Quit the application.
        self.exit = self._new_item(self.__textdic['exit'],
                                   handler=self.on_exit)
        if not self.__menutime:
            # Remaining time is rendered beside the indicator icon.
            self.ind.set_label('--:--')

    def on_exit(self, event):
        '''Event, exit the gui.'''
        self.quit()
        self.__frame.Close()

    def on_pause_continue(self, event):
        '''Event, toggle between pause and continue.'''
        if self.__frame.get_pause():
            # Clock is paused: resume it.
            self.__frame.clock_start()
        elif self.__frame.get_clock():
            # Clock is running: pause it.
            self.__frame.clock_pause()

    def on_show_hide(self, event):
        '''Event, toggle the main window visibility.'''
        if self.__frame.IsShown():
            self.__frame.Hide()
            self.set_menu_show()
        else:
            self.__frame.Show()
            self.set_menu_hide()

    def on_start_stop(self, event):
        '''Event, start or stop the clock.'''
        if self.__frame.get_clock():
            # Clock is running: stop it.
            self.__frame.clock_stop()
        else:
            # Clock is idle: start it.
            self.__frame.clock_start()

    def quit(self):
        '''Leave the gtk main loop.'''
        gtk.main_quit()

    def set_menu_continue(self):
        '''Change the menu entry, clock continue.'''
        self.pause.child.set_text(self.__textdic['cont'])
        self.pause.set_sensitive(True)

    def set_menu_hide(self):
        '''Change the menu entry, hide the gui.'''
        self.show.child.set_text(self.__textdic['hide'])

    def set_menu_pause(self):
        '''Change the menu entry, clock pause.'''
        self.pause.child.set_text(self.__textdic['pause'])
        self.pause.set_sensitive(True)

    def set_menu_pause_clear(self):
        '''Change the menu entry, no pause text.'''
        self.pause.child.set_text(self.__textdic['pause'])
        self.pause.set_sensitive(False)

    def set_menu_show(self):
        '''Change the menu entry, show the gui.'''
        self.show.child.set_text(self.__textdic['show'])

    def set_menu_start(self):
        '''Change the menu entry, start the clock.'''
        self.clock.child.set_text(self.__textdic['start'])

    def set_menu_stop(self):
        '''Change the menu entry, stop the clock.'''
        self.clock.child.set_text(self.__textdic['stop'])

    def set_remain_time(self, text):
        '''Display the remaining time *text*.'''
        if not self.__menutime:
            # Beside the indicator icon.
            self.ind.set_label(text)
        else:
            # In the passive menu entry.
            self.remain.child.set_text(text)
class wxTestFrame(wx.Frame):
    '''wxTestFrame()
    Small wx.Frame used to exercise the Unity indicator during
    development.
    '''

    def __init__(self):
        wx.Frame.__init__(self, parent=None)
        # Test buttons.
        launch_btn = wx.Button(parent=self, label='Launcher')
        indic_btn = wx.Button(parent=self, label='Indicator')
        exit_btn = wx.Button(parent=self, label='Exit')
        launch_btn.Bind(event=wx.EVT_BUTTON, handler=self.on_launch)
        indic_btn.Bind(event=wx.EVT_BUTTON, handler=self.on_indic)
        exit_btn.Bind(event=wx.EVT_BUTTON, handler=self.on_exit)
        # Clock state: running flag and pause marker (-99 = not paused).
        self.__clockstatus = False
        self.__pausetime = -99
        # True once the indicator has been created.
        self.__indstatus = False
        # Layout.
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(item=launch_btn, flag=wx.EXPAND)
        sizer.Add(item=indic_btn, flag=wx.EXPAND)
        sizer.Add(item=exit_btn, flag=wx.EXPAND)
        self.SetSizer(sizer)
        self.Centre()
        self.Show()

    def clock_pause(self):
        '''Pause the clock.'''
        print('Clock pause ..')
        self.__clockstatus = True
        self.__pausetime = 1
        if self.__indstatus:
            # Reflect the paused state in the indicator menu.
            self.__ind.set_menu_stop()
            self.__ind.set_menu_continue()

    def clock_start(self):
        '''Start the clock.'''
        print('Start clock ..')
        self.__clockstatus = True
        self.__pausetime = -99
        if self.__indstatus:
            # Reflect the running state in the indicator menu.
            self.__ind.set_menu_stop()
            self.__ind.set_menu_pause()

    def clock_stop(self):
        '''Stop the clock.'''
        print('Clock stop ..')
        self.__clockstatus = False
        self.__pausetime = -99
        if self.__indstatus:
            # Reflect the stopped state in the indicator menu.
            self.__ind.set_menu_start()
            self.__ind.set_menu_pause_clear()

    def get_clock(self):
        '''Get the status of the clock.'''
        return self.__clockstatus

    def get_pause(self):
        '''Is the clock paused, True or False.'''
        # -99 is the sentinel for "not paused".
        return self.__pausetime != -99

    def on_exit(self, event):
        '''Event, exit button.'''
        if self.__indstatus:
            self.__ind.quit()
        self.Close()

    def on_indic(self, event):
        '''Test Unity application indicator.'''
        # Remember that the indicator now exists.
        self.__indstatus = True
        labels = {'start': 'Start',
                  'stop': 'Stop',
                  'show': 'Show',
                  'hide': 'Hide',
                  'exit': 'Exit',
                  'pause': 'Pause',
                  'cont': 'Continue'}
        self.__ind = AppIndicator(frame=self,
                                  icon='alarm-clock-indicator',
                                  path='./icons',
                                  textdic=labels,
                                  menutime=True)
        self.__ind.main()

    def on_launch(self, event):
        '''Test Unity application launcher.'''

    def on_minimize(self, event):
        '''Event, minimize button.'''
        if self.IsShown():
            self.Hide()
            self.__ind.set_menu_show()

    def on_system_close(self, event):
        '''Close the clock.'''
        self.Destroy()
if __name__ == '__main__':
    # Manual test entry point: build the frame and run the wx event loop.
    test_app = wx.App()
    test_frame = wxTestFrame()
    test_app.MainLoop()
|
gpl-3.0
|
rtiwariops/rain
|
nodejs/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py
|
1569
|
23354
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
  """Dispatch *args* to MacTool and exit with its status code, if any."""
  tool = MacTool()
  status = tool.Dispatch(args)
  if status is not None:
    sys.exit(status)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
  # Forward the command line (minus the program name) and propagate the
  # exit status.  raise SystemExit(...) is equivalent to sys.exit(...).
  raise SystemExit(main(sys.argv[1:]))
|
apache-2.0
|
tanglei528/glance
|
glance/tests/functional/test_api.py
|
2
|
11437
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Version-independent api tests"""
import httplib2
from glance.openstack.common import jsonutils
from glance.tests import functional
class TestRootApi(functional.FunctionalTest):
def test_version_configurations(self):
"""Test that versioning is handled properly through all channels"""
#v1 and v2 api enabled
self.cleanup()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.2',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 300)
self.assertEqual(content, versions_json)
self.stop_servers()
#v2 api enabled
self.cleanup()
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.2',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 300)
self.assertEqual(content, versions_json)
self.stop_servers()
#v1 api enabled
self.cleanup()
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v1.1',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 300)
self.assertEqual(content, versions_json)
self.stop_servers()
def test_version_variations(self):
    """Test that versioning is handled properly through all channels"""
    self.cleanup()
    self.start_servers(**self.__dict__.copy())

    url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
    versions = {'versions': [
        {
            'id': 'v2.2',
            'status': 'CURRENT',
            'links': [{'rel': 'self', 'href': url % '2'}],
        },
        {
            'id': 'v2.1',
            'status': 'SUPPORTED',
            'links': [{'rel': 'self', 'href': url % '2'}],
        },
        {
            'id': 'v2.0',
            'status': 'SUPPORTED',
            'links': [{'rel': 'self', 'href': url % '2'}],
        },
        {
            'id': 'v1.1',
            'status': 'CURRENT',
            'links': [{'rel': 'self', 'href': url % '1'}],
        },
        {
            'id': 'v1.0',
            'status': 'SUPPORTED',
            'links': [{'rel': 'self', 'href': url % '1'}],
        },
    ]}
    versions_json = jsonutils.dumps(versions)
    images_json = jsonutils.dumps({'images': []})

    def request(suffix, headers=None):
        # Issue a GET against the API server; return (status, body).
        uri = 'http://127.0.0.1:%d%s' % (self.api_port, suffix)
        response, content = httplib2.Http().request(uri, 'GET',
                                                    headers=headers)
        return response.status, content

    # 0. GET / with no Accept: header
    # Verify version choices returned.
    # Bug lp:803260 no Accept header causes a 500 in glance-api
    status, body = request('')
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 1. GET /images with no Accept: header -> version choices.
    status, body = request('/images')
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 2. GET /v1/images with no Accept: header -> empty images list.
    status, body = request('/v1/images')
    self.assertEqual(status, 200)
    self.assertEqual(body, images_json)
    # 3. GET / with Accept: unknown header -> version choices; the API
    # log gets a message about the unknown accept header.
    status, body = request('/', headers={'Accept': 'unknown'})
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 4. GET /images with Accept: application/vnd.openstack.images-v1
    # -> empty image list.
    status, body = request(
        '/images', headers={'Accept': 'application/vnd.openstack.images-v1'})
    self.assertEqual(status, 200)
    self.assertEqual(body, images_json)
    # 5. GET /images with Accept: application/vnd.openstack.compute-v1
    # -> version choices; unknown accept header logged.
    status, body = request(
        '/images', headers={'Accept': 'application/vnd.openstack.compute-v1'})
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 6. GET /v1.a/images with no Accept: header -> version choices.
    # NOTE(review): the original comment said /v1.0/images but the request
    # actually hits /v1.a/images, duplicating case 7; behaviour preserved.
    status, body = request('/v1.a/images')
    self.assertEqual(status, 300)
    # 7. GET /v1.a/images with no Accept: header -> version choices.
    status, body = request('/v1.a/images')
    self.assertEqual(status, 300)
    # 8. GET /va.1/images with no Accept: header -> version choices.
    status, body = request('/va.1/images')
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 9. GET /versions with no Accept: header -> version choices.
    status, body = request('/versions')
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 10. GET /versions with Accept: application/vnd.openstack.images-v1
    # -> version choices.
    status, body = request(
        '/versions', headers={'Accept': 'application/vnd.openstack.images-v1'})
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 11. GET /v1/versions with no Accept: header -> 404.
    status, body = request('/v1/versions')
    self.assertEqual(status, 404)
    # 12. GET /v10 (unknown major version) -> version choices.
    status, body = request('/v10')
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 13. GET /images with Accept: application/vnd.openstack.images-v10
    # -> version choices; unknown version in accept header logged.
    status, body = request(
        '/images', headers={'Accept': 'application/vnd.openstack.images-v10'})
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)
    # 14. GET /v1.2/images with no Accept: header -> version choices.
    status, body = request('/v1.2/images')
    self.assertEqual(status, 300)
    self.assertEqual(body, versions_json)

    self.stop_servers()
|
apache-2.0
|
rebost/django
|
tests/regressiontests/localflavor/cz/tests.py
|
14
|
2844
|
from __future__ import unicode_literals
from django.contrib.localflavor.cz.forms import (CZPostalCodeField,
CZRegionSelect, CZBirthNumberField, CZICNumberField)
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase
class CZLocalFlavorTests(SimpleTestCase):
    """Tests for the Czech (cz) localflavor form fields and widgets."""

    def test_CZRegionSelect(self):
        """The region select widget renders all 14 Czech regions."""
        f = CZRegionSelect()
        out = '''<select name="regions">
<option value="PR">Prague</option>
<option value="CE">Central Bohemian Region</option>
<option value="SO">South Bohemian Region</option>
<option value="PI">Pilsen Region</option>
<option value="CA">Carlsbad Region</option>
<option value="US">Usti Region</option>
<option value="LB">Liberec Region</option>
<option value="HK">Hradec Region</option>
<option value="PA">Pardubice Region</option>
<option value="VY">Vysocina Region</option>
<option value="SM">South Moravian Region</option>
<option value="OL">Olomouc Region</option>
<option value="ZL">Zlin Region</option>
<option value="MS">Moravian-Silesian Region</option>
</select>'''
        self.assertHTMLEqual(f.render('regions', 'TT'), out)

    def test_CZPostalCodeField(self):
        """Postal codes accept XXXXX or XXX XX; any space is stripped."""
        error_format = ['Enter a postal code in the format XXXXX or XXX XX.']
        valid = {
            '91909': '91909',
            '917 01': '91701',
            '12345': '12345',
        }
        invalid = {
            '84545x': error_format,
            '123456': error_format,
            '1234': error_format,
            '123 4': error_format,
        }
        self.assertFieldOutput(CZPostalCodeField, valid, invalid)

    def test_CZBirthNumberField(self):
        """Birth numbers: format is checked first, then the checksum."""
        error_format = ['Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
        error_invalid = ['Enter a valid birth number.']
        valid = {
            '880523/1237': '880523/1237',
            '8805231237': '8805231237',
            '880523/000': '880523/000',
            '880523000': '880523000',
            '882101/0011': '882101/0011',
        }
        invalid = {
            '123456/12': error_format,
            '123456/12345': error_format,
            '12345612': error_format,
            '12345612345': error_format,
            '880523/1239': error_invalid,
            '8805231239': error_invalid,
            '990101/0011': error_invalid,
        }
        self.assertFieldOutput(CZBirthNumberField, valid, invalid)

    def test_CZICNumberField(self):
        """IC numbers are 8 digits with a mod-11 check digit."""
        error_invalid = ['Enter a valid IC number.']
        # PEP 8: fixed missing space in `valid ={`.
        valid = {
            '12345679': '12345679',
            '12345601': '12345601',
            '12345661': '12345661',
            '12345610': '12345610',
        }
        invalid = {
            '1234567': error_invalid,
            '12345660': error_invalid,
            '12345600': error_invalid,
        }
        self.assertFieldOutput(CZICNumberField, valid, invalid)
|
bsd-3-clause
|
gevero/deap
|
examples/ga/onemax_multidemic.py
|
12
|
3208
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
import numpy
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
# Maximising single-objective fitness; individuals are signed-char bit arrays.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Attribute generator
toolbox.register("attr_bool", random.randint, 0, 1)
# Structure initializers
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_bool, 100)  # 100-bit individuals
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def evalOneMax(individual):
    """OneMax fitness: the number of ones in the bit vector, as a 1-tuple."""
    return (sum(bit for bit in individual),)
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)                    # two-point crossover
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)      # 5% per-bit flip
toolbox.register("select", tools.selTournament, tournsize=3)  # size-3 tournament
# Ring migration between demes: the 5 best emigrate, randomly sampled
# individuals in the destination deme are replaced.
toolbox.register("migrate", tools.migRing, k=5, selection=tools.selBest,
                 replacement=random.sample)
def main():
    """Run a multi-demic GA on OneMax; return (demes, logbook, hof)."""
    random.seed(64)

    DEME_COUNT = 3
    DEME_SIZE = 300
    MAX_GEN = 40
    CX_PROB = 0.5
    MUT_PROB = 0.2
    MIGRATION_PERIOD = 5

    demes = [toolbox.population(n=DEME_SIZE) for _ in range(DEME_COUNT)]
    hof = tools.HallOfFame(1)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, func in (("avg", numpy.mean), ("std", numpy.std),
                        ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, func)

    logbook = tools.Logbook()
    logbook.header = "gen", "deme", "evals", "std", "min", "avg", "max"

    # Generation 0: evaluate every individual of every deme.
    for deme_index, deme in enumerate(demes):
        for individual in deme:
            individual.fitness.values = toolbox.evaluate(individual)
        logbook.record(gen=0, deme=deme_index, evals=len(deme),
                       **stats.compile(deme))
        hof.update(deme)
        print(logbook.stream)

    generation = 1
    # Evolve until the generation budget is spent or a perfect
    # individual (all 100 bits set) appears.
    while generation <= MAX_GEN and logbook[-1]["max"] < 100.0:
        for deme_index, deme in enumerate(demes):
            deme[:] = toolbox.select(deme, len(deme))
            deme[:] = algorithms.varAnd(deme, toolbox, cxpb=CX_PROB,
                                        mutpb=MUT_PROB)

            # Re-evaluate only individuals touched by crossover/mutation.
            stale = [ind for ind in deme if not ind.fitness.valid]
            for individual in stale:
                individual.fitness.values = toolbox.evaluate(individual)

            logbook.record(gen=generation, deme=deme_index, evals=len(deme),
                           **stats.compile(deme))
            hof.update(deme)
            print(logbook.stream)

        if generation % MIGRATION_PERIOD == 0:
            toolbox.migrate(demes)
        generation += 1

    return demes, logbook, hof


if __name__ == "__main__":
    main()
|
lgpl-3.0
|
kanagasabapathi/python-for-android
|
python3-alpha/python3-src/Lib/test/test_strlit.py
|
48
|
5040
|
r"""Test correct treatment of various string literals by the parser.
There are four types of string literals:
'abc' -- normal str
r'abc' -- raw str
b'xyz' -- normal bytes
br'xyz' -- raw bytes
The difference between normal and raw strings is of course that in a
raw string, \ escapes (while still used to determine the end of the
literal) are not interpreted, so that r'\x00' contains four
characters: a backslash, an x, and two zeros; while '\x00' contains a
single character (code point zero).
The tricky thing is what should happen when non-ASCII bytes are used
inside literals. For bytes literals, this is considered illegal. But
for str literals, those bytes are supposed to be decoded using the
encoding declared for the file (UTF-8 by default).
We have to test this with various file encodings. We also test it with
exec()/eval(), which uses a different code path.
This file is really about correct treatment of encodings and
backslashes. It doesn't concern itself with issues like single
vs. double quotes or singly- vs. triply-quoted strings: that's dealt
with elsewhere (I assume).
"""
import os
import sys
import shutil
import tempfile
import unittest
# Source template for the generated test modules; '%s' is substituted with
# the coding name under test, and the embedded asserts run at import time.
TEMPLATE = r"""# coding: %s
a = 'x'
assert ord(a) == 120
b = '\x01'
assert ord(b) == 1
c = r'\x01'
assert list(map(ord, c)) == [92, 120, 48, 49]
d = '\x81'
assert ord(d) == 0x81
e = r'\x81'
assert list(map(ord, e)) == [92, 120, 56, 49]
f = '\u1881'
assert ord(f) == 0x1881
g = r'\u1881'
assert list(map(ord, g)) == [92, 117, 49, 56, 56, 49]
"""
def byte(i):
    """Return the single byte whose ordinal is *i* (0 <= i < 256)."""
    return bytes((i,))
class TestLiterals(unittest.TestCase):
def setUp(self):
self.save_path = sys.path[:]
self.tmpdir = tempfile.mkdtemp()
sys.path.insert(0, self.tmpdir)
def tearDown(self):
sys.path = self.save_path
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_template(self):
# Check that the template doesn't contain any non-printables
# except for \n.
for c in TEMPLATE:
assert c == '\n' or ' ' <= c <= '~', repr(c)
def test_eval_str_normal(self):
self.assertEqual(eval(""" 'x' """), 'x')
self.assertEqual(eval(r""" '\x01' """), chr(1))
self.assertEqual(eval(""" '\x01' """), chr(1))
self.assertEqual(eval(r""" '\x81' """), chr(0x81))
self.assertEqual(eval(""" '\x81' """), chr(0x81))
self.assertEqual(eval(r""" '\u1881' """), chr(0x1881))
self.assertEqual(eval(""" '\u1881' """), chr(0x1881))
def test_eval_str_raw(self):
self.assertEqual(eval(""" r'x' """), 'x')
self.assertEqual(eval(r""" r'\x01' """), '\\' + 'x01')
self.assertEqual(eval(""" r'\x01' """), chr(1))
self.assertEqual(eval(r""" r'\x81' """), '\\' + 'x81')
self.assertEqual(eval(""" r'\x81' """), chr(0x81))
self.assertEqual(eval(r""" r'\u1881' """), '\\' + 'u1881')
self.assertEqual(eval(""" r'\u1881' """), chr(0x1881))
def test_eval_bytes_normal(self):
self.assertEqual(eval(""" b'x' """), b'x')
self.assertEqual(eval(r""" b'\x01' """), byte(1))
self.assertEqual(eval(""" b'\x01' """), byte(1))
self.assertEqual(eval(r""" b'\x81' """), byte(0x81))
self.assertRaises(SyntaxError, eval, """ b'\x81' """)
self.assertEqual(eval(r""" b'\u1881' """), b'\\' + b'u1881')
self.assertRaises(SyntaxError, eval, """ b'\u1881' """)
def test_eval_bytes_raw(self):
self.assertEqual(eval(""" br'x' """), b'x')
self.assertEqual(eval(r""" br'\x01' """), b'\\' + b'x01')
self.assertEqual(eval(""" br'\x01' """), byte(1))
self.assertEqual(eval(r""" br'\x81' """), b"\\" + b"x81")
self.assertRaises(SyntaxError, eval, """ br'\x81' """)
self.assertEqual(eval(r""" br'\u1881' """), b"\\" + b"u1881")
self.assertRaises(SyntaxError, eval, """ br'\u1881' """)
def check_encoding(self, encoding, extra=""):
modname = "xx_" + encoding.replace("-", "_")
fn = os.path.join(self.tmpdir, modname + ".py")
f = open(fn, "w", encoding=encoding)
try:
f.write(TEMPLATE % encoding)
f.write(extra)
finally:
f.close()
__import__(modname)
del sys.modules[modname]
def test_file_utf_8(self):
extra = "z = '\u1234'; assert ord(z) == 0x1234\n"
self.check_encoding("utf-8", extra)
def test_file_utf_8_error(self):
extra = "b'\x80'\n"
self.assertRaises(SyntaxError, self.check_encoding, "utf-8", extra)
def test_file_utf8(self):
self.check_encoding("utf8")
def test_file_iso_8859_1(self):
self.check_encoding("iso-8859-1")
def test_file_latin_1(self):
self.check_encoding("latin-1")
def test_file_latin9(self):
self.check_encoding("latin9")
if __name__ == "__main__":
    # Hack so that error messages containing non-ASCII can be printed
    # NOTE(review): _encoding is a private attribute of the era's io
    # objects; presumably ignored on modern interpreters — confirm.
    sys.stdout._encoding = sys.stderr._encoding = "utf-8"
    unittest.main()
|
apache-2.0
|
jasonzio/azure-linux-extensions
|
OSPatching/test/handler.py
|
8
|
17930
|
#!/usr/bin/python
#
# OSPatching extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
import time
import chardet
import tempfile
import urllib2
import urlparse
import shutil
import traceback
import logging
from azure.storage import BlobService
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as Util
from patch import *
# Global variables definition
ExtensionShortName = 'OSPatching'
DownloadDirectory = 'download'
idleTestScriptName = "idleTest.py"
healthyTestScriptName = "healthyTest.py"
idleTestScriptLocal = """
#!/usr/bin/python
# Locally.
def is_vm_idle():
return True
"""
healthyTestScriptLocal = """
#!/usr/bin/python
# Locally.
def is_vm_healthy():
return True
"""
idleTestScriptGithub = "https://raw.githubusercontent.com/bingosummer/scripts/master/idleTest.py"
healthyTestScriptGithub = "https://raw.githubusercontent.com/bingosummer/scripts/master/healthyTest.py"
idleTestScriptStorage = "https://binxia.blob.core.windows.net/ospatching-v2/idleTest.py"
healthyTestScriptStorage = "https://binxia.blob.core.windows.net/ospatching-v2/healthyTest.py"
public_settings = {
"disabled" : "false",
"stop" : "false",
"rebootAfterPatch" : "RebootIfNeed",
"category" : "ImportantAndRecommended",
"installDuration" : "00:30",
"oneoff" : "false",
"intervalOfWeeks" : "1",
"dayOfWeek" : "everyday",
"startTime" : "03:00",
"vmStatusTest" : {
"local" : "true",
"idleTestScript" : idleTestScriptLocal, #idleTestScriptStorage,
"healthyTestScript" : healthyTestScriptLocal, #healthyTestScriptStorage
}
}
protected_settings = {
"storageAccountName" : "<TOCHANGE>",
"storageAccountKey" : "<TOCHANGE>"
}
def install():
    """Extension 'install' entry point: delegate to the patcher, report status."""
    hutil.do_parse_context('Install')
    try:
        MyPatching.install()
        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')
    except Exception as e:
        hutil.log_and_syslog(logging.ERROR, "Failed to install the extension with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')

def enable():
    """Extension 'enable' entry point: parse settings, stage the
    VM-status-test scripts, and start/schedule patching."""
    hutil.do_parse_context('Enable')
    try:
        # protected_settings = hutil.get_protected_settings()
        # public_settings = hutil.get_public_settings()
        settings = protected_settings.copy()
        settings.update(public_settings)
        MyPatching.parse_settings(settings)
        # Ensure the same configuration is executed only once
        hutil.exit_if_seq_smaller()
        oneoff = settings.get("oneoff")
        download_customized_vmstatustest()
        copy_vmstatustestscript(hutil.get_seq_no(), oneoff)
        MyPatching.enable()
        current_config = MyPatching.get_current_config()
        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable Succeeded. Current Configuration: ' + current_config)
    except Exception as e:
        current_config = MyPatching.get_current_config()
        hutil.log_and_syslog(logging.ERROR, "Failed to enable the extension with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        # NOTE(review): 'Configuation' typo in the status message below is
        # a runtime string; left as-is here.
        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. Current Configuation: ' + current_config)

def uninstall():
    """Extension 'uninstall' entry point: nothing to clean up."""
    hutil.do_parse_context('Uninstall')
    hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')

def disable():
    """Extension 'disable' entry point: stop scheduled patching."""
    hutil.do_parse_context('Disable')
    try:
        # Ensure the same configuration is executed only once
        hutil.exit_if_seq_smaller()
        MyPatching.disable()
        hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')
    except Exception as e:
        hutil.log_and_syslog(logging.ERROR, "Failed to disable the extension with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')
def update():
    """Extension 'update' entry point: nothing to migrate, report success."""
    # Fix: the operation name passed to do_parse_context was misspelled
    # 'Upadate', which mislabelled this operation in the handler log.
    hutil.do_parse_context('Update')
    hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')
def download():
    """'download' operation: fetch available updates without installing."""
    hutil.do_parse_context('Download')
    try:
        # protected_settings = hutil.get_protected_settings()
        # public_settings = hutil.get_public_settings()
        settings = protected_settings.copy()
        settings.update(public_settings)
        MyPatching.parse_settings(settings)
        MyPatching.download()
        current_config = MyPatching.get_current_config()
        # NOTE(review): status is reported under the 'Enable' operation name
        # (here and in patch()/oneoff()) — presumably so the agent surfaces
        # it on the enabled extension; confirm before changing. The
        # 'Configuation' typo is a runtime string, left as-is.
        hutil.do_exit(0,'Enable','success','0', 'Download Succeeded. Current Configuation: ' + current_config)
    except Exception as e:
        current_config = MyPatching.get_current_config()
        hutil.log_and_syslog(logging.ERROR, "Failed to download updates with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable','error','0', 'Download Failed. Current Configuation: ' + current_config)

def patch():
    """'patch' operation: install updates per the configured policy."""
    hutil.do_parse_context('Patch')
    try:
        # protected_settings = hutil.get_protected_settings()
        # public_settings = hutil.get_public_settings()
        settings = protected_settings.copy()
        settings.update(public_settings)
        MyPatching.parse_settings(settings)
        MyPatching.patch()
        current_config = MyPatching.get_current_config()
        hutil.do_exit(0,'Enable','success','0', 'Patch Succeeded. Current Configuation: ' + current_config)
    except Exception as e:
        current_config = MyPatching.get_current_config()
        hutil.log_and_syslog(logging.ERROR, "Failed to patch with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable','error','0', 'Patch Failed. Current Configuation: ' + current_config)

def oneoff():
    """'oneoff' operation: run a single immediate patch cycle."""
    hutil.do_parse_context('Oneoff')
    try:
        # protected_settings = hutil.get_protected_settings()
        # public_settings = hutil.get_public_settings()
        settings = protected_settings.copy()
        settings.update(public_settings)
        MyPatching.parse_settings(settings)
        MyPatching.patch_one_off()
        current_config = MyPatching.get_current_config()
        hutil.do_exit(0,'Enable','success','0', 'Oneoff Patch Succeeded. Current Configuation: ' + current_config)
    except Exception as e:
        current_config = MyPatching.get_current_config()
        hutil.log_and_syslog(logging.ERROR, "Failed to one-off patch with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable','error','0', 'Oneoff Patch Failed. Current Configuation: ' + current_config)
def download_files(hutil):
    """Stage the idle/healthy VM-status-test scripts for this sequence number.

    Depending on the "vmStatusTest" settings, the scripts are either saved
    from the inline configuration (local == "true"), downloaded from Azure
    blob storage (when both account name and key are configured), or
    fetched from external URLs.  Invalid combinations log a warning and
    return early; a storage account without a key (or vice versa) raises.
    """
    # protected_settings = hutil.get_protected_settings()
    # public_settings = hutil.get_public_settings()
    settings = protected_settings.copy()
    settings.update(public_settings)
    # Coerce the "local" flag from its string form; anything unrecognised
    # falls back to False with a warning.
    local = settings.get("vmStatusTest", dict()).get("local", "")
    if local.lower() == "true":
        local = True
    elif local.lower() == "false":
        local = False
    else:
        hutil.log_and_syslog(logging.WARNING, "The parameter \"local\" "
                            "is empty or invalid. Set it as False. Continue...")
        local = False
    idle_test_script = settings.get("vmStatusTest", dict()).get('idleTestScript')
    healthy_test_script = settings.get("vmStatusTest", dict()).get('healthyTestScript')
    # Sanity checks: inline scripts must not be URIs, remote ones must be.
    if (not idle_test_script and not healthy_test_script):
        hutil.log_and_syslog(logging.WARNING, "The parameter \"idleTestScript\" and \"healthyTestScript\" "
                            "are both empty. Exit downloading VMStatusTest scripts...")
        return
    elif local:
        if (idle_test_script and idle_test_script.startswith("http")) or \
                (healthy_test_script and healthy_test_script.startswith("http")):
            hutil.log_and_syslog(logging.WARNING, "The parameter \"idleTestScript\" or \"healthyTestScript\" "
                                "should not be uri. Exit downloading VMStatusTest scripts...")
            return
    elif not local:
        if (idle_test_script and not idle_test_script.startswith("http")) or \
                (healthy_test_script and not healthy_test_script.startswith("http")):
            hutil.log_and_syslog(logging.WARNING, "The parameter \"idleTestScript\" or \"healthyTestScript\" "
                                "should be uri. Exit downloading VMStatusTest scripts...")
            return
    hutil.do_status_report('Downloading','transitioning', '0',
                           'Downloading VMStatusTest scripts...')
    # Map source (inline text or URI) -> destination file name.
    vmStatusTestScripts = dict()
    vmStatusTestScripts[idle_test_script] = idleTestScriptName
    vmStatusTestScripts[healthy_test_script] = healthyTestScriptName
    if local:
        hutil.log_and_syslog(logging.INFO, "Saving VMStatusTest scripts from user's configurations...")
        for src,dst in vmStatusTestScripts.items():
            if not src:
                continue
            file_path = save_local_file(src, dst, hutil)
            preprocess_files(file_path, hutil)
        return
    storage_account_name = None
    storage_account_key = None
    if settings:
        storage_account_name = settings.get("storageAccountName", "").strip()
        storage_account_key = settings.get("storageAccountKey", "").strip()
    if storage_account_name and storage_account_key:
        hutil.log_and_syslog(logging.INFO, "Downloading VMStatusTest scripts from azure storage...")
        for src,dst in vmStatusTestScripts.items():
            if not src:
                continue
            file_path = download_blob(storage_account_name,
                                      storage_account_key,
                                      src,
                                      dst,
                                      hutil)
            preprocess_files(file_path, hutil)
    elif not(storage_account_name or storage_account_key):
        hutil.log_and_syslog(logging.INFO, "No azure storage account and key specified in protected "
                            "settings. Downloading VMStatusTest scripts from external links...")
        for src,dst in vmStatusTestScripts.items():
            if not src:
                continue
            file_path = download_external_file(src, dst, hutil)
            preprocess_files(file_path, hutil)
    else:
        #Storage account and key should appear in pairs
        error_msg = "Azure storage account or storage key is not provided"
        hutil.log_and_syslog(logging.ERROR, error_msg)
        raise ValueError(error_msg)
def download_blob(storage_account_name, storage_account_key,
                  blob_uri, dst, hutil):
    """Download *blob_uri* into this sequence's download dir as *dst*.

    Returns the local file path; logs and re-raises any storage error.
    """
    seqNo = hutil.get_seq_no()
    container_name = get_container_name_from_uri(blob_uri)
    blob_name = get_blob_name_from_uri(blob_uri)
    download_dir = prepare_download_dir(seqNo)
    download_path = os.path.join(download_dir, dst)
    #Guest agent already ensure the plugin is enabled one after another.
    #The blob download will not conflict.
    blob_service = BlobService(storage_account_name, storage_account_key)
    try:
        blob_service.get_blob_to_path(container_name, blob_name, download_path)
    except Exception as e:
        hutil.log_and_syslog(logging.ERROR, ("Failed to download blob with uri:{0} "
                            "with error {1}").format(blob_uri,e))
        raise
    return download_path

def download_external_file(uri, dst, hutil):
    """Download *uri* over HTTP into the download dir as *dst*; return the path."""
    seqNo = hutil.get_seq_no()
    download_dir = prepare_download_dir(seqNo)
    file_path = os.path.join(download_dir, dst)
    try:
        download_and_save_file(uri, file_path)
    except Exception as e:
        hutil.log_and_syslog(logging.ERROR, ("Failed to download external file with uri:{0} "
                            "with error {1}").format(uri, e))
        raise
    return file_path

def save_local_file(src, dst, hutil):
    """Write inline script text *src* into the download dir as *dst*; return the path."""
    seqNo = hutil.get_seq_no()
    download_dir = prepare_download_dir(seqNo)
    file_path = os.path.join(download_dir, dst)
    try:
        waagent.SetFileContents(file_path, src)
    except Exception as e:
        hutil.log_and_syslog(logging.ERROR, ("Failed to save file from user's configuration "
                            "with error {0}").format(e))
        raise
    return file_path
def preprocess_files(file_path, hutil):
    """
    Preprocess the text file. If it is a binary file, skip it.
    """
    # Normalise line endings, then strip the BOM for BOM-capable encodings.
    is_text, code_type = is_text_file(file_path)
    if is_text:
        dos2unix(file_path)
        hutil.log_and_syslog(logging.INFO, "Converting text files from DOS to Unix formats: Done")
        if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:
            remove_bom(file_path)
            hutil.log_and_syslog(logging.INFO, "Removing BOM: Done")

def is_text_file(file_path):
    """Sniff the first 512 bytes of *file_path*; see is_text()."""
    with open(file_path, 'rb') as f:
        contents = f.read(512)
    return is_text(contents)

def is_text(contents):
    """Return (is_text, encoding) based on chardet's detection of *contents*.

    Only ascii/UTF-8/UTF-16LE/UTF-16BE are treated as text.
    """
    supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']
    code_type = chardet.detect(contents)['encoding']
    if code_type in supported_encoding:
        return True, code_type
    else:
        return False, code_type
def dos2unix(file_path):
    """Rewrite *file_path* in place with Unix line endings.

    'rU' universal-newline mode does the CRLF->LF translation on read.
    NOTE(review): writes str data to a file opened 'wb' — valid on
    Python 2 (this extension's target); would fail on Python 3.
    """
    temp_file_path = tempfile.mkstemp()[1]
    f_temp = open(temp_file_path, 'wb')
    with open(file_path, 'rU') as f:
        contents = f.read()
        f_temp.write(contents)
    f_temp.close()
    # Replace the original atomically-ish via rename.
    shutil.move(temp_file_path, file_path)

def remove_bom(file_path):
    """Strip a UTF-8/UTF-16 byte-order mark, re-encoding the file as UTF-8."""
    temp_file_path = tempfile.mkstemp()[1]
    f_temp = open(temp_file_path, 'wb')
    with open(file_path, 'rb') as f:
        contents = f.read()
        # Try BOM-aware codecs in order; the first that decodes wins.
        for encoding in ["utf-8-sig", "utf-16"]:
            try:
                f_temp.write(contents.decode(encoding).encode('utf-8'))
                break
            except UnicodeDecodeError:
                continue
    f_temp.close()
    shutil.move(temp_file_path, file_path)
def download_and_save_file(uri, file_path):
    """Stream *uri* (via urllib2) to *file_path* in 1 KiB chunks.

    NOTE(review): neither the response nor the destination file is
    explicitly closed on error — left as-is (Python 2 era code).
    """
    src = urllib2.urlopen(uri)
    dest = open(file_path, 'wb')
    buf_size = 1024
    buf = src.read(buf_size)
    while(buf):
        dest.write(buf)
        buf = src.read(buf_size)
def prepare_download_dir(seqNo):
    """Ensure <cwd>/download/<seqNo> exists and return its path."""
    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)
    create_directory_if_not_exists(download_dir_main)
    download_dir = os.path.join(download_dir_main, seqNo)
    create_directory_if_not_exists(download_dir)
    return download_dir
def create_directory_if_not_exists(directory):
    """Create *directory* (including parents) if it does not already exist.

    Uses try/except instead of the original check-then-create so that a
    directory created concurrently between the check and makedirs() no
    longer raises (TOCTOU race in the original).
    """
    try:
        os.makedirs(directory)
    except OSError:
        # Already existing (possibly created by a concurrent process) is
        # fine; anything else is a real failure.
        if not os.path.isdir(directory):
            raise
def get_path_from_uri(uriStr):
    """Return the path component of *uriStr*."""
    uri = urlparse.urlparse(uriStr)
    return uri.path

def get_blob_name_from_uri(uri):
    """Return the blob name encoded in blob *uri* (everything after the container)."""
    return get_properties_from_uri(uri)['blob_name']

def get_container_name_from_uri(uri):
    """Return the container name encoded in blob *uri* (first path segment)."""
    return get_properties_from_uri(uri)['container_name']

def get_properties_from_uri(uri):
    """Split a blob URI path into {'blob_name': ..., 'container_name': ...}.

    The first path segment is the container; the remainder is the blob
    name.  A path with no '/' separator is logged as an error but still
    yields a best-effort result (original behaviour preserved).
    """
    path = get_path_from_uri(uri)
    # Normalise: drop a trailing slash, then the leading slash.
    if path.endswith('/'):
        path = path[:-1]
    # Fix: startswith() is safe on an empty path, unlike path[0], which
    # raised IndexError for URIs with no path at all.
    if path.startswith('/'):
        path = path[1:]
    first_sep = path.find('/')
    if first_sep == -1:
        hutil.log_and_syslog(logging.ERROR, "Failed to extract container, blob, from {}".format(path))
    blob_name = path[first_sep+1:]
    container_name = path[:first_sep]
    return {'blob_name': blob_name, 'container_name': container_name}
def download_customized_vmstatustest():
    """Stage the VM-status-test scripts, retrying up to twice on failure."""
    # NOTE(review): download_dir is unused below; prepare_download_dir is
    # presumably called for its mkdir side effect — confirm.
    download_dir = prepare_download_dir(hutil.get_seq_no())
    maxRetry = 2
    for retry in range(0, maxRetry + 1):
        try:
            download_files(hutil)
            break
        except Exception:
            hutil.log_and_syslog(logging.ERROR, "Failed to download files, retry=" + str(retry) + ", maxRetry=" + str(maxRetry))
            if retry != maxRetry:
                hutil.log_and_syslog(logging.INFO, "Sleep 10 seconds")
                time.sleep(10)
            else:
                # Out of retries: propagate the last failure.
                raise
def copy_vmstatustestscript(seqNo, oneoff):
    """Copy the downloaded VM-status-test scripts into the 'oneoff' or
    'scheduled' working directory for sequence *seqNo*."""
    src_dir = prepare_download_dir(seqNo)
    for filename in (idleTestScriptName, healthyTestScriptName):
        src = os.path.join(src_dir, filename)
        # NOTE(review): this mapping looks inverted — oneoff == "false"
        # selects the "oneoff" destination. Confirm the intended mapping
        # against the patch module before changing.
        if oneoff is not None and oneoff.lower() == "false":
            dst = "oneoff"
        else:
            dst = "scheduled"
        dst = os.path.join(os.getcwd(), dst)
        if os.path.isfile(src):
            shutil.copy(src, dst)

def delete_current_vmstatustestscript():
    """Remove any VM-status-test scripts currently staged under patch/."""
    for filename in (idleTestScriptName, healthyTestScriptName):
        current_vmstatustestscript = os.path.join(os.getcwd(), "patch/"+filename)
        if os.path.isfile(current_vmstatustestscript):
            os.remove(current_vmstatustestscript)
# Main function is the only entrance to this extension handler
def main():
    """Dispatch to the operation named on the command line
    (install/enable/disable/uninstall/update/download/patch/oneoff)."""
    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
    waagent.Log("%s started to handle." %(ExtensionShortName))
    # Handler utility and patcher are globals shared by every operation.
    global hutil
    hutil = Util.HandlerUtility(waagent.Log, waagent.Error,
                                ExtensionShortName)
    global MyPatching
    MyPatching = GetMyPatching(hutil)
    if MyPatching is None:
        sys.exit(1)
    for a in sys.argv[1:]:
        # Accept the operation name with any of '-', '--', '/' prefixes
        # (or none); the first matching prefix wins.
        if re.match("^([-/]*)(disable)", a):
            disable()
        elif re.match("^([-/]*)(uninstall)", a):
            uninstall()
        elif re.match("^([-/]*)(install)", a):
            install()
        elif re.match("^([-/]*)(enable)", a):
            enable()
        elif re.match("^([-/]*)(update)", a):
            update()
        elif re.match("^([-/]*)(download)", a):
            download()
        elif re.match("^([-/]*)(patch)", a):
            patch()
        elif re.match("^([-/]*)(oneoff)", a):
            oneoff()

if __name__ == '__main__':
    main()
|
apache-2.0
|
eaplatanios/tensorflow
|
tensorflow/python/ops/variable_scope.py
|
2
|
98348
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum  # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
# The collection ABCs live in `collections.abc` on Python 3; the aliases in
# the top-level `collections` module were removed in Python 3.10.
try:
  import collections.abc as collections_abc
except ImportError:  # Python 2.
  import collections as collections_abc
import six
from six import iteritems
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Public API of this module, consumed by `from ... import *` and tooling.
__all__ = ["AUTO_REUSE", "VariableScope", "get_variable_scope",
           "get_variable", "get_local_variable", "variable_scope",
           "variable_op_scope", "no_regularizer"]
class _PartitionInfo(object):
  """Holds partition info used by initializer functions."""

  def __init__(self, full_shape, var_offset):
    """Constructor.

    Args:
      full_shape: Tuple or list of `int` indicating the full combined shape
        of the partitioned variables.
      var_offset: Tuple or list of `int` specifying offset of this partition
        with respect to the full variable for each dimension.

    Raises:
      TypeError: If `full_shape` or `var_offset` is not a sequence.
      ValueError: If `full_shape` or `var_offset` differ in length. If
        `var_offset` exceeds `full_shape` in any dimension.
    """
    # `collections_abc` (see the import block) resolves to `collections.abc`
    # on Python 3: the ABC aliases in the top-level `collections` module were
    # removed in Python 3.10, so `collections.Sequence` no longer exists.
    if not isinstance(full_shape, collections_abc.Sequence) or isinstance(
        full_shape, six.string_types):
      raise TypeError(
          "`full_shape` must be a sequence (like tuple or list) instead of " +
          type(full_shape).__name__)
    if not isinstance(var_offset, collections_abc.Sequence) or isinstance(
        var_offset, six.string_types):
      raise TypeError(
          "`var_offset` must be a sequence (like tuple or list) instead of " +
          type(var_offset).__name__)
    if len(var_offset) != len(full_shape):
      raise ValueError(
          "Expected equal length, but `var_offset` is of length {} while "
          "full_shape is of length {}.".format(
              len(var_offset), len(full_shape)))
    # Each per-dimension offset must be a valid index into that dimension.
    for offset, shape in zip(var_offset, full_shape):
      if offset < 0 or offset >= shape:
        raise ValueError(
            "Expected 0 <= offset < shape but found offset={}, shape={} for "
            "var_offset={}, full_shape={}".format(offset, shape, var_offset,
                                                  full_shape))
    self._full_shape = full_shape
    self._var_offset = var_offset

  @property
  def full_shape(self):
    """Full combined shape of the partitioned variable."""
    return self._full_shape

  @property
  def var_offset(self):
    """Per-dimension offset of this partition within the full variable."""
    return self._var_offset

  def single_offset(self, shape):
    """Returns the offset when the variable is partitioned in at most one dim.

    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.

    Returns:
      `int` representing the offset in the dimension along which the variable is
      partitioned. Returns 0 if the variable is not being partitioned.

    Raises:
      ValueError: Depending on self.single_slice_dim().
    """
    single_slice_dim = self.single_slice_dim(shape)
    # If this variable is not being partitioned at all, single_slice_dim()
    # returns None and there is no offset to report.
    if single_slice_dim is None:
      return 0
    return self.var_offset[single_slice_dim]

  def single_slice_dim(self, shape):
    """Returns the slice dim when the variable is partitioned only in one dim.

    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.

    Returns:
      `int` representing the dimension that the variable is partitioned in, or
      `None` if the variable doesn't seem to be partitioned at all.

    Raises:
      TypeError: If `shape` is not a sequence.
      ValueError: If `shape` is not the same length as `self.full_shape`. If
        the variable is partitioned in more than one dimension.
    """
    if not isinstance(shape, collections_abc.Sequence) or isinstance(
        shape, six.string_types):
      raise TypeError(
          "`shape` must be a sequence (like tuple or list) instead of " +
          type(shape).__name__)
    if len(shape) != len(self.full_shape):
      raise ValueError(
          "Expected equal length, but received shape={} of length {} while "
          "self.full_shape={} is of length {}.".format(shape, len(
              shape), self.full_shape, len(self.full_shape)))
    # The partition must fit inside the full variable in every dimension.
    for i, (dim, full_dim) in enumerate(zip(shape, self.full_shape)):
      if self.var_offset[i] + dim > full_dim:
        raise ValueError(
            "With self.var_offset={}, a partition of shape={} would exceed "
            "self.full_shape={} in dimension {}.".format(
                self.var_offset, shape, self.full_shape, i))
    # A single-slice partition differs from the full shape in at most one
    # dimension; more than one differing dimension is ambiguous.
    slice_dim = None
    for i, (dim, full_dim) in enumerate(zip(shape, self.full_shape)):
      if dim == full_dim:
        continue
      if slice_dim is not None:
        raise ValueError(
            "Cannot use single_slice_dim() with shape={} and "
            "self.full_shape={} since slice dim could be either dimension {} "
            "or {}.".format(shape, self.full_shape, i, slice_dim))
      slice_dim = i
    return slice_dim
class _ReuseMode(enum.Enum):
  """Enumerates how variables are looked up within a variable scope."""
  # Fetch the variable when it already exists; otherwise create it on the
  # fly.
  AUTO_REUSE = 1
  # TODO(alive): for TensorFlow 2.0, deprecate the True/False/None `reuse`
  # API in favor of explicit enum members here (e.g. REUSE_FALSE = 2,
  # REUSE_TRUE = 3).
# Re-export the enum member itself as the module-level `AUTO_REUSE` constant
# and register it as the public `tf.AUTO_REUSE` symbol.
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export("AUTO_REUSE").export_constant(__name__, "AUTO_REUSE")
# Attach user-facing documentation to the exported constant.
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
class _VariableStore(object):
  """Variable store that carries a number of named Variables.
  New variable names and new variables can be created; all stored
  variables are initialized with the initializer passed to __init__.
  Attributes:
    vars: a dictionary with string names (same as passed in GetVar) as keys
      and the corresponding TensorFlow Variables as values.
  """
  def __init__(self):
    """Create a variable store."""
    self._vars = {}  # A dictionary of the stored TensorFlow variables.
    self._partitioned_vars = {}  # A dict of the stored PartitionedVariables.
    self._store_eager_variables = False
  def get_variable(self, name, shape=None, dtype=dtypes.float32,
                   initializer=None, regularizer=None, reuse=None,
                   trainable=True, collections=None, caching_device=None,
                   partitioner=None, validate_shape=True, use_resource=None,
                   custom_getter=None, constraint=None):
    """Gets an existing variable with these parameters or create a new one.
    If a variable with the given name is already stored, we return the stored
    variable. Otherwise, we create a new one.
    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.
    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.
    If a partitioner is provided, a `PartitionedVariable` is returned.
    Accessing this object as a `Tensor` returns the shards concatenated along
    the partition axis.
    Some useful partitioners are available.  See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.
    Args:
      name: The name of the new or existing variable.
      shape: Shape of the new or existing variable.
      dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
      initializer: Initializer for the variable.
      regularizer: A (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
        of variables. When eager execution is enabled  this argument is always
        forced to be False.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      collections: List of graph collections keys to add the `Variable` to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the `Variable` reside, to
        deduplicate copying through `Switch` and other conditional statements.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and dtype of the `Variable` to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates
        instead an experimental ResourceVariable which has well-defined
        semantics. Defaults to False (will later change to True).
        When eager execution is enabled this argument is always forced to be
        true.
      custom_getter: Callable that takes as a first argument the true getter,
        and allows overwriting the internal get_variable method.
        The signature of `custom_getter` should match that of this method,
        but the most future-proof version will allow for changes:
        `def custom_getter(getter, *args, **kwargs)`.  Direct access to
        all `get_variable` parameters is also allowed:
        `def custom_getter(getter, name, *args, **kwargs)`.  A simple identity
        custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs):
          return getter(name + '_suffix', *args, **kwargs)
        ```
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    Returns:
      The created or existing `Variable` (or `PartitionedVariable`, if a
      partitioner was used).
    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        or when violating reuse during variable creation.
      RuntimeError: when eager execution is enabled and not called from an
        EagerVariableStore.
    """
    if custom_getter is not None and not callable(custom_getter):
      raise ValueError(
          "Passed a custom_getter which is not callable: %s" % custom_getter)
    with ops.init_scope():
      if context.executing_eagerly():
        # Variable creation and initialization takes place in `init_scope`s;
        # as such, if an `init_scope` lifts us into the eager context, then we
        # need to use `ResourceVariable`s.
        use_resource = True
    # Note that it's fine to reuse eager variables whose initialization was
    # lifted from a function-building graph into the eager context (that's why
    # the following clause is not wrapped in an `init_scope`); lifted variables
    # are tracked by the graph's `VariableStore`.
    if context.executing_eagerly():
      if not self._store_eager_variables and reuse:
        raise RuntimeError(
            "When eager execution is enabled variable reuse is only supported"
            " when an EagerVariableStore is active. See the documentation on"
            " EagerVariableStore for example usage.")
      if self._store_eager_variables:
        reuse = AUTO_REUSE
    # If a *_ref type is passed in an error would be triggered further down the
    # stack. We prevent this using base_dtype to get a non-ref version of the
    # type, before doing anything else. When _ref types are removed in favor of
    # resources, this line can be removed.
    try:
      dtype = dtype.base_dtype
    except AttributeError:
      # .base_dtype not existing means that we will try and use the raw dtype
      # which was passed in - this might be a NumPy type which is valid.
      pass
    # This is the main logic of get_variable.  However, custom_getter
    # may override this logic.  So we save it as a callable and pass
    # it to custom_getter.
    # Note: the parameters of _true_getter, and their documentation, match
    # *exactly* item-for-item with the docstring of this method.
    def _true_getter(name, shape=None, dtype=dtypes.float32,  # pylint: disable=missing-docstring
                     initializer=None, regularizer=None, reuse=None,
                     trainable=True, collections=None, caching_device=None,
                     partitioner=None, validate_shape=True, use_resource=None,
                     constraint=None):
      # NOTE(review): `collections_lib.Sequence` relies on the ABC aliases in
      # the top-level `collections` module, which were removed in Python
      # 3.10; switch to `collections.abc.Sequence` when Python 2 support is
      # dropped.
      is_scalar = (shape is not None
                   and isinstance(shape, collections_lib.Sequence)
                   and not shape)
      # Partitioned variable case
      if partitioner is not None and not is_scalar:
        if not callable(partitioner):
          raise ValueError(
              "Partitioner must be callable, but received: %s" % partitioner)
        with ops.name_scope(None):
          return self._get_partitioned_variable(name=name,
                                                shape=shape,
                                                dtype=dtype,
                                                initializer=initializer,
                                                regularizer=regularizer,
                                                reuse=reuse,
                                                trainable=trainable,
                                                collections=collections,
                                                caching_device=caching_device,
                                                partitioner=partitioner,
                                                validate_shape=validate_shape,
                                                use_resource=use_resource,
                                                constraint=constraint)
      # Special case for partitioned variable to allow reuse without having to
      # specify partitioner.
      if (reuse is True and partitioner is None
          and name in self._partitioned_vars):
        return self._get_partitioned_variable(name=name,
                                              shape=shape,
                                              dtype=dtype,
                                              initializer=initializer,
                                              regularizer=regularizer,
                                              reuse=reuse,
                                              trainable=trainable,
                                              collections=collections,
                                              caching_device=caching_device,
                                              partitioner=None,
                                              validate_shape=validate_shape,
                                              use_resource=use_resource,
                                              constraint=constraint)
      # Single variable case
      if "%s/part_0" % name in self._vars:
        raise ValueError(
            "No partitioner was provided, but a partitioned version of the "
            "variable was found: %s/part_0. Perhaps a variable of the same "
            "name was already created with partitioning?" % name)
      return self._get_single_variable(
          name=name, shape=shape, dtype=dtype,
          initializer=initializer, regularizer=regularizer, reuse=reuse,
          trainable=trainable, collections=collections,
          caching_device=caching_device, validate_shape=validate_shape,
          use_resource=use_resource, constraint=constraint)
    if custom_getter is not None:
      # Handle backwards compatibility with getter arguments that were added
      # to the API after users started writing custom getters.
      custom_getter_kwargs = {
          "getter": _true_getter,
          "name": name,
          "shape": shape,
          "dtype": dtype,
          "initializer": initializer,
          "regularizer": regularizer,
          "reuse": reuse,
          "trainable": trainable,
          "collections": collections,
          "caching_device": caching_device,
          "partitioner": partitioner,
          "validate_shape": validate_shape,
          "use_resource": use_resource,
      }
      # `fn_args` can handle functions, `functools.partial`, `lambda`.
      if "constraint" in estimator_util.fn_args(custom_getter):
        custom_getter_kwargs["constraint"] = constraint
      return custom_getter(**custom_getter_kwargs)
    else:
      return _true_getter(
          name, shape=shape, dtype=dtype,
          initializer=initializer, regularizer=regularizer,
          reuse=reuse, trainable=trainable, collections=collections,
          caching_device=caching_device, partitioner=partitioner,
          validate_shape=validate_shape, use_resource=use_resource,
          constraint=constraint)
  def _get_partitioned_variable(
      self, name, partitioner, shape=None, dtype=dtypes.float32,
      initializer=None, regularizer=None, reuse=None,
      trainable=True, collections=None, caching_device=None,
      validate_shape=True, use_resource=None, constraint=None):
    """Gets or creates a sharded variable list with these parameters.
    The `partitioner` must be a callable that accepts a fully defined
    `TensorShape` and returns a sequence of integers (the `partitions`).
    These integers describe how to partition the given sharded `Variable`
    along the given dimension.  That is, `partitions[1] = 3` means split
    the `Variable` into 3 shards along dimension 1.  Currently, sharding along
    only one axis is supported.
    If the list of variables with the given name (prefix) is already stored,
    we return the stored variables. Otherwise, we create a new one.
    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.
    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.
    If the initializer is a callable, then it will be called for each
    shard.  Otherwise the initializer should match the shape of the entire
    sharded Variable, and it will be sliced accordingly for each shard.
    Some useful partitioners are available.  See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.
    Args:
      name: the name of the new or existing sharded variable.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and `dtype` of the Variable to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      shape: shape of the new or existing sharded variable.
      dtype: type of the new or existing sharded variable
        (defaults to `DT_FLOAT`).
      initializer: initializer for the sharded variable.
      regularizer: a (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
        of variables.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      collections: List of graph collections keys to add the Variable to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates an
        experimental ResourceVariable which has well-defined semantics. Defaults
        to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    Returns:
      A `PartitionedVariable` object.
    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        when violating reuse during variable creation, or if an existing
        sharded variable exists for the given name but with different sharding.
    """
    if context.executing_eagerly():
      raise NotImplementedError("Partitioned variables are not yet supported "
                                "when eager execution is enabled.")
    initializing_from_value = initializer is not None and isinstance(
        initializer, ops.Tensor)
    reuse_without_partition = reuse and not partitioner
    if name in self._vars:
      raise ValueError(
          "A partitioner was provided, but an unpartitioned version of the "
          "variable was found: %s.  Perhaps a variable of the same name was "
          "already created without partitioning?" % name)
    shape = tensor_shape.as_shape(shape)
    if initializing_from_value:
      shape = shape.merge_with(initializer.get_shape())
    if not reuse_without_partition:
      if not shape.is_fully_defined():
        raise ValueError("Shape of a new partitioned variable (%s) must be "
                         "fully defined, but instead was %s." % (name, shape))
      if shape.ndims < 1:
        raise ValueError("A partitioned Variable must have rank at least 1, "
                         "shape: %s" % shape)
      partitions = partitioner(shape=shape, dtype=dtype)
      if not isinstance(partitions, collections_lib.Sequence):
        raise ValueError("Partitioner must return a sequence, but saw: %s"
                         % partitions)
      if len(partitions) != shape.ndims:
        raise ValueError(
            "Partitioner returned a partition list that does not match the "
            "Variable's rank: %s vs. %s" % (partitions, shape))
      if any([p < 1 for p in partitions]):
        raise ValueError(
            "Partitioner returned zero partitions for some axes: %s" %
            partitions)
    if name in self._partitioned_vars:
      if reuse is False:
        raise ValueError(
            "Partitioned variable with name %s already exists. Did you mean to "
            "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
            % name)
      existing_var = self._partitioned_vars[name]
      if not shape.is_compatible_with(existing_var.get_shape()):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified shape %s "
            "and found shape %s."
            % (name, shape, existing_var.get_shape()))
      if not dtype.is_compatible_with(existing_var.dtype):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified dtype %s "
            "and found dtype %s."
            % (name, dtype.name, existing_var.dtype.name))
      # pylint: disable=protected-access
      if (not reuse_without_partition and
          existing_var._get_partitions() != partitions):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified partitions "
            "%s and found partitions %s." %
            (name, partitions, existing_var._get_partitions()))
      # pylint: enable=protected-access
      return existing_var
    if reuse is True:
      raise ValueError("PartitionedVariable %s does not exist, or was not "
                       "created with tf.get_variable(). Did you mean to set "
                       "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
    slice_dim, slice_shape = _compute_slice_dim_and_shape(
        shape.as_list(), partitions)
    vs = []
    num_slices = partitions[slice_dim]
    # When num_slices does not divide the sliced dimension evenly, the first
    # `num_slices_with_excess` shards each receive one extra element (see the
    # `i < num_slices_with_excess` check in the loop below).
    num_slices_with_excess = shape[slice_dim].value % num_slices
    slice_offset = [0] * shape.ndims
    if "%s/part_0" % name in self._vars:
      if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
        raise ValueError(
            "Partitioner returned a different partitioning than what was "
            "already found.  Partitioner returned %d shards, and shard "
            "%s/part_0 was found, but %s/part_%d was not."
            % (num_slices, name, name, num_slices - 1))
      if "%s/part_%d" % (name, num_slices) in self._vars:
        raise ValueError(
            "Partitioner returned a different partitioning than what was "
            "already found.  Partitioner returned %d shards, and shard "
            "%s/part_0 was found, but so was the extra shard %s/part_%d."
            % (num_slices, name, name, num_slices))
    for i in xrange(num_slices):
      var_shape = slice_shape[:]
      var_offset = slice_offset[:]
      partition_info = _PartitionInfo(
          full_shape=shape.as_list(), var_offset=var_offset)
      if i < num_slices_with_excess:
        var_shape[slice_dim] += 1
      # Advance the running offset by the size of the shard just laid out.
      slice_offset[slice_dim] += var_shape[slice_dim]
      var_full_name = "%s/part_%d" % (name, i)
      with ops.name_scope(var_full_name + "/PartitionedInitializer"):
        # Create the tensor to initialize the variable with default value.
        if initializer is None:
          init, initializing_from_value = self._get_default_initializer(
              name=name, shape=shape, dtype=dtype)
          if initializing_from_value:
            init_shape = None
          else:
            init_shape = var_shape
        elif callable(initializer):
          init = initializer
          init_shape = var_shape
        elif isinstance(initializer, ops.Tensor):
          init = array_ops.slice(initializer, var_offset, var_shape)
          # Use the dtype of the given tensor.
          dtype = init.dtype.base_dtype
          init_shape = None
        else:
          init = ops.convert_to_tensor(initializer, dtype=dtype)
          init = array_ops.slice(init, var_offset, var_shape)
          init_shape = None
      with ops.name_scope(None):
        var = self._get_single_variable(
            name=var_full_name,
            shape=init_shape,
            dtype=dtype,
            initializer=init,
            partition_info=partition_info,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint)
      # pylint: disable=protected-access
      var._set_save_slice_info(variables.Variable.SaveSliceInfo(
          name, shape.as_list(), var_offset, var_shape))
      vs.append(var)
      # pylint: enable=protected-access
    # pylint: disable=protected-access
    partitioned_var = variables.PartitionedVariable(name=name,
                                                    shape=shape,
                                                    dtype=dtype,
                                                    variable_list=vs,
                                                    partitions=partitions)
    # pylint: enable=protected-access
    self._partitioned_vars[name] = partitioned_var
    return partitioned_var
  def _get_single_variable(self,
                           name,
                           shape=None,
                           dtype=dtypes.float32,
                           initializer=None,
                           regularizer=None,
                           partition_info=None,
                           reuse=None,
                           trainable=True,
                           collections=None,
                           caching_device=None,
                           validate_shape=True,
                           use_resource=None,
                           constraint=None):
    """Get or create a single Variable (e.g. a shard or entire variable).
    See the documentation of get_variable above (ignore partitioning components)
    for details.
    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.
      initializer: see get_variable.
      regularizer: see get_variable.
      partition_info: _PartitionInfo object.
      reuse: see get_variable.
      trainable: see get_variable.
      collections: see get_variable.
      caching_device: see get_variable.
      validate_shape: see get_variable.
      use_resource: see get_variable.
      constraint: see get_variable.
    Returns:
      A Variable.  See documentation of get_variable above.
    Raises:
      ValueError: See documentation of get_variable above.
    """
    # Set to true if initializer is a constant.
    initializing_from_value = False
    if initializer is not None and not callable(initializer):
      initializing_from_value = True
    if shape is not None and initializing_from_value:
      raise ValueError("If initializer is a constant, do not specify shape.")
    dtype = dtypes.as_dtype(dtype)
    shape = tensor_shape.as_shape(shape)
    if name in self._vars:
      # Here we handle the case when returning an existing variable.
      if reuse is False:
        tb = self._vars[name].op.traceback[::-1]
        # Throw away internal tf entries and only take a few lines.
        tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
        raise ValueError("Variable %s already exists, disallowed."
                         " Did you mean to set reuse=True or "
                         "reuse=tf.AUTO_REUSE in VarScope? "
                         "Originally defined at:\n\n%s" % (
                             name, "".join(traceback.format_list(tb))))
      found_var = self._vars[name]
      if not shape.is_compatible_with(found_var.get_shape()):
        raise ValueError("Trying to share variable %s, but specified shape %s"
                         " and found shape %s." % (name, shape,
                                                   found_var.get_shape()))
      if not dtype.is_compatible_with(found_var.dtype):
        dtype_str = dtype.name
        found_type_str = found_var.dtype.name
        raise ValueError("Trying to share variable %s, but specified dtype %s"
                         " and found dtype %s." % (name, dtype_str,
                                                   found_type_str))
      return found_var
    # The code below handles only the case of creating a new variable.
    if reuse is True:
      raise ValueError("Variable %s does not exist, or was not created with "
                       "tf.get_variable(). Did you mean to set "
                       "reuse=tf.AUTO_REUSE in VarScope?" % name)
    if not shape.is_fully_defined() and not initializing_from_value:
      raise ValueError("Shape of a new variable (%s) must be fully defined, "
                       "but instead was %s." % (name, shape))
    # Create the tensor to initialize the variable with default value.
    if initializer is None:
      initializer, initializing_from_value = self._get_default_initializer(
          name=name, shape=shape, dtype=dtype)
    # Enter an init scope when creating the initializer.
    with ops.init_scope():
      if initializing_from_value:
        init_val = initializer
        variable_dtype = None
      else:
        # Instantiate initializer if provided initializer is a type object.
        if isinstance(initializer, type(init_ops.Initializer)):
          initializer = initializer(dtype=dtype)
        init_val = lambda: initializer(  # pylint: disable=g-long-lambda
            shape.as_list(), dtype=dtype, partition_info=partition_info)
        variable_dtype = dtype.base_dtype
    # Create the variable.
    if use_resource is None:
      # Set the default value if unspecified.
      use_resource = False
    # NOTE(review): `variable` is a module-level factory defined elsewhere in
    # this file; presumably it chooses between Variable and ResourceVariable
    # based on `use_resource` -- confirm against its definition.
    v = variable(
        initial_value=init_val,
        name=name,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        dtype=variable_dtype,
        validate_shape=validate_shape,
        constraint=constraint,
        use_resource=use_resource)
    if not context.executing_eagerly() or self._store_eager_variables:
      # In eager mode we do not want to keep default references to Variable
      # objects as this will prevent their memory from being released.
      self._vars[name] = v
    logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
                 format(shape), initializer)
    # Run the regularizer if requested and save the resulting loss.
    if regularizer:
      with ops.colocate_with(v):
        with ops.name_scope(name + "/Regularizer/"):
          loss = regularizer(v)
        if loss is not None:
          if context.executing_eagerly():
            v_name = "v_%s" % type(v)
            loss_name = "loss_%s" % type(loss)
          else:
            v_name = v.name
            loss_name = loss.name
          logging.vlog(1, "Applied regularizer to %s and added the result %s "
                       "to REGULARIZATION_LOSSES.", v_name, loss_name)
          ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
    return v
  # Initialize variable when no initializer provided
  def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Provide a default initializer and a corresponding value.
    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.
    Returns:
      initializer and initializing_from_value. See get_variable above.
    Raises:
      ValueError: When giving unsupported dtype.
    """
    del shape
    # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
    if dtype.is_floating:
      initializer = init_ops.glorot_uniform_initializer()
      initializing_from_value = False
    # If dtype is DT_INT/DT_UINT, provide a default value `zero`
    # If dtype is DT_BOOL, provide a default value `FALSE`
    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
      initializer = init_ops.zeros_initializer()
      initializing_from_value = False
    # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
    else:
      raise ValueError("An initializer for variable %s of %s is required"
                       % (name, dtype.base_dtype))
    return initializer, initializing_from_value
# Pass this as the `regularizer` argument to explicitly opt a variable out
# of regularization.
@tf_export("no_regularizer")
def no_regularizer(_):
  """A regularizer that applies no penalty: it always yields `None`."""
  return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export("VariableScope")
class VariableScope(object):
  """Variable scope object to carry defaults to provide to `get_variable`.
  Many of the arguments we need for `get_variable` in a variable store are most
  easily handled with a context. This object is used for the defaults.
  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be False.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults
      to False (will later change to True). When eager execution is enabled
      this argument is always forced to be True.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value
      (which must have the same shape). Constraints are not safe to
      use when doing asynchronous distributed training.
  """
  def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if context.executing_eagerly():
      # Eager execution supports neither caching devices nor partitioned
      # variables, and always forces AUTO_REUSE + resource variables.
      if self._caching_device is not None:
        raise NotImplementedError("Caching devices is not yet supported "
                                  "when eager execution is enabled.")
      if self._partitioner is not None:
        raise NotImplementedError("Partitioned variables are not yet supported "
                                  "when eager execution is enabled.")
      self._reuse = AUTO_REUSE
      self._use_resource = True
  # Read-only views of the defaults recorded in the constructor (possibly
  # updated later through the set_* mutators below).
  @property
  def name(self):
    return self._name
  @property
  def original_name_scope(self):
    return self._name_scope
  @property
  def reuse(self):
    return self._reuse
  @property
  def initializer(self):
    return self._initializer
  @property
  def dtype(self):
    return self._dtype
  @property
  def use_resource(self):
    return self._use_resource
  @property
  def regularizer(self):
    return self._regularizer
  @property
  def caching_device(self):
    return self._caching_device
  @property
  def partitioner(self):
    return self._partitioner
  @property
  def custom_getter(self):
    return self._custom_getter
  @property
  def constraint(self):
    return self._constraint
  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True
  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer
  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype
  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    if context.executing_eagerly() and not use_resource:
      # Eager variables are always resource variables; see __init__.
      raise ValueError("When eager execution is enabled, "
                       "use_resource cannot be set to false.")
    self._use_resource = use_resource
  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer
  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    if context.executing_eagerly():
      raise NotImplementedError("Caching devices are not yet supported "
                                "when eager execution is enabled.")
    self._caching_device = caching_device
  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    if partitioner and context.executing_eagerly():
      raise NotImplementedError("Partitioned variables are not yet supported "
                                "when eager execution is enabled.")
    self._partitioner = partitioner
  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter
  def get_collection(self, name):
    """Get this scope's variables."""
    # A trailing "/" restricts the collection filter to this scope's subtree.
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)
  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=True,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None):
    """Gets an existing variable with this name or create a new one."""
    # Explicit arguments win; otherwise fall back to the scope defaults.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if context.executing_eagerly():
      # Eager variables are never reused and are always resource variables.
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource
    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, custom_getter=custom_getter,
          constraint=constraint)
  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=True,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None):
    """Gets an existing variable with this name or create a new one."""
    if context.executing_eagerly():
      raise NotImplementedError("Partitioned variables are not yet supported "
                                "when eager execution is enabled.")
    # Explicit arguments win; otherwise fall back to the scope defaults.
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource
    if self._custom_getter is not None:
      # Custom getters cannot intercept this private entry point, so refuse
      # to proceed rather than silently bypassing the getter.
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)
    if partitioner is None:
      raise ValueError("No partitioner was specified")
    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=self.reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, constraint=constraint)
      # pylint: enable=protected-access
# Graph-collection keys under which the default variable store and the
# per-thread variable scope store are stashed. Tuples are used so they cannot
# collide with ordinary string collection names.
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if not scope_name or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  stores = ops.get_collection(_VARSCOPESTORE_KEY)
  if stores:
    return stores[0]
  # First use on this thread/graph: create and register a fresh store.
  scope_store = _VariableScopeStore()
  ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
  return scope_store
@tf_export("get_variable_scope")
def get_variable_scope():
  """Returns the current variable scope."""
  scope_store = get_variable_scope_store()
  return scope_store.current_scope
def _get_default_variable_store():
  """Returns the default variable store, creating and registering it if absent."""
  stores = ops.get_collection(_VARSTORE_KEY)
  if not stores:
    store = _VariableStore()
    ops.add_to_collection(_VARSTORE_KEY, store)
    return store
  return stores[0]
@tf_contextlib.contextmanager
def with_variable_store(store):
  """Context manager that temporarily installs `store` as the default store."""
  store_collection = ops.get_collection_ref(_VARSTORE_KEY)
  previous = list(store_collection)
  store_collection[:] = [store]
  try:
    yield
  finally:
    # Restore whatever stores were registered before entering.
    store_collection[:] = previous
class EagerVariableStore(object):
  """Keeps eager-created variables alive and enumerable for functional layers.

  With eager execution enabled, variables are deleted once they go out of
  scope and are not placed in global collections by default, while much of
  the functional-layer code (e.g. tf.layers) assumes variables are tracked in
  a global list. Running such code inside this store's `as_default()` context
  bridges the gap:

  ```
  container = tfe.EagerVariableStore()
  for input in dataset_iterator:
    with container.as_default():
      x = tf.layers.dense(input, name="l1")
  print(container.variables)  # Should print the variables used in the layer.
  ```
  """
  def __init__(self, store=None):
    """Wraps `store` (which must hold eager variables) or creates a new one."""
    if store is None:
      self._store = _VariableStore()
      self._store._store_eager_variables = True  # pylint: disable=protected-access
    else:
      if not store._store_eager_variables:  # pylint: disable=protected-access
        raise ValueError("Cannot construct EagerVariableStore from a "
                         "VariableStore object that does not hold eager "
                         "variables.")
      self._store = store
  def as_default(self):
    """Returns a context manager making this the default variable store."""
    return with_variable_store(self._store)
  def variables(self):
    """All variables in the store, sorted by name."""
    return sorted(self._store._vars.values(), key=lambda v: v.name)  # pylint: disable=protected-access
  def trainable_variables(self):
    """Variables in the store marked trainable, sorted by name."""
    # pylint: disable=protected-access
    trainable = [v for v in self._store._vars.values() if v._trainable]
    return sorted(trainable, key=lambda v: v.name)
    # pylint: enable=protected-access
  def non_trainable_variables(self):
    """Variables in the store not marked trainable, sorted by name."""
    # pylint: disable=protected-access
    non_trainable = [v for v in self._store._vars.values() if not v._trainable]
    return sorted(non_trainable, key=lambda v: v.name)
    # pylint: enable=protected-access
  def copy(self):
    """Copy this variable store and all of its contents.

    Variables contained in this store will be copied over to the new variable
    store, meaning that they can be modified without affecting the variables in
    this store.

    Returns:
      A new EagerVariableStore instance containing copied variables.
    """
    # pylint: disable=protected-access
    new_store = EagerVariableStore()
    for key, var in iteritems(self._store._vars):
      # Drop the ":<n>"-style suffix from the variable name, if present.
      stripped_var_name = var.name.partition(":")[0]
      # Create new variable with same value, name, and "trainable" flag.
      new_var = resource_variable_ops.ResourceVariable(
          var.read_value(),
          name=stripped_var_name,
          trainable=var._trainable)
      new_store._store._vars[key] = new_var
    return new_store
    # pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export("get_variable")
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=True,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None):
  # NOTE: the public docstring is attached after the definition via
  # `get_variable.__doc__`, from a template shared with `get_local_variable`.
  scope = get_variable_scope()
  store = _get_default_variable_store()
  return scope.get_variable(
      store,
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      custom_getter=custom_getter,
      constraint=constraint)
get_variable_or_local_docstring = (
"""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
@{$variables$Variable Scope How To}
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
@{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export("get_local_variable")
def get_local_variable(name,
                       shape=None,
                       dtype=None,
                       initializer=None,
                       regularizer=None,
                       trainable=False,  # pylint: disable=unused-argument
                       collections=None,
                       caching_device=None,
                       partitioner=None,
                       validate_shape=True,
                       use_resource=None,
                       custom_getter=None,
                       constraint=None):
  # NOTE: the public docstring is attached after the definition from a
  # template shared with `get_variable`. `trainable` is accepted only for
  # signature parity with `get_variable`; local variables are never trainable.
  #
  # Build a new list rather than using `collections += [...]`, which mutated a
  # caller-supplied list in place (a visible side effect on the caller's data).
  # `list(collections)` also accepts tuples and other iterables.
  if collections:
    collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  return get_variable(
      name, shape=shape, dtype=dtype, initializer=initializer,
      regularizer=regularizer, trainable=False, collections=collections,
      caching_device=caching_device, partitioner=partitioner,
      validate_shape=validate_shape, use_resource=use_resource,
      custom_getter=custom_getter, constraint=constraint)
# Fill the shared template for the local-variable flavor; the third slot is
# empty because `trainable` is fixed to False and not user-documented.
get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n",
    "",
    "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
                              shape=None,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              trainable=True,
                              collections=None,
                              caching_device=None,
                              partitioner=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None):
  """Gets or creates a sharded variable list with these parameters.
  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension.  That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1.  Currently, sharding along
  only one axis is supported.
  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.
  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.
  If the initializer is a callable, then it will be called for each
  shard.  Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.
  Some useful partitioners are available.  See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.
  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of
      applying it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading.  Defaults to the Variable's
      device.  If not `None`, caches on another device.  Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value
      (which must have the same shape). Constraints are not safe to
      use when doing asynchronous distributed training.
  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.
  Raises:
    ValueError: when creating a new variable and shape is not declared,
      or when violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  if scope.custom_getter is not None:
    # Custom getters cannot intercept this private entry point, so refuse to
    # proceed rather than silently bypassing the getter.
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set.  Current custom getter: %s.  "
        "It is likely that you're using create_partitioned_variables.  "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  # Delegate to the current scope, which prefixes the name and applies the
  # scope defaults before reaching the variable store.
  return scope._get_partitioned_variable(
      _get_default_variable_store(), name, shape=shape, dtype=dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=use_resource, constraint=constraint)
  # pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object):  # pylint: disable=invalid-name
  """A context for the variable_scope, see `variable_scope` for docs."""
  def __init__(self,
               name_or_scope,
               reuse=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               old_name_scope=None,
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a context for the variable_scope, see `variable_scope` for docs.
    Note: this does not create a name scope.
    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent
        scope's reuse flag.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      old_name_scope: the original name scope when re-entering a variable scope.
      dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
      use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be creates instead, with
        well-defined semantics. Defaults to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    """
    self._name_or_scope = name_or_scope
    self._reuse = reuse
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._old_name_scope = old_name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    # The variable store is registered in a graph collection; the scope store
    # is per-thread (see _VariableScopeStore).
    self._var_store = _get_default_variable_store()
    self._var_scope_store = get_variable_scope_store()
    if isinstance(self._name_or_scope, VariableScope):
      self._new_name = self._name_or_scope.name
      name_scope = self._name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope.  We create a new
      # VariableScope (self._var_scope_object) that contains a copy of the
      # provided shared scope, possibly with changed reuse and initializer, if
      # the user requested this.
      variable_scope_object = VariableScope(
          self._name_or_scope.reuse if not self._reuse else self._reuse,
          name=self._new_name,
          initializer=self._name_or_scope.initializer,
          regularizer=self._name_or_scope.regularizer,
          caching_device=self._name_or_scope.caching_device,
          partitioner=self._name_or_scope.partitioner,
          dtype=self._name_or_scope.dtype,
          custom_getter=self._name_or_scope.custom_getter,
          name_scope=name_scope,
          use_resource=self._name_or_scope.use_resource,
          constraint=self._constraint)
      # Explicit constructor arguments override values copied from the
      # shared scope.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(
                self._custom_getter, self._name_or_scope.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._cached_variable_scope_object = variable_scope_object
  def __enter__(self):
    """Begins the scope block.
    Returns:
      A VariableScope.
    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope, or if reuse is not `None` or `True`.
      TypeError: when the types of some arguments are not appropriate.
    """
    # Remember the enclosing scope so __exit__ can restore it.
    self._old = self._var_scope_store.current_scope
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.open_variable_scope(self._new_name)
      self._old_subscopes = copy.copy(
          self._var_scope_store.variable_scopes_count)
      variable_scope_object = self._cached_variable_scope_object
    else:
      # Handler for the case when we just prolong current variable scope.
      # VariableScope with name extended by the provided one, and inherited
      # reuse and initializer (except if the user provided values to set).
      self._new_name = (
          self._old.name + "/" + self._name_or_scope if self._old.name
          else self._name_or_scope)
      self._reuse = (self._reuse
                     or self._old.reuse)  # Re-using is inherited by sub-scopes.
      if self._old_name_scope is None:
        name_scope = self._name_or_scope
      else:
        name_scope = self._old_name_scope
      variable_scope_object = VariableScope(
          self._reuse,
          name=self._new_name,
          initializer=self._old.initializer,
          regularizer=self._old.regularizer,
          caching_device=self._old.caching_device,
          partitioner=self._old.partitioner,
          dtype=self._old.dtype,
          use_resource=self._old.use_resource,
          custom_getter=self._old.custom_getter,
          name_scope=name_scope,
          constraint=self._constraint)
      # Explicit constructor arguments override values inherited from the
      # parent scope.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._old.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._var_scope_store.open_variable_scope(self._new_name)
    self._var_scope_store.current_scope = variable_scope_object
    return variable_scope_object
  def __exit__(self, type_arg, value_arg, traceback_arg):
    """Ends the scope block, restoring the previous scope and open counts."""
    # If jumping out from a non-prolonged scope, restore counts.
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.variable_scopes_count = self._old_subscopes
    else:
      self._var_scope_store.close_variable_subscopes(self._new_name)
    self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  var_scope_store = get_variable_scope_store()
  current = get_variable_scope()
  name = prefix if not current.name else current.name + "/" + prefix
  if var_scope_store.variable_scope_count(name) == 0:
    return prefix
  # The plain name is taken: probe "<prefix>_1", "<prefix>_2", ... until free.
  idx = 1
  while var_scope_store.variable_scope_count("%s_%d" % (name, idx)) > 0:
    idx += 1
  return "%s_%d" % (prefix, idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export("variable_scope") # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the @{$variables$Variable Scope How To}, here we present only a few basic
examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scopes names are also generated
only on a per thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For e.g.
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`, this name will be uniquified. If name_or_scope is provided it
won't be used and therefore it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
this argument is always forced to be tf.AUTO_REUSE.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't touch name scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if self._graph_context_manager is not None:
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export("variable_op_scope")
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
def default_variable_creator(next_creator=None, **kwargs):
  """Default variable creator."""
  # This is the innermost creator; nothing should be below it in the chain.
  assert next_creator is None
  use_resource = kwargs.get("use_resource", None)
  if use_resource is None:
    use_resource = get_variable_scope().use_resource
  # Arguments common to both variable flavors.
  shared_args = dict(
      initial_value=kwargs.get("initial_value", None),
      trainable=kwargs.get("trainable", True),
      collections=kwargs.get("collections", None),
      validate_shape=kwargs.get("validate_shape", True),
      caching_device=kwargs.get("caching_device", None),
      name=kwargs.get("name", None),
      dtype=kwargs.get("dtype", None),
      constraint=kwargs.get("constraint", None))
  if use_resource or (use_resource is None and context.executing_eagerly()):
    return resource_variable_ops.ResourceVariable(**shared_args)
  elif not use_resource and context.executing_eagerly():
    raise RuntimeError(
        "VariableScope should use resource variable when eager execution is"
        " enabled, but use_resource is False."
    )
  else:
    return variables.Variable(**shared_args)
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
def variable(initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None,
             constraint=None,
             use_resource=None):
  """Create a variable, routing the call through all active variable creators.

  The innermost handler is `default_variable_creator`; each creator on the
  default graph's creator stack wraps the one below it, so the creator pushed
  last sees the call first.
  """
  getter = lambda **kwargs: default_variable_creator(None, **kwargs)
  # pylint: disable=protected-access
  for creator in ops.get_default_graph()._variable_creator_stack:
    getter = _make_getter(creator, getter)
  # pylint: enable=protected-access
  return getter(initial_value=initial_value,
                trainable=trainable,
                collections=collections,
                validate_shape=validate_shape,
                caching_device=caching_device,
                name=name, dtype=dtype,
                constraint=constraint,
                use_resource=use_resource)
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  `variable_creator` is expected to be a function with the following signature:

  ```
  def variable_creator(next_creator, **kwargs)
  ```

  Rather than calling Variable or ResourceVariable directly, a creator should
  eventually delegate to `next_creator` when it actually wants a variable
  built; this keeps creators composable. A creator may build several
  variables, hand back already existing ones, or merely record that a variable
  was requested before deferring to the next creators in line, and it may
  rewrite the keyword arguments that later creators observe.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwargs are:
    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must have
      a shape specified unless `validate_shape` is set to False. Can also be a
      callable with no argument that returns the initial value when called. In
      that case, `dtype` must be specified. (Note that initializer functions
      from init_ops.py must first be bound to a shape before being used here.)
    trainable: If `True`, the default, also adds the variable to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
      the default list of variables to use by the `Optimizer` classes.
    collections: List of graph collections keys. The new variable is added to
      these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    validate_shape: If `False`, allows the variable to be initialized with a
      value of unknown shape. If `True`, the default, the shape of
      `initial_value` must be known.
    caching_device: Optional device string describing where the Variable
      should be cached for reading. Defaults to the Variable's device.
      If not `None`, caches on another device. Typical use is to cache
      on the device where the Ops using the Variable reside, to deduplicate
      copying through `Switch` and other conditional statements.
    name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
    dtype: If set, initial_value will be converted to the given type.
      If `None`, either the datatype will be kept (if `initial_value` is
      a Tensor), or `convert_to_tensor` will decide.
    constraint: A constraint function to be applied to the variable after
      updates by some algorithms.
    use_resource: if True, a ResourceVariable is always created.

  This set may grow over time, so it's important the signature of creators is
  as mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  with graph._variable_creator_scope(variable_creator):
    yield
|
apache-2.0
|
bmaluenda/switch
|
switch_mod/operations/unitcommit/fuel_use.py
|
1
|
16280
|
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
This module describes fuel use with considerations of unit commitment
and incremental heat rates using piecewise linear expressions. If you
want to use this module directly in a list of switch modules (instead of
including the package project.unitcommit), you will also need to include
the module operations.unitcommit.commit
If you haven't worked with incremental heat rates before, you may want
to start by reading a background document on incremental heat rates such
as: http://www.energy.ca.gov/papers/98-04-07_HEATRATE.PDF
Incremental heat rates are a way of approximating an "input-output
curve" (heat input vs electricity output) with a series of line
segments. These curves are typically drawn with electricity output on
the x-axis (Power, MW) and fuel use rates on the y-axis (MMBTU/h). These
curves are drawn from the minimum to maximum power output levels for a
given generator, and most generators cannot run at 0 output. The slope
of each line segment is the incremental heat rate at that point in units
of MMBTU/MWh.
Data for incremental heat rates is typically formatted in a heterogeneous
manner. The first data point is the first point on the curve - the
minimum loading level (MW) and its corresponding fuel use rate
(MMBTU/h). Subsequent data points provide subsequent loading levels in MW
and slopes, or incremental heat rates in MMBTU/MWh. This format was
designed to make certain economic calculations easy, not to draw input-
output curves, but you can calculate subsequent points on the curve from
this information.
Fuel requirements for most generators can be approximated very well with
simple models of a single line segment, but the gold standard is to use
several line segments that have increasing slopes. In the future, we may
include a simpler model that uses a single line segment, but we are just
implementing the complex piecewise linear form initially to satisfy key
stakeholders.
There are two basic ways to model a piecewise linear relationship like
this in linear programming. The first approach (which we don't use in
this module) is to divide the energy production variable into several
subvariables (one for each line segment), and put an upper bound on each
subvariable so that it can't exceed the width of the segment. The total
energy production is the sum of the sub-variables, and the total fuel
consumption is: Fuel = line0_intercept + E0*incremental_heat_rate0 +
E1*incremental_heat_rate1 + ... As long as each incremental_heat_rate is
larger than the one before it, then the optimization will ensure that E1
remains at 0 until E0 is at its upper limit, which ensures consistent
results. This tiered decision method is used in the fuel_markets module,
but is not used here.
This module uses the second approach which is to make FuelUse into a
decision variable that must be greater than or equal to each of the
lines. As long as fuel has a cost associated with it, a cost minimizing
optimization will push the fuel use down until it touches a line segment.
This method also requires that incremental heat rates increase with
energy production so that the lines collectively form a convex boundary
for fuel use.
"""
import os
from pyomo.environ import *
import csv
from switch_mod.utilities import approx_equal
dependencies = 'switch_mod.timescales', 'switch_mod.load_zones',\
'switch_mod.financials.minimize_cost', 'switch_mod.energy_sources', \
'switch_mod.investment.proj_build', 'switch_mod.operations.proj_dispatch',\
'switch_mod.operations.unitcommit.commit'
def define_components(mod):
    """
    This function adds components to a Pyomo abstract model object to
    describe fuel consumption in the context of unit commitment. Unless
    otherwise stated, all power capacity is specified in units of MW and
    all sets and parameters are mandatory.

    Typically incremental heat rates tables specify "blocks" where each
    block includes power output in MW and heat requirements in MMBTU/hr
    to move from the prior block to the current block. If you plot these
    points and connect the dots, you have a piecewise linear function
    that goes from at least minimum loading level to maximum loading
    level. Data is read in in that format, then processed to describe
    the individual line segments.

    GEN_FUEL_USE_SEGMENTS[g in GEN_TECH_WITH_FUEL] is a set of line segments
    that collectively describe fuel requirements for a given generation
    technology. Each element of this set is a tuple of (y-intercept,
    slope) where the y-intercept is in units of MMBTU/(hr * MW-capacity)
    and slope is incremental heat rate in units of MMBTU / MWh-energy.
    We normalize the y-intercept by capacity so that we can scale it to
    arbitrary sizes of generation, or stacks of individual generation
    units. This code can be used in conjunction with discrete unit sizes
    but is not dependent on that. This set is optional.

    PROJ_FUEL_USE_SEGMENTS[proj in FUEL_BASED_PROJECTS] is the same as
    GEN_FUEL_USE_SEGMENTS but scoped to projects. This set is optional
    and will default to GEN_FUEL_USE_SEGMENTS if that is available;
    otherwise it will default to an intercept of 0 and a slope of its
    full load heat rate.

    """
    # (intercept, slope) tuples per project, hence dimen=2.
    mod.PROJ_FUEL_USE_SEGMENTS = Set(
        mod.FUEL_BASED_PROJECTS,
        dimen=2)

    # Use BuildAction to populate a set's default values.
    def PROJ_FUEL_USE_SEGMENTS_default_rule(m, pr):
        # Projects with no explicit segments get a single line through the
        # origin whose slope is the project's full load heat rate.
        if pr not in m.PROJ_FUEL_USE_SEGMENTS:
            heat_rate = m.proj_full_load_heat_rate[pr]
            m.PROJ_FUEL_USE_SEGMENTS[pr] = [(0, heat_rate)]
    mod.PROJ_FUEL_USE_SEGMENTS_default = BuildAction(
        mod.FUEL_BASED_PROJECTS,
        rule=PROJ_FUEL_USE_SEGMENTS_default_rule)

    # One constraint index per (project, timepoint, segment intercept, slope).
    mod.PROJ_DISP_FUEL_PIECEWISE_CONS_SET = Set(
        dimen=4,
        initialize=lambda m: [
            (proj, t, intercept, slope)
            for (proj, t) in m.PROJ_WITH_FUEL_DISPATCH_POINTS
            for (intercept, slope) in m.PROJ_FUEL_USE_SEGMENTS[proj]
        ]
    )
    # Total fuel use must lie on or above every line segment; with positive
    # fuel costs, a cost-minimizing solution is pushed down onto the curve.
    mod.ProjFuelUseRate_Calculate = Constraint(
        mod.PROJ_DISP_FUEL_PIECEWISE_CONS_SET,
        rule=lambda m, pr, t, intercept, incremental_heat_rate: (
            sum(m.ProjFuelUseRate[pr, t, f] for f in m.PROJ_FUELS[pr]) >=
            # Do the startup
            m.Startup[pr, t] * m.proj_startup_fuel[pr] / m.tp_duration_hrs[t] +
            intercept * m.CommitProject[pr, t] +
            incremental_heat_rate * m.DispatchProj[pr, t]))

# TODO: switch to defining heat rates as a collection of (output_mw, fuel_mmbtu_per_h) points;
# read those directly as normal sets, then derive the project heat rate curves from those
# within define_components.
# This will simplify data preparation (the current format is hard to produce from any
# normalized database) and the import code and help the readability of this file.
def load_inputs(mod, switch_data, inputs_dir):
    """
    Import data to support modeling fuel use under partial loading
    conditions with piecewise linear incremental heat rates.

    These files are formatted differently than most to match the
    standard format of incremental heat rates. This format is peculiar
    because it formats data records that describes a fuel use curve in
    two distinct ways. The first record is the first point on the curve,
    but all subsequent records are slopes and x-domain for each line
    segment. For a given generation technology or project, the relevant
    data should be formatted like so:

    power_start_mw  power_end_mw   ihr   fuel_use_rate
    min_load        .              .     value
    min_load        mid_load1      value .
    mid_load1       max_load       value .

    The first row provides the first point on the input/output curve.
    Literal dots should be included to indicate blanks.
    The column fuel_use_rate is in units of MMBTU/h.
    Subsequent rows provide the domain and slope of each line segment.
    The column ihr indicates incremental heat rate in MMBTU/MWh.
    Any number of line segments will be accepted.
    All text should be replaced with actual numerical values.

    I chose this format to a) be relatively consistent with standard
    data that is easiest to find, b) make it difficult to misinterpret
    the meaning of the data, and c) allow all of the standard data to be
    included in a single file.

    The following files are optional. If no representative data is
    provided for a generation technology, it will default to a single
    line segment with an intercept of 0 and a slope equal to the full
    load heat rate. If no specific data is provided for a project, it
    will default to its generation technology.

    proj_inc_heat_rates.tab
        project, power_start_mw, power_end_mw,
        incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h

    """
    path = os.path.join(inputs_dir, 'proj_inc_heat_rates.tab')
    if os.path.isfile(path):
        (fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file(
            path, id_column="project")
        # Check implied minimum loading level for consistency with
        # proj_min_load_fraction if proj_min_load_fraction was provided. If
        # proj_min_load_fraction wasn't provided, set it to implied minimum
        # loading level.
        for pr in min_load:
            if 'proj_min_load_fraction' not in switch_data.data():
                switch_data.data()['proj_min_load_fraction'] = {}
            dp_dict = switch_data.data(name='proj_min_load_fraction')
            if pr in dp_dict:
                min_load_dat = dp_dict[pr]
                # The explicitly-specified value must (approximately) match
                # the minimum load implied by the heat rate curve.
                if not approx_equal(min_load[pr], min_load_dat):
                    raise ValueError((
                        "proj_min_load_fraction is inconsistant with " +
                        "incremental heat rate data for project " +
                        "{}.").format(pr))
            else:
                dp_dict[pr] = min_load[pr]
        # Same thing, but for full load heat rate.
        for pr in full_hr:
            if 'proj_full_load_heat_rate' not in switch_data.data():
                switch_data.data()['proj_full_load_heat_rate'] = {}
            dp_dict = switch_data.data(name='proj_full_load_heat_rate')
            if pr in dp_dict:
                full_hr_dat = dp_dict[pr]
                # Allow up to 1% relative discrepancy between the specified
                # and implied full load heat rates.
                if abs((full_hr[pr] - full_hr_dat) / full_hr_dat) > 0.01:
                    raise ValueError((
                        "proj_full_load_heat_rate is inconsistant with " +
                        "incremental heat rate data for project " +
                        "{}.").format(pr))
            else:
                dp_dict[pr] = full_hr[pr]
        # Copy parsed data into the data portal.
        switch_data.data()['PROJ_FUEL_USE_SEGMENTS'] = fuel_rate_segments
def _parse_inc_heat_rate_file(path, id_column):
"""
Parse tabular incremental heat rate data, calculate a series of
lines that describe each segment, and perform various error checks.
"""
# fuel_rate_points[unit] = {min_power: fuel_use_rate}
fuel_rate_points = {}
# fuel_rate_segments[unit] = [(intercept1, slope1), (int2, slope2)...]
# Stores the description of each linear segment of a fuel rate curve.
fuel_rate_segments = {}
# ihr_dat stores incremental heat rate records as a list for each unit
ihr_dat = {}
# min_cap_factor[unit] and full_load_hr[unit] are for error checking.
min_cap_factor = {}
full_load_hr = {}
# Scan the file and stuff the data into dictionaries for easy access.
# Parse the file and stuff data into dictionaries indexed by units.
with open(path, 'rb') as hr_file:
dat = list(csv.DictReader(hr_file, delimiter='\t'))
for row in dat:
u = row[id_column]
p1 = float(row['power_start_mw'])
p2 = row['power_end_mw']
ihr = row['incremental_heat_rate_mbtu_per_mwhr']
fr = row['fuel_use_rate_mmbtu_per_h']
# Does this row give the first point?
if(p2 == '.' and ihr == '.'):
fr = float(fr)
if(u in fuel_rate_points):
raise ValueError(
"Error processing incremental heat rates for " +
u + " in " + path + ". More than one row has " +
"a fuel use rate specified.")
fuel_rate_points[u] = {p1: fr}
# Does this row give a line segment?
elif(fr == '.'):
p2 = float(p2)
ihr = float(ihr)
if(u not in ihr_dat):
ihr_dat[u] = []
ihr_dat[u].append((p1, p2, ihr))
# Throw an error if the row's format is not recognized.
else:
raise ValueError(
"Error processing incremental heat rates for row " +
u + " in " + path + ". Row format not recognized for " +
"row " + str(row) + ". See documentation for acceptable " +
"formats.")
# Make sure that each project that has incremental heat rates defined
# also has a starting point defined.
missing_starts = [k for k in ihr_dat if k not in fuel_rate_points]
if missing_starts:
raise ValueError(
'No starting point(s) are defined for incremental heat rate curves '
'for the following technologies: {}'.format(','.join(missing_starts)))
# Construct a convex combination of lines describing a fuel use
# curve for each representative unit "u".
for u, fr_points in fuel_rate_points.items():
if u not in ihr_dat:
# no heat rate segments specified; plant can only be off or on at full power
# create a dummy curve at full heat rate
output, fuel = fr_points.items()[0]
fuel_rate_segments[u] = [(0.0, fuel / output)]
min_cap_factor[u] = 1.0
full_load_hr[u] = fuel / output
continue
fuel_rate_segments[u] = []
# Sort the line segments by their domains.
ihr_dat[u].sort()
# Assume that the maximum power output is the rated capacity.
(junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u])-1]
# Retrieve the first incremental heat rate for error checking.
(min_power, junk, ihr_prev) = ihr_dat[u][0]
min_cap_factor[u] = min_power / capacity
# Process each line segment.
for (p_start, p_end, ihr) in ihr_dat[u]:
# Error check: This incremental heat rate cannot be less than
# the previous one.
if ihr_prev > ihr:
raise ValueError((
"Error processing incremental heat rates for " +
"{} in file {}. The incremental heat rate " +
"between power output levels {}-{} is less than " +
"that of the prior line segment.").format(
u, path, p_start, p_end))
# Error check: This segment needs to start at an existing point.
if p_start not in fr_points:
raise ValueError((
"Error processing incremental heat rates for " +
"{} in file {}. The incremental heat rate " +
"between power output levels {}-{} does not start at a " +
"previously defined point or line segment.").format(
u, path, p_start, p_end))
# Calculate the y-intercept then normalize it by the capacity.
intercept_norm = (fr_points[p_start] - ihr * p_start) / capacity
# Save the line segment's definition.
fuel_rate_segments[u].append((intercept_norm, ihr))
# Add a point for the end of the segment for the next iteration.
fr_points[p_end] = fr_points[p_start] + (p_end - p_start) * ihr
ihr_prev = ihr
# Calculate the max load heat rate for error checking
full_load_hr[u] = fr_points[capacity] / capacity
return (fuel_rate_segments, min_cap_factor, full_load_hr)
|
apache-2.0
|
iuliat/nova
|
nova/tests/functional/libvirt/test_numa_servers.py
|
45
|
6782
|
# Copyright (C) 2015 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova.tests.functional.test_servers import ServersTestBase
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NumaHostInfo(fakelibvirt.HostInfo):
    """Fake libvirt host info that builds its NUMA topology lazily."""

    def __init__(self, **kwargs):
        super(NumaHostInfo, self).__init__(**kwargs)
        self.numa_mempages_list = []

    def get_numa_topology(self):
        """Return the cached NUMA topology, generating it on first use."""
        if not self.numa_topology:
            self.numa_topology = self._gen_numa_topology(
                self.cpu_nodes, self.cpu_sockets, self.cpu_cores,
                self.cpu_threads, self.kB_mem)
            # Recompute the number of active cpus from the generated cells.
            cells = self.numa_topology.cells
            total_cpus = len(cells) * len(cells[0].cpus)
            self.cpus = total_cpus - len(self.disabled_cpus_list)
        return self.numa_topology

    def set_custom_numa_toplogy(self, topology):
        # NOTE(review): method name typo ("toplogy") kept intact for
        # compatibility with existing callers.
        self.numa_topology = topology
class NUMAServersTest(ServersTestBase):
    """Functional tests that boot servers with NUMA-topology flavors
    against a fake libvirt and verify NUMATopologyFilter participation.
    """

    def setUp(self):
        super(NUMAServersTest, self).setUp()
        # Replace libvirt with fakelibvirt so no real hypervisor is needed.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.guest.libvirt',
            fakelibvirt))
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

    def _setup_compute_service(self):
        # Compute is started manually inside _run_build_test, after the
        # libvirt connection mocks are in place.
        pass

    def _setup_scheduler_service(self):
        """Start the scheduler with NUMATopologyFilter enabled."""
        self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')
        self.flags(scheduler_driver='nova.scheduler.'
                                    'filter_scheduler.FilterScheduler')
        self.flags(scheduler_default_filters=CONF.scheduler_default_filters
                   + ['NUMATopologyFilter'])
        return self.start_service('scheduler')

    def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'):
        """Boot a server with *flavor_id*, assert the NUMA filter ran and
        the server reaches *end_status*, then delete the server."""
        self.compute = self.start_service('compute', host='test_compute0')
        fake_network.set_stub_network_methods(self.stubs)
        # Create server
        good_server = self._build_server(flavor_id)
        post = {'server': good_server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Validate that the server has been created
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [s['id'] for s in servers]
        self.assertIn(created_server_id, server_ids)
        # Validate that NUMATopologyFilter has been called
        self.assertTrue(filter_mock.called)
        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual(end_status, found_server['status'])
        self._delete_server(created_server_id)

    def _get_topology_filter_spy(self):
        """Return a mock wrapping NUMATopologyFilter.host_passes so calls
        can be observed without changing filter behavior."""
        host_manager = self.scheduler.manager.driver.host_manager
        numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
        host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
        return host_pass_mock

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_numa_topology(self, img_mock):
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 hv_version=2001000,
                                                 host_info=host_info)
        # Create a flavor
        extra_spec = {'hw:numa_nodes': '2'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        host_pass_mock = self._get_topology_filter_spy()
        # NOTE: contextlib.nested is deprecated since Python 2.7 and gone
        # in Python 3; use the multi-manager `with` statement instead.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection), \
                mock.patch(
                    'nova.scheduler.filters'
                    '.numa_topology_filter.NUMATopologyFilter.host_passes',
                    side_effect=host_pass_mock) as filter_mock:
            self._run_build_test(flavor_id, filter_mock)

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_numa_fails(self, img_mock):
        # Only one NUMA node on the host, but the flavor requests two, so
        # scheduling must fail and the instance end up in ERROR.
        host_info = NumaHostInfo(cpu_nodes=1, cpu_sockets=1, cpu_cores=2,
                                 kB_mem=15740000)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 host_info=host_info)
        # Create a flavor
        extra_spec = {'hw:numa_nodes': '2'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        host_pass_mock = self._get_topology_filter_spy()
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection), \
                mock.patch(
                    'nova.scheduler.filters'
                    '.numa_topology_filter.NUMATopologyFilter.host_passes',
                    side_effect=host_pass_mock) as filter_mock:
            self._run_build_test(flavor_id, filter_mock, end_status='ERROR')
|
apache-2.0
|
dparlevliet/zelenka-report-storage
|
server-local/twisted/python/test/test_versions.py
|
33
|
10680
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.versions}.
"""
from __future__ import division, absolute_import
import sys
import operator
from io import BytesIO
from twisted.python.versions import getVersionString, IncomparableVersions
from twisted.python.versions import Version, _inf
from twisted.python.filepath import FilePath
from twisted.trial.unittest import SynchronousTestCase as TestCase
# Sample ``.svn/entries`` data in Subversion working-copy format 4
# (XML-based); exercised by the format-4 parsing/detection tests below.
VERSION_4_ENTRIES = b"""\
<?xml version="1.0" encoding="utf-8"?>
<wc-entries
xmlns="svn:">
<entry
committed-rev="18210"
name=""
committed-date="2006-09-21T04:43:09.542953Z"
url="svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk/twisted"
last-author="exarkun"
kind="dir"
uuid="bbbe8e31-12d6-0310-92fd-ac37d47ddeeb"
repos="svn+ssh://svn.twistedmatrix.com/svn/Twisted"
revision="18211"/>
</wc-entries>
"""
# Format 8 entries file: plain text, one field per line
# (format number, node kind, revision, repository URL).
VERSION_8_ENTRIES = b"""\
8
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
# Format 9 entries file: same layout as format 8, newer format number.
VERSION_9_ENTRIES = b"""\
9
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
# Format 10 entries file: no separate "format" file exists for this
# version; the format number is the first line of "entries" itself.
VERSION_10_ENTRIES = b"""\
10
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
class VersionsTest(TestCase):
    """
    Tests for the comparison, formatting and SVN-parsing behavior of
    L{Version}.
    """

    def test_versionComparison(self):
        """
        Versions can be compared for equality and order.
        """
        va = Version("dummy", 1, 0, 0)
        vb = Version("dummy", 0, 1, 0)
        self.assertTrue(va > vb)
        self.assertTrue(vb < va)
        self.assertTrue(va >= vb)
        self.assertTrue(vb <= va)
        self.assertTrue(va != vb)
        self.assertTrue(vb == Version("dummy", 0, 1, 0))
        self.assertTrue(vb == vb)

    def test_comparingPrereleasesWithReleases(self):
        """
        Prereleases are always less than versions without prereleases.
        """
        va = Version("whatever", 1, 0, 0, prerelease=1)
        vb = Version("whatever", 1, 0, 0)
        self.assertTrue(va < vb)
        self.assertFalse(va > vb)
        # NOTE: assertNotEquals is a deprecated unittest alias; use
        # assertNotEqual.
        self.assertNotEqual(vb, va)

    def test_comparingPrereleases(self):
        """
        The value specified as the prerelease is used in version comparisons.
        """
        va = Version("whatever", 1, 0, 0, prerelease=1)
        vb = Version("whatever", 1, 0, 0, prerelease=2)
        self.assertTrue(va < vb)
        self.assertTrue(vb > va)
        self.assertTrue(va <= vb)
        self.assertTrue(vb >= va)
        self.assertTrue(va != vb)
        self.assertTrue(vb == Version("whatever", 1, 0, 0, prerelease=2))
        self.assertTrue(va == va)

    def test_infComparison(self):
        """
        L{_inf} is equal to L{_inf}.
        This is a regression test.
        """
        self.assertEqual(_inf, _inf)

    def test_disallowBuggyComparisons(self):
        """
        The package names of the Version objects need to be the same,
        """
        self.assertRaises(IncomparableVersions,
                          operator.eq,
                          Version("dummy", 1, 0, 0),
                          Version("dumym", 1, 0, 0))

    def test_notImplementedComparisons(self):
        """
        Comparing a L{Version} to some other object type results in
        C{NotImplemented}.
        """
        va = Version("dummy", 1, 0, 0)
        vb = ("dummy", 1, 0, 0)  # a tuple is not a Version object
        self.assertEqual(va.__cmp__(vb), NotImplemented)

    def test_repr(self):
        """
        Calling C{repr} on a version returns a human-readable string
        representation of the version.
        """
        self.assertEqual(repr(Version("dummy", 1, 2, 3)),
                         "Version('dummy', 1, 2, 3)")

    def test_reprWithPrerelease(self):
        """
        Calling C{repr} on a version with a prerelease returns a human-readable
        string representation of the version including the prerelease.
        """
        self.assertEqual(repr(Version("dummy", 1, 2, 3, prerelease=4)),
                         "Version('dummy', 1, 2, 3, prerelease=4)")

    def test_str(self):
        """
        Calling C{str} on a version returns a human-readable string
        representation of the version.
        """
        self.assertEqual(str(Version("dummy", 1, 2, 3)),
                         "[dummy, version 1.2.3]")

    def test_strWithPrerelease(self):
        """
        Calling C{str} on a version with a prerelease includes the prerelease.
        """
        self.assertEqual(str(Version("dummy", 1, 0, 0, prerelease=1)),
                         "[dummy, version 1.0.0pre1]")

    def testShort(self):
        """
        The C{short} method returns the dotted major.minor.micro string.
        """
        self.assertEqual(Version('dummy', 1, 2, 3).short(), '1.2.3')

    def test_goodSVNEntries_4(self):
        """
        Version should be able to parse an SVN format 4 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_4(BytesIO(VERSION_4_ENTRIES)), b'18211')

    def test_goodSVNEntries_8(self):
        """
        Version should be able to parse an SVN format 8 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_8(BytesIO(VERSION_8_ENTRIES)), b'22715')

    def test_goodSVNEntries_9(self):
        """
        Version should be able to parse an SVN format 9 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_9(BytesIO(VERSION_9_ENTRIES)), b'22715')

    def test_goodSVNEntriesTenPlus(self):
        """
        Version should be able to parse an SVN format 10 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntriesTenPlus(BytesIO(VERSION_10_ENTRIES)), b'22715')

    def test_getVersionString(self):
        """
        L{getVersionString} returns a string with the package name and the
        short version number.
        """
        self.assertEqual(
            'Twisted 8.0.0', getVersionString(Version('Twisted', 8, 0, 0)))

    def test_getVersionStringWithPrerelease(self):
        """
        L{getVersionString} includes the prerelease, if any.
        """
        self.assertEqual(
            getVersionString(Version("whatever", 8, 0, 0, prerelease=1)),
            "whatever 8.0.0pre1")

    def test_base(self):
        """
        The L{base} method returns a very simple representation of the version.
        """
        self.assertEqual(Version("foo", 1, 0, 0).base(), "1.0.0")

    def test_baseWithPrerelease(self):
        """
        The base version includes 'preX' for versions with prereleases.
        """
        self.assertEqual(Version("foo", 1, 0, 0, prerelease=8).base(),
                         "1.0.0pre8")
class FormatDiscoveryTests(TestCase):
    """
    Tests which discover the parsing method based on the imported module name.
    """

    def mktemp(self):
        # FilePath child names below are bytes, so the temporary path must
        # be bytes as well.
        return TestCase.mktemp(self).encode("utf-8")

    def setUp(self):
        """
        Create a temporary directory with a package structure in it.
        """
        self.entry = FilePath(self.mktemp())
        # Snapshot sys.modules so tearDown can restore the import state.
        self.preTestModules = sys.modules.copy()
        sys.path.append(self.entry.path.decode('utf-8'))
        pkg = self.entry.child(b"twisted_python_versions_package")
        pkg.makedirs()
        pkg.child(b"__init__.py").setContent(
            b"from twisted.python.versions import Version\n"
            b"version = Version('twisted_python_versions_package', 1, 0, 0)\n")
        # Fake .svn metadata directory the Version code will inspect.
        self.svnEntries = pkg.child(b".svn")
        self.svnEntries.makedirs()

    def tearDown(self):
        """
        Remove the imported modules and sys.path modifications.
        """
        sys.modules.clear()
        sys.modules.update(self.preTestModules)
        sys.path.remove(self.entry.path.decode('utf-8'))

    def checkSVNFormat(self, formatVersion, entriesText, expectedRevision):
        """
        Check for the given revision being detected after setting the SVN
        entries text and format version of the test directory structure.
        """
        self.svnEntries.child(b"format").setContent(formatVersion + b"\n")
        self.svnEntries.child(b"entries").setContent(entriesText)
        self.assertEqual(self.getVersion()._getSVNVersion(), expectedRevision)

    def getVersion(self):
        """
        Import and retrieve the Version object from our dynamically created
        package.
        """
        import twisted_python_versions_package
        return twisted_python_versions_package.version

    def test_detectVersion4(self):
        """
        Verify that version 4 format file will be properly detected and parsed.
        """
        self.checkSVNFormat(b"4", VERSION_4_ENTRIES, b'18211')

    def test_detectVersion8(self):
        """
        Verify that version 8 format files will be properly detected and
        parsed.
        """
        self.checkSVNFormat(b"8", VERSION_8_ENTRIES, b'22715')

    def test_detectVersion9(self):
        """
        Verify that version 9 format files will be properly detected and
        parsed.
        """
        self.checkSVNFormat(b"9", VERSION_9_ENTRIES, b'22715')

    def test_unparseableEntries(self):
        """
        Verify that the result is C{b"Unknown"} for an apparently supported
        version for which parsing of the entries file fails.
        """
        self.checkSVNFormat(b"4", b"some unsupported stuff", b"Unknown")

    def test_detectVersion10(self):
        """
        Verify that version 10 format files will be properly detected and
        parsed.
        Differing from previous formats, the version 10 format lacks a
        I{format} file and B{only} has the version information on the first
        line of the I{entries} file.
        """
        self.svnEntries.child(b"entries").setContent(VERSION_10_ENTRIES)
        self.assertEqual(self.getVersion()._getSVNVersion(), b'22715')

    def test_detectUnknownVersion(self):
        """
        Verify that a new version of SVN will result in the revision 'Unknown'.
        """
        self.checkSVNFormat(b"some-random-new-version", b"ooga booga!", b'Unknown')

    def test_getVersionStringWithRevision(self):
        """
        L{getVersionString} includes the discovered revision number.
        """
        self.svnEntries.child(b"format").setContent(b"9\n")
        self.svnEntries.child(b"entries").setContent(VERSION_10_ENTRIES)
        version = getVersionString(self.getVersion())
        self.assertEqual(
            "twisted_python_versions_package 1.0.0+r22715",
            version)
        # The version string must be native str, not bytes.
        self.assertTrue(isinstance(version, type("")))
|
lgpl-3.0
|
OpenFacetracker/facetracker-core
|
lib/youtube-dl/youtube_dl/extractor/unistra.py
|
146
|
2119
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import qualities
class UnistraIE(InfoExtractor):
    """Extractor for videos hosted on utv.unistra.fr."""

    _VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://utv.unistra.fr/video.php?id_video=154',
            'md5': '736f605cfdc96724d55bb543ab3ced24',
            'info_dict': {
                'id': '154',
                'ext': 'mp4',
                'title': 'M!ss Yella',
                'description': 'md5:104892c71bd48e55d70b902736b81bbf',
            },
        },
        {
            'url': 'http://utv.unistra.fr/index.php?id_video=437',
            'md5': '1ddddd6cccaae76f622ce29b8779636d',
            'info_dict': {
                'id': '437',
                'ext': 'mp4',
                'title': 'Prix Louise Weiss 2014',
                'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
            },
        }
    ]

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        quality = qualities(['SD', 'HD'])
        formats = []
        # Each distinct file path in the page becomes one format entry.
        for file_path in set(re.findall(r'file\s*:\s*"([^"]+)"', webpage)):
            if file_path.endswith('-HD.mp4'):
                format_id = 'HD'
            else:
                format_id = 'SD'
            formats.append({
                'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
                'format_id': format_id,
                'quality': quality(format_id)
            })

        return {
            'id': video_id,
            'title': self._html_search_regex(
                r'<title>UTV - (.*?)</', webpage, 'title'),
            'description': self._html_search_regex(
                r'<meta name="Description" content="(.*?)"', webpage,
                'description', flags=re.DOTALL),
            'thumbnail': self._search_regex(
                r'image: "(.*?)"', webpage, 'thumbnail'),
            'formats': formats
        }
|
gpl-2.0
|
JioCloud/nova
|
nova/api/openstack/compute/plugins/v3/extended_availability_zone.py
|
36
|
2533
|
# Copyright 2013 Netease, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Availability Zone Status API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import availability_zones as avail_zone
ALIAS = "os-extended-availability-zone"
authorize = extensions.os_compute_soft_authorizer(ALIAS)
PREFIX = "OS-EXT-AZ"
class ExtendedAZController(wsgi.Controller):
    """Attach the OS-EXT-AZ availability zone attribute to server views."""

    def _extend_server(self, context, server, instance):
        az = avail_zone.get_instance_availability_zone(context, instance)
        if not az and instance.get('availability_zone'):
            # Likely hasn't reached a viable compute node yet so give back
            # the desired availability_zone that *may* exist in the
            # instance record itself.
            az = instance.availability_zone
        server["%s:availability_zone" % PREFIX] = az

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['nova.context']
        if not authorize(context):
            return
        server = resp_obj.obj['server']
        self._extend_server(context, server,
                            req.get_db_instance(server['id']))

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['nova.context']
        if not authorize(context):
            return
        for server in list(resp_obj.obj['servers']):
            self._extend_server(context, server,
                                req.get_db_instance(server['id']))
class ExtendedAvailabilityZone(extensions.V3APIExtensionBase):
    """Extended Availability Zone support."""

    name = "ExtendedAvailabilityZone"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """Hook the AZ controller extension onto the servers resource."""
        return [extensions.ControllerExtension(
            self, 'servers', ExtendedAZController())]

    def get_resources(self):
        """This extension defines no new top-level resources."""
        return []
|
apache-2.0
|
rossgoodwin/drgonzo
|
en/wordnet/wordnet.py
|
11
|
41682
|
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""An OO interface to the WordNet database.
Usage
-----
>>> from wordnet import *
>>> # Retrieve words from the database
>>> N['dog']
dog(n.)
>>> V['dog']
dog(v.)
>>> ADJ['clear']
clear(adj.)
>>> ADV['clearly']
clearly(adv.)
>>> # Examine a word's senses and pointers:
>>> N['dog'].getSenses()
('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
>>> # Extract the first sense
>>> dog = N['dog'][0] # aka N['dog'].getSenses()[0]
>>> dog
'dog' in {noun: dog, domestic dog, Canis familiaris}
>>> dog.getPointers()[:5]
(hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
>>> dog.getPointerTargets(MEMBER_MERONYM)
[{noun: Canis, genus Canis}, {noun: pack}]
"""
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0.1"
import string
import os
from os import environ
from types import IntType, ListType, StringType, TupleType
#
# Configuration variables
#
# Root of the WordNet installation.  Overridable through the WNHOME
# environment variable, with per-platform defaults.
WNHOME = environ.get('WNHOME', {
    'mac': ":",
    'dos': "C:\\wn16",
    'nt': "C:\\Program Files\\WordNet\\2.0"}
    .get(os.name, "/usr/local/wordnet2.0"))
# Directory containing the WordNet data files ("dict" on most platforms,
# "Database" on classic Mac OS).
WNSEARCHDIR = environ.get('WNSEARCHDIR', os.path.join(WNHOME, {'mac': "Database"}.get(os.name, "dict")))
ReadableRepresentations = 1
"""If true, repr(word), repr(sense), and repr(synset) return
human-readable strings instead of strings that evaluate to an object
equal to the argument.
This breaks the contract for repr, but it makes the system much more
usable from the command line."""
# Nonzero enables tracing of database lookups (debugging aid).
_TraceLookups = 0
_FILE_OPEN_MODE = os.name in ('dos', 'nt') and 'rb' or 'r' # work around a Windows Python bug
#
# Enumerated types
#
# Part-of-speech names.
NOUN = 'noun'
VERB = 'verb'
ADJECTIVE = 'adjective'
ADVERB = 'adverb'
PartsOfSpeech = (NOUN, VERB, ADJECTIVE, ADVERB)
# Pointer (relation) type names, usable with getPointers(pointerType).
ANTONYM = 'antonym'
HYPERNYM = 'hypernym'
HYPONYM = 'hyponym'
ATTRIBUTE = 'attribute'
ALSO_SEE = 'also see'
ENTAILMENT = 'entailment'
CAUSE = 'cause'
VERB_GROUP = 'verb group'
MEMBER_MERONYM = 'member meronym'
SUBSTANCE_MERONYM = 'substance meronym'
PART_MERONYM = 'part meronym'
MEMBER_HOLONYM = 'member holonym'
SUBSTANCE_HOLONYM = 'substance holonym'
PART_HOLONYM = 'part holonym'
SIMILAR = 'similar'
PARTICIPLE_OF = 'participle of'
PERTAINYM = 'pertainym'
# New in wn 2.0:
FRAMES = 'frames'
CLASSIF_CATEGORY = 'domain category'
CLASSIF_USAGE = 'domain usage'
CLASSIF_REGIONAL = 'domain regional'
CLASS_CATEGORY = 'class category'
CLASS_USAGE = 'class usage'
CLASS_REGIONAL = 'class regional'
# The full set of pointer types recognized by this module.
POINTER_TYPES = (
    ANTONYM,
    HYPERNYM,
    HYPONYM,
    ATTRIBUTE,
    ALSO_SEE,
    ENTAILMENT,
    CAUSE,
    VERB_GROUP,
    MEMBER_MERONYM,
    SUBSTANCE_MERONYM,
    PART_MERONYM,
    MEMBER_HOLONYM,
    SUBSTANCE_HOLONYM,
    PART_HOLONYM,
    SIMILAR,
    PARTICIPLE_OF,
    PERTAINYM,
    # New in wn 2.0:
    FRAMES,
    CLASSIF_CATEGORY,
    CLASSIF_USAGE,
    CLASSIF_REGIONAL,
    CLASS_CATEGORY,
    CLASS_USAGE,
    CLASS_REGIONAL,
    )
# Adjective syntactic positions; None means "unrestricted".
ATTRIBUTIVE = 'attributive'
PREDICATIVE = 'predicative'
IMMEDIATE_POSTNOMINAL = 'immediate postnominal'
ADJECTIVE_POSITIONS = (ATTRIBUTIVE, PREDICATIVE, IMMEDIATE_POSTNOMINAL, None)
# Templates for WordNet's verb frames.  Index 0 is unused so that the
# tuple can be indexed directly with WordNet's 1-based frame numbers.
VERB_FRAME_STRINGS = (
    None,
    "Something %s",
    "Somebody %s",
    "It is %sing",
    "Something is %sing PP",
    "Something %s something Adjective/Noun",
    "Something %s Adjective/Noun",
    "Somebody %s Adjective",
    "Somebody %s something",
    "Somebody %s somebody",
    "Something %s somebody",
    "Something %s something",
    "Something %s to somebody",
    "Somebody %s on something",
    "Somebody %s somebody something",
    "Somebody %s something to somebody",
    "Somebody %s something from somebody",
    "Somebody %s somebody with something",
    "Somebody %s somebody of something",
    "Somebody %s something on somebody",
    "Somebody %s somebody PP",
    "Somebody %s something PP",
    "Somebody %s PP",
    "Somebody's (body part) %s",
    "Somebody %s somebody to INFINITIVE",
    "Somebody %s somebody INFINITIVE",
    "Somebody %s that CLAUSE",
    "Somebody %s to somebody",
    "Somebody %s to INFINITIVE",
    "Somebody %s whether INFINITIVE",
    "Somebody %s somebody into V-ing something",
    "Somebody %s something with something",
    "Somebody %s INFINITIVE",
    "Somebody %s VERB-ing",
    "It %s that CLAUSE",
    "Something %s INFINITIVE")
#
# Domain classes
#
class Word:
    """An index into the database.

    Each word has one or more Senses, which can be accessed via
    ``word.getSenses()`` or through the index notation, ``word[n]``.

    Fields
    ------
    form : string
        The orthographic representation of the word.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    string : string
        Same as form (for compatability with version 1.0).
    taggedSenseCount : integer
        The number of senses that are tagged.

    Examples
    --------
    >>> N['dog'].pos
    'noun'
    >>> N['dog'].form
    'dog'
    >>> N['dog'].taggedSenseCount
    1
    """

    def __init__(self, line):
        """Initialize the word from a line of a WN POS file."""
        tokens = string.split(line)
        ints = map(int, tokens[int(tokens[3]) + 4:])
        # Orthographic representation of the word.
        self.form = string.replace(tokens[0], '_', ' ')
        # Part of speech. One of NOUN, VERB, ADJECTIVE, ADVERB.
        self.pos = _normalizePOS(tokens[1])
        # Number of senses that are tagged.
        self.taggedSenseCount = ints[1]
        # File offsets of this word's synsets; consumed and deleted by
        # getSenses().
        self._synsetOffsets = ints[2:ints[0]+2]

    def getPointers(self, pointerType=None):
        """Pointers connect senses and synsets, not words.
        Try word[0].getPointers() instead."""
        # Bug fix: raising a bare string is a TypeError on Python >= 2.6;
        # raise a real exception carrying the explanatory docstring.
        raise NotImplementedError(self.getPointers.__doc__)

    def getPointerTargets(self, pointerType=None):
        """Pointers connect senses and synsets, not words.
        Try word[0].getPointerTargets() instead."""
        # Bug fix: this previously raised getPointers' docstring, and as a
        # bare string (see getPointers above).
        raise NotImplementedError(self.getPointerTargets.__doc__)

    def getSenses(self):
        """Return a sequence of senses.

        >>> N['dog'].getSenses()
        ('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
        """
        if not hasattr(self, '_senses'):
            def getSense(offset, pos=self.pos, form=self.form):
                return getSynset(pos, offset)[form]
            self._senses = tuple(map(getSense, self._synsetOffsets))
            del self._synsetOffsets
        return self._senses

    # Deprecated. Present for backwards compatability.
    def senses(self):
        # Bug fix: this used to call the nonexistent self.getSense(),
        # raising AttributeError for every caller.
        return self.getSenses()

    def isTagged(self):
        """Return 1 if any sense is tagged.

        >>> N['dog'].isTagged()
        1
        """
        return self.taggedSenseCount > 0

    def getAdjectivePositions(self):
        """Return a sequence of adjective positions that this word can
        appear in. These are elements of ADJECTIVE_POSITIONS.

        >>> ADJ['clear'].getAdjectivePositions()
        [None, 'predicative']
        """
        # Collect the distinct positions over all senses; a dict is used
        # as a set here (this predates the set type).
        positions = {}
        for sense in self.getSenses():
            positions[sense.position] = 1
        return positions.keys()

    adjectivePositions = getAdjectivePositions  # backwards compatability

    def __cmp__(self, other):
        """
        >>> N['cat'] < N['dog']
        1
        >>> N['dog'] < V['dog']
        1
        """
        return _compareInstances(self, other, ('pos', 'form'))

    def __str__(self):
        """Return a human-readable representation.

        >>> str(N['dog'])
        'dog(n.)'
        """
        abbrs = {NOUN: 'n.', VERB: 'v.', ADJECTIVE: 'adj.', ADVERB: 'adv.'}
        return self.form + "(" + abbrs[self.pos] + ")"

    def __repr__(self):
        """If ReadableRepresentations is true, return a human-readable
        representation, e.g. 'dog(n.)'.
        If ReadableRepresentations is false, return a machine-readable
        representation, e.g. "getWord('dog', 'noun')".
        """
        if ReadableRepresentations:
            return str(self)
        # repr() replaces the long-deprecated backquote syntax.
        return "getWord" + repr((self.form, self.pos))

    #
    # Sequence protocol (a Word's elements are its Senses)
    #
    def __nonzero__(self):
        return 1

    def __len__(self):
        return len(self.getSenses())

    def __getitem__(self, index):
        return self.getSenses()[index]

    def __getslice__(self, i, j):
        return self.getSenses()[i:j]
class Synset:
"""A set of synonyms that share a common meaning.
Each synonym contains one or more Senses, which represent a
specific sense of a specific word. Senses can be retrieved via
synset.getSenses() or through the index notations synset[0],
synset[string], or synset[word]. Synsets also originate zero or
more typed pointers, which can be accessed via
synset.getPointers() or synset.getPointers(pointerType). The
targets of a synset pointer can be retrieved via
synset.getPointerTargets() or
synset.getPointerTargets(pointerType), which are equivalent to
map(Pointer.target, synset.getPointerTargets(...)).
Fields
------
pos : string
The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
offset : integer
An integer offset into the part-of-speech file. Together
with pos, this can be used as a unique id.
gloss : string
A gloss for the sense.
verbFrames : [integer]
A sequence of integers that index into
VERB_FRAME_STRINGS. These list the verb frames that any
Sense in this synset participates in. (See also
Sense.verbFrames.) Defined only for verbs.
>>> V['think'][0].synset.verbFrames
(5, 9)
"""
def __init__(self, pos, offset, line):
"Initialize the synset from a line off a WN synset file."
self.pos = pos
"part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB."
self.offset = offset
"""integer offset into the part-of-speech file. Together
with pos, this can be used as a unique id."""
tokens = string.split(line[:string.index(line, '|')])
self.ssType = tokens[2]
self.gloss = string.strip(line[string.index(line, '|') + 1:])
self.lexname = Lexname.lexnames[int(tokens[1])]
(self._senseTuples, remainder) = _partition(tokens[4:], 2, string.atoi(tokens[3], 16))
(self._pointerTuples, remainder) = _partition(remainder[1:], 4, int(remainder[0]))
if pos == VERB:
(vfTuples, remainder) = _partition(remainder[1:], 3, int(remainder[0]))
def extractVerbFrames(index, vfTuples):
return tuple(map(lambda t:string.atoi(t[1]), filter(lambda t,i=index:string.atoi(t[2],16) in (0, i), vfTuples)))
senseVerbFrames = []
for index in range(1, len(self._senseTuples) + 1):
senseVerbFrames.append(extractVerbFrames(index, vfTuples))
self._senseVerbFrames = senseVerbFrames
self.verbFrames = tuple(extractVerbFrames(None, vfTuples))
"""A sequence of integers that index into
VERB_FRAME_STRINGS. These list the verb frames that any
Sense in this synset participates in. (See also
Sense.verbFrames.) Defined only for verbs."""
def getSenses(self):
"""Return a sequence of Senses.
>>> N['dog'][0].getSenses()
('dog' in {noun: dog, domestic dog, Canis familiaris},)
"""
if not hasattr(self, '_senses'):
def loadSense(senseTuple, verbFrames=None, synset=self):
return Sense(synset, senseTuple, verbFrames)
if self.pos == VERB:
self._senses = tuple(map(loadSense, self._senseTuples, self._senseVerbFrames))
del self._senseVerbFrames
else:
self._senses = tuple(map(loadSense, self._senseTuples))
del self._senseTuples
return self._senses
senses = getSenses
def getPointers(self, pointerType=None):
"""Return a sequence of Pointers.
If pointerType is specified, only pointers of that type are
returned. In this case, pointerType should be an element of
POINTER_TYPES.
>>> N['dog'][0].getPointers()[:5]
(hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
>>> N['dog'][0].getPointers(HYPERNYM)
(hypernym -> {noun: canine, canid},)
"""
if not hasattr(self, '_pointers'):
def loadPointer(tuple, synset=self):
return Pointer(synset.offset, tuple)
self._pointers = tuple(map(loadPointer, self._pointerTuples))
del self._pointerTuples
if pointerType == None:
return self._pointers
else:
_requirePointerType(pointerType)
return filter(lambda pointer, type=pointerType: pointer.type == type, self._pointers)
pointers = getPointers # backwards compatability
def getPointerTargets(self, pointerType=None):
"""Return a sequence of Senses or Synsets.
If pointerType is specified, only targets of pointers of that
type are returned. In this case, pointerType should be an
element of POINTER_TYPES.
>>> N['dog'][0].getPointerTargets()[:5]
[{noun: canine, canid}, {noun: Canis, genus Canis}, {noun: pack}, {noun: pooch, doggie, doggy, barker, bow-wow}, {noun: cur, mongrel, mutt}]
>>> N['dog'][0].getPointerTargets(HYPERNYM)
[{noun: canine, canid}]
"""
return map(Pointer.target, self.getPointers(pointerType))
pointerTargets = getPointerTargets # backwards compatability
def isTagged(self):
"""Return 1 if any sense is tagged.
>>> N['dog'][0].isTagged()
1
>>> N['dog'][1].isTagged()
0
"""
return len(filter(Sense.isTagged, self.getSenses())) > 0
def __str__(self):
"""Return a human-readable representation.
>>> str(N['dog'][0].synset)
'{noun: dog, domestic dog, Canis familiaris}'
"""
return "{" + self.pos + ": " + string.joinfields(map(lambda sense:sense.form, self.getSenses()), ", ") + "}"
def __repr__(self):
"""If ReadableRepresentations is true, return a human-readable
representation, e.g. 'dog(n.)'.
If ReadableRepresentations is false, return a machine-readable
representation, e.g. "getSynset(pos, 1234)".
"""
if ReadableRepresentations:
return str(self)
return "getSynset" + `(self.pos, self.offset)`
def __cmp__(self, other):
return _compareInstances(self, other, ('pos', 'offset'))
#
# Sequence protocol (a Synset's elements are its senses).
#
def __nonzero__(self):
return 1
def __len__(self):
"""
>>> len(N['dog'][0].synset)
3
"""
return len(self.getSenses())
def __getitem__(self, idx):
"""
>>> N['dog'][0].synset[0] == N['dog'][0]
1
>>> N['dog'][0].synset['dog'] == N['dog'][0]
1
>>> N['dog'][0].synset[N['dog']] == N['dog'][0]
1
>>> N['cat'][6]
'cat' in {noun: big cat, cat}
"""
senses = self.getSenses()
if isinstance(idx, Word):
idx = idx.form
if isinstance(idx, StringType):
idx = _index(idx, map(lambda sense:sense.form, senses)) or \
_index(idx, map(lambda sense:sense.form, senses), _equalsIgnoreCase)
return senses[idx]
def __getslice__(self, i, j):
    # Slices delegate to the sense list (Python-2-only slice protocol).
    return self.getSenses()[i:j]
class Sense:
    """A specific meaning of a specific word -- the intersection of a Word and a Synset.

    Fields
    ------
    form : string
        The orthographic representation of the Word this is a Sense of.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB
    string : string
        The same as form (for compatability with version 1.0).
    synset : Synset
        The Synset that this Sense is a sense of.
    verbFrames : [integer]
        A sequence of integers that index into
        VERB_FRAME_STRINGS. These list the verb frames that this
        Sense partipates in. Defined only for verbs.

    >>> decide = V['decide'][0].synset # first synset for 'decide'
    >>> decide[0].verbFrames
    (8, 2, 26, 29)
    >>> decide[1].verbFrames
    (8, 2)
    >>> decide[2].verbFrames
    (8, 26, 29)
    """
    def __init__(sense, synset, senseTuple, verbFrames=None):
        "Initialize a sense from a synset's senseTuple."
        # synset is stored by key (pos, synset) rather than object
        # reference, to avoid creating a circular reference between
        # Senses and Synsets that will prevent the vm from
        # garbage-collecting them.
        sense.pos = synset.pos
        "part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"
        sense.synsetOffset = synset.offset
        "synset key. This is used to retrieve the sense."
        sense.verbFrames = verbFrames
        """A sequence of integers that index into
        VERB_FRAME_STRINGS. These list the verb frames that this
        Sense partipates in. Defined only for verbs."""
        (form, idString) = senseTuple
        sense.position = None
        # A parenthesized suffix on the form encodes an adjective
        # position marker; strip it off and record the position.
        if '(' in form:
            index = string.index(form, '(')
            key = form[index + 1:-1]
            form = form[:index]
            if key == 'a':
                sense.position = ATTRIBUTIVE
            elif key == 'p':
                sense.position = PREDICATIVE
            elif key == 'ip':
                sense.position = IMMEDIATE_POSTNOMINAL
            else:
                # NOTE(review): raising a plain string is rejected on
                # Python 2.6+; this should raise an exception class.
                raise "unknown attribute " + key
        # WordNet data files encode spaces in lemmas as underscores.
        sense.form = string.replace(form, '_', ' ')
        "orthographic representation of the Word this is a Sense of."
    def __getattr__(self, name):
        # see the note at __init__ about why 'synset' is provided as a
        # 'virtual' slot
        if name == 'synset':
            return getSynset(self.pos, self.synsetOffset)
        elif name == 'lexname':
            return self.synset.lexname
        else:
            raise AttributeError, name
    def __str__(self):
        """Return a human-readable representation, e.g.
        "'dog' in {noun: dog, domestic dog, Canis familiaris}".
        """
        return `self.form` + " in " + str(self.synset)
    def __repr__(self):
        """If ReadableRepresentations is true, return a human-readable
        representation (same as str()).
        If ReadableRepresentations is false, return a machine-readable
        representation, e.g. "getSynset(pos, 1234)['dog']".
        """
        if ReadableRepresentations:
            return str(self)
        return "%s[%s]" % (`self.synset`, `self.form`)
    def getPointers(self, pointerType=None):
        """Return a sequence of Pointers.
        If pointerType is specified, only pointers of that type are
        returned. In this case, pointerType should be an element of
        POINTER_TYPES.
        >>> N['dog'][0].getPointers()[:5]
        (hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
        >>> N['dog'][0].getPointers(HYPERNYM)
        (hypernym -> {noun: canine, canid},)
        """
        # Keep synset-level pointers (sourceIndex == 0) plus pointers
        # whose source word position matches this sense's position.
        senseIndex = _index(self, self.synset.getSenses())
        def pointsFromThisSense(pointer, selfIndex=senseIndex):
            return pointer.sourceIndex == 0 or pointer.sourceIndex - 1 == selfIndex
        return filter(pointsFromThisSense, self.synset.getPointers(pointerType))
    pointers = getPointers # backwards compatability
    def getPointerTargets(self, pointerType=None):
        """Return a sequence of Senses or Synsets.
        If pointerType is specified, only targets of pointers of that
        type are returned. In this case, pointerType should be an
        element of POINTER_TYPES.
        >>> N['dog'][0].getPointerTargets()[:5]
        [{noun: canine, canid}, {noun: Canis, genus Canis}, {noun: pack}, {noun: pooch, doggie, doggy, barker, bow-wow}, {noun: cur, mongrel, mutt}]
        >>> N['dog'][0].getPointerTargets(HYPERNYM)
        [{noun: canine, canid}]
        """
        return map(Pointer.target, self.getPointers(pointerType))
    pointerTargets = getPointerTargets # backwards compatability
    def getSenses(self):
        # A Sense is its own single-element sense sequence (the trailing
        # comma makes this a one-tuple).
        return self,
    senses = getSenses # backwards compatability
    def isTagged(self):
        """Return 1 if any sense is tagged.
        >>> N['dog'][0].isTagged()
        1
        >>> N['dog'][1].isTagged()
        0
        """
        # NOTE(review): presumably tagged senses precede untagged ones
        # in the word's sense list, so position < taggedSenseCount --
        # verify against the index-file format.
        word = self.word()
        return _index(self, word.getSenses()) < word.taggedSenseCount
    def getWord(self):
        """Return the Word this is a sense of."""
        return getWord(self.form, self.pos)
    word = getWord # backwards compatability
    def __cmp__(self, other):
        # Order by synset first, then by position within the synset
        # (compared by form, since Sense identity would recurse).
        def senseIndex(sense, synset=self.synset):
            return _index(sense, synset.getSenses(), testfn=lambda a,b: a.form == b.form)
        return _compareInstances(self, other, ('synset',)) or cmp(senseIndex(self), senseIndex(other))
class Pointer:
    """ A typed directional relationship between Senses or Synsets.

    Fields
    ------
    type : string
        One of POINTER_TYPES.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    """
    # Maps the pointer symbols used in the WordNet data files to the
    # symbolic pointer-type names.
    _POINTER_TYPE_TABLE = {
        '!': ANTONYM,
        '@': HYPERNYM,
        '~': HYPONYM,
        '~i': HYPONYM, # Tom De Smedt, 2006:
        '@i': HYPERNYM, # yields a KeyError otherwise
        '=': ATTRIBUTE,
        '^': ALSO_SEE,
        '*': ENTAILMENT,
        '>': CAUSE,
        '$': VERB_GROUP,
        '#m': MEMBER_MERONYM,
        '#s': SUBSTANCE_MERONYM,
        '#p': PART_MERONYM,
        '%m': MEMBER_HOLONYM,
        '%s': SUBSTANCE_HOLONYM,
        '%p': PART_HOLONYM,
        '&': SIMILAR,
        '<': PARTICIPLE_OF,
        '\\': PERTAINYM,
        # New in wn 2.0:
        '+': FRAMES,
        ';c': CLASSIF_CATEGORY,
        ';u': CLASSIF_USAGE,
        ';r': CLASSIF_REGIONAL,
        '-c': CLASS_CATEGORY,
        '-u': CLASS_USAGE,
        '-r': CLASS_REGIONAL
    }
    def __init__(self, sourceOffset, pointerTuple):
        """Build a pointer from its data-file tuple; sourceOffset is the
        offset of the synset the pointer belongs to."""
        (type, offset, pos, indices) = pointerTuple
        self.type = Pointer._POINTER_TYPE_TABLE[type]
        """One of POINTER_TYPES."""
        self.sourceOffset = sourceOffset
        self.targetOffset = int(offset)
        self.pos = _normalizePOS(pos)
        """part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"""
        # 'indices' is parsed as hexadecimal: the high byte is the
        # source word number, the low byte the target word number
        # (0 denotes the whole synset).
        indices = string.atoi(indices, 16)
        self.sourceIndex = indices >> 8
        self.targetIndex = indices & 255
    def getSource(self):
        """Return the source Sense (word-level pointer) or the whole
        source Synset (sourceIndex == 0)."""
        synset = getSynset(self.pos, self.sourceOffset)
        if self.sourceIndex:
            return synset[self.sourceIndex - 1]
        else:
            return synset
    source = getSource # backwards compatability
    def getTarget(self):
        """Return the target Sense or Synset (cf. getSource)."""
        synset = getSynset(self.pos, self.targetOffset)
        if self.targetIndex:
            return synset[self.targetIndex - 1]
        else:
            return synset
    target = getTarget # backwards compatability
    def __str__(self):
        return self.type + " -> " + str(self.target())
    def __repr__(self):
        if ReadableRepresentations:
            return str(self)
        return "<" + str(self) + ">"
    def __cmp__(self, other):
        # Order by pos and source synset, then by position among the
        # source synset's pointers.
        diff = _compareInstances(self, other, ('pos', 'sourceOffset'))
        if diff:
            return diff
        synset = self.source()
        def pointerIndex(sense, synset=synset):
            return _index(sense, synset.getPointers(), testfn=lambda a,b: not _compareInstances(a, b, ('type', 'sourceIndex', 'targetIndex')))
        return cmp(pointerIndex(self), pointerIndex(other))
# Loading the lexnames
# Klaus Ries <ries@cs.cmu.edu>
class Lexname:
    """A lexicographer-file name; every instance registers itself in the
    class-level tables 'dict' (by name) and 'lexnames' (in file order)."""
    dict = {}
    lexnames = []
    def __init__(self,name,category):
        self.name = name
        self.category = category
        # Register this instance in the class-level lookup tables.
        Lexname.dict[name] = self
        Lexname.lexnames.append(self)
    def __str__(self):
        return self.name
def setupLexnames():
    """Populate the Lexname tables from WordNet's 'lexnames' file."""
    for l in open(WNSEARCHDIR+'/lexnames').readlines():
        # Each line is: index, name, part-of-speech category (1-based).
        i,name,category = string.split(l)
        Lexname(name,PartsOfSpeech[int(category)-1])
setupLexnames()
#
# Dictionary
#
class Dictionary:
    """A Dictionary contains all the Words in a given part of speech.
    This module defines four dictionaries, bound to N, V, ADJ, and ADV.

    Indexing a dictionary by a string retrieves the word named by that
    string, e.g. dict['dog']. Indexing by an integer n retrieves the
    nth word, e.g. dict[0]. Access by an arbitrary integer is very
    slow except in the special case where the words are accessed
    sequentially; this is to support the use of dictionaries as the
    range of a for statement and as the sequence argument to map and
    filter.

    Example
    -------
    >>> N['dog']
    dog(n.)

    Fields
    ------
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    """
    def __init__(self, pos, filenameroot):
        self.pos = pos
        """part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"""
        # indexFile maps forms to index lines; dataFile holds the synset
        # records, looked up by byte offset.
        self.indexFile = _IndexFile(pos, filenameroot)
        self.dataFile = open(_dataFilePathname(filenameroot), _FILE_OPEN_MODE)
    def __repr__(self):
        # Report the module-level variable name (N, V, ADJ, ADV) when
        # this instance is one of the four standard dictionaries.
        dictionaryVariables = {N: 'N', V: 'V', ADJ: 'ADJ', ADV: 'ADV'}
        if dictionaryVariables.get(self):
            return self.__module__ + "." + dictionaryVariables[self]
        return "<%s.%s instance for %s>" % (self.__module__, "Dictionary", self.pos)
    def getWord(self, form, line=None):
        """Return the Word for the given lexical form; raise KeyError if
        it is not in this part of speech's index.  'line' can supply a
        pre-fetched index line."""
        # Index keys are lowercase with spaces encoded as underscores.
        key = string.replace(string.lower(form), ' ', '_')
        pos = self.pos
        def loader(key=key, line=line, indexFile=self.indexFile):
            line = line or indexFile.get(key)
            return line and Word(line)
        word = _entityCache.get((pos, key), loader)
        if word:
            return word
        else:
            raise KeyError, "%s is not in the %s database" % (`form`, `pos`)
    def getSynset(self, offset):
        """Return the Synset stored at the given data-file byte offset,
        going through the entity cache."""
        pos = self.pos
        def loader(pos=pos, offset=offset, dataFile=self.dataFile):
            return Synset(pos, offset, _lineAt(dataFile, offset))
        return _entityCache.get((pos, offset), loader)
    def _buildIndexCacheFile(self):
        self.indexFile._buildIndexCacheFile()
    #
    # Sequence protocol (a Dictionary's items are its Words)
    #
    def __nonzero__(self):
        """Return false. (This is to avoid scanning the whole index file
        to compute len when a Dictionary is used in test position.)
        >>> N and 'true'
        'true'
        """
        return 1
    def __len__(self):
        """Return the number of index entries.
        >>> len(ADJ)
        21435
        """
        # Cached after the first (expensive) full count.
        if not hasattr(self, 'length'):
            self.length = len(self.indexFile)
        return self.length
    def __getslice__(self, a, b):
        results = []
        if type(a) == type('') and type(b) == type(''):
            # NOTE(review): string exception; fails on Python 2.6+.
            raise "unimplemented"
        elif type(a) == type(1) and type(b) == type(1):
            for i in range(a, b):
                results.append(self[i])
        else:
            raise TypeError
        return results
    def __getitem__(self, index):
        """If index is a String, return the Word whose form is
        index. If index is an integer n, return the Word
        indexed by the n'th Word in the Index file.
        >>> N['dog']
        dog(n.)
        >>> N[0]
        'hood(n.)
        """
        if isinstance(index, StringType):
            return self.getWord(index)
        elif isinstance(index, IntType):
            # Fetch the n'th index line, decode the key field, and pass
            # the line along so getWord need not re-read it.
            line = self.indexFile[index]
            return self.getWord(string.replace(line[:string.find(line, ' ')], '_', ' '), line)
        else:
            raise TypeError, "%s is not a String or Int" % `index`
    #
    # Dictionary protocol
    #
    # a Dictionary's values are its words, keyed by their form
    #
    def get(self, key, default=None):
        """Return the Word whose form is _key_, or _default_.
        >>> N.get('dog')
        dog(n.)
        >>> N.get('inu')
        """
        try:
            return self[key]
        except LookupError:
            return default
    def keys(self):
        """Return a sorted list of strings that index words in this
        dictionary."""
        return self.indexFile.keys()
    def has_key(self, form):
        """Return true iff the argument indexes a word in this dictionary.
        >>> N.has_key('dog')
        1
        >>> N.has_key('inu')
        0
        """
        return self.indexFile.has_key(form)
    #
    # Testing
    #
    def _testKeys(self):
        """Verify that index lookup can find each word in the index file."""
        print "Testing: ", self
        file = open(self.indexFile.file.name, _FILE_OPEN_MODE)
        counter = 0
        while 1:
            line = file.readline()
            if line == '': break
            # Data lines start in column 0; header lines start with ' '.
            if line[0] != ' ':
                key = string.replace(line[:string.find(line, ' ')], '_', ' ')
                if (counter % 1000) == 0:
                    print "%s..." % (key,),
                    import sys
                    sys.stdout.flush()
                counter = counter + 1
                # Raises if the binary search cannot find the key.
                self[key]
        file.close()
        print "done."
class _IndexFile:
"""An _IndexFile is an implementation class that presents a
Sequence and Dictionary interface to a sorted index file."""
def __init__(self, pos, filenameroot):
self.pos = pos
self.file = open(_indexFilePathname(filenameroot), _FILE_OPEN_MODE)
self.offsetLineCache = {} # Table of (pathname, offset) -> (line, nextOffset)
self.rewind()
self.shelfname = os.path.join(WNSEARCHDIR, pos + ".pyidx")
try:
# Tom De Smedt, 2006
# Possible error on Mac OS X.
#import shelve
#self.indexCache = shelve.open(self.shelfname, 'r')
pass
except:
pass
def rewind(self):
self.file.seek(0)
while 1:
offset = self.file.tell()
line = self.file.readline()
if (line[0] != ' '):
break
self.nextIndex = 0
self.nextOffset = offset
#
# Sequence protocol (an _IndexFile's items are its lines)
#
def __nonzero__(self):
return 1
def __len__(self):
if hasattr(self, 'indexCache'):
return len(self.indexCache)
self.rewind()
lines = 0
while 1:
line = self.file.readline()
if line == "":
break
lines = lines + 1
return lines
def __nonzero__(self):
return 1
def __getitem__(self, index):
if isinstance(index, StringType):
if hasattr(self, 'indexCache'):
return self.indexCache[index]
return binarySearchFile(self.file, index, self.offsetLineCache, 8)
elif isinstance(index, IntType):
if hasattr(self, 'indexCache'):
return self.get(self.keys[index])
if index < self.nextIndex:
self.rewind()
while self.nextIndex <= index:
self.file.seek(self.nextOffset)
line = self.file.readline()
if line == "":
raise IndexError, "index out of range"
self.nextIndex = self.nextIndex + 1
self.nextOffset = self.file.tell()
return line
else:
raise TypeError, "%s is not a String or Int" % `index`
#
# Dictionary protocol
#
# (an _IndexFile's values are its lines, keyed by the first word)
#
def get(self, key, default=None):
try:
return self[key]
except LookupError:
return default
def keys(self):
if hasattr(self, 'indexCache'):
keys = self.indexCache.keys()
keys.sort()
return keys
else:
keys = []
self.rewind()
while 1:
line = self.file.readline()
if not line: break
key = line.split(' ', 1)[0]
keys.append(key.replace('_', ' '))
return keys
def has_key(self, key):
key = key.replace(' ', '_') # test case: V['haze over']
if hasattr(self, 'indexCache'):
return self.indexCache.has_key(key)
return self.get(key) != None
#
# Index file
#
def _buildIndexCacheFile(self):
import shelve
import os
print "Building %s:" % (self.shelfname,),
tempname = self.shelfname + ".temp"
try:
indexCache = shelve.open(tempname)
self.rewind()
count = 0
while 1:
offset, line = self.file.tell(), self.file.readline()
if not line: break
key = line[:string.find(line, ' ')]
if (count % 1000) == 0:
print "%s..." % (key,),
import sys
sys.stdout.flush()
indexCache[key] = line
count = count + 1
indexCache.close()
os.rename(tempname, self.shelfname)
finally:
try: os.remove(tempname)
except: pass
print "done."
self.indexCache = shelve.open(self.shelfname, 'r')
#
# Lookup functions
#
def getWord(form, pos='noun'):
    "Return a word with the given lexical form and pos."
    # Delegates to the Dictionary for the (normalized) part of speech.
    return _dictionaryFor(pos).getWord(form)
def getSense(form, pos='noun', senseno=0):
    "Lookup a sense by its sense number. Used by repr(sense)."
    return getWord(form, pos)[senseno]
def getSynset(pos, offset):
    "Lookup a synset by its offset. Used by repr(synset)."
    return _dictionaryFor(pos).getSynset(offset)
getword, getsense, getsynset = getWord, getSense, getSynset
#
# Private utilities
#
def _requirePointerType(pointerType):
    """Validate that pointerType is one of POINTER_TYPES and return it;
    raise TypeError otherwise."""
    if pointerType not in POINTER_TYPES:
        raise TypeError(repr(pointerType) + " is not a pointer type")
    return pointerType
def _compareInstances(a, b, fields):
    """"Return -1, 0, or 1 according to a comparison first by type,
    then by class, and finally by each of fields.""" # " <- for emacs
    # Non-instances (no __class__) sort by their Python type alone.
    if not hasattr(b, '__class__'):
        return cmp(type(a), type(b))
    elif a.__class__ != b.__class__:
        return cmp(a.__class__, b.__class__)
    # First differing field decides; equal on all fields -> equal.
    for field in fields:
        diff = cmp(getattr(a, field), getattr(b, field))
        if diff:
            return diff
    return 0
def _equalsIgnoreCase(a, b):
"""Return true iff a and b have the same lowercase representation.
>>> _equalsIgnoreCase('dog', 'Dog')
1
>>> _equalsIgnoreCase('dOg', 'DOG')
1
"""
return a == b or string.lower(a) == string.lower(b)
#
# File utilities
#
def _dataFilePathname(filenameroot):
    """Return the path of the WordNet data file for *filenameroot*.
    On DOS/Windows the short '<root>.dat' layout is preferred when it
    exists; otherwise the Unix 'data.<root>' layout is used."""
    if os.name in ('dos', 'nt'):
        candidate = os.path.join(WNSEARCHDIR, filenameroot + ".dat")
        if os.path.exists(candidate):
            return candidate
    return os.path.join(WNSEARCHDIR, "data." + filenameroot)
def _indexFilePathname(filenameroot):
    """Return the path of the WordNet index file for *filenameroot*.
    On DOS/Windows the short '<root>.idx' layout is preferred when it
    exists; otherwise the Unix 'index.<root>' layout is used."""
    if os.name in ('dos', 'nt'):
        candidate = os.path.join(WNSEARCHDIR, filenameroot + ".idx")
        if os.path.exists(candidate):
            return candidate
    return os.path.join(WNSEARCHDIR, "index." + filenameroot)
def binarySearchFile(file, key, cache={}, cacheDepth=-1):
    """Return the line of *file* whose first space-terminated field
    equals *key*, using binary search over the sorted file; return
    None when the key is absent.
    NOTE(review): the mutable default cache={} is shared across calls;
    here that appears deliberate (a process-wide line cache)."""
    from stat import ST_SIZE
    # Index keys are terminated by a space in the file.
    key = key + ' '
    keylen = len(key)
    start, end = 0, os.stat(file.name)[ST_SIZE]
    currentDepth = 0
    #count = 0
    while start < end:
        #count = count + 1
        #if count > 20:
        #    raise "infinite loop"
        lastState = start, end
        middle = (start + end) / 2
        if cache.get(middle):
            offset, line = cache[middle]
        else:
            # Seek just before the midpoint, discard the partial line,
            # then read the first whole line after it.
            file.seek(max(0, middle - 1))
            if middle > 0:
                file.readline()
            offset, line = file.tell(), file.readline()
            # Cache only the shallow (frequently revisited) probes.
            if currentDepth < cacheDepth:
                cache[middle] = (offset, line)
        #print start, middle, end, offset, line,
        if offset > end:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line[:keylen] == key:# and line[keylen + 1] == ' ':
            return line
        #elif offset == end:
        #    return None
        elif line > key:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line < key:
            start = offset + len(line) - 1
        currentDepth = currentDepth + 1
        thisState = start, end
        if lastState == thisState:
            # detects the condition where we're searching past the end
            # of the file, which is otherwise difficult to detect
            return None
    return None
def _lineAt(file, offset):
    """Return the line beginning at byte *offset* in *file*."""
    file.seek(offset)
    return file.readline()
#
# Sequence Utility Functions
#
def _index(key, sequence, testfn=None, keyfn=None):
"""Return the index of key within sequence, using testfn for
comparison and transforming items of sequence by keyfn first.
>>> _index('e', 'hello')
1
>>> _index('E', 'hello', testfn=_equalsIgnoreCase)
1
>>> _index('x', 'hello')
"""
index = 0
for element in sequence:
value = element
if keyfn:
value = keyfn(value)
if (not testfn and value == key) or (testfn and testfn(value, key)):
return index
index = index + 1
return None
def _partition(sequence, size, count):
"""Partition sequence into count subsequences of size
length, and a remainder.
Return (partitions, remainder), where partitions is a sequence of
count subsequences of cardinality count, and
apply(append, partitions) + remainder == sequence."""
partitions = []
for index in range(0, size * count, size):
partitions.append(sequence[index:index + size])
return (partitions, sequence[size * count:])
#
# Cache management
#
# Some kind of cache is necessary since Sense -> Synset references are
# stored by key, and it's nice not to have to cons a new copy of a
# Synset that's been paged in each time a Sense's synset is retrieved.
# Ideally, we'd use a weak dict, but there aren't any. A strong dict
# reintroduces the problem that eliminating the Sense <-> Synset
# circularity was intended to resolve: every entity ever seen is
# preserved forever, making operations that iterate over the entire
# database prohibitive.
#
# The LRUCache approximates a weak dict in the case where temporal
# locality is good.
class _LRUCache:
    """ A cache of values such that least recently used element is
    flushed when the cache fills.

    Private fields
    --------------
    entities
        a dict from key -> (value, timestamp)
    history
        is a dict from timestamp -> key
    nextTimeStamp
        is the timestamp to use with the next value that's added.
    oldestTimeStamp
        The timestamp of the oldest element (the next one to remove),
        or slightly lower than that.

    This lets us retrieve the key given the timestamp, and the
    timestamp given the key. (Also the value given either one.)
    That's necessary so that we can reorder the history given a key,
    and also manipulate the values dict given a timestamp. #

    I haven't tried changing history to a List. An earlier
    implementation of history as a List was slower than what's here,
    but the two implementations aren't directly comparable."""
    def __init__(this, capacity):
        this.capacity = capacity
        this.clear()
    def clear(this):
        # values: key -> (value, timestamp); history: timestamp -> key.
        this.values = {}
        this.history = {}
        this.oldestTimestamp = 0
        this.nextTimestamp = 1
    def removeOldestEntry(this):
        """Evict the least recently used entry.  Timestamps whose
        history slot was deleted by a later access are skipped."""
        while this.oldestTimestamp < this.nextTimestamp:
            if this.history.get(this.oldestTimestamp):
                key = this.history[this.oldestTimestamp]
                del this.history[this.oldestTimestamp]
                del this.values[key]
                return
            this.oldestTimestamp = this.oldestTimestamp + 1
    def setCapacity(this, capacity):
        """Resize the cache, evicting as needed; capacity 0 empties it."""
        if capacity == 0:
            this.clear()
        else:
            this.capacity = capacity
            while len(this.values) > this.capacity:
                this.removeOldestEntry()
    def get(this, key, loadfn=None):
        """Return the cached value for key, computing it via loadfn on
        a miss.  Every access refreshes the entry's timestamp (its
        stale history slot is deleted and a new one recorded)."""
        value = None
        if this.values:
            pair = this.values.get(key)
            if pair:
                (value, timestamp) = pair
                del this.history[timestamp]
        if value == None:
            value = loadfn and loadfn()
        if this.values != None:
            timestamp = this.nextTimestamp
            this.nextTimestamp = this.nextTimestamp + 1
            this.values[key] = (value, timestamp)
            this.history[timestamp] = key
            if len(this.values) > this.capacity:
                this.removeOldestEntry()
        return value
class _NullCache:
"""A NullCache implements the Cache interface (the interface that
LRUCache implements), but doesn't store any values."""
def clear():
pass
def get(this, key, loadfn=None):
return loadfn and loadfn()
# Module-level entity cache: maps (pos, key-or-offset) -> Word/Synset so
# repeated lookups don't re-parse data-file lines (see the note above).
DEFAULT_CACHE_CAPACITY = 1000
_entityCache = _LRUCache(DEFAULT_CACHE_CAPACITY)
def disableCache():
    """Disable the entity cache."""
    # was: without the global declaration, the assignment created a
    # function-local binding and the module-level cache was untouched.
    global _entityCache
    _entityCache = _NullCache()
def enableCache():
    """Enable the entity cache, re-creating it at the default capacity
    if it had been disabled."""
    # was: referenced the undefined names 'LRUCache' and 'size', so any
    # call raised NameError; it also lacked the global declaration.
    global _entityCache
    if not isinstance(_entityCache, _LRUCache):
        _entityCache = _LRUCache(DEFAULT_CACHE_CAPACITY)
def clearCache():
    """Clear the entity cache."""
    _entityCache.clear()
def setCacheCapacity(capacity=DEFAULT_CACHE_CAPACITY):
    """Set the capacity of the entity cache."""
    # Re-enable first so the capacity applies to a live LRU cache.
    enableCache()
    _entityCache.setCapacity(capacity)
setCacheSize = setCacheCapacity # for compatability with version 1.0
#
# POS Dictionaries (must be initialized after file utilities)
#
# The four standard part-of-speech dictionaries; these are the module's
# public entry points (e.g. N['dog']).
N = Dictionary(NOUN, 'noun')
V = Dictionary(VERB, 'verb')
ADJ = Dictionary(ADJECTIVE, 'adj')
ADV = Dictionary(ADVERB, 'adv')
Dictionaries = (N, V, ADJ, ADV)
#
# Part-of-speech tag normalization tables (must be initialized after
# POS dictionaries)
#
_POSNormalizationTable = {}
_POStoDictionaryTable = {}
def _initializePOSTables():
    """Build the tables that map any accepted part-of-speech designator
    (abbreviation string or Dictionary instance) to its canonical name,
    and canonical names back to Dictionary instances."""
    global _POSNormalizationTable, _POStoDictionaryTable
    _POSNormalizationTable = {}
    _POStoDictionaryTable = {}
    for pos, abbreviations in (
        (NOUN, "noun n n."),
        (VERB, "verb v v."),
        (ADJECTIVE, "adjective adj adj. a s"),
        (ADVERB, "adverb adv adv. r")):
        tokens = string.split(abbreviations)
        for token in tokens:
            # Accept both the lowercase and uppercase spellings.
            _POSNormalizationTable[token] = pos
            _POSNormalizationTable[string.upper(token)] = pos
    for dict in Dictionaries:
        # A Dictionary instance is also a valid pos designator.
        _POSNormalizationTable[dict] = dict.pos
        _POStoDictionaryTable[dict.pos] = dict
_initializePOSTables()
def _normalizePOS(pos):
    """Return the canonical part-of-speech name for any accepted
    designator; raise TypeError for unknown values."""
    norm = _POSNormalizationTable.get(pos)
    if norm:
        return norm
    raise TypeError, `pos` + " is not a part of speech type"
def _dictionaryFor(pos):
    """Return the Dictionary instance for the given part of speech
    (any accepted designator); raise RuntimeError when missing."""
    pos = _normalizePOS(pos)
    dict = _POStoDictionaryTable.get(pos)
    if dict == None:
        raise RuntimeError, "The " + `pos` + " dictionary has not been created"
    return dict
def buildIndexFiles():
    """Build the shelve index cache for every standard dictionary."""
    for dict in Dictionaries:
        dict._buildIndexCacheFile()
#
# Testing
#
def _testKeys():
    """Run the key-lookup self-test on every standard dictionary."""
    #This is slow, so don't do it as part of the normal test procedure.
    for dictionary in Dictionaries:
        dictionary._testKeys()
def _test(reset=0):
    """Run the module's doctests; pass reset=1 after a reload."""
    import doctest, wordnet
    if reset:
        doctest.master = None # This keeps doctest from complaining after a reload.
    return doctest.testmod(wordnet)
|
mit
|
chafique-delli/OpenUpgrade
|
addons/crm_project_issue/project_issue.py
|
380
|
2373
|
from openerp.osv import osv, fields
class crm_lead_to_project_issue_wizard(osv.TransientModel):
    """ wizard to convert a Lead into a Project Issue and move the Mail Thread """
    _name = "crm.lead2projectissue.wizard"
    _inherit = 'crm.partner.binding'
    _columns = {
        "lead_id": fields.many2one("crm.lead", "Lead", domain=[("type", "=", "lead")]),
        "project_id": fields.many2one("project.project", "Project", domain=[("use_issues", "=", True)])
    }
    _defaults = {
        # Preselect the lead the wizard was launched from.
        "lead_id": lambda self, cr, uid, context=None: context.get('active_id')
    }
    def action_lead_to_project_issue(self, cr, uid, ids, context=None):
        """Convert the selected lead into a project issue, move its mail
        thread across, delete the lead, and open the new issue's form."""
        # get the wizards and models
        wizards = self.browse(cr, uid, ids, context=context)
        Lead = self.pool["crm.lead"]
        Issue = self.pool["project.issue"]
        for wizard in wizards:
            # get the lead to transform
            lead = wizard.lead_id
            partner = self._find_matching_partner(cr, uid, context=context)
            if not partner and (lead.partner_name or lead.contact_name):
                # No matching partner: create/assign one from the lead.
                partner_ids = Lead.handle_partner_assignation(cr, uid, [lead.id], context=context)
                partner = partner_ids[lead.id]
            # create new project.issue
            vals = {
                "name": lead.name,
                "description": lead.description,
                "email_from": lead.email_from,
                "project_id": wizard.project_id.id,
                "partner_id": partner,
                "user_id": None
            }
            # NOTE(review): create/unlink below pass context=None rather
            # than propagating the incoming context -- confirm intentional.
            issue_id = Issue.create(cr, uid, vals, context=None)
            # move the mail thread
            Lead.message_change_thread(cr, uid, lead.id, issue_id, "project.issue", context=context)
            # delete the lead
            Lead.unlink(cr, uid, [lead.id], context=None)
            # return the action to go to the form view of the new Issue
            view_id = self.pool.get('ir.ui.view').search(cr, uid, [('model', '=', 'project.issue'), ('name', '=', 'project_issue_form_view')])
            return {
                'name': 'Issue created',
                'view_type': 'form',
                'view_mode': 'form',
                'view_id': view_id,
                'res_model': 'project.issue',
                'type': 'ir.actions.act_window',
                'res_id': issue_id,
                'context': context
            }
|
agpl-3.0
|
onpon4/naev
|
utils/econsim/datreader/readers.py
|
20
|
3932
|
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=80:
import os
try:
from lxml import etree as ET
except ImportError:
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
print "Failed to import ElementTree"
class readers:
    """
    Simple master object
    """
    # Class-level defaults; both are assigned per instance in __init__.
    _verbose=None
    xmlData=None
    def __init__(self, xmlPath, xmlFile, verbose=False):
        """Parse xmlPath/xmlFile into self.xmlData (an ElementTree)."""
        self._verbose = verbose
        if self.xmlData is None:
            self.xmlData = ET.parse(os.path.join(xmlPath, xmlFile))
class ssys(readers):
    """Reader for ssys.xml: star systems, their jump links and assets."""
    def __init__(self, datPath, verbose=False):
        readers.__init__(self, datPath, 'ssys.xml', verbose)
        systems = self.xmlData.findall('ssys')
        self.jumpsList = dict()
        self.assetsList = dict()
        for system in systems:
            ssysName = system.get('name')
            jumps = [self._processJump(jump) for jump in system.find('jumps')]
            self.jumpsList.update({ssysName: jumps})
            planets = system.find('assets').getchildren()
            assets = [planet.text for planet in planets]
            self.assetsList.update({ssysName: assets})
    def _processJump(self, jumpNode):
        """Extract the interesting attributes of a <jump> node."""
        # pos = jumpNode.find('pos')
        return dict({
            'target': jumpNode.get('target'),
            # 'pos': dict({'x': pos.get('x'), 'y': pos.get('y')})
        })
    def planetsForSystem(self, systemName):
        """
        Return a list of planets for the system systemName,
        or None for virtual or unknown systems.
        """
        if systemName.startswith('Virtual') or systemName not in self.assetsList:
            return None
        # was: return self.assetsList.systemName -- attribute access on
        # a dict always raised AttributeError; index by the name instead.
        return self.assetsList[systemName]
    def jumpgatesForSystem(self, systemName):
        """
        Return a list of jump gates for the systems systemName
        Format is {'target': Planet, 'pos': {'x': 0, 'y': 0}}
        """
        return self.jumpsList[systemName]
class assets(readers):
    """Reader for asset.xml: per-planet details filtered by tagWhiteList."""
    # should be moved elsewhere or loaded externaly for convenience
    tagWhiteList = ('class','population')
    def __init__(self, datPath, verbose=False):
        readers.__init__(self, datPath, 'asset.xml', verbose)
        # we loads all the assets
        tmp = self.xmlData.findall('asset')
        self.assets = dict()
        for asset in tmp:
            self.assets.update({asset.get('name'): dict()})
            # There are not always all tags, so filter !
            for item in asset.iter():
                tag = self.tagAllowed(item.tag)
                if not tag:
                    continue
                # if there is no text, we assume it's a list
                # NOTE(review): iterchildren() is lxml-only; with the
                # stdlib ElementTree fallback this branch fails -- verify.
                if not item.text:
                    subItems = [subitem.text for subitem in item.iterchildren()]
                    self.assets[asset.get('name')].update({tag: subItems})
                else:
                    self.assets[asset.get('name')].update({tag: item.text})
    def tagAllowed(self, tag):
        """Return the tag name when whitelisted, else None."""
        if tag in self.tagWhiteList:
            return tag
        return None
    def getPlanetDetails(self, planetName):
        """
        Get details about a planet.
        The details depends on the tagWhitelist.
        Format is {tagName: data}
        """
        if planetName not in self.assets:
            return None
        return self.assets[planetName]
    def getPopulationGreaterThan(self, population):
        """Return the planets whose population exceeds *population*.
        NOTE(review): populations are stored as the raw XML text, so the
        caller must pass a comparable value."""
        myList = list()
        # was: for (planetName, details) in self.assets -- iterating a
        # dict yields keys only, so the unpacking failed; also the
        # comparison was inverted relative to the method name.
        for (planetName, details) in self.assets.items():
            if details['population'] > population:
                myList.append(planetName)
        return myList
    def getPlanetByClass(self, planetClass):
        """Return the planets whose 'class' tag equals planetClass."""
        myList = list()
        # Same dict-iteration fix as getPopulationGreaterThan.
        for (planetName, details) in self.assets.items():
            if details['class'] == planetClass:
                myList.append(planetName)
        return myList
|
gpl-3.0
|
robinandeer/chanjo
|
chanjo/cli/init.py
|
1
|
2340
|
# -*- coding: utf-8 -*-
import codecs
from distutils.spawn import find_executable
import logging
import click
from path import Path
import ruamel.yaml
from chanjo.store.api import ChanjoDB
from chanjo.init.bootstrap import pull, BED_NAME, DB_NAME
from chanjo.init.demo import setup_demo, DEMO_BED_NAME
LOG = logging.getLogger(__name__)
@click.command()
@click.option('-f', '--force', is_flag=True, help='overwrite existing files')
@click.option('-d', '--demo', is_flag=True, help='copy demo files')
@click.option('-a', '--auto', is_flag=True)
@click.argument('root_dir', default='.', required=False)
@click.pass_context
def init(context, force, demo, auto, root_dir):
    """Bootstrap a new chanjo setup."""
    is_bootstrapped = False
    root_path = Path(root_dir)
    LOG.info("setting up chanjo under: %s", root_path)
    # Fall back to an SQLite database inside root_dir when no database
    # URI was supplied via the CLI context.
    db_uri = context.obj.get('database')
    db_uri = db_uri or "sqlite:///{}".format(root_path.joinpath(DB_NAME).abspath())
    # test setup of sambamba
    sambamba_bin = find_executable('sambamba')
    if sambamba_bin is None: # pragma: no cover
        LOG.warning("'sambamba' command not found")
    else:
        LOG.debug("'sambamba' found: %s", sambamba_bin)
    if demo:
        # Demo path: copy bundled fixture files, then create the DB.
        LOG.info("copying demo files: %s", root_dir)
        setup_demo(root_dir, force=force)
        LOG.info("configure new chanjo database: %s", db_uri)
        chanjo_db = ChanjoDB(db_uri)
        chanjo_db.set_up()
        is_bootstrapped = True
    elif auto or click.confirm('Bootstrap HGNC transcript BED?'):
        # Real path: download the transcript BED, then create the DB.
        pull(root_dir, force=force)
        LOG.info("configure new chanjo database: %s", db_uri)
        chanjo_db = ChanjoDB(db_uri)
        chanjo_db.set_up()
        is_bootstrapped = True
    # setup config file
    root_path.makedirs_p()
    conf_path = root_path.joinpath('chanjo.yaml')
    with codecs.open(conf_path, 'w', encoding='utf-8') as conf_handle:
        data = {'database': db_uri}
        # Round-trip dumper keeps the YAML hand-editable later.
        data_str = ruamel.yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper)
        LOG.info("writing config file: %s", conf_path)
        conf_handle.write(data_str)
    if is_bootstrapped:
        click.echo('Chanjo bootstrap successful! Now run: ')
        bed_path = root_path.joinpath(DEMO_BED_NAME if demo else BED_NAME)
        click.echo("chanjo --config {} link {}".format(conf_path, bed_path))
|
mit
|
rlefevre1/hpp-rbprm-corba
|
script/scenarios/demos/siggraph_asia/spiderman/spiderman_backJump_interp.py
|
2
|
3619
|
#!/usr/bin/env python
# author: Mylene Campana (mcampana@laas.fr)
# Script which goes with hpp-rbprm-corba package.
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.gepetto import Viewer, PathPlayer
import numpy as np
from viewer_library import *
import spiderman_backJump_path as tp
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "spiderman"
urdfSuffix = ""
srdfSuffix = ""
V0list = tp.V0list
Vimplist = tp.Vimplist
base_joint_xyz_limits = tp.base_joint_xyz_limits
fullBody = FullBody ()
robot = fullBody.client.basic.robot
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", base_joint_xyz_limits)
#psf = ProblemSolver(fullBody); rr = Viewer (psf); gui = rr.client.gui
r = tp.r; ps = tp.ps
psf = tp.ProblemSolver( fullBody ); rr = tp.Viewer (psf); gui = rr.client.gui
pp = PathPlayer (fullBody.client.basic, rr); pp.speed = 0.6
q_0 = fullBody.getCurrentConfig(); rr(q_0)
flexion = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0, 0.2, 0, -0.0, -0.3, 0, 0.2, 0.9, 0, -0.6, 0, 0, 0, -0.2, 0.9, 0, -0.6, 0, 0, 0, -1.1, -1.8, -1, 2.2, -0.9, 0, 0.0, 1.1, -1.8, 1, 2.2, -0.9, 0, 0.0]
q_contact_takeoff = [0, 0, 0, 1, 0, 0, 0, 0, 0.0, 0, -0.0, 0.0, 0.0, 2.2, 0.1, 0.3, -1.5, 0.8, 0, 0, -2.2, 0.1, -0.3, -1.5, -0.8, 0, 0, 0.3, -1.1, 0.2, 2, -0.8, 0, 0.0, -0.3, -1.1, -0.2, 2, -0.8, 0, 0.0]
extending = [0, 0, 0, 1, 0, 0, 0, -0.0, 0.8, 0, -0.0, -0.6, 0, 1.5, 0.5, 1, 0, 0, 0, 0, -1.5, 0.5, -1, 0, 0, 0, 0, 1.4, -1.2, 1.6, 2.1, 0.4, 0, 0.0, -1.4, -1.2, -1.6, 2.1, 0.4, 0.0, 0.0]
fullBody.setPose (extending, "extending")
fullBody.setPose (flexion, "flexion")
fullBody.setPose (q_contact_takeoff, "takeoffContact")
rLegId = 'RFoot'
lLegId = 'LFoot'
rarmId = 'RHand'
larmId = 'LHand'
nbSamples = 50000; x = 0.03; y = 0.08
fullBody.addLimb(rLegId,'RThigh_rx','SpidermanRFootSphere',[0,0,0],[0,0,1], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
fullBody.addLimb(lLegId,'LThigh_rx','SpidermanLFootSphere',[0,0,0],[0,0,1], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
fullBody.addLimb(rarmId,'RHumerus_rx','SpidermanRHandSphere',[0,0,0],[0,-1,0], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
fullBody.addLimb(larmId,'LHumerus_rx','SpidermanLHandSphere',[0,0,0],[0,1,0], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
print("Limbs added to fullbody")
confsize = len(tp.q11)
### TEST OF CONTACT CREATION FOR INTERMEDIATE CONFIG, NOT USED FOR INTERPOLATION
q_tmp = q_contact_takeoff [::]; q_tmp[0:confsize-tp.ecsSize] = tp.q_cube[0:confsize-tp.ecsSize]; rr(q_tmp)
fullBody.setCurrentConfig (q_tmp)
q_tmp_test = fullBody.generateContacts(q_tmp, [0,-1,0], True); rr (q_tmp_test)
fullBody.isConfigValid(q_tmp_test)
# result:
# q_tmp_test = [-1.2, -2.8, 3.6, 0.707107, 0.0, 0.0, 0.707107, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.9324800803082636, -0.9184709614284768, 0.6886200849241174, -0.6066622060535428, 0.47649495495305294, 1.0976823065116303, -0.538404483799899, -1.0681738092891575, -1.1021076588270258, 1.1498838725595328, -0.6156809000975677, 0.5815958533218022, -1.4659758542959642, -0.3603605133380307, 0.36116581520970376, -1.048638878548546, 0.24059108997189355, 1.23953255675232, -0.7519812787252685, -0.1402404928640359, -1.0, 0.023118656707620415, -0.6298340889273957, -0.15800407650545129, 0.4963824557225752, -0.4989080182494368, 0.2774303858753873, -0.9974339561414656]
|
lgpl-3.0
|
stjokro/Project-1
|
project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
|
912
|
3325
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
# Map each identity variable onto a reference to itself so that expansion
# leaves it as-is in the .gypd output.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write each input .gyp file's fully-processed data to a .gypd file.

  Arguments follow the gyp generator interface:
    target_list: list of qualified target names; only the source .gyp file
        of each target matters here.
    target_dicts: unused by this generator.
    data: mapping of input file name -> processed gyp data to pretty-print.
    params: generator params; params['options'].suffix is inserted before
        the '.gypd' extension of each output file.
  """
  # Map output file -> input file so that each .gyp file is written at most
  # once, even when it defines several targets.
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]

    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'

    output_files.setdefault(output_file, input_file)

  for output_file, input_file in output_files.items():
    # items() instead of the Python 2-only iteritems(), and a context manager
    # so the file is closed even if pprint raises.
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
|
bsd-3-clause
|
laurent-george/weboob
|
modules/liberation/pages/article.py
|
10
|
2024
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage,\
NoBodyElement, NoAuthorElement, NoneMainDiv
class ArticlePage(GenericNewsPage):
    """Parser page for a single Libération news article."""

    def on_loaded(self):
        # Anchor the generic parser on the document root and declare the
        # CSS selectors used to locate the article's parts.
        self.main_div = self.document.getroot()
        self.element_title_selector = "title"
        self.element_author_selector = "span.author"
        self.element_body_selector = "div.article-body"

    def get_body(self):
        # Posts hosted on *.blogs.liberation.fr use different markup.
        if '.blogs.liberation.fr/' in self.url:
            self.element_body_selector = "div.entry-content"
        try:
            return self.parser.tostring(self.get_element_body())
        except NoBodyElement:
            # No body element found: fall back to the meta description tag.
            meta_tag = self.document.xpath('//meta[@name="description"]')[0]
            return meta_tag.attrib['content']

    def get_title(self):
        # Drop the site-name suffix appended to every page <title>.
        raw_title = GenericNewsPage.get_title(self)
        return raw_title.replace(u' - Libération', '')

    def get_author(self):
        try:
            author = self.get_element_author().text_content().strip()
        except (NoAuthorElement, NoneMainDiv):
            # TODO: emit a warning here instead of failing silently.
            return None
        if author.startswith('Par '):
            return author.split('Par ', 1)[1]
        return author
|
agpl-3.0
|
artlucas/django-notification-twilio
|
django-notification-twilio.py
|
1
|
2277
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.translation import ugettext
from notification import backends
# Names of the Django settings consulted by the Twilio SMS backend below.
# MOBILE_NUMBER_SETTING_KEY names the setting that in turn names the user
# attribute holding the mobile number.
MOBILE_NUMBER_SETTING_KEY = "NOTIFICATION_TWILIO_USER_MOBILE_NUMBER"
TWILIO_ACCOUNT_SETTING_KEY = "TWILIO_ACCOUNT_SID"
TWILIO_AUTH_SETTING_KEY = "TWILIO_AUTH_TOKEN"
TWILIO_FROM_SETTING_KEY = "TWILIO_SMS_FROM_NUMBER"
def get_mobile_number(user):
    """Return *user*'s mobile number as a string.

    The user attribute to read is named by the Django setting referenced by
    MOBILE_NUMBER_SETTING_KEY; "0" is returned when the user lacks that
    attribute.

    Note: the original signature carried a spurious ``self`` parameter even
    though this is a module-level function; every call site in this module
    passes a single ``user`` argument, so ``self`` has been removed.
    """
    mobile_number_key = getattr(settings, MOBILE_NUMBER_SETTING_KEY)
    mobile_number = str(getattr(user, mobile_number_key, "0"))
    return mobile_number
class TwilioSMSBackend(backends.BaseBackend):
    """django-notification backend that delivers notices as SMS via Twilio."""

    spam_sensitivity = 2

    def can_send(self, user, notice_type):
        """Return True when *user* can be sent an SMS for *notice_type*.

        Requires the base backend to allow sending, the Twilio settings to
        be configured, and the user to have a plausible (all-digit, >= 7
        characters) mobile number.
        """
        can_send = super(TwilioSMSBackend, self).can_send(user, notice_type)
        if not can_send:
            return False
        # The original multi-line condition was a SyntaxError (bare trailing
        # 'or's and a missing colon); it is parenthesized here.
        if (not hasattr(settings, MOBILE_NUMBER_SETTING_KEY) or
                not hasattr(settings, TWILIO_ACCOUNT_SETTING_KEY) or
                not hasattr(settings, TWILIO_AUTH_SETTING_KEY)):
            return False  # TODO: logging
        mobile_number = get_mobile_number(user)
        if len(mobile_number) < 7 or not mobile_number.isdigit():
            return False  # TODO: logging
        return True

    def deliver(self, recipient, sender, notice_type, extra_context):
        """Render the notice templates and send the short form as an SMS."""
        # Imported lazily: the original referenced TwilioRestClient without
        # importing it anywhere (NameError at send time); a local import also
        # keeps the module importable when twilio is not installed.
        from twilio.rest import TwilioRestClient

        # TODO: require this to be passed in extra_context
        context = self.default_context()
        context.update({
            "recipient": recipient,
            "sender": sender,
            "notice": ugettext(notice_type.display),
        })
        context.update(extra_context)
        messages = self.get_formatted_messages((
            "short.txt",
            "full.txt"
        ), notice_type.label, context)
        # Twilio only accepts a verified/owned number as the sender, so always
        # use the configured "from" number. The original branched on `sender`
        # and, when it was falsy, called get_mobile_number(sender) -- which
        # both passed an unusable value and would not be a valid Twilio
        # sender number.
        from_mobile_number = getattr(settings, TWILIO_FROM_SETTING_KEY)
        to_mobile_number = get_mobile_number(recipient)
        twilio_account = getattr(settings, TWILIO_ACCOUNT_SETTING_KEY)
        twilio_token = getattr(settings, TWILIO_AUTH_SETTING_KEY)
        client = TwilioRestClient(twilio_account, twilio_token)
        client.messages.create(to=to_mobile_number, from_=from_mobile_number, body=messages["short.txt"])
|
bsd-3-clause
|
RMKD/networkx
|
doc/source/conf.py
|
22
|
7150
|
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
from __future__ import print_function
import sys, os, re
import contextlib
@contextlib.contextmanager
def cd(newpath):
    """Temporarily make `newpath` the current working directory.

    On exit the previous working directory is restored; if it has vanished
    in the meantime, the process simply stays where it is.
    """
    previous = os.getcwd()
    os.chdir(newpath)
    try:
        yield
    finally:
        try:
            os.chdir(previous)
        except OSError:
            # The previous directory no longer exists; remain in `newpath`.
            pass
# Check Sphinx version.
import sphinx
# NOTE: comparing version *strings* mis-orders releases such as
# "1.10" < "1.3"; compare the numeric (major, minor) prefix instead.
# `re` is imported at the top of this file.
if tuple(int(n) for n in re.findall(r'\d+', sphinx.__version__)[:2]) < (1, 3):
    raise RuntimeError("Sphinx 1.3 or newer required")

# Environment variable to know if the docs are being built on rtd.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# With print_function in effect, a bare ``print`` statement is a no-op
# expression; the intent was clearly a blank line, so call print().
print()
print("Building on ReadTheDocs: {}".format(on_rtd))
print()
print("Current working directory: {}".format(os.path.abspath(os.curdir)))
print("Python: {}".format(sys.executable))

if on_rtd:
    # Build is not via Makefile (yet).
    # So we manually build the examples and gallery.
    import subprocess
    with cd('..'):
        # The Makefile is run from networkx/doc, so we need to move there
        # from networkx/doc/source (which holds conf.py).
        py = sys.executable
        subprocess.call([py, 'make_gallery.py'])
        subprocess.call([py, 'make_examples.py', '../examples', 'source'])
# If your extensions are in another directory, add it here.
# These locations are relative to conf.py
sys.path.append(os.path.abspath('../sphinxext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): both pngmath and mathjax math renderers are enabled; Sphinx
# only needs one -- confirm which is intended.
extensions = [
    'sphinx.ext.autosummary',
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.pngmath',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    #'sphinxcontrib.bibtex',
    #'IPython.sphinxext.ipython_console_highlighting',
    #'IPython.sphinxext.ipython_directive',
]
# generate autosummary pages
autosummary_generate=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates','../rst_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'NetworkX'
copyright = '2015, NetworkX Developers'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import networkx
version = networkx.__version__
# The full version, including dev info
release = networkx.__version__.replace('_','')
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = ['reference/pdf_reference']
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'friendly'
pygments_style = 'sphinx'
# A list of prefixs that are ignored when creating the module index. (new in Sphinx 0.6)
modindex_common_prefix=['networkx.']
doctest_global_setup="import networkx as nx"
# Options for HTML output
# -----------------------
# The RTD theme is only configured for local builds; Read the Docs injects
# its own theme configuration (on_rtd is set earlier in this file).
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme_options = {
# "rightsidebar": "true",
# "relbarbgcolor: "black"
#}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'networkx.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = 'index.html'
html_index = 'contents.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html','gallery':'gallery.html'}
html_additional_pages = {'gallery':'gallery.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
html_use_opensearch = 'http://networkx.github.io'
# Output file base name for HTML help builder.
htmlhelp_basename = 'NetworkX'
pngmath_use_preview = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('tutorial/index', 'networkx_tutorial.tex',
                    'NetworkX Tutorial',
                    'Aric Hagberg, Dan Schult, Pieter Swart', 'howto', 1),
                   ('reference/pdf_reference', 'networkx_reference.tex',
                    'NetworkX Reference',
                    'Aric Hagberg, Dan Schult, Pieter Swart', 'manual', 1)]
#latex_appendices = ['installing']#,'legal'],'citing','credits','history']
#latex_appendices = ['credits']
# Intersphinx mapping
intersphinx_mapping = {'http://docs.python.org/': None,
                       'http://docs.scipy.org/doc/numpy/': None,
                       }
# For trac custom roles
default_role = 'math'
trac_url = 'https://networkx.lanl.gov/trac/'
mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML'
numpydoc_show_class_members = False
|
bsd-3-clause
|
pypa/pip
|
tests/unit/test_index.py
|
4
|
28915
|
import logging
import pytest
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import (
CandidateEvaluator,
CandidatePreferences,
FormatControl,
LinkEvaluator,
PackageFinder,
_check_link_requires_python,
_extract_version_from_fragment,
_find_name_version_sep,
filter_unallowed_hashes,
)
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.network.session import PipSession
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from tests.lib import CURRENT_PY_VERSION_INFO
from tests.lib.index import make_mock_candidate
@pytest.mark.parametrize('requires_python, expected', [
    ('== 3.6.4', False),
    ('== 3.6.5', True),
    # Test an invalid Requires-Python value.
    ('invalid', True),
])
def test_check_link_requires_python(requires_python, expected):
    """Check a link's Requires-Python against a 3.6.5 interpreter."""
    version_info = (3, 6, 5)
    link = Link('https://example.com', requires_python=requires_python)
    actual = _check_link_requires_python(link, version_info)
    assert actual == expected
def check_caplog(caplog, expected_level, expected_message):
    """Assert that exactly one log record was captured, with the given
    level name and message."""
    records = caplog.records
    assert len(records) == 1
    only_record = records[0]
    assert only_record.levelname == expected_level
    assert only_record.message == expected_message
@pytest.mark.parametrize('ignore_requires_python, expected', [
    (None, (
        False, 'DEBUG',
        "Link requires a different Python (3.6.5 not in: '== 3.6.4'): "
        "https://example.com"
    )),
    (True, (
        True, 'DEBUG',
        "Ignoring failed Requires-Python check (3.6.5 not in: '== 3.6.4') "
        "for link: https://example.com"
    )),
])
def test_check_link_requires_python__incompatible_python(
    caplog, ignore_requires_python, expected,
):
    """
    Test an incompatible Python.
    """
    # Each parametrization pins both the return value and the single log line.
    expected_return, expected_level, expected_message = expected
    link = Link('https://example.com', requires_python='== 3.6.4')
    caplog.set_level(logging.DEBUG)
    actual = _check_link_requires_python(
        link, version_info=(3, 6, 5),
        ignore_requires_python=ignore_requires_python,
    )
    assert actual == expected_return
    check_caplog(caplog, expected_level, expected_message)
def test_check_link_requires_python__invalid_requires(caplog):
    """
    Test the log message for an invalid Requires-Python.
    """
    link = Link('https://example.com', requires_python='invalid')
    caplog.set_level(logging.DEBUG)
    # An unparsable Requires-Python is ignored: the link is kept (True).
    actual = _check_link_requires_python(link, version_info=(3, 6, 5))
    assert actual
    expected_message = (
        "Ignoring invalid Requires-Python ('invalid') for link: "
        "https://example.com"
    )
    check_caplog(caplog, 'DEBUG', expected_message)
class TestLinkEvaluator:
    """Tests for LinkEvaluator.evaluate_link()."""

    @pytest.mark.parametrize(
        'py_version_info,ignore_requires_python,expected', [
            ((3, 6, 5), None, (True, '1.12')),
            # Test an incompatible Python.
            ((3, 6, 4), None, (False, None)),
            # Test an incompatible Python with ignore_requires_python=True.
            ((3, 6, 4), True, (True, '1.12')),
        ],
    )
    def test_evaluate_link(
        self, py_version_info, ignore_requires_python, expected,
    ):
        """Requires-Python filtering during link evaluation."""
        target_python = TargetPython(py_version_info=py_version_info)
        evaluator = LinkEvaluator(
            project_name='twine',
            canonical_name='twine',
            formats={'source'},
            target_python=target_python,
            allow_yanked=True,
            ignore_requires_python=ignore_requires_python,
        )
        link = Link(
            'https://example.com/#egg=twine-1.12',
            requires_python='== 3.6.5',
        )
        actual = evaluator.evaluate_link(link)
        assert actual == expected

    @pytest.mark.parametrize('yanked_reason, allow_yanked, expected', [
        (None, True, (True, '1.12')),
        (None, False, (True, '1.12')),
        ('', True, (True, '1.12')),
        ('', False, (False, 'yanked for reason: <none given>')),
        ('bad metadata', True, (True, '1.12')),
        ('bad metadata', False,
         (False, 'yanked for reason: bad metadata')),
        # Test a unicode string with a non-ascii character.
        ('curly quote: \u2018', True, (True, '1.12')),
        ('curly quote: \u2018', False,
         (False, 'yanked for reason: curly quote: \u2018')),
    ])
    def test_evaluate_link__allow_yanked(
        self, yanked_reason, allow_yanked, expected,
    ):
        """Yanked-file handling during link evaluation."""
        target_python = TargetPython(py_version_info=(3, 6, 4))
        evaluator = LinkEvaluator(
            project_name='twine',
            canonical_name='twine',
            formats={'source'},
            target_python=target_python,
            allow_yanked=allow_yanked,
        )
        link = Link(
            'https://example.com/#egg=twine-1.12',
            yanked_reason=yanked_reason,
        )
        actual = evaluator.evaluate_link(link)
        assert actual == expected

    def test_evaluate_link__incompatible_wheel(self):
        """
        Test an incompatible wheel.
        """
        target_python = TargetPython(py_version_info=(3, 6, 4))
        # Set the valid tags to an empty list to make sure nothing matches.
        target_python._valid_tags = []
        evaluator = LinkEvaluator(
            project_name='sample',
            canonical_name='sample',
            formats={'binary'},
            target_python=target_python,
            allow_yanked=True,
        )
        link = Link('https://example.com/sample-1.0-py2.py3-none-any.whl')
        actual = evaluator.evaluate_link(link)
        expected = (
            False,
            "none of the wheel's tags (py2-none-any, py3-none-any) are compatible "
            "(run pip debug --verbose to show compatible tags)"
        )
        assert actual == expected
@pytest.mark.parametrize('hex_digest, expected_versions', [
    (None, ['1.0', '1.1', '1.2']),
    (64 * 'a', ['1.0', '1.1']),
    (64 * 'b', ['1.0', '1.2']),
    (64 * 'c', ['1.0', '1.1', '1.2']),
])
def test_filter_unallowed_hashes(hex_digest, expected_versions):
    """Candidates with a non-matching hash are dropped; hashless ones kept."""
    candidates = [
        make_mock_candidate('1.0'),
        make_mock_candidate('1.1', hex_digest=(64 * 'a')),
        make_mock_candidate('1.2', hex_digest=(64 * 'b')),
    ]
    hashes_data = {
        'sha256': [hex_digest],
    }
    hashes = Hashes(hashes_data)
    actual = filter_unallowed_hashes(
        candidates, hashes=hashes, project_name='my-project',
    )
    actual_versions = [str(candidate.version) for candidate in actual]
    assert actual_versions == expected_versions
    # Check that the return value is always different from the given value.
    assert actual is not candidates
def test_filter_unallowed_hashes__no_hashes(caplog):
    """With no hashes to check, all candidates survive (as a copy)."""
    caplog.set_level(logging.DEBUG)
    candidates = [
        make_mock_candidate('1.0'),
        make_mock_candidate('1.1'),
    ]
    actual = filter_unallowed_hashes(
        candidates, hashes=Hashes(), project_name='my-project',
    )
    # Check that the return value is a copy.
    assert actual == candidates
    assert actual is not candidates
    expected_message = (
        "Given no hashes to check 2 links for project 'my-project': "
        "discarding no candidates"
    )
    check_caplog(caplog, 'DEBUG', expected_message)
def test_filter_unallowed_hashes__log_message_with_match(caplog):
    """Log message when some candidates are discarded as non-matches."""
    caplog.set_level(logging.DEBUG)
    # Test 1 match, 2 non-matches, 3 no hashes so all 3 values will be
    # different.
    candidates = [
        make_mock_candidate('1.0'),
        make_mock_candidate('1.1',),
        make_mock_candidate('1.2',),
        make_mock_candidate('1.3', hex_digest=(64 * 'a')),
        make_mock_candidate('1.4', hex_digest=(64 * 'b')),
        make_mock_candidate('1.5', hex_digest=(64 * 'c')),
    ]
    hashes_data = {
        'sha256': [64 * 'a', 64 * 'd'],
    }
    hashes = Hashes(hashes_data)
    actual = filter_unallowed_hashes(
        candidates, hashes=hashes, project_name='my-project',
    )
    assert len(actual) == 4
    expected_message = (
        "Checked 6 links for project 'my-project' against 2 hashes "
        "(1 matches, 3 no digest): discarding 2 non-matches:\n"
        " https://example.com/pkg-1.4.tar.gz#sha256="
        "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n"
        " https://example.com/pkg-1.5.tar.gz#sha256="
        "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
    )
    check_caplog(caplog, 'DEBUG', expected_message)
def test_filter_unallowed_hashes__log_message_with_no_match(caplog):
    """Log message when no candidate matches: nothing is discarded."""
    caplog.set_level(logging.DEBUG)
    candidates = [
        make_mock_candidate('1.0'),
        make_mock_candidate('1.1', hex_digest=(64 * 'b')),
        make_mock_candidate('1.2', hex_digest=(64 * 'c')),
    ]
    hashes_data = {
        'sha256': [64 * 'a', 64 * 'd'],
    }
    hashes = Hashes(hashes_data)
    actual = filter_unallowed_hashes(
        candidates, hashes=hashes, project_name='my-project',
    )
    assert len(actual) == 3
    expected_message = (
        "Checked 3 links for project 'my-project' against 2 hashes "
        "(0 matches, 1 no digest): discarding no candidates"
    )
    check_caplog(caplog, 'DEBUG', expected_message)
class TestCandidateEvaluator:
    """Tests for CandidateEvaluator: creation, filtering and sorting."""

    @pytest.mark.parametrize('allow_all_prereleases, prefer_binary', [
        (False, False),
        (False, True),
        (True, False),
        (True, True),
    ])
    def test_create(self, allow_all_prereleases, prefer_binary):
        """create() forwards preferences into private attributes."""
        target_python = TargetPython()
        target_python._valid_tags = [('py36', 'none', 'any')]
        specifier = SpecifierSet()
        evaluator = CandidateEvaluator.create(
            project_name='my-project',
            target_python=target_python,
            allow_all_prereleases=allow_all_prereleases,
            prefer_binary=prefer_binary,
            specifier=specifier,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [('py36', 'none', 'any')]

    def test_create__target_python_none(self):
        """
        Test passing target_python=None.
        """
        evaluator = CandidateEvaluator.create('my-project')
        expected_tags = get_supported()
        assert evaluator._supported_tags == expected_tags

    def test_create__specifier_none(self):
        """
        Test passing specifier=None.
        """
        evaluator = CandidateEvaluator.create('my-project')
        expected_specifier = SpecifierSet()
        assert evaluator._specifier == expected_specifier

    def test_get_applicable_candidates(self):
        """Candidates outside the specifier are filtered out."""
        specifier = SpecifierSet('<= 1.11')
        versions = ['1.10', '1.11', '1.12']
        candidates = [
            make_mock_candidate(version) for version in versions
        ]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            '1.10',
            '1.11',
        ]
        assert actual == expected_applicable

    @pytest.mark.parametrize('specifier, expected_versions', [
        # Test no version constraint.
        (SpecifierSet(), ['1.0', '1.2']),
        # Test a version constraint that excludes the candidate whose
        # hash matches. Then the non-allowed hash is a candidate.
        (SpecifierSet('<= 1.1'), ['1.0', '1.1']),
    ])
    def test_get_applicable_candidates__hashes(
        self, specifier, expected_versions,
    ):
        """
        Test a non-None hashes value.
        """
        candidates = [
            make_mock_candidate('1.0'),
            make_mock_candidate('1.1', hex_digest=(64 * 'a')),
            make_mock_candidate('1.2', hex_digest=(64 * 'b')),
        ]
        hashes_data = {
            'sha256': [64 * 'b'],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
            hashes=hashes,
        )
        actual = evaluator.get_applicable_candidates(candidates)
        actual_versions = [str(c.version) for c in actual]
        assert actual_versions == expected_versions

    def test_compute_best_candidate(self):
        """compute_best_candidate() records inputs and picks the highest."""
        specifier = SpecifierSet('<= 1.11')
        versions = ['1.10', '1.11', '1.12']
        candidates = [
            make_mock_candidate(version) for version in versions
        ]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)
        assert result._candidates == candidates
        expected_applicable = candidates[:2]
        assert [str(c.version) for c in expected_applicable] == [
            '1.10',
            '1.11',
        ]
        assert result._applicable_candidates == expected_applicable
        assert result.best_candidate is expected_applicable[1]

    def test_compute_best_candidate__none_best(self):
        """
        Test returning a None best candidate.
        """
        specifier = SpecifierSet('<= 1.10')
        versions = ['1.11', '1.12']
        candidates = [
            make_mock_candidate(version) for version in versions
        ]
        evaluator = CandidateEvaluator.create(
            'my-project',
            specifier=specifier,
        )
        result = evaluator.compute_best_candidate(candidates)
        assert result._candidates == candidates
        assert result._applicable_candidates == []
        assert result.best_candidate is None

    @pytest.mark.parametrize('hex_digest, expected', [
        # Test a link with no hash.
        (None, 0),
        # Test a link with an allowed hash.
        (64 * 'a', 1),
        # Test a link with a hash that isn't allowed.
        (64 * 'b', 0),
    ])
    def test_sort_key__hash(self, hex_digest, expected):
        """
        Test the effect of the link's hash on _sort_key()'s return value.
        """
        candidate = make_mock_candidate('1.0', hex_digest=hex_digest)
        hashes_data = {
            'sha256': [64 * 'a'],
        }
        hashes = Hashes(hashes_data)
        evaluator = CandidateEvaluator.create('my-project', hashes=hashes)
        sort_value = evaluator._sort_key(candidate)
        # The hash is reflected in the first element of the tuple.
        actual = sort_value[0]
        assert actual == expected

    @pytest.mark.parametrize('yanked_reason, expected', [
        # Test a non-yanked file.
        (None, 0),
        # Test a yanked file (has a lower value than non-yanked).
        ('bad metadata', -1),
    ])
    def test_sort_key__is_yanked(self, yanked_reason, expected):
        """
        Test the effect of is_yanked on _sort_key()'s return value.
        """
        candidate = make_mock_candidate('1.0', yanked_reason=yanked_reason)
        evaluator = CandidateEvaluator.create('my-project')
        sort_value = evaluator._sort_key(candidate)
        # Yanked / non-yanked is reflected in the second element of the tuple.
        actual = sort_value[1]
        assert actual == expected

    def test_sort_best_candidate__no_candidates(self):
        """
        Test passing an empty list.
        """
        evaluator = CandidateEvaluator.create('my-project')
        actual = evaluator.sort_best_candidate([])
        assert actual is None

    def test_sort_best_candidate__best_yanked_but_not_all(
        self, caplog,
    ):
        """
        Test the best candidates being yanked, but not all.
        """
        caplog.set_level(logging.INFO)
        candidates = [
            make_mock_candidate('4.0', yanked_reason='bad metadata #4'),
            # Put the best candidate in the middle, to test sorting.
            make_mock_candidate('2.0'),
            make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
            make_mock_candidate('1.0'),
        ]
        expected_best = candidates[1]
        evaluator = CandidateEvaluator.create('my-project')
        actual = evaluator.sort_best_candidate(candidates)
        assert actual is expected_best
        assert str(actual.version) == '2.0'
        # Check the log messages.
        # (No warning is expected: the best non-yanked candidate is chosen.)
        assert len(caplog.records) == 0
class TestPackageFinder:

    @staticmethod
    def _make_link_collector():
        """
        Return a minimal LinkCollector (empty search scope, fresh session)
        suitable for constructing finders in these tests.

        Extracted because every test in this class needs the same
        boilerplate collector and previously duplicated its construction.
        """
        return LinkCollector(
            session=PipSession(),
            search_scope=SearchScope([], []),
        )

    @pytest.mark.parametrize('allow_all_prereleases, prefer_binary', [
        (False, False),
        (False, True),
        (True, False),
        (True, True),
    ])
    def test_create__candidate_prefs(
        self, allow_all_prereleases, prefer_binary,
    ):
        """
        Test that the _candidate_prefs attribute is set correctly.
        """
        selection_prefs = SelectionPreferences(
            allow_yanked=True,
            allow_all_prereleases=allow_all_prereleases,
            prefer_binary=prefer_binary,
        )
        finder = PackageFinder.create(
            link_collector=self._make_link_collector(),
            selection_prefs=selection_prefs,
        )
        candidate_prefs = finder._candidate_prefs
        assert candidate_prefs.allow_all_prereleases == allow_all_prereleases
        assert candidate_prefs.prefer_binary == prefer_binary

    def test_create__link_collector(self):
        """
        Test that the _link_collector attribute is set correctly.
        """
        link_collector = self._make_link_collector()
        finder = PackageFinder.create(
            link_collector=link_collector,
            selection_prefs=SelectionPreferences(allow_yanked=True),
        )
        # The exact collector instance must be stored, not a copy.
        assert finder._link_collector is link_collector

    def test_create__target_python(self):
        """
        Test that the _target_python attribute is set correctly.
        """
        target_python = TargetPython(py_version_info=(3, 7, 3))
        finder = PackageFinder.create(
            link_collector=self._make_link_collector(),
            selection_prefs=SelectionPreferences(allow_yanked=True),
            target_python=target_python,
        )
        actual_target_python = finder._target_python
        # The target_python attribute should be set as is.
        assert actual_target_python is target_python
        # Check that the attributes weren't reset.
        assert actual_target_python.py_version_info == (3, 7, 3)

    def test_create__target_python_none(self):
        """
        Test passing target_python=None.
        """
        finder = PackageFinder.create(
            link_collector=self._make_link_collector(),
            selection_prefs=SelectionPreferences(allow_yanked=True),
            target_python=None,
        )
        # Spot-check the default TargetPython object: it should reflect
        # the running interpreter rather than an explicit version.
        actual_target_python = finder._target_python
        assert actual_target_python._given_py_version_info is None
        assert actual_target_python.py_version_info == CURRENT_PY_VERSION_INFO

    @pytest.mark.parametrize('allow_yanked', [False, True])
    def test_create__allow_yanked(self, allow_yanked):
        """
        Test that the _allow_yanked attribute is set correctly.
        """
        selection_prefs = SelectionPreferences(allow_yanked=allow_yanked)
        finder = PackageFinder.create(
            link_collector=self._make_link_collector(),
            selection_prefs=selection_prefs,
        )
        assert finder._allow_yanked == allow_yanked

    @pytest.mark.parametrize('ignore_requires_python', [False, True])
    def test_create__ignore_requires_python(self, ignore_requires_python):
        """
        Test that the _ignore_requires_python attribute is set correctly.
        """
        selection_prefs = SelectionPreferences(
            allow_yanked=True,
            ignore_requires_python=ignore_requires_python,
        )
        finder = PackageFinder.create(
            link_collector=self._make_link_collector(),
            selection_prefs=selection_prefs,
        )
        assert finder._ignore_requires_python == ignore_requires_python

    def test_create__format_control(self):
        """
        Test that the format_control attribute is set correctly.
        """
        format_control = FormatControl(set(), {':all:'})
        selection_prefs = SelectionPreferences(
            allow_yanked=True,
            format_control=format_control,
        )
        finder = PackageFinder.create(
            link_collector=self._make_link_collector(),
            selection_prefs=selection_prefs,
        )
        actual_format_control = finder.format_control
        assert actual_format_control is format_control
        # Check that the attributes weren't reset.
        assert actual_format_control.only_binary == {':all:'}

    @pytest.mark.parametrize(
        'allow_yanked, ignore_requires_python, only_binary, expected_formats',
        [
            (False, False, {}, frozenset({'binary', 'source'})),
            # Test allow_yanked=True.
            (True, False, {}, frozenset({'binary', 'source'})),
            # Test ignore_requires_python=True.
            (False, True, {}, frozenset({'binary', 'source'})),
            # Test a non-trivial only_binary.
            (False, False, {'twine'}, frozenset({'binary'})),
        ]
    )
    def test_make_link_evaluator(
        self, allow_yanked, ignore_requires_python, only_binary,
        expected_formats,
    ):
        """
        Test that make_link_evaluator() forwards the finder's settings
        to the returned LinkEvaluator.
        """
        # Create a test TargetPython that we can check for.
        target_python = TargetPython(py_version_info=(3, 7))
        format_control = FormatControl(set(), only_binary)
        finder = PackageFinder(
            link_collector=self._make_link_collector(),
            target_python=target_python,
            allow_yanked=allow_yanked,
            format_control=format_control,
            ignore_requires_python=ignore_requires_python,
        )
        # Pass a project_name that will be different from canonical_name.
        link_evaluator = finder.make_link_evaluator('Twine')
        assert link_evaluator.project_name == 'Twine'
        assert link_evaluator._canonical_name == 'twine'
        assert link_evaluator._allow_yanked == allow_yanked
        assert link_evaluator._ignore_requires_python == ignore_requires_python
        assert link_evaluator._formats == expected_formats

        # Test the _target_python attribute.
        actual_target_python = link_evaluator._target_python
        # The target_python attribute should be set as is.
        assert actual_target_python is target_python
        # For good measure, check that the attributes weren't reset.
        assert actual_target_python._given_py_version_info == (3, 7)
        assert actual_target_python.py_version_info == (3, 7, 0)

    @pytest.mark.parametrize('allow_all_prereleases, prefer_binary', [
        (False, False),
        (False, True),
        (True, False),
        (True, True),
    ])
    def test_make_candidate_evaluator(
        self, allow_all_prereleases, prefer_binary,
    ):
        """
        Test that make_candidate_evaluator() forwards the candidate
        preferences and supported tags to the returned evaluator.
        """
        target_python = TargetPython()
        target_python._valid_tags = [('py36', 'none', 'any')]
        candidate_prefs = CandidatePreferences(
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
        )
        finder = PackageFinder(
            link_collector=self._make_link_collector(),
            target_python=target_python,
            allow_yanked=True,
            candidate_prefs=candidate_prefs,
        )
        specifier = SpecifierSet()
        # Pass hashes to check that _hashes is set.
        hashes = Hashes({'sha256': [64 * 'a']})
        evaluator = finder.make_candidate_evaluator(
            'my-project',
            specifier=specifier,
            hashes=hashes,
        )
        assert evaluator._allow_all_prereleases == allow_all_prereleases
        assert evaluator._hashes == hashes
        assert evaluator._prefer_binary == prefer_binary
        assert evaluator._project_name == 'my-project'
        assert evaluator._specifier is specifier
        assert evaluator._supported_tags == [('py36', 'none', 'any')]
@pytest.mark.parametrize(
    ("fragment", "canonical_name", "expected"),
    [
        # Trivial.
        ("pip-18.0", "pip", 3),
        ("zope-interface-4.5.0", "zope-interface", 14),
        # Canonicalized name match non-canonicalized egg info. (pypa/pip#5870)
        ("Jinja2-2.10", "jinja2", 6),
        ("zope.interface-4.5.0", "zope-interface", 14),
        ("zope_interface-4.5.0", "zope-interface", 14),
        # Should be smart enough to parse ambiguous names from the provided
        # package name.
        ("foo-2-2", "foo", 3),
        ("foo-2-2", "foo-2", 5),
        # Should be able to detect collapsed characters in the egg info.
        ("foo--bar-1.0", "foo-bar", 8),
        ("foo-_bar-1.0", "foo-bar", 8),
        # The package name must not ends with a dash (PEP 508), so the first
        # dash would be the separator, not the second.
        ("zope.interface--4.5.0", "zope-interface", 14),
        ("zope.interface--", "zope-interface", 14),
        # The version part is missing, but the split function does not care.
        ("zope.interface-", "zope-interface", 14),
    ],
)
def test_find_name_version_sep(fragment, canonical_name, expected):
    """Check the index of the name/version separator in an egg fragment."""
    assert _find_name_version_sep(fragment, canonical_name) == expected
@pytest.mark.parametrize(
    ("fragment", "canonical_name"),
    [
        # A dash must follow the package name.
        ("zope.interface4.5.0", "zope-interface"),
        ("zope.interface.4.5.0", "zope-interface"),
        ("zope.interface.-4.5.0", "zope-interface"),
        ("zope.interface", "zope-interface"),
    ],
)
def test_find_name_version_sep_failure(fragment, canonical_name):
    """A fragment with no valid separator must raise a descriptive error."""
    expected_message = f"{fragment} does not match {canonical_name}"
    with pytest.raises(ValueError) as exc_info:
        _find_name_version_sep(fragment, canonical_name)
    assert str(exc_info.value) == expected_message
@pytest.mark.parametrize(
    ("fragment", "canonical_name", "expected"),
    [
        # Trivial.
        ("pip-18.0", "pip", "18.0"),
        ("zope-interface-4.5.0", "zope-interface", "4.5.0"),
        # Canonicalized name match non-canonicalized egg info. (pypa/pip#5870)
        ("Jinja2-2.10", "jinja2", "2.10"),
        ("zope.interface-4.5.0", "zope-interface", "4.5.0"),
        ("zope_interface-4.5.0", "zope-interface", "4.5.0"),
        # Should be smart enough to parse ambiguous names from the provided
        # package name.
        ("foo-2-2", "foo", "2-2"),
        ("foo-2-2", "foo-2", "2"),
        ("zope.interface--4.5.0", "zope-interface", "-4.5.0"),
        ("zope.interface--", "zope-interface", "-"),
        # Should be able to detect collapsed characters in the egg info.
        ("foo--bar-1.0", "foo-bar", "1.0"),
        ("foo-_bar-1.0", "foo-bar", "1.0"),
        # Invalid.
        ("the-package-name-8.19", "does-not-match", None),
        ("zope.interface.-4.5.0", "zope.interface", None),
        ("zope.interface-", "zope-interface", None),
        ("zope.interface4.5.0", "zope-interface", None),
        ("zope.interface.4.5.0", "zope-interface", None),
        ("zope.interface.-4.5.0", "zope-interface", None),
        ("zope.interface", "zope-interface", None),
    ],
)
def test_extract_version_from_fragment(fragment, canonical_name, expected):
    """Extract the version from an egg fragment; None when unparsable."""
    assert _extract_version_from_fragment(fragment, canonical_name) == expected
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.